code stringlengths 2 1.05M | repo_name stringlengths 5 101 | path stringlengths 4 991 | language stringclasses 3 values | license stringclasses 5 values | size int64 2 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/perl
use strict;
use warnings;
use Image::Magick;
# Set to 1 to print the generated ImageMagick command line before running it.
my $DEBUG = 0;
my $err = '';
# Require exactly five positional arguments, otherwise print usage and quit.
if (scalar(@ARGV) != 5){
print " usage:\n $0 inputfile outputfile stepcount direction width_or_height\n";
exit;
}
# move to command line args:
my $image_src_file = $ARGV[0]; # button source filename
my $image_dst_file = $ARGV[1]; # destination filename. Folders are required to exist
my $step_count = $ARGV[2]; # granularity of the fader movement
my $direction = $ARGV[3]; # 'horizontal' or 'vertical'
my $user_size = $ARGV[4]; # desired width (for horizontal) or height (for vertical)
# Read the button image so its width/height can be queried; die on any
# Image::Magick read error (Read returns a message string on failure).
my $button = Image::Magick->new;
$err = $button->Read($image_src_file);
die "$err" if "$err";
# The output strip is assembled by shelling out to ImageMagick's `convert`
# rather than compositing in-process.
# NOTE(review): the command is built by string interpolation and run via
# backticks, so filenames containing spaces or shell metacharacters will
# break it (and are a shell-injection vector) — consider list-form system().
my $command = 'convert -size ';
my $options = '';
my $dst_width;
my $dst_height;
if ($direction eq 'horizontal'){
# Horizontal fader: one frame per step, stacked vertically; the button
# slides left-to-right across each frame.
# determine dst height
$dst_height = $step_count * $button->Get('height');
# determine last position of button:
my $last_position = $user_size - $button->Get('width');
#determine the interval between each step
my $step_interval = $last_position / $step_count;
# prepare options
my $x = 0;
my $y = 0;
for(my $i = 0; $i<$step_count; $i++)
{
# One -composite per frame, placed at the current (x, y) offset.
# NOTE(review): $x can become fractional; ImageMagick appears to accept
# non-integer geometry offsets, but confirm on the target version.
$options .= $image_src_file .' -geometry +'.$x.'+'.$y.' -composite -compose src-over ';
$x += $step_interval;
$y += $button->Get('height');
}
$dst_width = $user_size;
}
elsif($direction eq 'vertical'){
# Vertical fader: frames stacked vertically, button slides upward.
# determine dst height
$dst_height = $step_count * $user_size;
#determine the interval between each step
# NOTE(review): this formula differs in shape from the horizontal branch
# (it is not simply travel/steps) — verify the intended per-step offset.
my $step_interval = $user_size - (($user_size - $button->Get('height')) / $step_count);
print "step_interval: $step_interval\n" if $DEBUG;
# prepare options
my $x = 0;
my $y = $user_size - $button->Get('height');
for(my $i = 0; $i < $step_count; $i++)
{
print "y: $y\n" if $DEBUG;
$options .= $image_src_file .' -geometry +'.$x.'+'.$y.' -composite -compose src-over ';
$y += $step_interval;
}
$dst_width = $button->Get('width');
}
else
{
die('Illegal direction specified');
}
# Assemble: transparent canvas of the computed size, then all composites,
# then the destination file.
$command .= $dst_width.'x'.$dst_height. ' xc:transparent '.$options.' '.$image_dst_file;
print 'command: '.$command."\n" if $DEBUG;
# ok, let's roll
my $result = `$command`;
print $result ? $result."\n" : "done\n";
exit;
exit; | hhapps/makefader_perl | makefader.pl | Perl | mit | 2,231 |
#!/usr/bin/env perl
use strict;
use warnings;
use DateTime;
use LWP::Simple;
use App::Prove;
use Data::Dumper;
use Pod::Usage;
use Getopt::Long;
use File::Slurp;
use Config::Any;
use File::Basename qw(dirname);
use Cwd qw(abs_path);
use Catalyst::ScriptRunner;
use lib 'lib';
use SGN::Devel::MyDevLibs;
# Command-line flags (see POD after __END__ for descriptions).
my $verbose = 0;
my $nocleanup;
my $noserver;
my $dumpupdatedfixture;
my $noparallel = 0;
my $nopatch;
my $list_config = "";
my $logfile = "logfile.$$.txt";
my $print_environment;
# relative to `sgn/ (or parent of wherever this script is located)
my $fixture_path = 't/data/fixture/cxgn_fixture.sql';
GetOptions(
"carpalways" => \(my $carpalways = 0),
"verbose" => \$verbose,
"nocleanup" => \$nocleanup,
"dumpupdatedfixture" => \$dumpupdatedfixture,
"noserver" => \$noserver,
"noparallel" => \$noparallel,
"nopatch" => \$nopatch,
# FIX: was "fixture_path" (a boolean flag), which clobbered the default
# path with 1. The POD documents --fixture_path as taking a value, so it
# needs the "=s" string specifier.
"fixture_path=s" => \$fixture_path,
"list_config" => \$list_config,
"logfile=s" => \$logfile,
"env" => \$print_environment,
);
require Carp::Always if $carpalways;
if ($print_environment) { print STDERR "CURRENT ENV: ".Dumper(\%ENV); }
# Any remaining args are test files/dirs to hand to prove; make them
# absolute now because we chdir below.
my @prove_args = @ARGV;
if(@prove_args){
@prove_args = map {abs_path($_)} @prove_args;
}
#Change cwd to `sgn/` (or parent of wherever this script is located)
my $sgn_dir = abs_path(dirname(abs_path($0))."/../");
print STDERR "####### ".$sgn_dir." #######";
chdir($sgn_dir);
@prove_args = ( 't' ) unless @prove_args;
#my $parallel = (grep /^-j\d*$/, @ARGV) ? 1 : 0;
$ENV{SGN_CONFIG_LOCAL_SUFFIX} = 'fixture';
#my $conf_file_base = 'sgn_local.conf'; # which conf file the sgn_fixture.conf should be based on
# relative to `sgn/`
my $conf_file_base = $ENV{SGN_TEST_CONF} || 'sgn_test.conf';
my $template_file = 'sgn_fixture_template.conf';
# get some defaults from sgn_local.conf
#
my $cfg = Config::Any->load_files({files=> [$conf_file_base, $template_file], use_ext=>1 });
my $config = $cfg->[0]->{$conf_file_base};
my $template = $cfg->[1]->{$template_file};
if ($list_config) {
print STDERR Dumper($cfg);
}
my $db_user_password = $config->{dbpass};
my $dbhost = $config->{dbhost} || 'localhost';
my $dbport = $config->{dbport} || '5432';
my $db_postgres_password = $config->{DatabaseConnection}->{sgn_test}->{password};
print STDERR "Using $dbhost:$dbport\n";
my $test_dsn = $config->{DatabaseConnection}->{sgn_test}->{dsn};
my $catalyst_server_port = 3010;
# replace the keys in the sgn local file with what's in the template
#
foreach my $k (keys %{$template}) {
#print STDERR "Replacing key $k : $config->{$k} with $template->{$k}\n";
$config->{$k} = $template->{$k};
}
# Pick a database name: either the caller-supplied TEST_DB_NAME, or a
# fresh timestamped name (suffixed with the PID to avoid collisions).
my $dbname;
if ($ENV{TEST_DB_NAME}) { $dbname = $ENV{TEST_DB_NAME}; }
else {
my $now = DateTime->now();
$dbname = join "_", map { $now->$_ } (qw | year month day hour minute |);
$dbname = 'test_db_'.$dbname;
$dbname .= $$;
}
print STDERR "# Writing a .pgpass file... ";
# format = hostname:port:database:username:password
# NOTE(review): this overwrites any existing ~/.pgpass.
open(my $PGPASS, ">", "$ENV{HOME}/.pgpass") || die "Can't open .pgpass for writing.";
print $PGPASS "$dbhost:$dbport:$dbname:web_usr:$db_user_password\n";
print $PGPASS "$dbhost:$dbport:*:postgres:$db_postgres_password\n";
close($PGPASS);
system("chmod 0600 $ENV{HOME}/.pgpass");
print STDERR "Done.\n";
# load fixture only if no TEST_DB_NAME env var was provided
if (! $ENV{TEST_DB_NAME}) {
my $database_fixture_dump = $ENV{DATABASE_FIXTURE_PATH} || $fixture_path;
print STDERR "# Loading database fixture... $database_fixture_dump ... ";
system("createdb -h $config->{dbhost} -U postgres -T template0 -E UTF8 --no-password $dbname");
# will emit an error if web_usr role already exists, but that's OK
system("psql -h $config->{dbhost} -U postgres $dbname -c \"CREATE USER web_usr PASSWORD '$db_user_password'\"");
system("cat $database_fixture_dump | psql -h $config->{dbhost} -U postgres $dbname > /dev/null");
print STDERR "Done.\n";
}
# Point the test config at the freshly created database and write it out.
print STDERR "# Creating sgn_fixture.conf file... ";
$config->{dbname} = $dbname;
$test_dsn =~ s/dbname=(.*)$/dbname=$dbname/;
$config->{DatabaseConnection}->{sgn_test}->{dsn} = $test_dsn;
#print STDERR Dumper($config);
my $new_conf = hash2config($config);
open(my $NEWCONF, ">", "sgn_fixture.conf") || die "Can't open sgn_fixture.conf for writing";
print $NEWCONF $new_conf;
close($NEWCONF);
#run fixture and db patches.
if (! $nopatch) {
system("t/data/fixture/patches/run_fixture_and_db_patches.pl -u postgres -p $db_postgres_password -h $dbhost -d $dbname -e janedoe -s 145");
}
# run the materialized views creation script
#
print STDERR "Running matview refresh with -H $dbhost -D $dbname -U postgres -P $db_postgres_password -m fullview\n";
system("perl bin/refresh_matviews.pl -H $dbhost -D $dbname -U postgres -P $db_postgres_password -m fullview");
if ($dumpupdatedfixture){
print STDERR "Dumping new updated fixture with all patches run on it to t/data/fixture/cxgn_fixture.sql\n";
system("pg_dump -h $config->{dbhost} -U postgres $dbname > t/data/fixture/cxgn_fixture.sql");
}
print STDERR "Done.\n";
# start the test web server
#
my $server_pid;
if ($noserver) {
print STDERR "# [ --noserver option: not starting web server]\n";
}
else {
$server_pid = fork;
unless( $server_pid ) {
# web server process
#
#$ENV{SGN_TEST_MODE} = 1;
@ARGV = (
-p => $catalyst_server_port,
( $noparallel ? () : ('--fork') ),
);
if (!$verbose) {
print STDERR "# [Server logfile at $logfile]\n";
open (STDERR, ">$logfile") || die "can't open logfile.";
}
Catalyst::ScriptRunner->run('SGN', 'Server');
# NOTE(review): code below only runs if the server exits on its own.
if (!$nocleanup) {
print STDERR "# Removing test database ($dbname)... ";
if ($noserver) {
print STDERR "# [ --noserver option: No logfile to remove]\n";
}
else {
print STDERR "# Delete server logfile... ";
# FIX: was close($logfile) — $logfile is a filename string, not a
# handle; closing it is a strict-refs error. STDERR was redirected
# to the logfile above, so close that handle before unlinking.
close(STDERR);
unlink $logfile;
print STDERR "Done.\n";
}
}
exit;
}
print STDERR "# Starting web server (PID=$server_pid)... ";
}
# wait for the test server to start
#
{
# If the child dies during startup, fail fast instead of polling forever.
local $SIG{CHLD} = sub {
waitpid $server_pid, 0;
die "\nTest server failed to start. Aborting.\n";
};
print STDERR "Done.\n";
if (!$noserver) {
sleep 1 until !kill(0, $server_pid) || get "http://localhost:$catalyst_server_port";
}
}
my $prove_pid = fork;
unless( $prove_pid ) {
# test harness process
#
print STDERR "# Starting tests... \n";
# set up env vars for prove and the tests
#
$ENV{SGN_TEST_SERVER} ||= "http://localhost:$catalyst_server_port";
if(! $noparallel ) {
$ENV{SGN_PARALLEL_TESTING} = 1;
$ENV{SGN_SKIP_LEAK_TEST} = 1;
}
# now run the tests against it
#
my $app = App::Prove->new;
my $v = $verbose ? 'v' : '';
$app->process_args(
'-lr'.$v,
( map { -I => $_ } @INC ),
@prove_args
);
exit( $app->run ? 0 : 1 );
}
#$SIG{CHLD} = 'IGNORE'; # problematic
$SIG{INT} = sub { kill 15, $server_pid, $prove_pid };
$SIG{KILL} = sub { kill 9, $server_pid, $prove_pid };
print STDERR "# Start prove (PID $prove_pid)... \n";
waitpid $prove_pid, 0;
my $prove_pid_exit_status = $? >> 8;
print STDERR "# Prove finished, stopping web server PID $server_pid... ";
END { kill 15, $server_pid if $server_pid }
waitpid $server_pid, 0;
sleep(3);
print STDERR "Done.\n";
if (!$nocleanup) {
if ($ENV{TEST_DB_NAME}) {
# FIX: message had an unbalanced parenthesis.
print STDERR "Not removing test database (TEST_DB_NAME = $ENV{TEST_DB_NAME} is set).\n";
}
else {
print STDERR "# Removing test database ($dbname)... ";
system("dropdb -h $config->{dbhost} -U postgres --no-password $dbname");
print STDERR "Done.\n";
}
if ($noserver) {
print STDERR "# [ --noserver option: No logfile to remove]\n";
}
else {
# print STDERR "# Delete server logfile... ";
# close($logfile);
# unlink $logfile;
# print STDERR "Done.\n";
print STDERR "# Delete fixture conf file... ";
unlink "sgn_fixture.conf";
print STDERR "Done.\n";
}
}
else {
print STDERR "# --nocleanup option: not removing db or files.\n";
}
print STDERR "# Test run complete.\n\n";
exit($prove_pid_exit_status); # exit with non-zero exit status if any tests failed
# Serialize a (possibly nested) hashref into Apache/Config::General-style
# configuration text:
#   scalar value            ->  "key value\n"
#   arrayref value          ->  one "key value\n" line per element
#   hashref of hashrefs     ->  "<key name>\n ... </key>\n" per inner name
#   hashref with scalars    ->  a single "<key>\n ... </key>\n" block
# Key order follows Perl's hash iteration order, as before.
#
# FIX: the original emitted "<key>" + hash2config($hash->{$k}) once for
# EVERY scalar-valued inner key, duplicating the whole sub-block when a
# nested hash held more than one scalar. Scalar entries are now collected
# and emitted exactly once.
sub hash2config {
my $hash = shift;
my $s = "";
foreach my $k (keys(%$hash)) {
my $value = $hash->{$k};
if (ref($value) eq "ARRAY") {
foreach my $v (@$value) {
$s .= "$k $v\n";
}
}
elsif (ref($value) eq "HASH") {
# Hash-valued inner keys become named sections; scalar-valued
# inner keys are gathered into one unnamed section.
my %scalars;
foreach my $n (keys(%$value)) {
if (ref($value->{$n}) eq "HASH") {
$s .= "<$k $n>\n";
$s .= hash2config($value->{$n});
$s .= "</$k>\n";
}
else {
$scalars{$n} = $value->{$n};
}
}
if (%scalars) {
$s .= "<$k>\n";
$s .= hash2config(\%scalars);
$s .= "</$k>\n";
}
}
else {
$s .= "$k $value\n";
}
}
return $s;
}
__END__
=head1 NAME
test_fixture.pl - start a dev server and run tests against it
=head1 SYNOPSIS
t/test_fixture.pl --carpalways -- -v -j5 t/mytest.t t/mydiroftests/
=head1 OPTIONS
-v verbose - the output of the server is not re-directed to file,
but rather output to the screen.
--carpalways Load Carp::Always in both the server and the test process
to force backtraces of all warnings and errors
--nocleanup Do not clean up database and logfile
--noserver Do not start webserver (if running unit_fixture tests only)
--noparallel Do not run the server in parallel mode.
--nopatch Do not run fixture and database patches
--fixture_path specify a path to the fixture different from the default
(t/data/fixture/cxgn_fixture.sql). Note: You can also set the env
variable DATABASE_FIXTURE_PATH, which will overrule this
option.
--list_config lists the configuration information
-- -v options specified after two dashes will be passed to prove
directly; for example, -v will run prove in verbose mode.
By default, the configuration will be taken from the file sgn_test.conf. To use another configuration file, set the environment variable SGN_TEST_CONF to the name of the file you would like to use.
To use an existing database as the fixture, set the environment variable TEST_DB_NAME to the name of the database you would like to use.
=head1 AUTHORS
Robert Buels (initial script)
Lukas Mueller <lam87@cornell.edu> (fixture implementation)
=cut
| solgenomics/sgn | t/test_fixture.pl | Perl | mit | 10,638 |
# Gather freshly-built fftease externals into an fftease/ directory.
use strict;
use warnings;
use File::Copy qw(move);

# Create the destination directory if it does not exist yet.
# FIX: the original tested the bareword `-e fftease`, which Perl treats as
# a filehandle test, not a path test, so the check never worked as intended.
mkdir 'fftease' unless -e 'fftease';

# Move every macOS build product (names ending in "darwin", plus the
# support dylib) into fftease/.
for my $entry (glob '*') {
    # FIX: dots are escaped so the dylib pattern matches literally.
    next unless $entry =~ /darwin$/ || $entry =~ /libfftease\.pd_darwin\.dylib/;
    # File::Copy::move handles unusual filenames safely, unlike
    # interpolating them into a shell `mv` command.
    move($entry, 'fftease') or warn "Could not move $entry: $!";
}
| ericlyon/fftease3.0-32bit | collect.pl | Perl | mit | 142 |
#!/usr/bin/perl
use strict;
use Data::Dumper;
use warnings;
use JSON::XS qw(encode_json decode_json);
# Positional arguments: WAF management address/port, REST credentials,
# the back-end server to modify, and the status to set.
my $waf_ip = $ARGV[0];
my $waf_port = $ARGV[1] || 8000;
my $username = $ARGV[2];
my $password = $ARGV[3];
my $svr_ip = $ARGV[4];
my $svr_port = $ARGV[5];
# NOTE(review): presumably a status string the API accepts (e.g.
# "Out of Service Maintenance") — confirm against the WAF REST API docs.
my $action = $ARGV[6];
# Log in and obtain an API token.
# NOTE(review): credentials are interpolated into a shell command, so they
# are visible in the process list and unescaped quotes will break the JSON.
my $login_token = `curl http://$waf_ip:$waf_port/restapi/v3/login -X POST -H Content-Type:application/json -d '{"username":"$username","password":"$password"}'`;
my $token_str = decode_json($login_token);
# On login failure the API returns an "errors" object instead of a token.
if (!defined $token_str->{token} && defined $token_str->{errors}) {
my $error_msg = $token_str->{errors}->{msg};
my $error_type = $token_str->{errors}->{type};
print "$error_type:$error_msg.\n";
exit 0;
}
# Quote the token for shell use, then strip the surrounding double quotes.
$token_str = Data::Dumper::qquote($token_str->{token});
$token_str =~ s/"//g;
print "Login Successful.\n";
print "Login token - ", $token_str, "\n";
# Fetch the names of all configured services.
my $result = `curl -s http://$waf_ip:$waf_port/restapi/v3/services?parameters=name -u '$token_str:' -X GET `;
$result = decode_json($result);
my $data_hash = $result->{data};
my @svc_arr = ();
for (keys %$data_hash){
push(@svc_arr, $data_hash->{$_}->{name});
}
# For every service, list its servers and update any whose address:port
# matches the one given on the command line.
for (@svc_arr){
my $result = `curl -s http://$waf_ip:$waf_port/restapi/v3/services/$_/servers?parameters=name,ip-address,port -u '$token_str:' -X GET `;
$result = decode_json($result);
my $data = $result->{data};
my $svc = $_;
# print STDERR "***************** SERVER DETAILS FOR SERVICE $_ ****************\n\n";
for (keys %$data){
# print STDERR "\nSERVER NAME => $data->{$_}->{name}"."\nSERVER IP => $data->{$_}->{'ip-address'} \n"."SERVER PORT => $data->{$_}->{port}\n";
if ($data->{$_}->{'ip-address'} eq $svr_ip && $data->{$_}->{port} == $svr_port){
remove_svr_from_svc($svc, $svr_ip, $svr_port, $data->{$_}->{'name'});
}
}
}
# Set the status of one back-end server inside a WAF service by issuing a
# PUT against the REST API. Relies on the file-scoped $waf_ip, $waf_port,
# $token_str and $action variables, and prints the API's reply message.
sub remove_svr_from_svc {
    my ($svc, $svr_ip, $svr_port, $svr_name) = @_;
    print STDERR "Making changes for server $svr_name ($svr_ip, $svr_port) in service $svc\nSetting server status as $action\n";
    my $endpoint = "http://$waf_ip:$waf_port/restapi/v3/services/$svc/servers/$svr_name";
    my $raw_reply = `curl -s $endpoint -u '$token_str:' -X PUT -H Content-Type:application/json -d '{"status":"$action"}'`;
    my $reply = decode_json($raw_reply);
    print STDERR $reply->{msg}."\n\n";
}
| barracudanetworks/waf-automation | waf-api/perl/Set_Server_Maintenance_Mode/bulkedit_server.pl | Perl | mit | 2,302 |
#!/usr/bin/perl -w
#### DEBUG
my $DEBUG = 0;
#$DEBUG = 1;
=head2
APPLICATION starcluster.pl
PURPOSE
DRIVE TESTS OF StarCluster.pm, WHICH PERFORMS THE FOLLOWING TASKS:
1. MOUNT BY DEFAULT /agua, /data AND /nethome ON STARCLUSTER NODES
2. SHUT DOWN CLUSTER WHEN ALL WORKFLOWS ARE COMPLETED
3. ALLOW USERS TO RUN JOBS ON SMALL, MEDIUM OR LARGE CLUSTERS
4. RUN ONE CLUSTER FOR EACH PROJECT OR SHARE IT WITH MULTIPLE PROJECTS
USAGE
./starcluster.pl <mode> [additional_arguments - SEE Agua API]
EXAMPLE
./starcluster.pl start \
--username admin \
--cluster smallcluster \
--privatekey /nethome/admin/.keypairs/private.pem \
--publiccert /nethome/admin/.keypairs/public.pem \
--keyname admin-key
=cut
use strict;
#### USE LIBS
use FindBin qw($Bin);
use lib "$Bin/../../../lib/";
# Paths are resolved relative to this script's location inside the tree.
my $configfile = "$Bin/../../../conf/default.conf";
my $logfile = "/tmp/agua-starcluster.log";
my $log = 2;
my $printlog = 5;
use Conf::Agua;
# Load the Agua configuration (tab-separated key/value format).
my $conf = Conf::Agua->new(
inputfile => $configfile,
backup => 1,
separator => "\t",
spacer => "\\s\+",
logfile => $logfile,
log => $log,
printlog => $printlog
);
#### INTERNAL MODULES
use Agua::StarCluster;
my $starcluster = Agua::StarCluster->new(
conf => $conf
);
#### GET MODE AND ARGUMENTS
# The first CLI argument names the StarCluster method to invoke; the rest
# are consumed by that method via @ARGV.
my @arguments = @ARGV;
my $mode = shift @ARGV;
#### PRINT HELP
# NOTE(review): $mode is undef when no arguments are given (warns on eq),
# and help() is not defined in this file — confirm it is provided elsewhere.
if ( $mode eq "-h" or $mode eq "--help" ) { help(); }
#### FLUSH BUFFER
$| =1;
#### RUN QUERY
# Dispatch $mode as a method name on the StarCluster object; `no strict`
# plus eval lets an unsupported mode fail with a friendly message instead
# of aborting.
no strict;
eval { $starcluster->$mode() };
if ( $@ ){
print "Error - mode '$mode' might not be supported\nDetailed error output:\n$@\n";
}
| aguadev/aguadev | t/unit/bin/Doc/inputs/bin/cluster/starcluster.pl | Perl | mit | 1,571 |
=begin comment
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification
The version of the OpenAPI document: 1.1.2-pre.0
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
=end comment
=cut
#
# NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
# Do not edit the class manually.
# Ref: https://openapi-generator.tech
#
package WWW::OpenAPIClient::Object::NullSCM;
require 5.6.0;
use strict;
use warnings;
use utf8;
use JSON qw(decode_json);
use Data::Dumper;
use Module::Runtime qw(use_module);
use Log::Any qw($log);
use Date::Parse;
use DateTime;
use base ("Class::Accessor", "Class::Data::Inheritable");
#
#
#
# NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech). Do not edit the class manually.
# REF: https://openapi-generator.tech
#
=begin comment
Swaggy Jenkins
Jenkins API clients generated from Swagger / Open API specification
The version of the OpenAPI document: 1.1.2-pre.0
Contact: blah@cliffano.com
Generated by: https://openapi-generator.tech
=end comment
=cut
#
# NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
# Do not edit the class manually.
# Ref: https://openapi-generator.tech
#
# Class-data slots (via Class::Data::Inheritable) describing the model's
# attributes; populated at the bottom of this file by the generator.
__PACKAGE__->mk_classdata('attribute_map' => {});
__PACKAGE__->mk_classdata('openapi_types' => {});
__PACKAGE__->mk_classdata('method_documentation' => {});
__PACKAGE__->mk_classdata('class_documentation' => {});
# new plain object
sub new {
my ($class, %args) = @_;
my $self = bless {}, $class;
$self->init(%args);
return $self;
}
# initialize the object: copy constructor args into attributes, mapping
# JSON field names to accessor names via attribute_map
sub init
{
my ($self, %args) = @_;
foreach my $attribute (keys %{$self->attribute_map}) {
my $args_key = $self->attribute_map->{$attribute};
$self->$attribute( $args{ $args_key } );
}
}
# return perl hash (round-trips through the JSON encoder so that
# TO_JSON/convert_blessed drive the serialization)
sub to_hash {
my $self = shift;
my $_hash = decode_json(JSON->new->convert_blessed->encode($self));
return $_hash;
}
# used by JSON for serialization; emits only defined attributes, keyed by
# their JSON field names
sub TO_JSON {
my $self = shift;
my $_data = {};
foreach my $_key (keys %{$self->attribute_map}) {
if (defined $self->{$_key}) {
$_data->{$self->attribute_map->{$_key}} = $self->{$_key};
}
}
return $_data;
}
# from Perl hashref
sub from_hash {
my ($self, $hash) = @_;
# loop through attributes and use openapi_types to deserialize the data
while ( my ($_key, $_type) = each %{$self->openapi_types} ) {
my $_json_attribute = $self->attribute_map->{$_key};
if ($_type =~ /^array\[(.+)\]$/i) { # array
my $_subclass = $1;
my @_array = ();
foreach my $_element (@{$hash->{$_json_attribute}}) {
push @_array, $self->_deserialize($_subclass, $_element);
}
$self->{$_key} = \@_array;
} elsif ($_type =~ /^hash\[string,(.+)\]$/i) { # hash
my $_subclass = $1;
my %_hash = ();
# NOTE(review): the inner loop's $_key shadows the outer one; it works
# because the outer value was already copied to $_json_attribute.
while (my($_key, $_element) = each %{$hash->{$_json_attribute}}) {
$_hash{$_key} = $self->_deserialize($_subclass, $_element);
}
$self->{$_key} = \%_hash;
} elsif (exists $hash->{$_json_attribute}) { #hash(model), primitive, datetime
$self->{$_key} = $self->_deserialize($_type, $hash->{$_json_attribute});
} else {
$log->debugf("Warning: %s (%s) does not exist in input hash\n", $_key, $_json_attribute);
}
}
return $self;
}
# deserialize non-array data: DateTime strings, passthrough primitives, or
# recursively-built sibling model objects
sub _deserialize {
my ($self, $type, $data) = @_;
$log->debugf("deserializing %s with %s",Dumper($data), $type);
if ($type eq 'DateTime') {
return DateTime->from_epoch(epoch => str2time($data));
} elsif ( grep( /^$type$/, ('int', 'double', 'string', 'boolean'))) {
return $data;
} else { # hash(model)
my $_instance = eval "WWW::OpenAPIClient::Object::$type->new()";
return $_instance->from_hash($data);
}
}
# Generator-emitted metadata: NullSCM has a single string attribute
# "_class" whose JSON name is also "_class".
__PACKAGE__->class_documentation({description => '',
class => 'NullSCM',
required => [], # TODO
} );
__PACKAGE__->method_documentation({
'_class' => {
datatype => 'string',
base_name => '_class',
description => '',
format => '',
read_only => '',
},
});
__PACKAGE__->openapi_types( {
'_class' => 'string'
} );
__PACKAGE__->attribute_map( {
'_class' => '_class'
} );
__PACKAGE__->mk_accessors(keys %{__PACKAGE__->attribute_map});
1;
| cliffano/swaggy-jenkins | clients/perl/generated/lib/WWW/OpenAPIClient/Object/NullSCM.pm | Perl | mit | 4,647 |
#!/usr/bin/perl
# Advent of Code 2017, day 17 part 1 ("Spinlock").
use strict;
use warnings;
use feature 'say';

# The input file holds a single line: the step count.
my $fname = shift;
open my $input, "<", $fname
    or die "Can't open $fname: $!";
chomp(my $step_size = <$input>);

# Build the circular buffer: for each value 1..2017, advance the cursor
# $step_size positions around the current ring, then insert just after it.
my @ring = (0);
my $cursor = 0;
for my $value (1 .. 2017) {
    $cursor = ($cursor + $step_size) % scalar(@ring);
    splice @ring, $cursor + 1, 0, $value;
    $cursor++;
}

# Answer: the value sitting immediately after the last inserted one.
say $ring[$cursor + 1];
| bewuethr/advent_of_code | 2017/day17/day17a.pl | Perl | mit | 340 |
=pod
=head1 NAME
B<pcx2hgr> - convert PCX files to Apple II HGR format
=head1 SYNOPSIS
B<pcx2hgr> file.pcx > file.hgr
=head1 DESCRIPTION
B<pcx2hgr> converts a PCX file to a BLOADable Apple II HGR or HGR2
image, including palette conversion. Input is from the given filename,
and output is to standard output.
No scaling is done. The input file's X resolution must be either 140 or
280 pixels, and the Y resolution should be either 160 or 192 pixels for
proper display on the Apple II.
The output is prefixed with a standard Apple II 4-byte BLOAD header. To
display the image, use BASIC code like:
=head1 SEE ALSO
=over
=item dos33fsprogs(1)
=item a2tools(1)
=back
=head1 WEBSITE
http://www.deater.net/weave/vmwprod/apple/
=head1 AUTHORS
B<dos33fsprogs> written by Vince Weaver <vince _at_ deater.net>.
This manual page written by B. Watson for the SlackBuilds.org project,
but it may be used by anyone.
| panosmdma/SlackOnly-SlackBuilds | system/dos33fsprogs/man/pcx2hgr.pod | Perl | mit | 923 |
# -*- perl -*-
# !!! DO NOT EDIT !!!
# This file was automatically generated.
package Net::Amazon::Validate::ItemSearch::ca::Condition;
use 5.006;
use strict;
use warnings;

# Validator for the Condition parameter of an amazon.ca ItemSearch.
# Holds the fixed list of legal values and a default of 'Books'.
sub new {
    my ($class, %options) = @_;
    my $self = {
        '_default' => 'Books',
        %options,
    };
    # Append the generated list of legal values, preserving order.
    push @{ $self->{_options} }, $_ for qw(
        Books
        Classical
        DVD
        ForeignBooks
        Music
        Software
        SoftwareVideoGames
        VHS
        Video
        VideoGames
    );
    bless $self, $class;
}

# Return the canonical form of a non-empty user value, otherwise the default.
sub user_or_default {
    my ($self, $user) = @_;
    return $self->find_match($user) if defined $user && length($user) > 0;
    return $self->default();
}

# The fallback value used when the user supplies nothing.
sub default {
    my ($self) = @_;
    return $self->{_default};
}

# Case-insensitively map $value onto one of the legal options; dies with a
# descriptive message when no option matches.
sub find_match {
    my ($self, $value) = @_;
    foreach my $option (@{ $self->{_options} }) {
        return $option if lc($option) eq lc($value);
    }
    die "$value is not a valid value for ca::Condition!\n";
}

1;
__END__
=head1 NAME
Net::Amazon::Validate::ItemSearch::ca::Condition;
=head1 DESCRIPTION
The default value is Books, unless mode is specified.
The list of available values are:
Books
Classical
DVD
ForeignBooks
Music
Software
SoftwareVideoGames
VHS
Video
VideoGames
=cut
| carlgao/lenga | images/lenny64-peon/usr/share/perl5/Net/Amazon/Validate/ItemSearch/ca/Condition.pm | Perl | mit | 1,509 |
#=======================================================================
# ____ ____ _____ _ ____ ___ ____
# | _ \| _ \| ___| _ _ / \ | _ \_ _| |___ \
# | |_) | | | | |_ (_) (_) / _ \ | |_) | | __) |
# | __/| |_| | _| _ _ / ___ \| __/| | / __/
# |_| |____/|_| (_) (_) /_/ \_\_| |___| |_____|
#
# A Perl Module Chain to faciliate the Creation and Modification
# of High-Quality "Portable Document Format (PDF)" Files.
#
# Copyright 1999-2005 Alfred Reibenschuh <areibens@cpan.org>.
#
#=======================================================================
#
# THIS LIBRARY IS FREE SOFTWARE; YOU CAN REDISTRIBUTE IT AND/OR
# MODIFY IT UNDER THE TERMS OF THE GNU LESSER GENERAL PUBLIC
# LICENSE AS PUBLISHED BY THE FREE SOFTWARE FOUNDATION; EITHER
# VERSION 2 OF THE LICENSE, OR (AT YOUR OPTION) ANY LATER VERSION.
#
# THIS FILE IS DISTRIBUTED IN THE HOPE THAT IT WILL BE USEFUL,
# AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
# SHALL THE AUTHORS AND COPYRIGHT HOLDERS AND THEIR CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS FILE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# SEE THE GNU LESSER GENERAL PUBLIC LICENSE FOR MORE DETAILS.
#
# YOU SHOULD HAVE RECEIVED A COPY OF THE GNU LESSER GENERAL PUBLIC
# LICENSE ALONG WITH THIS LIBRARY; IF NOT, WRITE TO THE
# FREE SOFTWARE FOUNDATION, INC., 59 TEMPLE PLACE - SUITE 330,
# BOSTON, MA 02111-1307, USA.
#
# $Id: neTrueType.pm,v 1.2 2008/01/04 08:10:42 areibens Exp $
#
#=======================================================================
package PDF::API2::Resource::Font::neTrueType;
=head1 NAME
PDF::API2::Resource::Font::neTrueType - Module for using 8bit nonembedded truetype Fonts.
=head1 SYNOPSIS
#
use PDF::API2;
#
$pdf = PDF::API2->new;
$cft = $pdf->nettfont('Times-Roman.ttf', -encode => 'latin1');
#
=head1 METHODS
=over 4
=cut
BEGIN {
use utf8;
use Encode qw(:all);
use File::Basename;
use vars qw( @ISA $fonts $alias $subs $encodings $VERSION );
use PDF::API2::Resource::Font;
use PDF::API2::Util;
use PDF::API2::Basic::PDF::Utils;
@ISA=qw(PDF::API2::Resource::Font);
( $VERSION ) = sprintf '%i.%03i', split(/\./,('$Revision: 1.2 $' =~ /Revision: (\S+)\s/)[0]); # $Date: 2008/01/04 08:10:42 $
}
no warnings qw[ deprecated recursion uninitialized ];
=item $font = PDF::API2::Resource::Font::neTrueType->new $pdf, $fontfile, %options
Returns a corefont object.
=cut
=pod
Valid %options are:
I<-encode>
... changes the encoding of the font from its default.
See I<perl's Encode> for the supported values.
I<-pdfname> ... changes the reference-name of the font from its default.
The reference-name is normally generated automatically and can be
retrived via $pdfname=$font->name.
=cut
# Decode a big-endian 16.16 fixed-point number (TrueType "Fixed") into a
# signed float: the high 16 bits are the sign-extended integer part, the
# low 16 bits the fraction in 1/65536 units.
sub unpack_fixed
{
    my ($dat) = @_;
    my ($whole, $frac) = unpack("nn", $dat);
    $whole -= 65536 if $whole > 32767;    # sign-extend the integer part
    return $whole + $frac / 65536.;
}
# Decode a big-endian 2.14 fixed-point number (TrueType "F2Dot14"): the
# top two bits are the integer part (2 and 3 encode -2 and -1), the low
# fourteen bits the fraction in 1/16384 units.
sub unpack_f2dot14
{
    my ($dat) = @_;
    my $raw = unpack("n", $dat);
    my $mantissa = $raw & 0x3fff;
    my $int_part = $raw >> 14;
    $int_part -= 4 if $int_part > 1;    # map 2,3 onto -2,-1
    return $int_part + $mantissa / 16384.;
}
# Decode a big-endian signed 32-bit integer.
sub unpack_long
{
    my ($dat) = @_;
    my $val = unpack("N", $dat);
    $val -= (1 << 32) if ($val >= 1 << 31);    # sign-extend
    return $val;
}
# Decode a big-endian unsigned 32-bit integer.
sub unpack_ulong
{
    my ($dat) = @_;
    return unpack("N", $dat);
}
# Decode a big-endian signed 16-bit integer.
sub unpack_short
{
    my ($dat) = @_;
    my $val = unpack("n", $dat);
    $val -= 65536 if ($val >= 32768);    # sign-extend
    return $val;
}
# Decode a big-endian unsigned 16-bit integer.
sub unpack_ushort
{
    my ($dat) = @_;
    return unpack("n", $dat);
}
# Parse the TrueType 'name' table from $fh into $data->{name}: first the
# record directory, then each record's string. Strings are stored under
# $data->{name}{strings}[nameID][platformID][encodingID]{languageID}.
# NOTE(review): $num, $stroff and $buf are declared in the signature but
# used purely as locals; callers pass only ($data, $fh).
sub read_name_table
{
my ($data, $fh, $num, $stroff, $buf) = @_;
# read name table header: skip the format selector, take the record count
# and the offset of the string storage area
seek($fh,$data->{name}->{OFF},0);
read($fh,$buf, 6);
($num, $stroff) = unpack("x2nn", $buf);
$data->{name}->{ARR}=[];
# read the 12-byte name records: platform, encoding, language, name IDs
# plus string length and offset
for (my $i = 0; $i < $num; $i++)
{
read($fh,$buf, 12);
my ($pid, $eid, $lid, $nid, $len, $off) = unpack("n6", $buf);
push @{$data->{name}->{ARR}},[$pid, $eid, $lid, $nid, $len, $off];
}
foreach my $arr ( @{$data->{name}->{ARR}} ) {
my ($pid, $eid, $lid, $nid, $len, $off) = @{$arr};
seek($fh,$data->{name}->{OFF} + $stroff + $off, 0);
read($fh, $buf, $len);
# UTF-16BE platforms: collapse each 16-bit unit to one byte, replacing
# anything outside Latin-1 with byte 20
if ($pid == 0 || $pid == 3 || ($pid == 2 && $eid == 1))
{ $buf = pack('C*',map { $_>255 ? 20 : $_ } unpack('n*',$buf)); }
$data->{name}->{strings}[$nid][$pid][$eid]{$lid} = $buf;
}
}
# Parse the fields of the 'OS/2' table needed for PDF font descriptors
# (weight/width classes, PANOSE, descriptor flag bits, Unicode/codepage
# coverage, x-height and cap-height) into $data->{V}.
sub read_os2_table
{
my ($data, $fh, $buf) = @_;
# read OS/2 table version (several later fields only exist in version >= 1)
seek($fh,$data->{'OS/2'}->{OFF},0);
read($fh,$buf, 2);
my $os2ver=unpack_ushort($buf);
seek($fh,$data->{'OS/2'}->{OFF}+4,0);
read($fh,$buf, 4);
($data->{V}->{usWeightClass},$data->{V}->{usWidthClass})=unpack('nn',$buf);
# Read 12 bytes at offset 30: sFamilyClass (2 bytes) followed by the
# 10-byte PANOSE classification.
# NOTE(review): panose/panoseHex therefore include the two sFamilyClass
# bytes at the front — confirm downstream consumers expect that.
seek($fh,$data->{'OS/2'}->{OFF}+30,0);
read($fh,$buf, 12);
$data->{V}->{panoseHex}=unpack('H*',$buf);
$data->{V}->{panose}=$buf;
($data->{V}->{sFamilyClass}, $data->{V}->{bFamilyType}, $data->{V}->{bSerifStyle}, $data->{V}->{bWeight},
$data->{V}->{bProportion}, $data->{V}->{bContrast}, $data->{V}->{bStrokeVariation}, $data->{V}->{bArmStyle},
$data->{V}->{bLetterform}, $data->{V}->{bMidline}, $data->{V}->{bXheight}) = unpack('nC*',$buf);
# Derive PDF font-descriptor flag bits from the PANOSE classification:
# 1=FixedPitch, 2=Serif, 8=Script, 32=Nonsymbolic, 64=Italic.
$data->{V}->{flags} = 0;
$data->{V}->{flags} |= 1 if ($data->{V}->{'bProportion'} == 9);
$data->{V}->{flags} |= 2 unless ($data->{V}->{'bSerifStyle'} > 10 && $data->{V}->{'bSerifStyle'} < 14);
$data->{V}->{flags} |= 8 if ($data->{V}->{'bFamilyType'} == 2);
$data->{V}->{flags} |= 32; # if ($data->{V}->{'bFamilyType'} > 3);
$data->{V}->{flags} |= 64 if ($data->{V}->{'bLetterform'} > 8);
seek($fh,$data->{'OS/2'}->{OFF}+42,0);
read($fh,$buf, 16);
$data->{V}->{ulUnicodeRange}=[ unpack('NNNN',$buf) ];
my @ulCodePageRange=();
# Version 1+ adds codepage ranges, sxHeight and sCapHeight.
if($os2ver>0) {
seek($fh,$data->{'OS/2'}->{OFF}+78,0);
read($fh,$buf, 8);
$data->{V}->{ulCodePageRange}=[ unpack('NN',$buf) ];
read($fh,$buf, 4);
($data->{V}->{xHeight},$data->{V}->{CapHeight})=unpack('nn',$buf);
}
}
# Parse the 'head' table: unitsPerEm (offset 18), the font bounding box
# (xMin/yMin/xMax/yMax at offset 36), and indexToLocFormat (offset 50).
# Also precomputes upemf, the factor converting font units to the 1000/em
# space PDF uses, and the scaled fontbbox.
sub read_head_table
{
my ($data, $fh, $buf) = @_;
seek($fh,$data->{'head'}->{OFF}+18,0);
read($fh,$buf, 2);
$data->{V}->{upem}=unpack_ushort($buf);
# scale factor: font units -> thousandths of an em
$data->{V}->{upemf}=1000/$data->{V}->{upem};
seek($fh,$data->{'head'}->{OFF}+36,0);
read($fh,$buf, 2);
$data->{V}->{xMin}=unpack_short($buf);
read($fh,$buf, 2);
$data->{V}->{yMin}=unpack_short($buf);
read($fh,$buf, 2);
$data->{V}->{xMax}=unpack_short($buf);
read($fh,$buf, 2);
$data->{V}->{yMax}=unpack_short($buf);
# bounding box rescaled to the 1000-unit em square
$data->{V}->{fontbbox}=[
int($data->{V}->{'xMin'} * $data->{V}->{upemf}),
int($data->{V}->{'yMin'} * $data->{V}->{upemf}),
int($data->{V}->{'xMax'} * $data->{V}->{upemf}),
int($data->{V}->{'yMax'} * $data->{V}->{upemf})
];
# 0 = short (16-bit) 'loca' offsets, 1 = long (32-bit)
seek($fh,$data->{'head'}->{OFF}+50,0);
read($fh,$data->{'head'}->{indexToLocFormat}, 2);
$data->{'head'}->{indexToLocFormat}=unpack_ushort($data->{'head'}->{indexToLocFormat});
}
# Parse the 'maxp' table: only numGlyphs (offset 4) is needed; it is
# stored both in $data->{V} and under the table entry itself.
sub read_maxp_table
{
my ($data, $fh, $buf) = @_;
seek($fh,$data->{'maxp'}->{OFF}+4,0);
read($fh,$buf, 2);
$data->{V}->{numGlyphs}=unpack_ushort($buf);
$data->{maxp}->{numGlyphs}=$data->{V}->{numGlyphs};
}
# Parse the 'hhea' table: ascender/descender/lineGap/advanceWidthMax
# (starting at offset 4, in raw font units) and numberOfHMetrics
# (offset 34), which sizes the 'hmtx' table read afterwards.
sub read_hhea_table
{
my ($data, $fh, $buf) = @_;
seek($fh,$data->{'hhea'}->{OFF}+4,0);
read($fh,$buf, 2);
$data->{V}->{ascender}=unpack_short($buf);
read($fh,$buf, 2);
$data->{V}->{descender}=unpack_short($buf);
read($fh,$buf, 2);
$data->{V}->{linegap}=unpack_short($buf);
read($fh,$buf, 2);
$data->{V}->{advancewidthmax}=unpack_short($buf);
seek($fh,$data->{'hhea'}->{OFF}+34,0);
read($fh,$buf, 2);
$data->{V}->{numberOfHMetrics}=unpack_ushort($buf);
}
# Parse the 'hmtx' table: one advance width per hMetric record, scaled to
# the 1000/em space. Each record is advanceWidth (read) followed by a
# left side bearing (read into $buf and discarded). The last width is
# kept as the width for glyphs beyond numberOfHMetrics.
# Requires read_head_table (upem) and read_hhea_table (numberOfHMetrics)
# to have run first.
sub read_hmtx_table
{
my ($data, $fh, $buf) = @_;
seek($fh,$data->{'hmtx'}->{OFF},0);
$data->{hmtx}->{wx}=[];
foreach (1..$data->{V}->{numberOfHMetrics})
{
read($fh,$buf, 2);
my $wx=int(unpack_ushort($buf)*1000/$data->{V}->{upem});
push @{$data->{hmtx}->{wx}},$wx;
# skip the 2-byte left side bearing
read($fh,$buf, 2);
}
$data->{V}->{missingwidth}=$data->{hmtx}->{wx}->[-1];
}
# Parse the 'cmap' table: enumerate the encoding subtables, decode the
# character-to-glyph maps for formats 0/4/6/10/8/12, prefer a Microsoft
# (platform 3) Unicode table, and build the inverse glyph-id -> codepoint
# lookup in $data->{V}->{uni}.
sub read_cmap_table
{
my ($data, $fh, $buf) = @_;
my $cmap=$data->{cmap};
seek($fh,$cmap->{OFF},0);
read($fh,$buf,4);
# Skip the 2-byte version, read numTables.
$cmap->{Num} = unpack("x2n", $buf);
$cmap->{Tables} = [];
# NOTE(review): 0..Num iterates Num+1 entries although numTables is the
# exact subtable count -- looks like an off-by-one that reads one bogus
# subtable record; confirm against real fonts before changing.
foreach my $i (0..$cmap->{Num})
{
my $s = {};
read($fh,$buf,8);
($s->{Platform}, $s->{Encoding}, $s->{LOC}) = (unpack("nnN", $buf));
# Subtable offsets are stored relative to the start of 'cmap'.
$s->{LOC} += $cmap->{OFF};
push(@{$cmap->{Tables}}, $s);
}
foreach my $i (0..$cmap->{Num})
{
my $s = $cmap->{Tables}[$i];
seek($fh,$s->{LOC}, 0);
read($fh,$buf, 2);
$s->{Format} = unpack("n", $buf);
# Format 0: trivial 256-entry byte-encoding map.
if ($s->{Format} == 0)
{
my $len;
$fh->read($buf, 4);
($len, $s->{Ver}) = unpack('n2', $buf);
$s->{val}={};
foreach my $j (0..255)
{
read($fh,$buf, 1);
$s->{val}->{$j}=unpack('C',$buf);
}
}
elsif ($s->{Format} == 2)
{
# cjk euc ?  (high-byte mapping -- intentionally not implemented)
}
# Format 4: segment mapping to delta values, the usual Windows BMP format.
elsif ($s->{Format} == 4)
{
my ($len,$count);
$fh->read($buf, 12);
($len, $s->{Ver},$count) = unpack('n3', $buf);
# segCountX2 -> segment count.
$count >>= 1;
$s->{val}={};
# Slurp the rest of the subtable and index into it with substr below.
read($fh, $buf, $len - 14);
foreach my $j (0..$count-1)
{
my $end = unpack("n", substr($buf, $j << 1, 2));
my $start = unpack("n", substr($buf, ($j << 1) + ($count << 1) + 2, 2));
my $delta = unpack("n", substr($buf, ($j << 1) + ($count << 2) + 2, 2));
$delta -= 65536 if $delta > 32767;
my $range = unpack("n", substr($buf, ($j << 1) + $count * 6 + 2, 2));
foreach my $k ($start..$end)
{
my $id=undef;
if ($range == 0 || $range == 65535) # support the buggy FOG with its range=65535 for final segment
{
$id = $k + $delta;
}
else
{
# idRangeOffset addressing: the offset is relative to the position
# of the idRangeOffset word itself inside the subtable.
$id = unpack("n",
substr($buf, ($j << 1) + $count * 6 +
2 + ($k - $start) * 2 + $range, 2)) + $delta;
}
$id -= 65536 if($id >= 65536);
# Glyph 0 (.notdef) entries are not recorded.
$s->{val}->{$k} = $id if($id);
}
}
}
# Format 6: dense range [start, start+count).
elsif ($s->{Format} == 6)
{
my ($len,$start,$count);
$fh->read($buf, 8);
($len, $s->{Ver},$start,$count) = unpack('n4', $buf);
$s->{val}={};
foreach my $j (0..$count-1)
{
read($fh,$buf, 2);
$s->{val}->{$start+$j}=unpack('n',$buf);
}
}
# Format 10: like format 6 but with 32-bit fields.
elsif ($s->{Format} == 10)
{
my ($len,$start,$count);
$fh->read($buf, 18);
($len, $s->{Ver},$start,$count) = unpack('x2N4', $buf);
$s->{val}={};
foreach my $j (0..$count-1)
{
read($fh,$buf, 2);
$s->{val}->{$start+$j}=unpack('n',$buf);
}
}
# Formats 8 and 12: sequential 32-bit group mappings; format 8 carries an
# extra 8 KiB is32 bitmap before the group count.
elsif ($s->{Format} == 8 || $s->{Format} == 12)
{
my ($len,$count);
$fh->read($buf, 10);
($len, $s->{Ver}) = unpack('x2N2', $buf);
$s->{val}={};
if($s->{Format} == 8)
{
read($fh, $buf, 8192);
read($fh, $buf, 4);
}
else
{
read($fh, $buf, 4);
}
$count = unpack('N', $buf);
foreach my $j (0..$count-1)
{
read($fh,$buf, 12);
my ($start,$end,$cid)=unpack('N3',$buf);
foreach my $k ($start..$end)
{
$s->{val}->{$k}=$cid+$k-$start;
}
}
}
}
# Prefer a Microsoft (platform 3) table, stopping at Unicode (1) or
# symbol (0) encodings; fall back to Apple Unicode (0) or 2/1 if present.
my $alt;
foreach my $s (@{$cmap->{Tables}})
{
if($s->{Platform} == 3)
{
$cmap->{mstable} = $s;
last if(($s->{Encoding} == 1) || ($s->{Encoding} == 0));
}
elsif($s->{Platform} == 0 || ($s->{Platform} == 2 && $s->{Encoding} == 1))
{
$alt = $s;
}
}
$cmap->{mstable}||=$alt if($alt);
# Invert the chosen map: uni[glyph_id] = codepoint, 0 where unmapped.
$data->{V}->{uni}=[];
foreach my $i (keys %{$cmap->{mstable}->{val}})
{
$data->{V}->{uni}->[$cmap->{mstable}->{val}->{$i}]=$i;
}
foreach my $i (0..$data->{V}->{numGlyphs})
{
$data->{V}->{uni}->[$i]||=0;
}
}
# Parse the 'post' table: global italic-angle / underline / fixed-pitch
# values, then the per-glyph PostScript names for formats 1.0, 2.0, 2.5
# and 3.0.  Builds $data->{V}->{name} (glyph id -> name) and n2i (inverse).
sub read_post_table
{
my ($data, $fh, $buf) = @_;
my $post=$data->{post};
seek($fh,$post->{OFF},0);
# The 258 standard Macintosh glyph names; format 2.0 appends its custom
# names to this list and indexes into it.
my @base_set=qw[
.notdef .null nonmarkingreturn space exclam quotedbl numbersign dollar
percent ampersand quotesingle parenleft parenright asterisk plus comma
hyphen period slash zero one two three four five six seven eight nine
colon semicolon less equal greater question at A B C D E F G H I J K L
M N O P Q R S T U V W X Y Z bracketleft backslash bracketright
asciicircum underscore grave a b c d e f g h i j k l m n o p q r s t u
v w x y z braceleft bar braceright asciitilde Adieresis Aring Ccedilla
Eacute Ntilde Odieresis Udieresis aacute agrave acircumflex adieresis
atilde aring ccedilla eacute egrave ecircumflex edieresis iacute
igrave icircumflex idieresis ntilde oacute ograve ocircumflex
odieresis otilde uacute ugrave ucircumflex udieresis dagger degree
cent sterling section bullet paragraph germandbls registered copyright
trademark acute dieresis notequal AE Oslash infinity plusminus
lessequal greaterequal yen mu partialdiff summation product pi
integral ordfeminine ordmasculine Omega ae oslash questiondown
exclamdown logicalnot radical florin approxequal Delta guillemotleft
guillemotright ellipsis nonbreakingspace Agrave Atilde Otilde OE oe
endash emdash quotedblleft quotedblright quoteleft quoteright divide
lozenge ydieresis Ydieresis fraction currency guilsinglleft
guilsinglright fi fl daggerdbl periodcentered quotesinglbase
quotedblbase perthousand Acircumflex Ecircumflex Aacute Edieresis
Egrave Iacute Icircumflex Idieresis Igrave Oacute Ocircumflex apple
Ograve Uacute Ucircumflex Ugrave dotlessi circumflex tilde macron breve
dotaccent ring cedilla hungarumlaut ogonek caron Lslash lslash Scaron
scaron Zcaron zcaron brokenbar Eth eth Yacute yacute Thorn thorn minus
multiply onesuperior twosuperior threesuperior onehalf onequarter
threequarters franc Gbreve gbreve Idotaccent Scedilla scedilla Cacute
cacute Ccaron ccaron dcroat
];
read($fh,$buf, 4);
$post->{Format}=unpack('N',$buf);
read($fh,$buf,4);
$data->{V}->{italicangle}=unpack_fixed($buf);
# NOTE(review): underlinePosition/underlineThickness are decoded with
# unpack_f2dot14 and scaled by 1000, although 'post' stores them as plain
# FWords -- confirm the intended scaling before relying on these values.
read($fh,$buf,2);
$data->{V}->{underlineposition}=unpack_f2dot14($buf)*1000;
read($fh,$buf,2);
$data->{V}->{underlinethickness}=unpack_f2dot14($buf)*1000;
read($fh,$buf,4);
$data->{V}->{isfixedpitch}=unpack_ulong($buf);
# Skip the remaining 16 header bytes (memory-usage hints) before the
# format-specific payload.
read($fh,$buf,16);
if($post->{Format} == 0x00010000)
{
# Format 1.0: exactly the 258 standard names, in order.
$post->{Format}='10';
$post->{val}=[ @base_set ];
$post->{strings}={};
foreach my $i (0..257)
{
$post->{strings}->{$post->{val}->[$i]}=$i;
}
}
elsif($post->{Format} == 0x00020000)
{
# Format 2.0: per-glyph indices into the standard list, followed by
# length-prefixed custom names which are appended at index 258+.
$post->{Format}='20';
$post->{val}=[];
$post->{strings}={};
read($fh,$buf,2);
$post->{numGlyphs}=unpack_ushort($buf);
foreach my $i (0..$post->{numGlyphs}-1)
{
read($fh,$buf,2);
$post->{val}->[$i]=unpack_ushort($buf);
}
# Pascal-string pool: one length byte, then that many name bytes.
while(tell($fh) < $post->{OFF}+$post->{LEN})
{
read($fh,$buf,1);
my $strlen=unpack('C',$buf);
read($fh,$buf,$strlen);
push(@base_set,$buf);
}
# Resolve indices to names; first glyph with a given name wins in the
# inverse map (||=).
foreach my $i (0..$post->{numGlyphs}-1)
{
$post->{val}->[$i]=$base_set[$post->{val}->[$i]];
$post->{strings}->{$post->{val}->[$i]}||=$i;
}
}
elsif($post->{Format} == 0x00025000)
{
# Format 2.5 (deprecated): signed per-glyph offsets into the standard set.
$post->{Format}='25';
$post->{val}=[];
$post->{strings}={};
read($fh,$buf,2);
my $num=unpack_ushort($buf);
# NOTE(review): 0..$num iterates $num+1 entries for a $num-glyph table --
# possible off-by-one; confirm against a real 2.5 font before changing.
foreach my $i (0..$num)
{
read($fh,$buf,1);
$post->{val}->[$i]=$base_set[$i+unpack('c',$buf)];
$post->{strings}->{$post->{val}->[$i]}||=$i;
}
}
elsif($post->{Format} == 0x00030000)
{
# Format 3.0: no glyph names stored at all.
$post->{Format}='30';
$post->{val}=[];
$post->{strings}={};
}
# Fill $data->{V}->{name} for every glyph, falling back to a name derived
# from the unicode map, then '.notdef'.
$data->{V}->{name}=[];
foreach my $i (0..$data->{V}->{numGlyphs})
{
$data->{V}->{name}->[$i] = $post->{val}->[$i]
|| nameByUni($data->{V}->{uni}->[$i])
|| '.notdef';
}
# Inverse map name -> glyph id; first occurrence wins.
$data->{V}->{n2i}={};
foreach my $i (0..$data->{V}->{numGlyphs})
{
$data->{V}->{n2i}->{$data->{V}->{name}->[$i]}||=$i;
}
}
# Read the glyph-offset index ('loca'): numGlyphs+1 entries (the extra one
# is the end sentinel).  indexToLocFormat from 'head' selects 32-bit raw
# offsets (1) or 16-bit halved offsets (0), which must be doubled.
sub read_loca_table
{
my ($data, $fh, $buf) = @_;
my $long_format = $data->{'head'}->{indexToLocFormat};
my $entry_size  = $long_format ? 4 : 2;
my $entry_pack  = $long_format ? 'N' : 'n';
my $shift       = $long_format ? 0 : 1;    # short entries store offset/2
seek($fh, $data->{'loca'}->{OFF}, 0);
my @offsets;
for my $entry (0 .. $data->{'maxp'}->{numGlyphs}) {
read($fh, $buf, $entry_size);
push @offsets, unpack($entry_pack, $buf) << $shift;
}
$data->{'loca'}->{gOFF} = \@offsets;
}
# Read the per-glyph header (contour count and bounding box) from 'glyf'.
# A glyph whose 'loca' span has zero length has no outline and keeps an
# empty record.
sub read_glyf_table
{
my ($data, $fh, $buf) = @_;
my $offsets = $data->{'loca'}->{gOFF};
my @glyphs;
for my $gid (0 .. $data->{'maxp'}->{numGlyphs} - 1)
{
my $glyph = {};
push @glyphs, $glyph;
next if $offsets->[$gid] - $offsets->[$gid + 1] == 0;
seek($fh, $offsets->[$gid] + $data->{'glyf'}->{OFF}, 0);
foreach my $field (qw(numOfContours xMin yMin xMax yMax))
{
read($fh, $buf, 2);
$glyph->{$field} = unpack_short($buf);
}
}
$data->{'glyf'}->{glyphs} = \@glyphs;
}
# Look up string $nid (e.g. 1=family, 2=style, 4=full name) in a parsed
# 'name' table structure ($self->{strings}[nid][platform][encoding]{lang}).
# Tries platform/encoding/language combinations in order of preference:
# MS English, any-language MS, Mac variants, then Unicode/Apple.  A
# language id of -1 means "first non-empty language found".
# Returns the string, or ($string, $pid, $eid, $lid) in list context,
# or '' when nothing matched.
sub find_name
{
my ($self, $nid) = @_;
my ($res, $pid, $eid, $lid, $look, $k);
my (@lookup) = ([3, 1, 1033], [3, 1, -1], [2, 1, -1], [2, 2, -1], [2, 0, -1],
[0, 0, 0], [1, 0, 0]);
foreach $look (@lookup)
{
($pid, $eid, $lid) = @$look;
if ($lid == -1)
{
foreach $k (keys %{$self->{'strings'}->[$nid]->[$pid]->[$eid]})
{
# NOTE(review): $res may be undef here, so "ne ''" can warn under
# 'use warnings'; behavior is otherwise "treat undef as empty".
if (($res = $self->{strings}->[$nid]->[$pid]->[$eid]->{$k}) ne '')
{
$lid = $k;
last;
}
}
} else
{ $res = $self->{strings}->[$nid]->[$pid]->[$eid]->{$lid} }
if ($res ne '')
{ return wantarray ? ($res, $pid, $eid, $lid) : $res; }
}
return '';
}
# Read a CFF INDEX structure starting at file offset $off.  An INDEX is:
# a 2-byte count, a 1-byte offSize, count+1 offsets of offSize bytes each
# (1-based, relative to the byte just before the data region), then the
# concatenated data.  Returns an arrayref of { OFF, LEN } entries giving
# each element's absolute file position and length.
sub readcffindex
{
my ($fh, $off, $buf) = @_;
seek($fh, $off, 0);
read($fh, $buf, 3);
my ($count, $offsize) = unpack('nC', $buf);
my @offsets;
for (0 .. $count) {
read($fh, $buf, $offsize);
# Left-pad to 4 bytes so any offSize (1..4) unpacks as a big-endian N.
push @offsets, unpack('N', substr("\x00\x00\x00$buf", -4, 4));
}
my $data_base = tell($fh) - 1;    # offsets are 1-based
my @entries = map {
{ 'OFF' => $data_base + $offsets[$_], 'LEN' => $offsets[$_ + 1] - $offsets[$_] }
} 0 .. $count - 1;
return \@entries;
}
# Parse a CFF DICT ($len bytes at file offset $off) into a hashref keyed
# by operator name.  $foff is the file offset of the CFF table; it is
# added to operands that are table-relative offsets (CharSet, Encoding,
# CharStrings, Private, FDArray, FDSelect).  Operands are accumulated on
# @st and consumed from its tail by each operator byte.
sub readcffdict
{
    my ($fh, $off, $len, $foff, $buf) = @_;
    my $dict = {};
    my @st   = ();
    seek($fh, $off, 0);
    while (tell($fh) < ($off + $len))
    {
        read($fh, $buf, 1);
        my $b0 = unpack('C', $buf);
        my $v  = '';
        if ($b0 == 12)    # escape byte: two-byte operators
        {
            read($fh, $buf, 1);
            my $b1 = unpack('C', $buf);
            if    ($b1 == 0)  { $dict->{Copyright}          = { 'SID' => splice(@st, -1) }; }
            elsif ($b1 == 1)  { $dict->{isFixedPitch}       = splice(@st, -1); }
            elsif ($b1 == 2)  { $dict->{ItalicAngle}        = splice(@st, -1); }
            elsif ($b1 == 3)  { $dict->{UnderlinePosition}  = splice(@st, -1); }
            elsif ($b1 == 4)  { $dict->{UnderlineThickness} = splice(@st, -1); }
            elsif ($b1 == 5)  { $dict->{PaintType}          = splice(@st, -1); }
            elsif ($b1 == 6)  { $dict->{CharstringType}     = splice(@st, -1); }
            elsif ($b1 == 7)  { $dict->{FontMatrix}         = [ splice(@st, -4) ]; }
            elsif ($b1 == 8)  { $dict->{StrokeWidth}        = splice(@st, -1); }
            elsif ($b1 == 20) { $dict->{SyntheticBase}      = splice(@st, -1); }
            elsif ($b1 == 21) { $dict->{PostScript}         = { 'SID' => splice(@st, -1) }; }
            elsif ($b1 == 22) { $dict->{BaseFontName}       = { 'SID' => splice(@st, -1) }; }
            elsif ($b1 == 23) { $dict->{BaseFontBlend}      = [ splice(@st, 0) ]; }
            elsif ($b1 == 24) { $dict->{MultipleMaster}     = [ splice(@st, 0) ]; }
            elsif ($b1 == 25) { $dict->{BlendAxisTypes}     = [ splice(@st, 0) ]; }
            elsif ($b1 == 30) { $dict->{ROS}                = [ splice(@st, -3) ]; }
            elsif ($b1 == 31) { $dict->{CIDFontVersion}     = splice(@st, -1); }
            elsif ($b1 == 32) { $dict->{CIDFontRevision}    = splice(@st, -1); }
            elsif ($b1 == 33) { $dict->{CIDFontType}        = splice(@st, -1); }
            elsif ($b1 == 34) { $dict->{CIDCount}           = splice(@st, -1); }
            elsif ($b1 == 35) { $dict->{UIDBase}            = splice(@st, -1); }
            elsif ($b1 == 36) { $dict->{FDArray}            = { 'OFF' => $foff + splice(@st, -1) }; }
            elsif ($b1 == 37) { $dict->{FDSelect}           = { 'OFF' => $foff + splice(@st, -1) }; }
            elsif ($b1 == 38) { $dict->{FontName}           = { 'SID' => splice(@st, -1) }; }
            elsif ($b1 == 39) { $dict->{Chameleon}          = splice(@st, -1); }
            next;
        }
        elsif ($b0 < 28)    # one-byte operators
        {
            if    ($b0 == 0)  { $dict->{Version}     = { 'SID' => splice(@st, -1) }; }
            elsif ($b0 == 1)  { $dict->{Notice}      = { 'SID' => splice(@st, -1) }; }
            elsif ($b0 == 2)  { $dict->{FullName}    = { 'SID' => splice(@st, -1) }; }
            elsif ($b0 == 3)  { $dict->{FamilyName}  = { 'SID' => splice(@st, -1) }; }
            elsif ($b0 == 4)  { $dict->{Weight}      = { 'SID' => splice(@st, -1) }; }
            elsif ($b0 == 5)  { $dict->{FontBBX}     = [ splice(@st, -4) ]; }
            elsif ($b0 == 13) { $dict->{UniqueID}    = splice(@st, -1); }
            elsif ($b0 == 14) { $dict->{XUID}        = [ splice(@st, 0) ]; }
            elsif ($b0 == 15) { $dict->{CharSet}     = { 'OFF' => $foff + splice(@st, -1) }; }
            elsif ($b0 == 16) { $dict->{Encoding}    = { 'OFF' => $foff + splice(@st, -1) }; }
            elsif ($b0 == 17) { $dict->{CharStrings} = { 'OFF' => $foff + splice(@st, -1) }; }
            elsif ($b0 == 18) { $dict->{Private}     = { 'LEN' => splice(@st, -1), 'OFF' => $foff + splice(@st, -1) }; }
            next;
        }
        elsif ($b0 == 28)    # operand: 16-bit signed integer
        {
            read($fh, $buf, 2);
            $v = unpack('n', $buf);
            $v = -(0x10000 - $v) if ($v > 0x7fff);
        }
        elsif ($b0 == 29)    # operand: 32-bit signed integer
        {
            read($fh, $buf, 4);
            $v = unpack('N', $buf);
            # BUGFIX: was "$v=-$v+0xffffffff+1", which evaluates to 2**32-$v
            # (a positive number); negative int32 values must map to $v-2**32.
            $v = -(0xffffffff - $v + 1) if ($v > 0x7fffffff);
        }
        elsif ($b0 == 30)    # operand: real number, packed-BCD nibbles
        {
            my $more = 1;    # BUGFIX: was an undeclared global ($e)
            while ($more)
            {
                read($fh, $buf, 1);
                my $byte = unpack('C', $buf);    # BUGFIX: was undeclared ($v0)
                # BUGFIX: the high nibble is ($byte >> 4); the original used
                # ($byte >> 8), which is always zero for a single byte, so
                # every other digit of each real number was lost.
                foreach my $nibble ($byte >> 4, $byte & 0x0f)
                {
                    if    ($nibble < 10)  { $v .= $nibble; }
                    elsif ($nibble == 10) { $v .= '.'; }
                    elsif ($nibble == 11) { $v .= 'E+'; }
                    elsif ($nibble == 12) { $v .= 'E-'; }
                    elsif ($nibble == 14) { $v .= '-'; }
                    elsif ($nibble == 15) { $more = 0; last; }
                }
            }
        }
        elsif ($b0 == 31)    # reserved operator byte: skip, push nothing
        {
            next;
        }
        elsif ($b0 < 247)    # operand: one-byte integer, range -107..107
        {
            $v = $b0 - 139;
        }
        elsif ($b0 < 251)    # operand: two-byte positive integer
        {
            read($fh, $buf, 1);
            $v = ($b0 - 247) * 256 + (unpack('C', $buf) + 108);
        }
        elsif ($b0 < 255)    # operand: two-byte negative integer
        {
            read($fh, $buf, 1);
            $v = -($b0 - 251) * 256 - unpack('C', $buf) - 108;
        }
        push @st, $v;
    }
    return ($dict);
}
# Load a TrueType/OpenType font file and return a flat hashref of metrics
# and naming info (the V section of the internal $data structure), ready
# for building a PDF font descriptor: family/style names, weight/stretch,
# per-glyph widths, unicode map, flags, and (for CFF-flavoured fonts) the
# parsed 'CFF ' top-level structures.
sub get_otf_data {
my $file=shift @_;
my $filename=basename($file);
my $fh=IO::File->new($file);
my $data={};
binmode($fh,':raw');
my($buf,$ver,$num,$i);
# sfnt header: 4-byte version tag, 2-byte table count.
read($fh,$buf, 12);
($ver, $num) = unpack("Nn", $buf);
# NOTE(review): "next" here is not inside a loop in this sub; it relies on
# the deprecated "exiting subroutine via next" behavior (or dies at
# runtime) -- "return" was probably intended.  Same at the check below.
$ver == 1 << 16 # TTF version 1
|| $ver == 0x74727565 # support Mac sfnts
|| $ver == 0x4F54544F # OpenType with diverse Outlines
or next; #die "$file not a valid true/opentype font";
# Table directory: tag, checksum, offset, length per entry.
for ($i = 0; $i < $num; $i++)
{
read($fh,$buf, 16) || last; #die "Reading table entry";
my ($name, $check, $off, $len) = unpack("a4NNN", $buf);
$data->{$name} = {
OFF => $off,
LEN => $len,
};
}
next unless(defined $data->{name} && defined $data->{'OS/2'});
$data->{V}={};
# Parse the individual tables (order matters: maxp/head feed loca etc.).
read_name_table($data,$fh);
read_os2_table($data,$fh);
read_maxp_table($data,$fh);
read_head_table($data,$fh);
read_hhea_table($data,$fh);
read_hmtx_table($data,$fh);
read_cmap_table($data,$fh);
read_post_table($data,$fh);
# Glyph outlines are not needed for metrics extraction; disabled.
if(0)
{
read_loca_table($data,$fh);
read_glyf_table($data,$fh);
}
# Name-table ids: 1=family, 2=style, 4=full name.
$data->{V}->{fontfamily}=find_name($data->{name},1);
$data->{V}->{fontname}=find_name($data->{name},4);
$data->{V}->{stylename}=find_name($data->{name},2);
my $name = lc find_name($data->{name},1);
my $subname = lc find_name($data->{name},2);
my $slant='';
# NOTE(review): $weight_name and $setwidth_name are not declared in this
# sub -- presumably package globals declared earlier in the file; confirm.
# Also, find_name never returns undef (it returns ''), so the else branch
# below looks unreachable.
if (defined $subname) {
$weight_name = "$subname";
} else {
$weight_name = "Regular";
}
$weight_name =~ s/-/ /g;
$_ = $weight_name;
# Normalize common style-name spellings into weight + slant; anything
# else falls through to the OS/2 usWeightClass mapping.
if (/^(regular|normal|medium)$/i) {
$weight_name = "Regular";
$slant = "";
$subname='';
} elsif (/^bold$/i) {
$weight_name = "Bold";
$slant = "";
$subname='';
} elsif (/^bold *(italic|oblique)$/i) {
$weight_name = "Bold";
$slant = "-Italic";
$subname='';
} elsif (/^(italic|oblique)$/i) {
$weight_name = "Regular";
$slant = "-Italic";
$subname='';
} else {
# we need to find it via the OS/2 table
if($data->{V}->{usWeightClass} == 0) {
$weight_name = "Regular";
} elsif($data->{V}->{usWeightClass} < 150) {
$weight_name = "Thin";
} elsif($data->{V}->{usWeightClass} < 250) {
$weight_name = "ExtraLight";
} elsif($data->{V}->{usWeightClass} < 350) {
$weight_name = "Light";
} elsif($data->{V}->{usWeightClass} < 450) {
$weight_name = "Regular";
} elsif($data->{V}->{usWeightClass} < 550) {
$weight_name = "Regular";
} elsif($data->{V}->{usWeightClass} < 650) {
$weight_name = "SemiBold";
} elsif($data->{V}->{usWeightClass} < 750) {
$weight_name = "Bold";
} elsif($data->{V}->{usWeightClass} < 850) {
$weight_name = "ExtraBold";
} else {
$weight_name = "Black";
}
# $slant = "";
# $subname='';
}
$data->{V}->{fontweight}=$data->{V}->{usWeightClass};
# OS/2 usWidthClass 1..9 -> stretch name (5 = Normal).
if($data->{V}->{usWidthClass} == 1) {
$setwidth_name = "-UltraCondensed";
$data->{V}->{fontstretch}="UltraCondensed";
} elsif($data->{V}->{usWidthClass} == 2) {
$setwidth_name = "-ExtraCondensed";
$data->{V}->{fontstretch}="ExtraCondensed";
} elsif($data->{V}->{usWidthClass} == 3) {
$setwidth_name = "-Condensed";
$data->{V}->{fontstretch}="Condensed";
} elsif($data->{V}->{usWidthClass} == 4) {
$setwidth_name = "-SemiCondensed";
$data->{V}->{fontstretch}="SemiCondensed";
} elsif($data->{V}->{usWidthClass} == 5) {
$setwidth_name = "";
$data->{V}->{fontstretch}="Normal";
} elsif($data->{V}->{usWidthClass} == 6) {
$setwidth_name = "-SemiExpanded";
$data->{V}->{fontstretch}="SemiExpanded";
} elsif($data->{V}->{usWidthClass} == 7) {
$setwidth_name = "-Expanded";
$data->{V}->{fontstretch}="Expanded";
} elsif($data->{V}->{usWidthClass} == 8) {
$setwidth_name = "-ExtraExpanded";
$data->{V}->{fontstretch}="ExtraExpanded";
} elsif($data->{V}->{usWidthClass} == 9) {
$setwidth_name = "-UltraExpanded";
$data->{V}->{fontstretch}="UltraExpanded";
} else {
$setwidth_name = ""; # normal | condensed | narrow | semicondensed
$data->{V}->{fontstretch}="Normal";
}
# NOTE(review): this overwrites the full name (id 4) stored above with the
# lowercased family name -- confirm that is intentional.
$data->{V}->{fontname}=$name;
$data->{V}->{subname}="$weight_name$slant$setwidth_name";
$data->{V}->{subname}=~s|\-| |g;
if(defined $data->{'CFF '})
{
# read CFF table
seek($fh,$data->{'CFF '}->{OFF},0);
read($fh,$buf, 4);
my ($cffmajor,$cffminor,$cffheadsize,$cffglobaloffsize)=unpack('C4',$buf);
# Name INDEX follows the CFF header; load each entry's raw bytes.
$data->{'CFF '}->{name}=readcffindex($fh,$data->{'CFF '}->{OFF}+$cffheadsize);
foreach my $dict (@{$data->{'CFF '}->{name}})
{
seek($fh,$dict->{OFF},0);
read($fh,$dict->{VAL},$dict->{LEN});
}
# Top DICT INDEX starts right after the last name entry.
$data->{'CFF '}->{topdict}=readcffindex($fh,$data->{'CFF '}->{name}->[-1]->{OFF}+$data->{'CFF '}->{name}->[-1]->{LEN});
foreach my $dict (@{$data->{'CFF '}->{topdict}})
{
$dict->{VAL}=readcffdict($fh,$dict->{OFF},$dict->{LEN},$data->{'CFF '}->{OFF});
}
# String INDEX follows the top DICT INDEX; custom SIDs start at 391.
$data->{'CFF '}->{string}=readcffindex($fh,$data->{'CFF '}->{topdict}->[-1]->{OFF}+$data->{'CFF '}->{topdict}->[-1]->{LEN});
foreach my $dict (@{$data->{'CFF '}->{string}})
{
seek($fh,$dict->{OFF},0);
read($fh,$dict->{VAL},$dict->{LEN});
}
# Pad the string list so common SIDs just past the end still resolve.
push @{$data->{'CFF '}->{string}},{ 'VAL' => '001.000' };
push @{$data->{'CFF '}->{string}},{ 'VAL' => '001.001' };
push @{$data->{'CFF '}->{string}},{ 'VAL' => '001.002' };
push @{$data->{'CFF '}->{string}},{ 'VAL' => '001.003' };
push @{$data->{'CFF '}->{string}},{ 'VAL' => 'Black' };
push @{$data->{'CFF '}->{string}},{ 'VAL' => 'Bold' };
push @{$data->{'CFF '}->{string}},{ 'VAL' => 'Book' };
push @{$data->{'CFF '}->{string}},{ 'VAL' => 'Light' };
push @{$data->{'CFF '}->{string}},{ 'VAL' => 'Medium' };
push @{$data->{'CFF '}->{string}},{ 'VAL' => 'Regular' };
push @{$data->{'CFF '}->{string}},{ 'VAL' => 'Roman' };
push @{$data->{'CFF '}->{string}},{ 'VAL' => 'Semibold' };
# Resolve SID references (custom SIDs are offset by 391) to their strings.
foreach my $dict (@{$data->{'CFF '}->{topdict}})
{
foreach my $k (keys %{$dict->{VAL}})
{
my $dt=$dict->{VAL}->{$k};
if($k eq 'ROS')
{
$dict->{VAL}->{$k}->[0]=$data->{'CFF '}->{string}->[$dict->{VAL}->{$k}->[0]-391]->{VAL};
$dict->{VAL}->{$k}->[1]=$data->{'CFF '}->{string}->[$dict->{VAL}->{$k}->[1]-391]->{VAL};
$data->{V}->{$k}=$dict->{VAL}->{$k};
next;
}
next unless(ref($dt) eq 'HASH' && defined $dt->{SID});
# NOTE(review): the guard checks SID>=379 but the subtraction uses 391
# (the count of standard strings) -- SIDs 379..390 would index from the
# end of the array; confirm which bound is intended.
if($dt->{SID}>=379)
{
$dict->{VAL}->{$k}=$data->{'CFF '}->{string}->[$dt->{SID}-391]->{VAL};
}
}
}
}
close($fh);
# Prime nameByUni's internal tables (called once with no arguments).
nameByUni();
# Build name-keyed width map; glyphs without an hmtx entry fall back to
# the last recorded advance width.
my $g = scalar @{$data->{V}->{uni}};
$data->{V}->{wx}={};
for(my $i = 0; $i<$g ; $i++)
{
if(defined $data->{hmtx}->{wx}->[$i])
{
$data->{V}->{wx}->{nameByUni($data->{V}->{uni}->[$i])} = $data->{hmtx}->{wx}->[$i];
}
else
{
$data->{V}->{wx}->{nameByUni($data->{V}->{uni}->[$i])} = $data->{hmtx}->{wx}->[-1];
}
}
$data->{V}->{glyphs}=$data->{glyf}->{glyphs};
# Collapse to the V section and add PDF font-descriptor basics.
$data=$data->{V};
$data->{firstchar}=0;
$data->{lastchar}=255;
# PDF font-descriptor flag bits: 1=FixedPitch, 64=Italic, 1<<18=ForceBold.
$data->{flags} |= 1 if($data->{isfixedpitch} > 0);
$data->{flags} |= 64 if($data->{italicangle} != 0);
$data->{flags} |= (1<<18) if($data->{usWeightClass} >= 600);
return($data);
}
# Constructor: parse the font file $name via get_otf_data, register the
# resulting PDF font resource with $pdf, and derive the BaseFont name
# (family plus ,Bold/,Italic/,BoldItalic suffix) unless -fontname is given.
# Options: -fontname, -pdfname, -dokern, -encode.
sub new
{
my ($class,$pdf,$name,%opts) = @_;
my ($self,$data);
$data=get_otf_data($name);
$class = ref $class if ref $class;
# NOTE(review): $data->{apiname} is not set by get_otf_data as shown here,
# so the PDF key may start with an undef -- confirm where apiname is set.
$self = $class->SUPER::new($pdf, $data->{apiname}.pdfkey().'~'.time());
$pdf->new_obj($self) unless($self->is_obj($pdf));
$self->{' data'}=$data;
$self->{-dokern}=1 if($opts{-dokern});
$self->{'Subtype'} = PDFName('TrueType');
if($opts{-fontname})
{
$self->{'BaseFont'} = PDFName($opts{-fontname});
}
else
{
# Strip spaces from the family name, then append a style suffix
# derived from the style name / OS/2 weight class.
my $fn=$data->{fontfamily};
$fn=~s|\s+||go;
if(($data->{stylename}=~m<(italic|oblique)>i) && ($data->{usWeightClass}>600))
{
$fn.=',BoldItalic';
}
elsif($data->{stylename}=~m<(italic|oblique)>i)
{
$fn.=',Italic';
}
elsif($data->{usWeightClass}>600)
{
$fn.=',Bold';
}
$self->{'BaseFont'} = PDFName($fn);
}
if($opts{-pdfname})
{
$self->name($opts{-pdfname});
}
$self->{FontDescriptor}=$self->descrByData();
$self->encodeByData($opts{-encode});
return($self);
}
=item $font = PDF::API2::Resource::Font::neTrueType->new_api $api, $fontname, %options
Returns a ne-truetype 8-bit-only object. This method differs from 'new' in that
it needs a PDF::API2 object rather than a PDF::API2::PDF::File object.
=cut
# API-level constructor: build the font against $api->{pdf}, make sure it
# is registered as a PDF object, and mark the page tree as modified.
sub new_api
{
my ($class, $api, @opts) = @_;
my $self = $class->new($api->{pdf}, @opts);
$api->{pdf}->new_obj($self) unless $self->is_obj($api->{pdf});
$api->{pdf}->out_obj($api->{pages});
return $self;
}
1;
__END__
=back
=head1 AUTHOR
alfred reibenschuh
| carlgao/lenga | images/lenny64-peon/usr/share/perl5/PDF/API2/Resource/Font/neTrueType.pm | Perl | mit | 38,052 |
#!/usr/bin/perl
use strict;
use lib '..', '../web';
use Data::Dumper;
use MemberRecordType;
use Singleton;
use ServerDefs;
use Log;
# Realms whose tblMember_Seasons_<realm> rows are migrated below.
my @realm_list = (35,);
# Safety cap on rows pulled per realm (LIMIT clause in the SELECT below).
my $record_limit = 1000000;
# Connection details; the DSN is split on ':' (expected "dbi:mysql:NAME"),
# so $dbi/$mysql receive the discarded prefix pieces and $DB the db name.
my ($HOST, $USER, $PWD, $dbi, $mysql, $DB)=('192.168.200.240', $ServerDefs::DB_USER, $ServerDefs::DB_PASSWD, split(':', $ServerDefs::DB_DSN));
#my ($HOST, $USER, $PWD, $DB)=("192.168.200.240", "root", "qweasd", "prod_regoSWM_20131027");
#my ($HOST, $USER, $PWD, $DB)=("localhost", "root", "", "prod_regoSWM_20140223");
#my $dbh = DBI->connect("DBI:mysql:$DB:$HOST", $USER, $PWD) or die "Connect database failed";
# NOTE(review): get_dbh() is presumably imported via Singleton/ServerDefs -- confirm.
my $dbh = get_dbh();
# global cache for record type
my $record_type_cache = {};
# Emit one line of generated SQL on STDOUT (the script's output stream;
# progress and diagnostics go to STDERR instead).
sub print_sql {
    my @sql_parts = @_;
    print STDOUT @sql_parts, "\n";
}
# Resolve a member-record-type name (Player/Coach/...) within a realm (and
# optional sub-realm) to its database id, memoised in %$record_type_cache.
# Returns -1 when the realm is unknown or no matching row exists.
sub get_record_type_id {
    my ($realm_id, $realm_sub_id, $name) = @_;
    my $key = sprintf("%s-%s-%s", $realm_id, $realm_sub_id, lc $name);
    if (not exists $record_type_cache->{$key}) {
        if (not defined $realm_id) {
            # Without at least a realm id there is nothing to look up.
            $record_type_cache->{$key} = -1;
            return -1;
        }
        # Build one parameterised query instead of two duplicated branches;
        # the sub-realm clause is appended only when a sub-realm is given.
        my $sql = qq{
            SELECT intMemberRecordTypeID FROM tblMemberRecordType
            WHERE strName = ? AND intRealmID = ?
        };
        my @bind = ($name, $realm_id);
        if (defined $realm_sub_id) {
            $sql .= " AND intSubRealmID = ?";
            push @bind, $realm_sub_id;
        }
        my $s = $dbh->prepare($sql);
        $s->execute(@bind);
        my @row = $s->fetchrow_array();
        if (@row == 0) {
            # BUGFIX: dropped the stray ")" that used to end this message.
            ERROR "# record type id not found for name: $name, realm: $realm_id, sub_realm: $realm_sub_id\n";
        }
        $record_type_cache->{$key} = @row > 0 ? $row[0] : -1;
        $s->finish();
    }
    return $record_type_cache->{$key};
}
# prepare template table: the generated SQL (on STDOUT) is later piped
# into mysql, so the template CREATE statement is emitted first.
print_sql("# PREPARE TABLE TEMPLATE");
print_sql(`cat ../db_setup/tblMemberRecords.sql`);
# clear and prepare the target table
for my $realm_id (@realm_list) {
my $dst_table_name = "tblMemberRecords_$realm_id";
print_sql("# PREPARE TABLE;");
print_sql("DROP TABLE IF EXISTS $dst_table_name;");
print_sql("CREATE TABLE $dst_table_name LIKE tblMemberRecords;");
print_sql("ALTER TABLE $dst_table_name AUTO_INCREMENT = 1;");
print_sql("");
print_sql("# PREPARE DATA");
# get the realm id dataset
my $sth = $dbh->prepare(qq{
SELECT ms.*, s.strSeasonName, s.intRealmID, s.intRealmSubTypeID
FROM tblMember_Seasons_$realm_id ms
LEFT JOIN tblSeasons s ON ms.intSeasonID = s.intSeasonID
LIMIT $record_limit
});
# WHERE ms.intAssocID = 12607
# LEFT JOIN tblAssoc ass ON ms.intAssocID = ass.intAssocID
# LEFT JOIN tblMember m ON ms.intMemberID = m.intMemberID
$sth->execute() or die "SQL error";
my $count = 0;
# NOTE(review): $sth->rows before fetching works for mysql's buffered
# SELECTs but is driver-specific -- confirm if the driver changes.
my $total = $sth->rows;
while (my $row = $sth->fetchrow_hashref()) {
$count += 1;
print STDERR "\rprocessing $count / $total";
# Entity type 3 = club record, 5 = association record.
my $entity_type_id = ($row->{'intClubID'} != 0 ? 3 : 5);
my $entity_id = $row->{'intClubID'} != 0 ? $row->{'intClubID'} : $row->{'intAssocID'};
# One output row per active role on the membership row.
for my $type (qw/Player Coach Umpire Other1 Other2/) {
if ($row->{"int${type}Status"} == 1) {
# NOTE(review): the SELECT provides intRealmSubTypeID but intSubRealmID
# is read here -- likely always undef, so the realm-only lookup branch
# of get_record_type_id is used; confirm which column was intended.
my $record_type_id = get_record_type_id($row->{'intRealmID'}, $row->{'intSubRealmID'}, $type);
my @values = (
$record_type_id,
$row->{'intMemberID'},
$entity_type_id,
$entity_id,
$row->{'intSeasonID'},
$row->{'intPlayerAgeGroupID'},
$row->{"dtIn$type"},
$row->{"dtOut$type"},
$row->{"int${type}FinancialStatus"},
$row->{"int${type}Status"},
$row->{"intUsedRegoForm"},
);
# Only the two date columns need quoting; the rest are assumed numeric.
# NOTE(review): values are interpolated into the SQL text unescaped --
# safe only as long as every non-date column really is numeric.
$values[6] = $values[6]? "'$values[6]'" : 'NULL';
$values[7] = $values[7]? "'$values[7]'" : 'NULL';
print_sql("INSERT INTO $dst_table_name (intMemberRecordTypeID, intMemberID, intEntityTypeID, intEntityID, intSeasonID, intAgeGroupID, dtIn, dtOut, intFinancialStatus, intStatus, intFromRegoForm)
VALUES (", join(", ", @values), "); # $count");
}
}
}
$sth->finish();
}
$dbh->disconnect();
INFO "\nDone. Now you can import the batch data by the following command: \n";
INFO " mysql -h$HOST -u$USER -p$PWD $DB < SQL_FILE\n\n";
# vim: set et sw=4 ts=4:
| facascante/slimerp | fifs/misc/migrate_member_data.pl | Perl | mit | 4,766 |
#!/usr/bin/perl
use strict;
use warnings;

# Split a URL into its generic components using the parsing regex from
# RFC 3986, Appendix B, and print each captured piece.
my $url = defined $ARGV[0] ? $ARGV[0] : '';
$url =~ /^(([^:\/?#]+):)?(\/\/([^\/?#]*))?([^?#]*)(\?([^#]*))?(#(.*))?/;
# Name the captures immediately, before anything can clobber $1..$9;
# optional groups may be undef, which prints as '' below.
my ( $proto_colon, $proto, $host_slashes, $host,
     $path, $query_string, $query, $hash_part, $hash ) =
   ( $1, $2, $3, $4, $5, $6, $7, $8, $9 );
# BUGFIX: the label said "ARGV[1]" although $ARGV[0] is what gets parsed.
print "ARGV[0] = " . $url . "\n";
print "Protocol With Colon: " . ( $proto_colon // '' ) . "\n";
print "Protocol Without Colon: " . ( $proto // '' ) . "\n";
print "Host (with slashes): " . ( $host_slashes // '' ) . "\n";
print "Host (w/o slashes): " . ( $host // '' ) . "\n";
print "path: " . ( $path // '' ) . "\n";
print "query string: " . ( $query_string // '' ) . "\n";
print "query: " . ( $query // '' ) . "\n";
print "hash: " . ( $hash_part // '' ) . "\n";
print "hash-sans #: " . ( $hash // '' ) . "\n";
print "scheme " . ( $proto // '' ) . "\n";
print "authority " . ( $host // '' ) . "\n";
print "path " . ( $path // '' ) . "\n";
print "query " . ( $query // '' ) . "\n";
print "fragment " . ( $hash // '' ) . "\n";
| nathanwiegand/urlparser | parser.pl | Perl | mit | 545 |
#!/usr/bin/env perl
use strict;
use Warnings::Version 'all';
# Test fixture: FOOBAR is not a real signal name, so this %SIG assignment
# is expected to trigger a 'signal' category warning under
# Warnings::Version 'all'.
$SIG{FOOBAR} = "IGNORE";
| gitpan/Warnings-Version | t/10-helpers/signal.pl | Perl | mit | 88 |
#!/usr/bin/perl
use strict;
use Digest::MD5 qw(md5_hex);
# Run a shell command loudly; die with the exit status when system() fails.
sub sy{ print join(" ",@_),"\n"; system @_ and die $?; }
# Print the command, then replace the current process with it via exec.
my $exec = sub{ print join(" ",@_),"\n"; exec @_; die 'exec failed' };
# Fetch a required environment variable, dying with its name if unset/empty.
my $env = sub{ $ENV{$_[0]} || die "no $_[0]" };
# Write $content to file $fn as UTF-8, dying with the original
# "put_text($!)($fn)" format on any failure.
my $put_text = sub{
    my($fn,$content)=@_;
    # BUGFIX: replaced the bareword global FF handle and the chained
    # "and ... or die" with a lexical handle and per-step error checks
    # (a failed print previously skipped close before dying).
    open my $fh, ">:encoding(UTF-8)", $fn or die "put_text($!)($fn)";
    print {$fh} $content            or die "put_text($!)($fn)";
    close $fh                       or die "put_text($!)($fn)";
};
# Registry of [name => coderef] subcommands; dispatched at the bottom of
# the script against the first command-line argument.
my @tasks;
###
# Handle one "build IMG:TAG" request, where TAG is "<base>.<mode>.<checkout>".
# Generates a shell script that checks out <checkout> from the repo mapped
# to IMG, runs a two-stage docker build on C4CI_HOST via ssh, and pushes
# the result when the image name contains a registry ("/").
# The strict regex below is also what keeps the later shell interpolation
# of these values safe -- only [\w.:/-] characters can get through.
my $handle_build = sub{
my ($arg) = @_;
my($full_img,$img,$tag,$base,$mode,$checkout) =
$arg=~/^build\s+(([\w\-\.\:\/]+)\:(([\w\-\.]+)\.(\w+)\.([\w\-]+)))\s*$/ ?
($1,$2,$3,$4,$5,$6) : die "can not [$arg]";
#we can implement fork after checkout later and unshare ctx_dir
# Unique throwaway container name for this build.
my $builder = md5_hex($full_img)."-".time;
my $host = &$env("C4CI_HOST");
my $ctx_dir = &$env("C4CI_CTX_DIR");
# C4CI_REPO_DIRS is a whitespace-separated "img dir img dir ..." list.
my %repo_dirs = &$env("C4CI_REPO_DIRS")=~/(\S+)/g;
my $repo_dir = $repo_dirs{$img} || die "no repo for $img";
my $args = " --build-arg C4CI_BASE_TAG=$base";
my @commands = (
"set -x",
"(test -e $ctx_dir && rm -r $ctx_dir; true)",
"mkdir $ctx_dir",
"cd $repo_dir && git fetch && git fetch --tags",
"git --git-dir=$repo_dir/.git --work-tree=$ctx_dir checkout $checkout -- .",
"docker build -t builder:$tag -f $ctx_dir/build.$mode.dockerfile $args $ctx_dir",
"rm -r $ctx_dir",
"docker create --name $builder builder:$tag",
"docker cp $builder:/c4/res $ctx_dir",
"docker rm -f $builder",
"docker build -t $full_img $ctx_dir",
$img=~m{/} ? "docker push $full_img" : (),
);
# Ship the script to the build host and run it there.
&$put_text("/tmp/build.sh", join " && ",@commands);
sy("ssh c4\@$host sh < /tmp/build.sh");
};
# "ci": unpack ssh keys, then listen on C4CI_PORT, forking one
# "ci_handle" process per connection via socat.
push @tasks, [ci=>sub{
my $tgz = &$env("C4CI_KEY_TGZ");
my $dir = "/c4/.ssh";
sy("mkdir -p $dir && cd $dir && chmod 0700 . && tar -xzf $tgz");
my $port = &$env("C4CI_PORT");
&$exec('socat', "tcp-l:$port,reuseaddr,fork", "exec:perl /ci.pl ci_handle");
}];
# "ci_handle": read one build request line from the socket (STDIN) and
# run it.
push @tasks, [ci_handle=>sub{
my $arg = <STDIN>;
&$handle_build($arg);
}];
# "frpc": run the frp reverse-proxy client with the configured ini file.
push @tasks, [frpc=>sub{
&$exec("/tools/frp/frpc", "-c", &$env("C4FRPC_INI"));
}];
# "ci_arg": run one build request given directly on the command line.
push @tasks, [ci_arg=>sub{
my($arg)=@_;
&$handle_build($arg);
}];
###
# Dispatch: run each registered task whose name matches the requested
# command; with no command given, a task named "def" (if any) would run.
my ($cmd, @args) = @ARGV;
for my $task (@tasks) {
    my ($task_name, $task_code) = @$task;
    $task_code->(@args) if ($cmd || 'def') eq $task_name;
}
| wregs/c4proto | ci.pl | Perl | apache-2.0 | 2,310 |
package EBiSC::Utils::IMS;
use Time::Moment;
use strict;
use warnings;
# Mirror the IMS cell-line feed into the local store: wipe both target
# collections, then insert one timestamped document per line, routing
# withdrawn lines into ims_withdrawn and everything else into ims_line.
# Options: api (feed client), db (store), now (timestamp override).
sub sync_db {
    my (%options) = @_;
    my $api = $options{api};
    my $db  = $options{db};

    $db->ims_line->reset;
    $db->ims_withdrawn->reset;

    # One timestamp for the whole sync run.
    my $stamp = $options{now} // Time::Moment->now_utc;

    my $cursor = $api->lines;
    while (my $line = $cursor->next) {
        my $is_withdrawn =
            $line->{availability} && $line->{availability} eq 'Withdrawn';
        my $collection = $is_withdrawn ? $db->ims_withdrawn->c : $db->ims_line->c;
        my $result = $collection->insert_one({
            date => $stamp,
            obj  => $line,
            name => $line->{name},
        });
        die "Database insert error" if !$result->acknowledged;
        $result->assert;
    }
}
1;
| EMBL-EBI-GCA/ebisc_tracker_2 | tracker/lib/EBiSC/Utils/IMS.pm | Perl | apache-2.0 | 640 |
#!/software/bin/env/usr/bin/env perl
# Copyright [1999-2014] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<helpdesk.org>.
=cut
use strict;
use Data::Dumper;
# define the names of the columns we want, as the order may change
our @COLS = (
'PUBMEDID',
'Disease/Trait',
'Reported Gene(s)',
'Strongest SNP-Risk Allele',
'SNPs',
'Risk Allele Frequency',
'p-Value',
);
# Stage 1: normalise the GWAS dump into ./initial_data_new;
# Stage 2: expand each SNP list into one row per rs-name (./initial_data2).
parse_file();
process_file();
# Extract the @COLS columns from the tab-separated GWAS dump named on the
# command line and write them, tab-joined, to ./initial_data_new.
sub parse_file {
    die "You must provide a filename to $0 to be parsed" unless @ARGV;
    # BUGFIX: was an unchecked 2-arg open onto a bareword handle.
    open my $in, '<', $ARGV[0] or die "Cannot open $ARGV[0]: $!";
    # get header
    my $header = <$in>;
    chomp $header;
    my @headers = split /\t/, $header;
    # Map column name -> position, so the dump's column order may change.
    my %col_idx;
    $col_idx{$headers[$_]} = $_ for (0..$#headers);
    open my $out, '>', 'initial_data_new' or die "Cannot write initial_data_new: $!";
    # read file
    while (<$in>) {
        chomp;
        my @data = split /\t/, $_;
        # check number of cols -- ragged rows are skipped.
        # NOTE(review): split drops trailing empty fields, so rows whose
        # last column is empty are also skipped here; confirm intended.
        next unless scalar @data == scalar @headers;
        print {$out} join("\t", map { $data[$col_idx{$_}] } @COLS), "\n";
    }
    # BUGFIX: the output was never closed; process_file previously relied
    # on the implicit close when the bareword handle was reopened.
    close $out or die "Cannot close initial_data_new: $!";
    close $in;
}
# For each row of ./initial_data_new, prefix the PubMed id with "pubmed/"
# and write one line per rs-name from the SNPs column to ./initial_data2.
sub process_file {
    open my $in, '<', 'initial_data_new' or die "Cannot open initial_data_new: $!";
    open my $out, '>', 'initial_data2' or die "Cannot write initial_data2: $!";
    while (<$in>) {
        next if /^PubMedID/;    # defensive: skip a header row if one slipped through
        chomp;
        my @all = split /\t/, $_;
        $all[0] = "pubmed/$all[0]";
        # BUGFIX: "my @rsnames = split ... if $all[4]" is the documented
        # undefined-behavior "my with statement modifier" form; use an
        # explicit conditional.  Also trim whitespace around the commas so
        # lists like "rs1, rs2" do not yield names with leading blanks.
        my @rsnames = $all[4] ? split(/\s*,\s*/, $all[4]) : ();
        foreach my $n (@rsnames) {
            print {$out} join("\t", $n, @all), "\n";
        }
    }
    close $out or die "Cannot close initial_data2: $!";
    close $in;
}
| dbolser-ebi/ensembl-variation | scripts/import/parse-gwas.pl | Perl | apache-2.0 | 2,143 |
#
# (c) Jan Gehring <jan.gehring@gmail.com>
#
# vim: set ts=2 sw=2 tw=0:
# vim: set expandtab:
#
# Some of the code is based on Net::Amazon::EC2
#
package Rex::Cloud::Amazon;
use strict;
use warnings;
our $VERSION = '0.56.1'; # VERSION
use Rex::Logger;
use Rex::Cloud::Base;
use base qw(Rex::Cloud::Base);
BEGIN {
use Rex::Require;
LWP::UserAgent->use;
Digest::HMAC_SHA1->use;
HTTP::Date->use(qw(time2isoz));
MIME::Base64->use(qw(encode_base64 decode_base64));
XML::Simple->require;
}
use Data::Dumper;
# Constructor: bless the option hash and install the fixed EC2 defaults
# (API version 2011-05-15, signature version 1, us-east-1 endpoint).
sub new {
  my $that  = shift;
  my $class = ref($that) || $that;
  my $self  = bless( {@_}, $class );

  #$self->{"__version"} = "2009-11-30";
  $self->{"__version"}           = "2011-05-15";
  $self->{"__signature_version"} = 1;
  $self->{"__endpoint"}          = "us-east-1.ec2.amazonaws.com";

  Rex::Logger::debug(
    "Creating new Amazon Object, with endpoint: " . $self->{"__endpoint"} );
  Rex::Logger::debug( "Using API Version: " . $self->{"__version"} );

  return $self;
}
# Store the AWS access-key pair used to sign subsequent API requests.
sub set_auth {
  my ( $self, $access_key, $secret_access_key ) = @_;
  @{$self}{qw(__access_key __secret_access_key)} =
    ( $access_key, $secret_access_key );
}
# Point subsequent API requests at a different regional EC2 endpoint.
sub set_endpoint {
  my ( $self, $new_endpoint ) = @_;
  Rex::Logger::debug("Setting new endpoint to $new_endpoint");
  $self->{'__endpoint'} = $new_endpoint;
}
# Current UTC time formatted for AWS request signing, e.g.
# "2011-05-15T12:34:56.000Z" (time2isoz gives "YYYY-MM-DD HH:MM:SSZ").
sub timestamp {
  my $stamp = time2isoz();
  substr( $stamp, -1, 1, '' );    # drop the trailing "Z"
  $stamp =~ s/\s+/T/g;            # date/time separator becomes "T"
  return $stamp . ".000Z";        # re-append zone with ms placeholder
}
# Launch a single EC2 instance and block until it reaches "running".
# %data keys: image_id, key, type (default m1.small), zone, name (becomes
# a Name tag), volume (attached as /dev/sdh once running), and either
# security_group (scalar or arrayref) or security_groups.
# Returns the instance-info hash produced by list_instances().
sub run_instance {
my ( $self, %data ) = @_;
Rex::Logger::debug("Trying to start a new Amazon instance with data:");
Rex::Logger::debug( " $_ -> " . ( $data{$_} ? $data{$_} : "undef" ) )
for keys %data;
# Accept security_group (scalar or arrayref) or security_groups.
my $security_groups;
if ( ref( $data{security_group} ) eq "ARRAY" ) {
$security_groups = $data{security_group};
}
elsif ( exists $data{security_groups} ) {
$security_groups = $data{security_groups};
}
else {
$security_groups = $data{security_group};
}
# NOTE(review): numbered parameters start at SecurityGroup.0 here --
# confirm the endpoint accepts 0-based numbering before relying on
# multi-group launches.
my %security_group = ();
if ( ref($security_groups) eq "ARRAY" ) {
my $i = 0;
for my $sg ( @{$security_groups} ) {
$security_group{"SecurityGroup.$i"} = $sg;
$i++;
}
}
else {
$security_group{SecurityGroup} = $security_groups || "default";
}
my $xml = $self->_request(
"RunInstances",
ImageId => $data{"image_id"},
MinCount => 1,
MaxCount => 1,
KeyName => $data{"key"},
InstanceType => $data{"type"} || "m1.small",
"Placement.AvailabilityZone" => $data{"zone"} || "",
%security_group
);
my $ref = $self->_xml($xml);
if ( exists $data{"name"} ) {
$self->add_tag(
id => $ref->{"instancesSet"}->{"item"}->{"instanceId"},
name => "Name",
value => $data{"name"}
);
}
# Poll list_instances until the new instance reports "running".
# NOTE(review): this loop has no timeout -- it spins forever if the
# instance never reaches "running".
my ($info) =
grep { $_->{"id"} eq $ref->{"instancesSet"}->{"item"}->{"instanceId"} }
$self->list_instances();
while ( $info->{"state"} ne "running" ) {
Rex::Logger::debug("Waiting for instance to be created...");
($info) =
grep { $_->{"id"} eq $ref->{"instancesSet"}->{"item"}->{"instanceId"} }
$self->list_instances();
sleep 1;
}
# Optionally attach a pre-existing volume at the default device.
if ( exists $data{"volume"} ) {
$self->attach_volume(
volume_id => $data{"volume"},
instance_id => $ref->{"instancesSet"}->{"item"}->{"instanceId"},
name => "/dev/sdh", # default for new instances
);
}
return $info;
}
# Attach an EBS volume to an instance (device defaults to /dev/sdh).
# %data keys: volume_id, instance_id, name (device path).
sub attach_volume {
my ( $self, %data ) = @_;
Rex::Logger::debug("Trying to attach a new volume");
$self->_request(
"AttachVolume",
VolumeId => $data{"volume_id"},
InstanceId => $data{"instance_id"},
Device => $data{"name"} || "/dev/sdh"
);
}
# Detach an EBS volume from whatever instance it is attached to.
sub detach_volume {
my ( $self, %data ) = @_;
Rex::Logger::debug("Trying to detach a volume");
$self->_request( "DetachVolume", VolumeId => $data{"volume_id"}, );
}
# Permanently delete an EBS volume.
sub delete_volume {
my ( $self, %data ) = @_;
Rex::Logger::debug("Trying to delete a volume");
$self->_request( "DeleteVolume", VolumeId => $data{"volume_id"}, );
}
# Terminate (destroy) a single instance by id.  Fire-and-forget: unlike
# start/stop below, this does not poll for completion.
sub terminate_instance {
my ( $self, %data ) = @_;
Rex::Logger::debug("Trying to terminate an instance");
$self->_request( "TerminateInstances",
"InstanceId.1" => $data{"instance_id"} );
}
# Start a stopped instance and poll every 5s until it reports "running".
# NOTE(review): both polling loops below have no timeout -- they spin
# forever if the instance never reaches the expected state.
sub start_instance {
my ( $self, %data ) = @_;
Rex::Logger::debug("Trying to start an instance");
$self->_request( "StartInstances", "InstanceId.1" => $data{instance_id} );
my ($info) =
grep { $_->{"id"} eq $data{"instance_id"} } $self->list_instances();
while ( $info->{"state"} ne "running" ) {
Rex::Logger::debug("Waiting for instance to be started...");
($info) =
grep { $_->{"id"} eq $data{"instance_id"} } $self->list_instances();
sleep 5;
}
}
# Stop a running instance and poll every 5s until it reports "stopped".
sub stop_instance {
my ( $self, %data ) = @_;
Rex::Logger::debug("Trying to stop an instance");
$self->_request( "StopInstances", "InstanceId.1" => $data{instance_id} );
my ($info) =
grep { $_->{"id"} eq $data{"instance_id"} } $self->list_instances();
while ( $info->{"state"} ne "stopped" ) {
Rex::Logger::debug("Waiting for instance to be stopped...");
($info) =
grep { $_->{"id"} eq $data{"instance_id"} } $self->list_instances();
sleep 5;
}
}
# Attach one tag (name=value) to an EC2 resource via CreateTags.
# %data keys: id (resource id), name (tag key), value (tag value).
sub add_tag {
  my ( $self, %data ) = @_;
  my ( $resource_id, $tag_key, $tag_value ) = @data{qw(id name value)};
  Rex::Logger::debug( "Adding a new tag: "
      . $resource_id . " -> "
      . $tag_key . " -> "
      . $tag_value );
  $self->_request(
    "CreateTags",
    "ResourceId.1" => $resource_id,
    "Tag.1.Key"    => $tag_key,
    "Tag.1.Value"  => $tag_value
  );
}
# Create a new EBS volume in the given availability zone and return its
# volume id immediately (without waiting for it to become "available").
# %data keys: size (GiB, default 1), zone.
sub create_volume {
  my ( $self, %data ) = @_;

  Rex::Logger::debug("Creating a new volume");

  my $xml = $self->_request(
    "CreateVolume",
    "Size"             => $data{"size"} || 1,
    "AvailabilityZone" => $data{"zone"},
  );
  my $ref = $self->_xml($xml);

  # BUGFIX: the original had a wait-until-available polling loop AFTER
  # this return statement, i.e. unreachable dead code; it has been
  # removed.  Callers that need the volume to be ready should poll
  # list_volumes() themselves (as attach-time callers already can).
  return $ref->{"volumeId"};
}
# List all EBS volumes.  Returns a list of hashes with keys: id, status,
# zone, size, attached_to (instance id of the attachment, if any).
sub list_volumes {
my ($self) = @_;
my $xml = $self->_request("DescribeVolumes");
my $ref = $self->_xml($xml);
return unless ($ref);
return unless ( exists $ref->{"volumeSet"}->{"item"} );
# XML::Simple collapses a single-element set to a plain hash; normalise
# it to an array so the loop below always sees a list.
if ( ref( $ref->{"volumeSet"}->{"item"} ) eq "HASH" ) {
$ref->{"volumeSet"}->{"item"} = [ $ref->{"volumeSet"}->{"item"} ];
}
my @volumes;
for my $vol ( @{ $ref->{"volumeSet"}->{"item"} } ) {
push(
@volumes,
{
id => $vol->{"volumeId"},
status => $vol->{"status"},
zone => $vol->{"availabilityZone"},
size => $vol->{"size"},
# NOTE(review): attachmentSet item can itself be an array when a
# volume has multiple attachment records; this only handles the
# single-attachment (hash) shape -- confirm acceptable.
attached_to => $vol->{"attachmentSet"}->{"item"}->{"instanceId"},
}
);
}
return @volumes;
}
# Flatten one <instancesSet><item> structure (as parsed by XML::Simple from a
# DescribeInstances response) into the key/value list used by list_instances().
#
# Parameters:
#   $instance_set - hashref for a single instance item
#
# Returns a flat list of key => value pairs (callers wrap it in {}).
sub _make_instance_map {
  my ( $self, $instance_set ) = @_;

  my $groups = $instance_set->{"groupSet"}->{"item"};
  my $tags   = $instance_set->{"tagSet"}->{"item"};

  # The instance name lives either directly in the tag item (single flat tag)
  # or under the "Name" key when several tags are present.
  my $name =
    exists $tags->{"value"}
    ? $tags->{"value"}
    : $tags->{"Name"}->{"value"};

  # One security group arrives as a hash, several as an array of hashes.
  my $many_groups = ref $groups eq 'ARRAY';
  my $security_group =
    $many_groups
    ? join( ',', map { $_->{groupName} } @$groups )
    : $groups->{"groupName"};
  my $security_groups =
    $many_groups
    ? [ map { $_->{groupName} } @$groups ]
    : [ $groups->{"groupName"} ];

  # Tag map: values are either nested hashes ({ value => ... }) keyed by tag
  # name, or a single flat { key => ..., value => ... } pair.
  my %tag_map = map {
    ref $tags->{$_} eq 'HASH'
      ? ( $_ => $tags->{$_}->{value} )
      : ( $tags->{key} => $tags->{value} )
  } keys %$tags;

  return (
    ip              => $instance_set->{"ipAddress"},
    id              => $instance_set->{"instanceId"},
    image_id        => $instance_set->{"imageId"},
    architecture    => $instance_set->{"architecture"},
    type            => $instance_set->{"instanceType"},
    dns_name        => $instance_set->{"dnsName"},
    state           => $instance_set->{"instanceState"}->{"name"},
    launch_time     => $instance_set->{"launchTime"},
    name            => $name,
    private_ip      => $instance_set->{"privateIpAddress"},
    security_group  => $security_group,
    security_groups => $security_groups,
    tags            => \%tag_map,
  );
}
# Return a list of hashrefs describing all EC2 instances (see
# _make_instance_map for the keys). Returns the empty list when the API
# response contains no reservations.
#
# Bug fix: the multi-instance branch compared the reference itself against the
# string 'ARRAY' ($isi eq 'ARRAY') instead of using ref(), so reservations
# containing more than one instance were silently skipped.
sub list_instances {
  my ($self) = @_;

  my @ret;

  my $xml = $self->_request("DescribeInstances");
  my $ref = $self->_xml($xml);

  return unless $ref;
  return unless exists $ref->{"reservationSet"};
  return unless exists $ref->{"reservationSet"}->{"item"};

  if ( ref $ref->{"reservationSet"}->{"item"} eq "HASH" ) {
    # if only one reservation is returned, turn it into an array
    $ref->{"reservationSet"}->{"item"} = [ $ref->{"reservationSet"}->{"item"} ];
  }

  for my $instance_set ( @{ $ref->{"reservationSet"}->{"item"} } ) {
    my $isi = $instance_set->{"instancesSet"}->{"item"};
    if ( ref $isi eq 'HASH' ) {
      # single instance in this reservation
      push @ret, { $self->_make_instance_map($isi) };
    }
    elsif ( ref $isi eq 'ARRAY' ) {
      # several instances in this reservation
      push @ret, map { { $self->_make_instance_map($_) } } @$isi;
    }
  }

  return @ret;
}
# Convenience wrapper around list_instances() that keeps only the instances
# whose state is "running".
sub list_running_instances {
  my ($self) = @_;
  my @running = grep { $_->{"state"} eq "running" } $self->list_instances();
  return @running;
}
# Return a hash mapping region name -> region endpoint, scraped directly from
# the DescribeRegions XML response with a regex (no full XML parse needed).
sub get_regions {
  my ($self) = @_;

  my $content = $self->_request("DescribeRegions");

  my %endpoint_of = $content =~
    m{<regionName>([^<]+)</regionName>\s+<regionEndpoint>([^<]+)</regionEndpoint>}gsim;

  return %endpoint_of;
}
# Return a list of hashrefs describing the availability zones of the current
# region: { zone_name, region_name, zone_state }.
#
# Robustness fix: XML::Simple returns a plain hash (not an array) when exactly
# one zone is present, which previously made the array dereference die.
# Normalize to an array first, as list_volumes()/list_instances() already do,
# and return an empty list when no zone info is present instead of dying.
sub get_availability_zones {
  my ($self) = @_;

  my $xml = $self->_request("DescribeAvailabilityZones");
  my $ref = $self->_xml($xml);

  my $items = $ref->{"availabilityZoneInfo"}->{"item"};
  return unless defined $items;
  $items = [$items] if ref $items eq "HASH";

  my @zones;
  for my $item (@$items) {
    push @zones, {
      zone_name   => $item->{"zoneName"},
      region_name => $item->{"regionName"},
      zone_state  => $item->{"zoneState"},
    };
  }

  return @zones;
}
# Perform a signed POST request against the configured EC2-compatible endpoint.
#
# Parameters:
#   $action - API action name (e.g. "DescribeInstances")
#   %args   - additional request parameters, passed through to _sign()
#
# Returns the raw XML response body, or nothing on a server error (>= 500).
#
# NOTE(review): 4xx responses are not treated as errors here; their body is
# returned and left to _xml() to raise via the <Errors> element -- confirm
# this is intentional.
sub _request {
  my ( $self, $action, %args ) = @_;

  my $ua = LWP::UserAgent->new;
  $ua->env_proxy;

  # _sign() adds the authentication parameters and the HMAC signature.
  my %param = $self->_sign( $action, %args );

  Rex::Logger::debug( "Sending request to: https://" . $self->{'__endpoint'} );
  Rex::Logger::debug( " $_ -> " . $param{$_} ) for keys %param;

  my $res = $ua->post( "https://" . $self->{'__endpoint'}, \%param );

  if ( $res->code >= 500 ) {
    Rex::Logger::info( "Error on request", "warn" );
    Rex::Logger::debug( $res->content );
    return;
  }
  else {
    my $ret;
    # Guard against decoding problems in ->content; warnings silenced on
    # purpose while reading the body.
    eval {
      no warnings;
      $ret = $res->content;
      Rex::Logger::debug($ret);
      use warnings;
    };
    return $ret;
  }
}
# Build the signed parameter set (AWS signature version 1 style) for a request.
#
# Parameters:
#   $action  - API action name
#   %o_args  - request parameters; keys/values that are false are dropped
#
# Returns the complete parameter hash including the Signature entry.
#
# Bug fix: the timestamp is now generated once and reused. The original called
# $self->timestamp() twice -- once for signing and once for the sent
# parameters -- so a second-boundary tick between the two calls produced a
# signature that did not match the submitted Timestamp, failing authentication.
sub _sign {
  my ( $self, $action, %o_args ) = @_;

  # Drop undefined/empty keys and values.
  my %args;
  for my $key ( keys %o_args ) {
    next unless $key;
    next unless $o_args{$key};
    $args{$key} = $o_args{$key};
  }

  my $timestamp = $self->timestamp();

  my %sign_hash = (
    AWSAccessKeyId   => $self->{"__access_key"},
    Action           => $action,
    Timestamp        => $timestamp,
    Version          => $self->{"__version"},
    SignatureVersion => $self->{"__signature_version"},
    %args
  );

  # Signature version 1: concatenate key/value pairs sorted case-insensitively
  # by key, then HMAC the resulting string.
  my $sign_this = '';
  foreach my $key ( sort { lc($a) cmp lc($b) } keys %sign_hash ) {
    $sign_this .= $key . $sign_hash{$key};
  }

  Rex::Logger::debug("Signed: $sign_this");

  my $encoded = $self->_hash($sign_this);

  my %params = (
    Action           => $action,
    SignatureVersion => $self->{"__signature_version"},
    AWSAccessKeyId   => $self->{"__access_key"},
    Timestamp        => $timestamp,
    Version          => $self->{"__version"},
    Signature        => $encoded,
    %args
  );
  return %params;
}
# Compute the request signature: Base64-encoded HMAC-SHA1 of the given query
# string, keyed with the account's secret access key.
sub _hash {
  my ( $self, $query_string ) = @_;

  my $hashed = Digest::HMAC_SHA1->new( $self->{"__secret_access_key"} );
  $hashed->add($query_string);

  # The "" second argument suppresses the trailing newline encode_base64
  # would otherwise append.
  return encode_base64( $hashed->digest, "" );
}
# Parse an API XML response with XML::Simple. Dies with a readable, joined
# message when the response carries an <Errors> section; otherwise returns the
# parsed data structure.
sub _xml {
  my ( $self, $xml ) = @_;

  my $parsed = XML::Simple->new->XMLin($xml);

  if ( defined $parsed->{"Errors"} ) {
    my $errors = $parsed->{"Errors"};
    # A single error arrives as a hash; normalize to an array.
    $errors = [$errors] if ref($errors) ne "ARRAY";

    my @messages = map {
      $_->{"Error"}->{"Message"} . " (Code: " . $_->{"Error"}->{"Code"} . ")"
    } @$errors;

    die( join( "\n", @messages ) );
  }

  return $parsed;
}
1;
| gitpan/Rex | lib/Rex/Cloud/Amazon.pm | Perl | apache-2.0 | 12,472 |
# Auto-generated Paws request class: parameters for the IoT DescribeThing call.
package Paws::IoT::DescribeThing;
use Moose;
# The thing name is interpolated into the request URI (see _api_uri below).
has ThingName => (is => 'ro', isa => 'Str', traits => ['ParamInURI'], uri_name => 'thingName', required => 1);

use MooseX::ClassAttribute;

# Paws-internal call metadata: API name, REST URI template, HTTP verb and the
# class the response is inflated into.
class_has _api_call => (isa => 'Str', is => 'ro', default => 'DescribeThing');
class_has _api_uri => (isa => 'Str', is => 'ro', default => '/things/{thingName}');
class_has _api_method => (isa => 'Str', is => 'ro', default => 'GET');
class_has _returns => (isa => 'Str', is => 'ro', default => 'Paws::IoT::DescribeThingResponse');
class_has _result_key => (isa => 'Str', is => 'ro');
1;
### main pod documentation begin ###
=head1 NAME
Paws::IoT::DescribeThing - Arguments for method DescribeThing on Paws::IoT
=head1 DESCRIPTION
This class represents the parameters used for calling the method DescribeThing on the
AWS IoT service. Use the attributes of this class
as arguments to method DescribeThing.
You shouldn't make instances of this class. Each attribute should be used as a named argument in the call to DescribeThing.
As an example:
$service_obj->DescribeThing(Att1 => $value1, Att2 => $value2, ...);
Values for attributes that are native types (Int, String, Float, etc) can be passed as-is (scalar values). Values for complex Types (objects) can be passed as a HashRef. The keys and values of the hashref will be used to instance the underlying object.
=head1 ATTRIBUTES
=head2 B<REQUIRED> ThingName => Str
The name of the thing.
=head1 SEE ALSO
This class forms part of L<Paws>, documenting arguments for method DescribeThing in L<Paws::IoT>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/IoT/DescribeThing.pm | Perl | apache-2.0 | 1,754 |
# Auto-generated Paws request class: parameters for CodeStar DescribeProject.
package Paws::CodeStar::DescribeProject;
use Moose;
# The project id, serialized into the request body as "id".
has Id => (is => 'ro', isa => 'Str', traits => ['NameInRequest'], request_name => 'id' , required => 1);

use MooseX::ClassAttribute;

# Paws-internal call metadata: API name and the response class.
class_has _api_call => (isa => 'Str', is => 'ro', default => 'DescribeProject');
class_has _returns => (isa => 'Str', is => 'ro', default => 'Paws::CodeStar::DescribeProjectResult');
class_has _result_key => (isa => 'Str', is => 'ro');
1;
### main pod documentation begin ###
=head1 NAME
Paws::CodeStar::DescribeProject - Arguments for method DescribeProject on Paws::CodeStar
=head1 DESCRIPTION
This class represents the parameters used for calling the method DescribeProject on the
AWS CodeStar service. Use the attributes of this class
as arguments to method DescribeProject.
You shouldn't make instances of this class. Each attribute should be used as a named argument in the call to DescribeProject.
As an example:
$service_obj->DescribeProject(Att1 => $value1, Att2 => $value2, ...);
Values for attributes that are native types (Int, String, Float, etc) can be passed as-is (scalar values). Values for complex Types (objects) can be passed as a HashRef. The keys and values of the hashref will be used to instance the underlying object.
=head1 ATTRIBUTES
=head2 B<REQUIRED> Id => Str
The ID of the project.
=head1 SEE ALSO
This class forms part of L<Paws>, documenting arguments for method DescribeProject in L<Paws::CodeStar>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/CodeStar/DescribeProject.pm | Perl | apache-2.0 | 1,628 |
=head1 LICENSE
Copyright [1999-2014] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
package EnsEMBL::ORM::Component::DbFrontend::ConfirmDelete;

### NAME: EnsEMBL::ORM::Component::DbFrontend::ConfirmDelete
### Creates a page asking for a confirmation to delete the record

### STATUS: Under development

### Note: This module should not be modified!
### To customise, either extend this module in your component, or EnsEMBL::Web::Object::DbFrontend in your E::W::object

use strict;
use warnings;

use base qw(EnsEMBL::ORM::Component::DbFrontend);

sub content_tree {
  ## Generates a DOM tree for content HTML
  ## Override this one in the child class and do the DOM manipulation on the DOM tree if required
  ## Flags are set on required HTML elements for 'selection and manipulation' purposes in child classes (get_nodes_by_flag)
  my $self = shift;
  my $hub = $self->hub;
  my $object = $self->object;
  my $record = $object->rose_object;
  my $function = $hub->function || '';

  my $content = $self->dom->create_element('div', {'class' => $self->_JS_CLASS_RESPONSE_ELEMENT});

  # permit_delete distinguishes a hard removal ('delete') from a soft one
  # ('retire'); the capture in $1 selects the matching warning text below.
  if ($object->permit_delete =~ /^(delete|retire)$/) {
    # 'Yes' posts to the Delete action with the record's primary key; 'No'
    # returns to the referer (or the frontend's default action).
    $content->inner_HTML(
      sprintf('<div class="dbf-padded"><p>%s</p><p>Are you sure you want to continue?</p><div class="dbf-confirm-buttons"><a class="%s" href="%s">Yes</a><a class="%s" href="%s">No</a></div></div>',
      $1 && $1 eq 'delete'
      ? sprintf('This will permanently remove %s (%s) from the database.', $object->record_name->{'singular'}, $record->get_title)
      : sprintf('%s (%s) will still remain in the database but will no longer be accessible.', ucfirst $object->record_name->{'singular'}, $record->get_title),
      $self->_JS_CLASS_DELETE_BUTTON,
      $hub->url({'action' => 'Delete', 'function' => $function, 'id' => $record->get_primary_key_value}),
      $self->_JS_CLASS_CANCEL_BUTTON,
      $hub->referer->{'uri'} || $hub->url({'action' => $object->default_action, 'function' => $function})
    ));
  }
  else {
    # No permission: show a static refusal message instead of the buttons.
    $content->inner_HTML(sprintf('<div class="dbf-padded"><p>You do not have the permission to delete this %s</p></div>', $object->record_name->{'singular'}));
  }
  return $content;
}
1; | andrewyatz/public-plugins | orm/modules/EnsEMBL/ORM/Component/DbFrontend/ConfirmDelete.pm | Perl | apache-2.0 | 2,769 |
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Centreon plugin mode: check disk usage of an A10 AX device via SNMP.
package network::a10::ax::snmp::mode::disk;

use base qw(centreon::plugins::templates::counter);

use strict;
use warnings;

# Emit the 'used' perfdata entry (bytes). Warning/critical thresholds are
# configured as percentages and rescaled here to absolute bytes against the
# disk's total size (cast_int truncates to whole bytes).
sub custom_usage_perfdata {
    my ($self, %options) = @_;

    $self->{output}->perfdata_add(label => 'used', unit => 'B',
                                  nlabel => $self->{nlabel},
                                  value => $self->{result_values}->{used},
                                  warning => $self->{perfdata}->get_perfdata_for_output(label => 'warning-' . $self->{thlabel}, total => $self->{result_values}->{total}, cast_int => 1),
                                  critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical-' . $self->{thlabel}, total => $self->{result_values}->{total}, cast_int => 1),
                                  min => 0, max => $self->{result_values}->{total});
}

# Compare the *percentage* used against the warning/critical thresholds and
# return the resulting severity.
sub custom_usage_threshold {
    my ($self, %options) = @_;

    my $exit = $self->{perfdata}->threshold_check(value => $self->{result_values}->{prct_used}, threshold => [ { label => 'critical-' . $self->{thlabel}, exit_litteral => 'critical' }, { label => 'warning-' . $self->{thlabel}, exit_litteral => 'warning' } ]);
    return $exit;
}

# Build the human-readable status line with sizes scaled to a sensible unit.
sub custom_usage_output {
    my ($self, %options) = @_;

    my ($total_size_value, $total_size_unit) = $self->{perfdata}->change_bytes(value => $self->{result_values}->{total});
    my ($total_used_value, $total_used_unit) = $self->{perfdata}->change_bytes(value => $self->{result_values}->{used});
    my ($total_free_value, $total_free_unit) = $self->{perfdata}->change_bytes(value => $self->{result_values}->{free});
    my $msg = sprintf("Disk Total: %s Used: %s (%.2f%%) Free: %s (%.2f%%)",
                      $total_size_value . " " . $total_size_unit,
                      $total_used_value . " " . $total_used_unit, $self->{result_values}->{prct_used},
                      $total_free_value . " " . $total_free_unit, $self->{result_values}->{prct_free});
    return $msg;
}

# Derive used/free byte counts and percentages from the raw total/free values
# collected in manage_selection().
# NOTE(review): assumes total > 0; a device reporting a zero total would cause
# a division-by-zero here -- confirm whether that can happen in practice.
sub custom_usage_calc {
    my ($self, %options) = @_;

    $self->{result_values}->{total} = $options{new_datas}->{$self->{instance} . '_total'};
    $self->{result_values}->{free} = $options{new_datas}->{$self->{instance} . '_free'};
    $self->{result_values}->{used} = $self->{result_values}->{total} - $self->{result_values}->{free};
    $self->{result_values}->{prct_used} = $self->{result_values}->{used} * 100 / $self->{result_values}->{total};
    $self->{result_values}->{prct_free} = 100 - $self->{result_values}->{prct_used};
    return 0;
}

# Declare the single 'usage' counter, wired to the custom calc/output/perfdata
# and threshold callbacks above.
sub set_counters {
    my ($self, %options) = @_;

    $self->{maps_counters_type} = [
        { name => 'disk', type => 0 }
    ];

    $self->{maps_counters}->{disk} = [
        { label => 'usage', nlabel => 'disk.usage.bytes', set => {
                key_values => [ { name => 'free' }, { name => 'total' } ],
                closure_custom_calc => $self->can('custom_usage_calc'),
                closure_custom_output => $self->can('custom_usage_output'),
                closure_custom_perfdata => $self->can('custom_usage_perfdata'),
                closure_custom_threshold_check => $self->can('custom_usage_threshold'),
            }
        },
    ];
}

# Standard plugin constructor; this mode adds no extra CLI options of its own
# (--warning-usage/--critical-usage come from the counter template).
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $options{options}->add_options(arguments =>
                                {
                                });

    return $self;
}

# Fetch free/total disk space via SNMP. The device reports megabytes; values
# are converted to bytes for the counter machinery.
sub manage_selection {
    my ($self, %options) = @_;

    my $oid_axSysDiskFreeSpace = '.1.3.6.1.4.1.22610.2.4.1.4.2.0'; # in MB
    my $oid_axSysDiskTotalSpace = '.1.3.6.1.4.1.22610.2.4.1.4.1.0'; # in MB
    my $snmp_result = $options{snmp}->get_leef(oids => [$oid_axSysDiskFreeSpace, $oid_axSysDiskTotalSpace],
                                               nothing_quit => 1);

    $self->{disk} = { free => $snmp_result->{$oid_axSysDiskFreeSpace} * 1024 * 1024, total => $snmp_result->{$oid_axSysDiskTotalSpace} * 1024 * 1024 };
}

1;
__END__
=head1 MODE
Check disk usage.
=over 8
=item B<--warning-usage>
Threshold warning (in percent).
=item B<--critical-usage>
Threshold critical (in percent).
=back
=cut
| Tpo76/centreon-plugins | network/a10/ax/snmp/mode/disk.pm | Perl | apache-2.0 | 5,001 |
#!/usr/bin/env perl
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2020] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
####################################################################
# pairwise projections using an EPO alignment
# Used for https://doi.org/10.1101/2020.05.31.126169
####################################################################
use strict;
use warnings;
use Getopt::Long;
use Bio::EnsEMBL::Registry;
use Bio::AlignIO;
use Bio::EnsEMBL::Utils::Exception qw(throw);
# input and output
my $query_name;
my $query_bed;
my $target_name;
my $aln_name = 'mammals';
my $out;
# global variables
my $genomicalign_adaptor;
my $genomedb_adaptor;
my $methodlink_adaptor;
my $slice_adaptor;
######################## usage and options ###############################
my $USAGE=("Given input bed files for a query species, will project the query species' genomic coordinates to a chosen target genome.
Valid species names are Ensembl database species names (eg. homo_sapiens).
Takes into account only 1-to-1 pairwise alignments using the EPO alignment.
Output file is a bed file of projected positions, and a column corresponding to original query positions:
targetSpecies_projected_chromosome\treferenceSpecies_projected_start\treferenceSpecies_projected_end\tquerySpecies_position
where querSpecies_position is in format chromosome:start-end
Run with following options:
scripts/examples/dna_projectCoordinates.pl -query_name query_species -query_bed query.bed -target_name target_species -out out_file\n\n");
GetOptions(
'query_name=s' => \$query_name,
'query_bed=s' => \$query_bed,
'target_name=s' => \$target_name,
'aln_name=s' => \$aln_name,
'out=s' => \$out,
);
die $USAGE unless ($query_name and $query_bed and $target_name and $out);
########################### subroutines ##########################
# get the number of alignments in restricted genomic block
# arg0: query species name
# arg1: target species name
# arg2: query species slice
# returns the genomic align blocks
# Fetch all EPO GenomicAlignBlocks overlapping a query slice.
#   arg0: query species name
#   arg1: target species name
#   arg2: query species slice
# Returns an arrayref of GenomicAlignBlock objects.
#
# NOTE(review): $querydb and $targetdb are fetched but never used afterwards;
# the fetches may be serving as implicit species-name validation (they would
# fail on a bad name) -- confirm before removing.
sub blocks {
  my $query = $_[0];
  my $target = $_[1];
  my $slice = $_[2];

  my $querydb = $genomedb_adaptor->fetch_by_name_assembly($query);
  my $targetdb = $genomedb_adaptor->fetch_by_name_assembly($target);

  # Look up the EPO multiple alignment for the configured set (e.g. 'mammals')
  # and pull every alignment block that intersects the slice.
  my $alignment = $methodlink_adaptor->fetch_by_method_link_type_species_set_name("EPO", $aln_name);
  my $genomicalign_block = $genomicalign_adaptor->fetch_all_by_MethodLinkSpeciesSet_Slice( $alignment, $slice);

  return $genomicalign_block;
}
########################### main program ##########################
my $registry = 'Bio::EnsEMBL::Registry';
$registry->load_registry_from_db(
-host => 'ensembldb.ensembl.org',
-user => 'anonymous'
);
# get the GenomicAlignBlock adaptor for Compara database
$genomicalign_adaptor = $registry->get_adaptor(
"Multi", "Compara", "GenomicAlignBlock");
# get the GenomeDB adaptor of Compara database
$genomedb_adaptor = $registry->get_adaptor("Multi", "Compara", "GenomeDB");
# get the MethodLinkSpeciesSet adaptor of Compara
$methodlink_adaptor = $registry->get_adaptor("Multi", "Compara", "MethodLinkSpeciesSet");
print "Projecting query species: ", $query_name, "\tto target: ",
$target_name, "\n\tquery file: ", $query_bed, "\n";
open (my $out_fh, '>', $out) or die "Cannot make outfile: $out\n";
print "\tmaking output: $out\n\n";
# make header for output file, including target (projected) and query positions
print $out_fh "#", $target_name,"_projected_chr\t", $target_name, "_projected_start\t",
$target_name, "_projected_end\t", $query_name, "_position\n";
# open in bed file
open (my $in_fh, '<', $query_bed) or die "cannot open $query_bed\n";
# get slice adaptor using query species
$slice_adaptor = Bio::EnsEMBL::Registry->get_adaptor($query_name, "Core", "Slice");
while (<$in_fh>) {
chomp;
# split bed file
my @col = split(/\t/,$_);
# make query slice using genomic coordinates
my $query_slice = $slice_adaptor->fetch_by_region("toplevel", $col[0], $col[1], $col[2]);
throw("No Slice can be created with coordinates $col[0]:$col[1]-$col[2]") if (!$query_slice);
# pass to subroutine to get all possible alignment blocks
my $all_blocks = blocks($query_name,$target_name,$query_slice);
# check that there is only one alignment (block)
if ( scalar @$all_blocks == 1 ) {
# go through the block
foreach my $block (@$all_blocks) {
next if (!defined $block);
# restrict blok to position of slice
my $restricted = $block->restrict_between_reference_positions(
$query_slice->start(), $query_slice->end(), undef, 1);
next if (!defined $restricted);
# get all the alignments in the block
my $align_list = $restricted->genomic_align_array();
# go thorugh the alignments
foreach my $align (@$align_list) {
next if (!$align);
#get the alignment from target species
if ($align->genome_db()->name() eq $target_name) {
# print out target (projected) and query coordinates
print $out_fh $align->dnafrag->name(), "\t", $align->dnafrag_start(),
"\t", $align->dnafrag_end(), "\t",
$query_slice->seq_region_name(), ":",
$query_slice->start(), "-", $query_slice->end(), "\n";
}
}
}
}
}
close($out_fh);
close($in_fh);
| Ensembl/ensembl-compara | scripts/examples/dna_projectCoordinates.pl | Perl | apache-2.0 | 6,172 |
package OpenXPKI::Server::API2::Plugin::Token::list_active_aliases;
use OpenXPKI::Server::API2::EasyPlugin;
=head1 NAME
OpenXPKI::Server::API2::Plugin::Token::list_active_aliases
=cut
# Project modules
use OpenXPKI::Server::Context qw( CTX );
use OpenXPKI::Server::API2::Plugin::Token::Util;
=head1 COMMANDS
=head2 list_active_aliases
Returns an I<ArrayRef> of I<HashRefs> with all tokens from the given group,
which are/were valid within the given validity period:
[
{
alias => '...', # full alias name
identifier => '...', # certificate identifier
notbefore => '...', # certificate validity (UNIX epoch timestamp)
notafter => '...', # certificate validity (UNIX epoch timestamp)
status => '...', # verbose status of the token: ONLINE, OFFLINE or UNKNOWN
},
{
...
},
]
Dates are taken from the alias table and might differ from the certificates
validity!
The list is sorted by I<notbefore> date, starting with the newest date.
B<Parameters>
=over
=item * C<group> I<Str> - Token group. Default: none
=item * C<type> I<Str> - Token type, might be specified instead of token group
to query one of the predefined token types (for possible values see
L<OpenXPKI::Server::API2::Types/TokenType>). Default: none
=item * C<pki_realm> I<Str> - PK realm, specify this to query another realm.
Default: current session's realm.
=item * C<validity> I<HashRef> - two datetime objects, given as hash keys
I<notbefore> and I<notafter>. Hash values of C<undef> will be interpreted as
"now". Default: current time
=item * C<check_online> I<Bool> - Set to 1 to get the token online status
(L<is_token_usable|OpenXPKI::Server::API2::Plugin::Token::is_token_usable/is_token_usable> is
called for each alias). The status check is only possible from within the
current session's realm, for requests regarding another realm the status is
always C<UNKNOWN>. Default: 0
=back
=cut
# Implementation of the list_active_aliases API command (contract documented
# in the POD above): list the tokens of a group/type valid in a time window.
command "list_active_aliases" => {
    group        => { isa => 'AlphaPunct', },
    type         => { isa => 'TokenType', },
    pki_realm    => { isa => 'AlphaPunct', default => sub { CTX('session')->data->pki_realm }, },
    validity     => { isa => 'HashRef', default => sub { { notbefore => undef, notafter => undef } }, },
    check_online => { isa => 'Bool', default => 0 },
} => sub {
    my ($self, $params) = @_;

    my $group; $group = $params->group if $params->has_group;
    my $pki_realm = $params->pki_realm;

    # Without an explicit group, resolve it from the predefined token type via
    # the realm's crypto configuration.
    if (not $params->has_group) {
        OpenXPKI::Exception->throw( message => "Token type or group must be given" ) unless $params->has_type;
        $group = CTX('config')->get("realm.$pki_realm.crypto.type.".$params->type)
            or OpenXPKI::Exception->throw(
                message => "Could not find token group by type",
                params => { type => $params->type },
            );
    }

    # Convert the (possibly undef = "now") datetime pair into epoch seconds.
    my $validity = OpenXPKI::Server::API2::Plugin::Token::Util->validity_to_epoch($params->validity);

    # Newest-first list of aliases overlapping the validity window.
    my $aliases = CTX('dbi')->select(
        from => 'aliases',
        columns => [
            'aliases.notbefore',
            'aliases.notafter',
            'aliases.alias',
            'aliases.identifier',
        ],
        where => {
            'aliases.pki_realm' => $pki_realm,
            'aliases.group_id'  => $group,
            'aliases.notbefore' => { '<' => $validity->{notbefore} },
            'aliases.notafter'  => { '>' => $validity->{notafter} },
        },
        order_by => [ '-aliases.notbefore' ],
    );

    my @result;
    while (my $row = $aliases->fetchrow_hashref) {
        my $item = {
            alias      => $row->{alias},
            identifier => $row->{identifier},
            notbefore  => $row->{notbefore},
            notafter   => $row->{notafter},
        };

        # security check: only do online/offline check if we check the session PKI realm
        if ($params->check_online) {
            if ($params->pki_realm eq CTX('session')->data->pki_realm) {
                $item->{status} = $self->api->is_token_usable(alias => $row->{alias})
                    ? 'ONLINE'
                    : 'OFFLINE';
            }
            else {
                # Cross-realm online checks are forbidden; report UNKNOWN.
                $item->{status} = 'UNKNOWN';
                CTX('log')->application->warn("API command 'list_active_aliases' was called to query another realm's tokens with 'check_online = 1'. This is forbidden, 'status' will be set to UNKNOWN.");
            }
        }

        push @result, $item;
    }

    return \@result;
};
__PACKAGE__->meta->make_immutable;
| openxpki/openxpki | core/server/OpenXPKI/Server/API2/Plugin/Token/list_active_aliases.pm | Perl | apache-2.0 | 4,611 |
% Machine-generated constrained Horn clauses (CLP over linear integer
% arithmetic), produced by partial evaluation of a MAP verification benchmark.
% Each newN(A,B,C,D,E) clause guards a transition to another newM predicate
% with a conjunction of linear constraints; do not edit by hand.
new13(A,B,C,D,E) :-
    -3*A+1*B>=1,
    1*A>=4,
    1*D=0,
    1*A+ -1*F= -1,
    1*B+ -1*G= -3,
    new3(F,G,C,D,E).
new13(A,B,C,D,E) :-
    -1*A>= -3,
    -3*A+1*B>=1,
    1*A>=0,
    1*D=0,
    new3(A,B,C,D,E).
new12(A,B,C,D,E) :-
    -1*C>=1,
    -3*A+1*B>=1,
    1*A>=0,
    1*D=0,
    new13(A,B,C,D,E).
new12(A,B,C,D,E) :-
    1*C>=1,
    -3*A+1*B>=1,
    1*A>=0,
    1*D=0,
    new13(A,B,C,D,E).
new12(A,B,C,D,E) :-
    -3*A+1*B>=1,
    1*A>=0,
    1*C=0,
    1*D=0,
    new3(A,B,C,D,E).
new11(A,B,C,D,E) :-
    -3*A+1*B>=1,
    1*A>=0,
    1*D=0,
    new12(A,B,F,D,E).
new8(A,B,C,D,E) :-
    -3*A+1*B>=2,
    -1*D>=1,
    1*A>=0,
    1*A+ -1*F= -1,
    1*B+ -1*G= -2,
    new3(F,G,C,D,E).
new8(A,B,C,D,E) :-
    -3*A+1*B>=2,
    1*D>=1,
    1*A>=0,
    1*A+ -1*F= -1,
    1*B+ -1*G= -2,
    new3(F,G,C,D,E).
new8(A,B,C,D,E) :-
    -3*A+1*B>=1,
    1*A>=0,
    1*D=0,
    new11(A,B,C,D,E).
new7(A,B,C,D,E) :-
    -3*A+1*B>=1,
    1*A>=0,
    1*E=0.
new5(A,B,C,D,E) :-
    -3*A+1*B>=1,
    1*A>=0,
    new8(A,B,C,F,E).
new4(A,B,C,D,E) :-
    -1*E>=1,
    -3*A+1*B>=1,
    1*A>=0,
    new5(A,B,C,D,E).
new4(A,B,C,D,E) :-
    1*E>=1,
    -3*A+1*B>=1,
    1*A>=0,
    new5(A,B,C,D,E).
new4(A,B,C,D,E) :-
    -3*A+1*B>=1,
    1*A>=0,
    1*E=0,
    new7(A,B,C,D,E).
new3(A,B,C,D,E) :-
    -3*A+1*B>=1,
    1*A>=0,
    new4(A,B,C,D,F).
| bishoksan/RAHFT | benchmarks_scp/misc/programs-clp-specialised/MAP-interpolants_needed-pepm-proc.c.map.c.map.pl.pe.pl | Perl | apache-2.0 | 1,272 |
# Direct client namespace for snapshot/restore operations (Elasticsearch 1.x).
package Search::Elasticsearch::Client::1_0::Direct::Snapshot;

use Moo;
# Mix in the 1.x API definitions and the direct-client plumbing;
# _install_api() generates every method (create_repository, create, restore,
# status, ...) from the 'snapshot' section of the API spec -- see POD below.
with 'Search::Elasticsearch::Role::API::1_0';
with 'Search::Elasticsearch::Role::Client::Direct';

__PACKAGE__->_install_api('snapshot');

1;
__END__
# ABSTRACT: A client for managing snapshot/restore
=head1 DESCRIPTION
This module provides methods to manage snapshot/restore, or backups.
It can create, get and delete configured backup repositories, and
create, get, delete and restore snapshots of your cluster or indices.
It does L<Search::Elasticsearch::Role::Client::Direct>.
=head1 METHODS
=head2 C<create_repository()>
$e->snapshot->create_repository(
repository => 'repository', # required
body => { defn } # required
);
Create a repository for backups.
Query string parameters:
C<master_timeout>,
C<timeout>,
C<verify>
See the L<"snapshot/restore docs"|http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshot.html>
for more information.
=head2 C<get_repository()>
$e->snapshot->get_repository(
repository => 'repository' | \@repositories # optional
);
Retrieve existing repositories.
Query string parameters:
C<local>,
C<master_timeout>
See the L<"snapshot/restore docs"|http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshot.html>
for more information.
=head2 C<verify_repository()>
$e->snapshot->verify_repository(
repository => 'repository' # required
);
Verify existing repository.
Query string parameters:
C<master_timeout>,
C<timeout>
See the L<"snapshot/restore docs"|http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshot.html>
for more information.
=head2 C<delete_repository()>
$e->snapshot->delete_repository(
repository => 'repository' | \@repositories # required
);
Delete repositories by name.
Query string parameters:
C<master_timeout>,
C<timeout>
See the L<"snapshot/restore docs"|http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshot.html>
for more information.
=head2 C<create()>
$e->snapshot->create(
repository => 'repository', # required
snapshot => 'snapshot', # required,
body => { snapshot defn } # optional
);
Create a snapshot of the whole cluster or individual indices in the named
repository.
Query string parameters:
C<master_timeout>,
C<wait_for_completion>
=head2 C<get()>
$e->snapshot->get(
repository => 'repository' # required
snapshot => 'snapshot' | \@snapshots # required
);
Retrieve snapshots in the named repository.
Query string parameters:
C<master_timeout>
See the L<"snapshot/restore docs"|http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshot.html>
for more information.
=head2 C<delete()>
$e->snapshot->delete(
repository => 'repository', # required
snapshot => 'snapshot' # required
);
Delete snapshot in the named repository.
Query string parameters:
C<master_timeout>
See the L<"snapshot/restore docs"|http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshot.html>
for more information.
=head2 C<restore()>
$e->snapshot->restore(
repository => 'repository', # required
snapshot => 'snapshot' # required
body => { what to restore } # optional
);
Restore a named snapshot.
Query string parameters:
C<master_timeout>,
C<wait_for_completion>
See the L<"snapshot/restore docs"|http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshot.html>
for more information.
=head2 C<status()>
$result = $e->snapshot->status(
repository => 'repository', # optional
snapshot => 'snapshot' | \@snapshots # optional
);
Returns status information about the specified snapshots.
Query string parameters:
C<master_timeout>
See the L<"snapshot/restore docs"|http://www.elastic.co/guide/en/elasticsearch/reference/current/modules-snapshot.html>
for more information.
| jeteve/elasticsearch-perl | lib/Search/Elasticsearch/Client/1_0/Direct/Snapshot.pm | Perl | apache-2.0 | 4,251 |
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2022] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# holds the object that does the second round of filtering
package Bio::EnsEMBL::Pipeline::Tools::Pmatch::Second_PMF;
use warnings ;
use strict ;
use Bio::EnsEMBL::Utils::Argument qw (rearrange);
# Constructor.
#   -PHITS : arrayref of MergedHits from the first pmatch filtering round
#            (required)
#
# NOTE(review): $self->throw is invoked on a plain blessed hash with no
# visible parent class providing throw(); a missing PHITS argument would
# therefore die with "Can't locate object method" rather than the intended
# message -- confirm the inheritance chain.
sub new {
  my ($class, @args) = @_;
  my $self = bless {}, $class;

  my ($phits) = rearrange(['PHITS'], @args);

  $self->throw("No pmatch hits data") unless defined $phits;
  $self->phits($phits);

  # hash of arrays of MergedHits, indexed by protein name
  my %proteins = ();
  $self->{_proteins} = \%proteins;

  return $self;
}
sub phits {
    my ($self, $phits) = @_;

    # Combined setter/getter for the pmatch hit list.  Only an array
    # reference is accepted; anything else raises an exception.
    if (defined $phits) {
        $self->throw("[$phits] is not an array ref.")
            unless ref($phits) eq "ARRAY";
        $self->{_phits} = $phits;
    }

    return $self->{_phits};
}
sub run {
    my ($self) = @_;

    # Bucket every hit under its protein id, then prune each bucket
    # down to the best-covering hits.
    my %by_protein = %{ $self->{_proteins} };
    for my $hit (@{ $self->phits }) {
        push @{ $by_protein{ $hit->protein_id } }, $hit;
    }
    $self->{_proteins} = \%by_protein;

    # prune and store the hits
    $self->prune_hits;
}
sub prune_hits {
    my ($self) = @_;

    # For each protein keep only its best hits by coverage:
    #   - skip the protein entirely when its best hit covers < 25%
    #   - otherwise keep the best hit plus every further hit whose coverage
    #     is within 2 percentage points of the best AND above the 25% floor
    #     (hits are scanned in descending coverage order, so the first
    #     failure terminates the scan).
    # Chosen hits are appended to $self->{_output}.
    my %prots = %{ $self->{_proteins} };    # just makes it a bit easier to follow

  PROTEIN:
    foreach my $p (keys %prots) {
        # sort by descending order of coverage
        my @allhits = sort { $b->coverage <=> $a->coverage } @{ $prots{$p} };
        my $first   = shift @allhits;

        # lower bound threshold - reject anything with < 25% coverage
        my $lower_threshold = 25;
        next PROTEIN if $first->coverage < $lower_threshold;

        # don't select any hits that have coverage less than 2% below that
        # of the first hit, be it 100 or 99.9 or ...
        my $curr_pc = $first->coverage() - 2;

        # The guard above already ensured $first clears the floor, so it is
        # always kept (a redundant trailing "unless" check was removed).
        my @chosen = ($first);

      PRUNE:
        foreach my $hit (@allhits) {
            last PRUNE if $hit->coverage() < $curr_pc;
            last PRUNE if $hit->coverage() < $lower_threshold;
            push @chosen, $hit;
        }

        push @{ $self->{_output} }, @chosen;
    }
}
sub output {
    my ($self) = @_;

    # Return the pruned hits as a list.  Lazily initialise the store so
    # that calling output() before run() yields an empty list, not a crash.
    $self->{_output} = [] unless defined $self->{_output};
    return @{ $self->{_output} };
}
1;
| Ensembl/ensembl-analysis | modules/Bio/EnsEMBL/Analysis/Tools/Pmatch/Second_PMF.pm | Perl | apache-2.0 | 3,282 |
package VMOMI::VirtualUSBRemoteClientBackingOption;
use parent 'VMOMI::VirtualDeviceRemoteDeviceBackingOption';

use strict;
use warnings;

# Ancestor class names, nearest superclass first.
our @class_ancestors = qw(
    VirtualDeviceRemoteDeviceBackingOption
    VirtualDeviceBackingOption
    DynamicData
);

# This type declares no members of its own.
our @class_members = ();

# Return the ancestor chain for serialization walking.
sub get_class_ancestors {
    return @class_ancestors;
}

# Return inherited members followed by this class's (empty) member list.
sub get_class_members {
    my $class = shift;
    return ($class->SUPER::get_class_members(), @class_members);
}

1;
| stumpr/p5-vmomi | lib/VMOMI/VirtualUSBRemoteClientBackingOption.pm | Perl | apache-2.0 | 512 |
package VMOMI::VirtualMachineMemoryReservationSpec;
use parent 'VMOMI::DynamicData';

use strict;
use warnings;

# Ancestor class names, nearest superclass first.
our @class_ancestors = qw(
    DynamicData
);

# Member descriptors: [name, type, is_array, is_optional].
our @class_members = (
    ['virtualMachineReserved', undef, 0, 1],
    ['allocationPolicy', undef, 0, 1],
);

# Return the ancestor chain for serialization walking.
sub get_class_ancestors {
    return @class_ancestors;
}

# Return inherited members followed by this class's own descriptors.
sub get_class_members {
    my $class = shift;
    return ($class->SUPER::get_class_members(), @class_members);
}

1;
| stumpr/p5-vmomi | lib/VMOMI/VirtualMachineMemoryReservationSpec.pm | Perl | apache-2.0 | 490 |
package VMOMI::HostAdminEnableEvent;
use parent 'VMOMI::HostEvent';

use strict;
use warnings;

# Ancestor class names, nearest superclass first.
our @class_ancestors = qw(
    HostEvent
    Event
    DynamicData
);

# This event type adds no fields beyond its ancestors.
our @class_members = ();

# Return the ancestor chain for serialization walking.
sub get_class_ancestors {
    return @class_ancestors;
}

# Return inherited members followed by this class's (empty) member list.
sub get_class_members {
    my $class = shift;
    return ($class->SUPER::get_class_members(), @class_members);
}

1;
| stumpr/p5-vmomi | lib/VMOMI/HostAdminEnableEvent.pm | Perl | apache-2.0 | 418 |
package MGRAST::WebPage::MetagenomeSearch;
use base qw( WebPage );
use strict;
use warnings;
use URI::Escape;
use Data::Dumper;
use HTML::Strip;
use Conf;
use MGRAST::Analysis;
use MGRAST::Metadata;
1;
=pod
=head1 NAME
MetagenomeSelect - an instance of WebPage which lets the user select a metagenome
=head1 DESCRIPTION
Display a metagenome select box
=head1 METHODS
=over 4
=item * B<init> ()
Called when the web page is instanciated.
=cut
sub init {
# Page setup: title, icon, ajax/table/hover components, and the static
# lookup tables that drive both the simple and the advanced search forms.
my ($self) = @_;
$self->title('Metagenome Search');
$self->{icon} = "<img src='./Html/lupe.png' style='width: 20px; height: 20px; padding-right: 5px; position: relative; top: -3px;'>";
$self->application->register_component('Table', "sResult");
$self->application->register_component('Ajax', 'sAjax');
$self->application->register_component('Hover', 'help');
my @mgs = $self->app->cgi->param('metagenomes');
my $jmap = {};
my $pmap = {};
my $jobs;
# NOTE(review): job lookup below is disabled, so $jobs stays undef here;
# downstream code iterates over it — confirm intent before re-enabling.
# my $jobs = $self->get_user_jobs(\@mgs);
# foreach (keys %$jobs) {
# last;
# $jmap->{ $jobs->{$_}{_id} } = $_;
# if (exists $jobs->{$_}{project_id}) {
# push @{ $pmap->{ $jobs->{$_}{project_id} } }, $_;
# }
# }
my $mddb = MGRAST::Metadata->new();
my $mgdb = MGRAST::Analysis->new( $self->app->data_handle('MGRAST')->db_handle );
# Search categories: [internal key, display name, sub-select table key].
my $type = [ ["function", "Function", ""],
["organism", "Organism", "taxon"],
["Subsystems", "SEED Subsystem", "hier2"],
["KO", "KEGG Orthology", "hier2"],
["COG", "COG", "hier1"],
["NOG", "NOG", "hier1"],
["metadata", "Metadata", "meta"]
];
# Metadata search fields: [key, display name, widget kind (text/select/range)].
my $meta = [ ["all", "All", "text"],
["project", "Project Name", "text"],
["name", "Metagenome Name", "text"],
["metagenome_id", "Metagenome ID", "text"],
["pi", "Principal Investigator", "text"],
["biome", "Biome", "select"],
["feature", "Feature", "select"],
["material", "Material", "select"],
["env_package", "Enviromental Package", "select"],
["country", "Country", "select"],
["latitude", "Latitude", "range"],
["longitude", "Longitude", "range"],
["location", "Location", "text"],
["altitude", "Altitude", "range"],
["depth", "Depth", "range"],
["temperature", "Temperature", "range"],
["ph", "pH", "range"],
["sequencing method", "Sequencing method", "select"],
["bp_count", "bp Count", "range"],
["sequence_count", "Sequence Count", "range"],
["alpha_diversity", "Alpha Diversity", "range"],
["drisee", "DRISEE Score", "range"]
];
# Taxonomy levels selectable when searching by organism.
my $taxon = [ ['name', 'Organism Name', "text"],
['tax_family', 'Family', "text"],
['tax_order', 'Order', "text"],
['tax_class', 'Class', "text"],
['tax_phylum', 'Phylum', "select"],
['tax_domain', 'Domain', "select"],
['ncbi_tax_id', 'NCBI Taxonomy ID', "text"]
];
# Match operators per widget kind, encoded as "<exact>_<positive>" pairs.
my $match = { text => [ ["0_1", "contains"], ["0_0", "does not contain"], ["1_1", "is equal to"], ["1_0", "is not equal to"] ],
select => [ ["1_1", "is equal to"], ["1_0", "is not equal to"] ],
range => [ ["2_1", "is within range"], ["2_0", "is outside range"] ]
};
# Ontology hierarchy depths: hier1 for COG/NOG, hier2 for Subsystems/KO.
my $hier1 = [ ["level3", "Name or ID", "text"], ["level2", "Level 2", "select"], ["level1", "Level 1", "select"] ];
my $hier2 = [ ["level4", "Name or ID", "text"], ["level3", "Level 3", "text"], ["level2", "Level 2", "select"], ["level1", "Level 1", "select"] ];
# Stash everything for use by output() and the ajax table builders.
$self->data('jobs', $jobs);
$self->data('jmap', $jmap);
$self->data('pmap', $pmap);
$self->data('mddb', $mddb);
$self->data('mgdb', $mgdb);
$self->data('type', $type);
$self->data('meta', $meta);
$self->data('taxon', $taxon);
$self->data('match', $match);
$self->data('hier1', $hier1);
$self->data('hier2', $hier2);
# Hard cap on search hits before the user is asked to refine the query.
$self->data('max_results', 500000);
return 1;
}
=pod
=item * B<output> ()
Returns the html output of the MetagenomeSelect page.
=cut
sub output {
# Build the full search page: mode handling (simple vs advanced), the
# embedded jQuery controller script, and the static HTML for the search
# boxes plus the empty 'dResult' container filled later via ajax.
my ($self) = @_;
# NOTE(review): $msg_count appears to be unused in this sub.
my $msg_count = 1;
my $cgi = $self->app->cgi;
my $qnum = $cgi->param('qnum') || 0;
my $mode = $cgi->param('smode') || 1;
my $torun = $cgi->param('run_now') || 0;
my $mddb = $self->data('mddb');
my $mgdb = $self->data('mgdb');
my $match = $self->data('match');
my $modes = { 1 => "dSimple", 2 => "dAdvanced" };
# NOTE(review): the select-box option lists below are disabled (empty
# $extras), so advanced-search dropdowns fall back to text inputs.
my $extras = {};
# my $extras = { "metadata" => { "biome" => $self->get_unique_job_info('biome'),
# "feature" => $self->get_unique_job_info('feature'),
# "material" => $self->get_unique_job_info('material'),
# "env_package" => $mddb->get_cv_list('env_package'),
# "country" => $self->get_unique_job_info('country'),
# "sequencing method" => $mddb->get_cv_list('seq_meth') },
# "organism" => { "tax_phylum" => $mgdb->ach->get_taxonomy4level("tax_phylum"),
# "tax_domain" => $mgdb->ach->get_taxonomy4level("tax_domain") },
# "Subsystems" => { "level1" => $mgdb->ach->get_level4ontology("Subsystems","level1"),
# "level2" => $mgdb->ach->get_level4ontology("Subsystems","level2") },
# "KO" => { "level1" => $mgdb->ach->get_level4ontology("KO","level1"),
# "level2" => $mgdb->ach->get_level4ontology("KO","level2") },
# "COG" => { "level1" => $mgdb->ach->get_level4ontology("COG","level1"),
# "level2" => $mgdb->ach->get_level4ontology("COG","level2") },
# "NOG" => { "level1" => $mgdb->ach->get_level4ontology("NOG","level1"),
# "level2" => $mgdb->ach->get_level4ontology("NOG","level2") }
# };
# Pre-rendered <select> widgets and match-operator dropdowns that the
# client-side script swaps into the advanced query rows.
my $taxon_sel = $self->build_select("sel_extra", $self->data('taxon'));
my $hier1_sel = $self->build_select("sel_extra", $self->data('hier1'));
my $hier2_sel = $self->build_select("sel_extra", $self->data('hier2'));
my $metad_sel = $self->build_select("sel_extra", $self->data('meta'));
my $txt_match = $self->build_select("sel_match", $match->{text});
my $sel_match = $self->build_select("sel_match", $match->{select});
my $rng_match = $self->build_select("sel_match", $match->{range});
my $text_sel = "<input name='txt_adv' type='text' value=''/>";
my $range_sel = "<input name='rng1_adv' type='text' value='' size='7'/> to <input name='rng2_adv' type='text' value='' size='7'/>";
my $adv_deflt = $self->get_advanced_search();
my (@adv_list, $adv_srch);
my $to_hide = ($mode == 2) ? $modes->{1} : $modes->{2};
# Rebuild any advanced query rows carried over in the CGI parameters.
# NOTE(review): the 'last;' as first loop statement makes the loop body
# below it unreachable — confirm whether this is intentionally disabled.
if ($qnum && ($qnum > 0) && ($mode == 2)) {
foreach my $i (1..$qnum) {
last;
my $qtype = $cgi->param("type_q$i");
my $qmatch = $cgi->param("match_q$i");
my $qinput = uri_unescape( $cgi->param("input_q$i") );
my $qextra = $cgi->param("extra_q$i") || "";
push @adv_list, "query$i";
$adv_srch .= "<tr id='query$i'>" . $self->get_advanced_search($qtype, $qmatch, $qinput, $qextra, $extras) . "</tr>";
}
} else {
$qnum = 1;
push @adv_list, "query1";
$adv_srch = qq(<tr id='query1'>$adv_deflt</tr>);
}
# JSON-ish map of pre-rendered select widgets keyed "<type>_<field>"
# (empty while $extras is disabled above).
my @ext_set = ();
foreach my $t (keys %$extras) {
last;
foreach my $e (keys %{$extras->{$t}}) {
push @ext_set, qq("${t}_$e" : ") . $self->build_select("sel_adv", $extras->{$t}{$e}) . qq(");
}
}
my $ext_json = "{ " . join(",", @ext_set) . " }";
my $qlist = "[ " . join(",", map {"'$_'"} @adv_list) . " ]";
my $stext = $cgi->param('text') || "";
my $stype = $cgi->param('type') || "metadata,function,organism";
if ($cgi->param('init_search')) {
$stext = $cgi->param('init_search');
$stype = "metadata,function,organism";
$torun = 1;
}
# Pre-check/uncheck the simple-search type checkboxes when a subset of
# the three search types was requested.
my $set_types = "";
my %types = map { $_, 1 } split(/,/, $stype);
if ((scalar(keys %types) < 3) && ($mode == 1)) {
$set_types .= exists($types{metadata}) ? '$("#metaSimple").attr("checked", true); ' : '$("#metaSimple").attr("checked", false); ';
$set_types .= exists($types{function}) ? '$("#funcSimple").attr("checked", true); ' : '$("#funcSimple").attr("checked", false); ';
$set_types .= exists($types{organism}) ? '$("#orgSimple").attr("checked", true); ' : '$("#orgSimple").attr("checked", false); ';
}
# JS snippet that auto-triggers the search on page load if requested.
my $search_now = "";
if ($torun) {
if ($mode == 1) {
$search_now = "document.getElementById(\"bSimpleAll\").click();";
# $search_now = "simple_search('$stext', [" . join(',', map {"'$_'"} keys %types) . "]);";
}
elsif ($mode == 2) {
$search_now = '$("#dSimple").hide(); $("#dAdvanced").show(); adv_search();';
}
} elsif ($mode == 2) {
$search_now = "switch_mode(2);";
}
# Embedded jQuery controller: wires the simple/advanced search widgets,
# manages advanced query rows, and dispatches ajax searches.  All Perl
# sigils inside are escaped (\$); interpolated variables inject the
# pre-rendered widgets built above.
my $scripts = qq~
<script type="text/javascript" src="http://maps.google.com/maps/api/js?sensor=false"></script>
<script type="text/javascript" src="Html/js/config.js"></script>
<script type="text/javascript">
\$(document).ready( function() {
var qList = $qlist;
var qNum = $qnum;
var eMap = $ext_json;
switch_sub();
\$("#$to_hide").hide();
\$("#SwitchToAdv").live('click', function() { switch_mode(2); });
\$("#SwitchSearchType").live('click', function() {
if (\$("#dAdvanced").is(':visible')) {
switch_mode(1);
} else {
switch_mode(2);
}
});
\$("#tQuery select[name='sel_type']").live('change', function() {
var match_td = \$(this).parent().next();
var input_td = match_td.next();
var msg_td = input_td.next()
var extra_td = msg_td.next();
var subsel = "";
var msg_txt = 'within';
if (\$(this).val() == 'organism') {
subsel = "$taxon_sel";
} else if (\$(this).val() == 'function') {
msg_txt = "";
} else if ((\$(this).val() == 'Subsystems') || (\$(this).val() == 'KO')) {
subsel = "$hier2_sel";
} else if ((\$(this).val() == 'COG') || (\$(this).val() == 'NOG')) {
subsel = "$hier1_sel";
} else if (\$(this).val() == 'metadata') {
subsel = "$metad_sel";
}
match_td.html("$txt_match");
input_td.html("$text_sel");
msg_td.html(msg_txt);
extra_td.html(subsel);
});
\$("#tQuery select[name='sel_extra']").live('change', function() {
var input_td = \$(this).parent().prev().prev();
var match_td = input_td.prev();
var type_val = match_td.prev().find("select[name='sel_type']").val();
var opt_title = \$(this).children(":selected").attr('title');
if (opt_title == 'select') {
var eKey = type_val + '_' + \$(this).val();
if ( eMap.hasOwnProperty(eKey) ) {
input_td.html( eMap[eKey] );
match_td.html("$sel_match");
} else {
input_td.html("$text_sel");
match_td.html("$txt_match");
}
match_td.html("$sel_match");
} else if (opt_title == 'range') {
input_td.html("$range_sel");
match_td.html("$rng_match");
} else {
input_td.html("$text_sel");
match_td.html("$txt_match");
}
});
\$("#tQuery button[name='but_add']").live('click', function() {
qNum += 1;
var row = \$(this).parent().parent();
var idx = qList.indexOf(row.attr('id'));
var nid = "query" + qNum;
row.after("<tr id='" + nid + "'>$adv_deflt</tr>");
qList.splice(idx+1, 0, nid);
switch_sub();
});
\$("#tQuery button[name='but_sub']").live('click', function() {
if (qNum == 1) { return 0; }
qNum -= 1;
var row = \$(this).parent().parent();
var idx = qList.indexOf(row.attr('id'));
row.remove();
qList.splice(idx, 1);
switch_sub();
});
function switch_sub() {
if (qList.length == 1) {
\$("#"+qList[0]+" button[name='but_sub']").attr('disabled', true);
} else if (qList.length > 1) {
\$("#"+qList[0]+" button[name='but_sub']").attr('disabled', false);
}
}
\$("#bSimpleAll").click( function() {
var sText = clean_text( \$("#tSimpleAll").val() );
var sTypes = [];
if ( \$("#metaSimple").attr('checked') ) { sTypes.push('metadata'); }
if ( \$("#funcSimple").attr('checked') ) { sTypes.push('function'); }
if ( \$("#orgSimple").attr('checked') ) { sTypes.push('organism'); }
var result = document.getElementById("dResult");
result.innerHTML = "<img src='./Html/loading-green.gif' />";
queryAPI({type: sTypes, query: sText, result: "dResult"});
});
\$("#bSimpleMeta").click( function() {
var sText = clean_text( \$("#tSimpleMeta").val() );
switch_mode(1);
\$("#tSimpleMeta").val('')
\$("#tSimpleAll").val(sText);
\$("#metaSimple").attr('checked', true);
\$("#funcSimple").attr('checked', false);
\$("#orgSimple").attr('checked', false);
var result = document.getElementById("dResult");
result.innerHTML = "<img src='./Html/loading-green.gif' />";
queryAPI({type: ['metadata'], query: sText, result: "dResult"});
});
\$("#bSimpleFunc").click( function() {
var sText = clean_text( \$("#tSimpleFunc").val() );
switch_mode(1);
\$("#tSimpleFunc").val('')
\$("#tSimpleAll").val(sText);
\$("#metaSimple").attr('checked', false);
\$("#funcSimple").attr('checked', true);
\$("#orgSimple").attr('checked', false);
var result = document.getElementById("dResult");
result.innerHTML = "<img src='./Html/loading-green.gif' />";
queryAPI({type: ['function'], query: sText, result: "dResult"});
});
\$("#bSimpleOrg").click( function() {
var sText = clean_text( \$("#tSimpleOrg").val() );
switch_mode(1);
\$("#tSimpleOrg").val('')
\$("#tSimpleAll").val(sText);
\$("#metaSimple").attr('checked', false);
\$("#funcSimple").attr('checked', false);
\$("#orgSimple").attr('checked', true);
var result = document.getElementById("dResult");
result.innerHTML = "<img src='./Html/loading-green.gif' />";
queryAPI({type: ['organism'], query: sText, result: "dResult"});
});
\$("#bAdvanced").click( function() {
adv_search();
});
function adv_search() {
var param = ['smode=2', 'qnum=' + qNum];
var rnum = 0;
var msg = "";
\$("#tQuery tr").each( function() {
rnum += 1;
var aText = '';
var inputs = \$(this).find("td[name='input_adv']").children("*[name\$='_adv']");
if ( inputs.length == 2 ) {
var ranges = [];
inputs.each(function(){ ranges.push( clean_text(\$(this).val()) ); });
if ( isNaN(ranges[0]) || isNaN(ranges[1]) ) {
msg = "<p><b style='color:red'>Range values ('" + ranges.join("' and '") + "') must be numeric only.</b></p>";
} else {
if (parseFloat(ranges[0]) > parseFloat(ranges[1])) { ranges = ranges.reverse(); }
aText = ranges.join("_");
}
} else {
aText = clean_text( inputs.val() );
msg = test_text( aText );
}
\$("#dResult").html(msg);
var extra = \$(this).find("td[name='extra']");
if ( extra.find("select").val() ) {
param.push( 'extra_q' + rnum + '=' + extra.find("select").val() );
}
param.push( 'input_q' + rnum + '=' + encodeURIComponent(aText) );
param.push( 'match_q' + rnum + '=' + \$(this).find("select[name='sel_match']").val() );
param.push( 'type_q' + rnum + '=' + \$(this).find("select[name='sel_type']").val() );
});
if ( ! \$("#showGroup").attr('checked') ) { param.push('show_match=1'); }
if (msg == "") { execute_ajax( 'get_advanced_table', 'dResult', param.join("&") ); }
}
function simple_search( sText, sTypes ) {
var cText = clean_text(sText);
var msg = test_text(cText);
var items = cText.split(",");
var types = sTypes.join(',');
var param = [ 'smode=1', 'text=' + encodeURIComponent(cText), 'type=' + types ];
if ((msg == "") && (items.length > 1)) {
msg = "<p><span style='color:red'>Only one search term is allowed.</span><br>" +
"To use multiple search terms please use <a id='SwitchToAdv' style='cursor:pointer;'>advanced search</a> mode. " +
"Please select which of the following searches you wish to run:<blockquote>";
for (var i=0; i<items.length; i++) {
var cItem = clean_text(items[i]);
msg += "<a href='metagenomics.cgi?page=MetagenomeSearch&run_now=1&smode=1&type=" + types + "&text=" + cItem + "'>" + cItem + "</a>, ";
}
msg += "</blockquote></p>";
}
\$("#dResult").html(msg);
if ((msg == "") && (items.length == 1)) {
if (cText.match(/^mgm\\d+\\.\\d+\$/)) {
window.location = "?page=MetagenomeOverview&metagenome="+cText.substring(3);
} else if (cText.match(/^mgp\\d+\$/)) {
window.location = "?page=MetagenomeProject&project="+cText.substring(3);
} else {
execute_ajax( 'get_simple_table', 'dResult', param.join("&") );
}
}
}
function switch_mode( aMode ) {
if (aMode == 1) {
\$("#dAdvanced").hide();
\$("#dSimple").show();
\$("#SwitchSearchType").html("Advanced Search");
\$("#tSimpleAll").val('');
} else if (aMode == 2) {
\$("#dSimple").hide();
\$("#dAdvanced").show();
\$("#SwitchSearchType").html("Simple Search");
\$("#tQuery").html("<tr id='query1'>$adv_deflt</tr>");
qList = ['query1'];
qNum = 1;
\$("#query1 button[name='but_sub']").attr('disabled', true);
}
}
function test_text( aText ) {
if (aText == '') {
return "<p><b style='color:red'>Empty text field detected. Please make sure all text fields are filled out.</b></p>";
} else if (aText.length < 3) {
return "<p><b style='color:red'>Search string '"+aText+"' is to short. Please enter a longer query.</b></p>";
} else {
return "";
}
}
function clean_text( aText ) {
return \$.trim( aText.replace(/\\s+/g, ' ') );
}
$set_types
$search_now
});
</script>
~ . $self->application->component('sAjax')->output;
# Static page markup: simple search box, advanced query builder, the
# three quick-search boxes and the empty result containers.
my $html = '';
my $help = $self->app->component('help');
$help->add_tooltip( 'match_help', 'Uncheck boxes to restrict the search to only values you are interested in.<br>This can be useful if you are searching on common terms.' );
$help->add_tooltip( 'groupby_help', 'Unchecking this option will cause the search to display a row for every match to the metagenomes instead of aggregating them together.<br>Often this will result in thousands of matches in the case of protein and organism searches.' );
$html .= $help->output();
# Accent colors for the metadata / function / organism search types.
my @colors = ('#3674D9', '#52B95E', '#FF9933');
$html .= "<div id='dSimple' style='padding-top:20px; float: left;'>";
$html .= "<label for='tSimpleAll' style='font-size: 14px; font-weight:bold;'>Search for Metagenomes<br></label>";
$html .= "<input id='tSimpleAll' type='text' placeholder='by metadata / MG-RAST id (name, biome, project name, 4441137.3), function or organism...' value='".$stext."' style='width:580px;' onkeyup='if (event.keyCode == 13) { document.getElementById(\"bSimpleAll\").click(); }'>";
$html .= " <button type='button' id='bSimpleAll'>Search</button><br>";
$html .= "<div style='padding-top: 5px; font:12px sans-serif;'>Match<span id='share_help' onmouseover='hover(event, \"match_help\", " . $help->id . ")'><sup style='cursor: help;'>[?]</sup></span>";
$html .= " <input type='checkbox' name='resType' id='metaSimple' checked/> <span style='font-weight: bold; color: ".$colors[0]."'>metadata / MG-RAST id</span>";
$html .= " <input type='checkbox' name='resType' id='funcSimple' checked/> <span style='font-weight: bold; color: ".$colors[1]."'>function</span>";
$html .= " <input type='checkbox' name='resType' id='orgSimple' checked/> <span style='font-weight: bold; color: ".$colors[2]."'>organism</span></div>";
$html .= "</div>";
# NOTE(review): 'visiblity' below is a typo for 'visibility' (the property
# has no effect as written); the panel is hidden by the ready handler
# instead — confirm before changing, since fixing it alters pre-JS layout.
$html .= "<div id='dAdvanced' style='padding-top:20px; visiblity: hidden; float: left;'>";
$html .= "<label for='tAdvanced' style='font-size: 14px; font-weight:bold;'>Search for Metagenomes containing all the following condition(s):<br></label>";
$html .= "<table id='tQuery'>".$adv_srch."</table>";
$html .= "<fieldset style='width: 413px; margin-top: 10px; margin-bottom: 10px;'>";
$html .= "<legend>Results options</legend>";
$html .= "<input type='checkbox' id='showGroup' checked/> Group by metagenome<span id='share_help' onmouseover='hover(event, \"groupby_help\", " . $help->id . ")'><sup style='cursor: help;'>[?]</sup></span>";
$html .= "</fieldset>";
$html .= "<button id='bAdvanced'>Search</button> or <a id='SwitchSearchType' style='cursor:pointer;'>Simple Search</a>";
$html .= "</div>";
$html .= "<div class='clear' style='height: 20px;'></div><div class='clear' style='height: 25px;'></div>";
$html .= "<div>";
$html .= "<div style='margin:5px; padding: 20px; float:left; width:260; height:60px; border: 2px dashed ".$colors[0].";'>";
$html .= "<label for='tSimpleMeta' style='font-weight:bold;'>Find by metadata / mg-rast id<br></label>";
$html .= "<input id='tSimpleMeta' type='text' placeholder='MG-RAST id, name, biome, project name...' value='' style='width:260px;' onkeyup='if (event.keyCode == 13) { document.getElementById(\"bSimpleMeta\").click(); }'><br><button id='bSimpleMeta'>Search</button>";
$html .= "</div>";
$html .= "<div style='margin:5px; padding: 20px; float:left; width:260; height:60px; border: 2px dashed ".$colors[1].";'>";
$html .= "<label for='tSimpleFunc' style='font-weight:bold;'>Find by function or functional category<br></label>";
$html .= "<input id='tSimpleFunc' type='text' placeholder='BatE, 3.4.11.9, RNA Metabolism...' value='' style='width:260px;' onkeyup='if (event.keyCode == 13) { document.getElementById(\"bSimpleFunc\").click(); }'><br><button id='bSimpleFunc'>Search</button>";
$html .= "</div>";
$html .= "<div style='margin:5px; padding: 20px; float:left; width:260; height:60px; border: 2px dashed ".$colors[2].";'>";
$html .= "<label for='tSimpleOrg' style='font-weight:bold;'>Find by organism<br></label>";
$html .= "<input id='tSimpleOrg' type='text' placeholder='Firmicutes, Mobiluncus curtisii...' value='' style='width:260px;' onkeyup='if (event.keyCode == 13) { document.getElementById(\"bSimpleOrg\").click(); }'><br><button id='bSimpleOrg'>Search</button>";
$html .= "</div>";
$html .= "</div>";
$html .= "<div class='clear' style='height: 25px;'></div>";
$html .= "<div id='dResult'></div>";
$html .= "<div class='clear' style='height: 15px;'></div>";
return $scripts . $html;
}
sub get_simple_table {
    my ($self) = @_;

    # Ajax handler for the simple (single-term) search.  Searches the
    # requested types (metadata, function, organism) for the CGI 'text'
    # term, aggregates unique matches per job, and returns an HTML table
    # of matching metagenomes (or an error paragraph when nothing hits).
    # Fixes vs. the previous revision:
    #   - match counts are no longer re-sorted lexically, so they stay
    #     aligned with the sorted match-type names in the adjacent column
    #   - removed unused locals ($tomany, %meta_map)
    my $hs   = HTML::Strip->new();
    my $text = uri_unescape($self->app->cgi->param('text'));
    if ($text) {
        $text = $hs->parse($text);
    }
    my $types = $self->app->cgi->param('type') || "metadata,function,organism";
    my $table = $self->application->component('sResult');
    my $jobs  = $self->data('jobs');
    my $mgdb  = $self->data('mgdb');
    my $mgs   = [ keys %$jobs ];
    my %type_set = map {$_, 1} split(/,/, $types);
    # metadata fields shown as extra table columns: [db keys, display names]
    my $show_md = [['biome','feature','material','country','location','pi'], ['Biome','Feature','Material','Country','Location','PI']];
    my %type_map = map { $_->[0], $_->[1] } @{ $self->data('type') };
    my @data = ();
    my $uniq_job = {};    # job id => search type => count of unique matches
    my $uniq_hit = {};    # search type => { unique match value => 1 }
    if (exists $type_set{'function'}) {
        ### jobs function search
        my $ffuncs = $self->search_annotation('function', [['name',$text,0,1]]);
        if (scalar(keys %$ffuncs) > 0) {
            my $fwhere = $self->get_where_str(["id IN (".join(",", keys %$ffuncs).")"], 1);
            my $jd_func = $self->get_jobdata('function', $fwhere, $mgs, $ffuncs);
            foreach my $j ( keys %$jd_func ) {
                my $hits = {};
                foreach my $r ( @{$jd_func->{$j}} ) {
                    $hits->{ $ffuncs->{$r} } = 1;
                    $uniq_hit->{function}{ $ffuncs->{$r} } = 1;
                }
                $uniq_job->{$j}{function} = scalar(keys %$hits);
            }
        }
        ### jobs ontology search (counts are added onto the function totals)
        my $ofuncs = $self->search_annotation('ontology', [['name',$text,0,1]]);
        if (scalar(keys %$ofuncs) > 0) {
            my $owhere = $self->get_where_str(["id IN (".join(",", keys %$ofuncs).")"], 1);
            my $jd_ont = $self->get_jobdata('ontology', $owhere, $mgs, $ofuncs);
            foreach my $j ( keys %$jd_ont ) {
                my $hits = {};
                foreach my $r ( @{$jd_ont->{$j}} ) {
                    $hits->{ $ofuncs->{$r} } = 1;
                    $uniq_hit->{function}{ $ofuncs->{$r} } = 1;
                }
                $uniq_job->{$j}{function} += scalar(keys %$hits);
            }
        }
    }
    if (exists $type_set{'organism'}) {
        ### jobs organism search (restricted to the M5NR source)
        my $orgs = $self->search_annotation('organism', [['name',$text,0,1]]);
        if (scalar(keys %$orgs) > 0) {
            my $where = $self->get_where_str(["id IN (".join(",", keys %$orgs).")", "source=".$mgdb->_src_id->{'M5NR'}], 1);
            my $jd_org = $self->get_jobdata('organism', $where, $mgs, $orgs);
            foreach my $j ( keys %$jd_org ) {
                my $hits = {};
                foreach my $r ( @{$jd_org->{$j}} ) {
                    $hits->{ $orgs->{$r} } = 1;
                    $uniq_hit->{organism}{ $orgs->{$r} } = 1;
                }
                $uniq_job->{$j}{organism} = scalar(keys %$hits);
            }
        }
    }
    if (exists $type_set{'metadata'}) {
        ### metadata search
        my $md_jobs = $self->search_metadata([['value',$text,0,1]], $mgs);
        foreach my $j ( keys %$md_jobs ) {
            my $hits = {};
            foreach my $r ( @{$md_jobs->{$j}} ) {
                my $val = exists($r->{value}) ? $r->{value} : "";
                $hits->{$val} = 1;
                $uniq_hit->{metadata}{$val} = 1;
            }
            $uniq_job->{$j}{metadata} = scalar(keys %$hits);
        }
        ### jobcache search: match id, metagenome name and project directly
        foreach my $j ( keys %$jobs ) {
            my $qtext = quotemeta($text);
            my $mg_id = $jobs->{$j}{metagenome_id};
            my $gname = $jobs->{$j}{name};
            my $pname = $jobs->{$j}{project} || '';
            if ($mg_id =~ /$qtext/i) {
                $uniq_job->{$j}{metadata} += 1;
                $uniq_hit->{metadata}{$mg_id} = 1;
            }
            if ($gname =~ /$qtext/i) {
                $uniq_job->{$j}{metadata} += 1;
                $uniq_hit->{metadata}{$gname} = 1;
            }
            if ($pname && ($pname =~ /$qtext/i)) {
                $uniq_job->{$j}{metadata} += 1;
                $uniq_hit->{metadata}{$pname} = 1;
            }
        }
    }
    # Build one table row per matching job.
    if (scalar(keys %$uniq_job) > 0) {
        foreach my $j (keys %$uniq_job) {
            my $row = [ $j,
                        '<a href="?page=MetagenomeOverview&metagenome='.$jobs->{$j}{metagenome_id}.'">'.$jobs->{$j}{name}.'</a>',
                        $jobs->{$j}{metagenome_id},
                        $jobs->{$j}{project} ? '<a href="?page=MetagenomeProject&project='.$jobs->{$j}{project_id}.'">'.$jobs->{$j}{project}.'</a>' : '',
                        $jobs->{$j}{public} ];
            if (scalar(keys %type_set) > 1) {
                push @$row, join(", ", sort keys %{$uniq_job->{$j}});
            }
            # Counts are emitted in the same sorted-key order as the match
            # types above; re-sorting them would break column alignment.
            my @counts = map { $uniq_job->{$j}{$_} } sort keys %{$uniq_job->{$j}};
            push @$row, join(", ", @counts);
            foreach my $t (@{$show_md->[0]}) {
                push @$row, $jobs->{$j}{$t};
            }
            push @data, $row;
        }
    }
    unless (@data > 0) { return "<p><b style='color:red'>No Metagenomes found for the above search criteria.</b></p>"; }
    # Summary line above the table: unique match counts per search type.
    my $hit_str = "Found " . scalar(keys %$uniq_job) . " metagenomes containing ";
    if (scalar(keys %type_set) > 1) {
        $hit_str .= "these unique matches:<br><table style='padding-left:20px;'>";
        foreach (sort keys %type_set) {
            $hit_str .= "<tr><td>" . (exists($uniq_hit->{$_}) ? scalar(keys %{$uniq_hit->{$_}}) : 0) . "</td><td>" . $type_map{$_} . "</td></tr>";
        }
        $hit_str .= "</table>";
    } else {
        $hit_str .= (exists($uniq_hit->{$types}) ? scalar(keys %{$uniq_hit->{$types}}) : 0) . " $types matches.";
    }
    # Table column definitions; metadata columns are appended afterwards.
    my $cols = [ { name => 'Job Number', visible => 0, sortable => 1 },
                 { name => 'Metagenome', filter => 1, operator => 'like', sortable => 1 },
                 { name => 'MG-RAST ID', filter => 1, operator => 'like', visible => 1 },
                 { name => 'Project' , filter => 1, operator => 'combobox', sortable => 1 },
                 { name => 'Public' , filter => 1, operator => 'combobox' } ];
    if (scalar(keys %type_set) > 1) {
        push @$cols, { name => 'Match Types', filter => 1, operator => 'combobox', sortable => 1 };
    }
    push @$cols, { name => 'Match Counts', sortable => 1 };
    foreach my $md ( @{$show_md->[1]} ) {
        my $col = { name => $md, filter => 1, operator => 'combobox', sortable => 1 };
        push @$cols, $col;
    }
    my $tid = $table->id();
    $table->show_top_browse(1);
    $table->show_bottom_browse(1);
    $table->items_per_page(25);
    $table->show_select_items_per_page(1);
    $table->show_column_select(1);
    $table->show_export_button({title => "download results table", strip_html => 1});
    $table->show_clear_filter_button(1);
    $table->other_buttons([ qq(<button onclick="var mgs = column_extract_data($tid,0,0,1); execute_ajax('get_mg_map','dResult2','mg_set='+mgs);">map metagenomes</button>), qq(<button onclick="var mgs = column_extract_data($tid,0,0,1); execute_ajax('get_mg_col','dResult2','mg_set='+mgs);">create collection</button>) ]);
    $table->columns($cols);
    $table->data(\@data);
    return "<div id='dResult2'></div><p>$hit_str</p>" . $table->output();
}
sub get_advanced_table {
my ($self) = @_;
my $hs = HTML::Strip->new();
my $cgi = $self->app->cgi;
my $table = $self->application->component('sResult');
my $qnum = $cgi->param('qnum');
my $show = $cgi->param('show_match') || 0;
my $jobs = $self->data('jobs');
my $mgdb = $self->data('mgdb');
my $tomany = "<p><b style='color:red'>Your search request returned to many results.<br>Please try again with more specific criteria.</b></p>";
my $empty = "<p><b style='color:red'>No Metagenomes found for the above selected criteria.</b></p>";
my $show_md = [['biome','feature','material','country','location','pi'], ['Biome','Feature','Material','Country','Location','PI']];
my $max = 0;
my $limit = $self->data('max_results');
my $type_map = {};
my $hier_map = {};
my $extra_map = {};
foreach my $t (@{ $self->data('type') }) {
if ($t->[2]) {
my %tmp = map { $_->[0], $_->[1] } @{ $self->data($t->[2]) };
$hier_map->{$t->[0]} = $t->[2];
$extra_map->{$t->[0]} = \%tmp;
}
$type_map->{$t->[0]} = $t->[1];
}
### parse cgi params
my $c_order = []; # [ cat1, cat2, .. ] , based on mapping from type & extra
my $searches = {}; # type => extra => [ [text, eql, has] ]
foreach my $i (1..$qnum) {
my $type = $cgi->param("type_q$i");
my $input = uri_unescape($cgi->param("input_q$i"));
if ($input) {
$input = $hs->parse($input);
}
my $extra = $cgi->param("extra_q$i") || $type;
my ($eql, $has) = split(/_/, $cgi->param("match_q$i"));
$hs->eof;
if ($type eq "metadata") {
if ($extra eq "all") {
push @$c_order, $extra_map->{$type}{all} . " " . $type_map->{$type};
}
else {
push @$c_order, $extra_map->{$type}{$extra};
}
}
elsif ($type eq "function") {
push @$c_order, $type_map->{$type};
$extra = $type;
}
else {
push @$c_order, $type_map->{$type} . " : " . $extra_map->{$type}{$extra};
}
push @{ $searches->{$type}{$extra} }, [$input, $eql, $has];
}
## uniquify order
my %seen = ();
my @cats = ();
foreach my $cat (@$c_order) {
if (exists $seen{$cat}) { next; }
push @cats, $cat;
$seen{$cat} = 1;
}
my $results = {}; ### jobid => cat => { val }
my $cur_mgs = [ keys %$jobs ];
foreach my $type (keys %$searches) {
if ($type eq 'metadata') {
if (exists $searches->{$type}{all}) {
# search all metadata dbs using 'OR' matching
my $cat = $extra_map->{$type}{all} . " " . $type_map->{$type};
my $srch_info = $searches->{$type}{all};
my $to_search = $self->get_search_list({value => $srch_info});
my $md_data = $self->search_metadata($to_search, $cur_mgs);
foreach my $j (keys %$md_data) {
foreach my $r (@{$md_data->{$j}}) {
my $name = $r->{cat} . ": " . $r->{tag};
if ($max > $limit) { return $tomany; }
$results->{$j}{$cat}{ "$name<br>" . $r->{value} } = 1;
$max += 1;
}
}
my %merge = map { $_, 1 } keys %$md_data;
foreach my $jc_item (('project', 'name', 'metagenome_id')) {
$to_search = $self->get_search_list({$jc_item => $srch_info});
my $jc_data = $self->search_jobcache($to_search, $cur_mgs);
foreach my $j (keys %$jc_data) {
if ($max > $limit) { return $tomany; }
$results->{$j}{$cat}{ "$jc_item<br>" . $jc_data->{$j}{$jc_item} } = 1;
$max += 1;
$merge{$j} = 1;
}
}
@$cur_mgs = keys %merge;
unless ($cur_mgs && (@$cur_mgs > 0)) { return $empty; }
}
my %remaining = map { $_, $searches->{$type}{$_} } grep {$_ !~ /all/} keys %{$searches->{$type}};
if (scalar(keys %remaining) > 0) {
my $to_search = $self->get_search_list($searches->{$type});
my %tag_set = map { $_->[0], 1 } @$to_search;
my $jdata = $self->search_jobcache($to_search, $cur_mgs);
foreach my $j (keys %$jdata) {
foreach my $t (keys %tag_set) {
next unless (exists($jdata->{$j}{$t}) && exists($extra_map->{$type}{$t}));
my $cat = $extra_map->{$type}{$t};
if ($max > $limit) { return $tomany; }
$results->{$j}{$cat}{ $jdata->{$j}{$t} } = 1;
$max += 1;
}
}
@$cur_mgs = keys %$jdata;
unless ($cur_mgs && (@$cur_mgs > 0)) { return $empty; }
}
}
elsif ($type eq "function") {
my $to_search = $self->get_search_list({'name' => $searches->{$type}{$type}});
my $funcs = $self->search_annotation($type, $to_search);
if(scalar(keys %$funcs) > 0) {
my $where = $self->get_where_str(["id IN (".join(",", keys %$funcs).")"], 1);
my $jdata = $self->get_jobdata($type, $where, $cur_mgs);
foreach my $j (keys %$jdata) {
foreach my $r (@{$jdata->{$j}}) {
if ($max > $limit) { return $tomany; }
$results->{$j}{ $type_map->{$type} }{ $funcs->{$r} } = 1;
$max += 1;
}
}
@$cur_mgs = keys %$jdata;
unless ($cur_mgs && (@$cur_mgs > 0)) { return $empty; }
}
}
elsif ($type eq "organism") {
foreach my $tax (keys %{$searches->{$type}}) {
my $to_search = [];
my $name_map = {};
my $org_data = {};
if ($tax eq 'name') {
$to_search = $self->get_search_list({'name' => $searches->{$type}{$tax}});
$name_map = $self->search_annotation($type, $to_search);
}
elsif ($tax eq 'ncbi_tax_id') {
my @taxs = map { $_->[0] } @{$searches->{$type}{$tax}};
$name_map = $self->data('mgdb')->organisms_for_taxids(\@taxs);
}
else {
$to_search = $self->get_search_list({$tax => $searches->{$type}{$tax}});
$name_map = $self->search_taxonomy($to_search, $tax);
}
if (scalar(keys %$name_map) > 0) {
my $cat = $type_map->{$type} . " : " . $extra_map->{$type}{$tax};
my $where = $self->get_where_str(["id IN (".join(",", keys %$name_map).")", "source=".$mgdb->_src_id->{'M5NR'}], 1);
$org_data = $self->get_jobdata('organism', $where, $cur_mgs);
foreach my $j (keys %$org_data) {
foreach my $r (@{$org_data->{$j}}) {
if ($max > $limit) { return $tomany; }
$results->{$j}{$cat}{ $name_map->{$r} } = 1;
$max += 1;
}
}
}
@$cur_mgs = keys %$org_data;
unless ($cur_mgs && (@$cur_mgs > 0)) { return $empty; }
}
}
else {
my $end = ($hier_map->{$type} eq 'hier2') ? "level4" : "level3";
foreach my $lvl (keys %{$searches->{$type}}) {
my $to_search = [];
my $name_map = {};
my $func_data = {};
if ($lvl eq $end) {
$to_search = $self->get_search_list({'name' => $searches->{$type}{$lvl}});
$name_map = $self->search_annotation('ontology', $to_search);
}
else {
$to_search = $self->get_search_list({$lvl => $searches->{$type}{$lvl}});
$name_map = $self->search_ontology($to_search, $lvl, $type);
}
if (scalar(keys %$name_map) > 0) {
my $cat = $type_map->{$type} . " : " . $extra_map->{$type}{$lvl};
my $where = $self->get_where_str(["id IN (".join(",", keys %$name_map).")", "source=".$mgdb->_src_id->{$type}], 1);
$func_data = $self->get_jobdata('ontology', $where, $cur_mgs);
foreach my $j (keys %$func_data) {
foreach my $r (@{$func_data->{$j}}) {
if ($max > $limit) { return $tomany; }
$results->{$j}{$cat}{ $name_map->{$r} } = 1;
$max += 1;
}
}
}
@$cur_mgs = keys %$func_data;
unless ($cur_mgs && (@$cur_mgs > 0)) { return $empty; }
}
}
}
## keep only jobs that hit all search criteria
my %hmap = ();
my %final = ();
JOB: foreach my $j (@$cur_mgs) {
my @tmp = ();
foreach my $c (@cats) {
unless (exists($results->{$j}{$c}) && (scalar(keys %{$results->{$j}{$c}}) > 0)) {
$results->{$j} = {};
next JOB;
}
foreach my $t (keys %{$results->{$j}{$c}}) {
$hmap{$c}{$t} = 1;
}
push @tmp, [ keys %{$results->{$j}{$c}} ];
}
$final{$j} = \@tmp;
$results->{$j} = {};
}
my @hits = map { [scalar(keys %{$hmap{$_}}), $_] } @cats;
my $hit_str = "Found " . scalar(keys %final) . " metagenomes containing ";
if (@hits > 1) {
$hit_str .= "these unique matches:<br><table style='padding-left:20px;'>";
foreach (@hits) {
$hit_str .= qq(<tr><td>$_->[0]</td><td>$_->[1]</td></tr>);
}
$hit_str .= "</table>";
} else {
$hit_str .= qq($hits[0][0] $hits[0][1] matches.);
}
%hmap = ();
## build data with all category combinations
$max = 0;
my @data = ();
foreach my $j (keys %final) {
my $job = $jobs->{$j};
my $num = scalar @{ $final{$j} };
my @set = ();
if (! $show) {
my $row = [ $j,
'<a href="?page=MetagenomeOverview&metagenome='.$job->{metagenome_id}.'">'.$job->{name}.'</a>',
$job->{metagenome_id},
$job->{project} ? '<a href="?page=MetagenomeProject&project='.$job->{project_id}.'">'.$job->{project}.'</a>' : '',
$job->{public} ];
foreach my $t ( @{$show_md->[0]} ) {
push @$row, $job->{$t};
}
push @data, $row;
next;
}
if ($num == 1) { @set = map {[$_]} @{ $final{$j}[0] }; }
elsif ($num > 1) { @set = $self->combinations( $final{$j} ); }
$final{$j} = [];
foreach my $line ( @set ) {
if ($max > $limit) { return $tomany; }
my $row = [ $j,
'<a href="?page=MetagenomeOverview&metagenome='.$job->{metagenome_id}.'">'.$job->{name}.'</a>',
$job->{metagenome_id},
$job->{project} ? '<a href="?page=MetagenomeProject&project='.$job->{project_id}.'">'.$job->{project}.'</a>' : '',
$job->{public},
@$line ];
foreach my $t ( @{$show_md->[0]} ) {
push @$row, $job->{$t};
}
push @data, $row;
$max += 1;
}
}
unless (@data > 0) { return $empty; }
## create table
my $cols = [ { name => 'Job' , visible => 0, sortable => 1 },
{ name => 'Metagenome', filter => 1, operator => 'like', sortable => 1 },
{ name => 'ID' , filter => 1, operator => 'like', visible => 0 },
{ name => 'Project' , filter => 1, operator => 'combobox', sortable => 1 },
{ name => 'Public' , filter => 1, operator => 'combobox' }
];
if ($show) {
foreach my $cat ( @cats ) {
push @$cols, { name => $cat, filter => 1, operator => 'like', sortable => 1 };
}
}
foreach my $md ( @{$show_md->[1]} ) {
my $col = { name => $md, filter => 1, operator => 'combobox', visible => ($show ? 0 : 1) };
push @$cols, $col;
}
my $tid = $table->id();
$table->show_top_browse(1);
$table->show_bottom_browse(1);
$table->items_per_page(25);
$table->show_select_items_per_page(1);
$table->show_column_select(1);
$table->show_export_button(1, {title => 'download results table', strip_html => 1});
$table->show_clear_filter_button(1);
$table->other_buttons([ qq(<button onclick="var mgs = column_extract_data($tid,0,0,1); execute_ajax('get_mg_map','dResult2','mg_set='+mgs);">map metagenomes</button>), qq(<button onclick="var mgs = column_extract_data($tid,0,0,1); execute_ajax('get_mg_col','dResult2','mg_set='+mgs);">create collection</button>) ]);
$table->columns($cols);
$table->data(\@data);
return "<div id='dResult2'></div><p>$hit_str</p>" . $table->output();
}
## recursive helper: expands a 2d array into every combination (cartesian
## product) taking one element from each inner list; empty-string entries
## are dropped. NOTE: consumes (shifts) the caller's list.
sub combinations {
    my ($self, $list) = @_;
    return unless ($list && @$list);
    # take the first inner list, skipping empty entries
    my @head = grep { $_ ne '' } @{ shift @$list };
    my @tails = $self->combinations($list);
    unless (@tails) {
        # base case: only one inner list left, each element is a combination
        return map { [$_] } @head;
    }
    my @combined;
    for my $first (@head) {
        push @combined, map { [ $first, @$_ ] } @tails;
    }
    return @combined;
}
# Ajax callback: render a google-map region for the metagenomes listed in the
# 'mg_set' CGI parameter ('~'-separated metagenome ids).
# Jobs without latitude/longitude are skipped; remaining jobs are grouped by
# their coordinates rounded to whole degrees, so nearby metagenomes share one
# map marker. Returns an HTML snippet, or an error paragraph when no selected
# job has coordinates.
sub get_mg_map {
my ($self) = @_;
my $jobs = $self->data('jobs');
my $set = $self->app->cgi->param('mg_set') || "";
my %mgs = map { $_, 1 } split(/~/, $set);
my $locs = {};
my @data = ();
foreach my $j (keys %mgs) {
my $bio = $jobs->{$j}{biome};
my $lat = $jobs->{$j}{latitude};
my $lng = $jobs->{$j}{longitude};
unless (defined($lat) && defined($lng)) { next; }
# marker key: coordinates rounded to whole degrees, e.g. "48~14"
my $key = sprintf("%.0f",$lat) . "~" . sprintf("%.0f",$lng);
push @{ $locs->{$key} }, { id => $jobs->{$j}{metagenome_id}, name => $jobs->{$j}{name}, lat => $lat, lng => $lng, biome => $bio };
}
if (scalar(keys %$locs) == 0) {
return "<p><b style='color:red'>None of the selected metagenomes contain geographic coordinates.</b></p>";
}
# per marker: build an HTML table of its metagenomes; one "key~names~table"
# record per marker, records joined by '^' for the javascript map builder
foreach my $key (keys %$locs) {
my $num = 0;
my $td_c = "class='table_row'";
my $names = join(", ", map {$_->{id}} @{$locs->{$key}});
my $table = "<p><table class='table_table'>";
foreach my $mg ( @{$locs->{$key}} ) {
$num += 1;
# alternate row classes for zebra striping
my $tr_c = (($num % 2) == 0) ? "class='even_row'" : "class='odd_row'";
$table .= "<tr $tr_c><td $td_c>".$mg->{name}."</td><td $td_c>".$mg->{id}."</td><td $td_c>".$mg->{lat}."°, ".$mg->{lng}."°</td><td $td_c>".$mg->{biome}."</td></tr>";
}
$table .= "</table></p>";
push @data, "$key~$names~$table";
}
# the <img onload=...> trick runs create_google_map once the DOM is in place
my $html = qq(
<div id='map_region'>
<button onclick="document.getElementById('map_region').style.display = 'none';">Remove Map</button>
<div id='map_canvas' style='width:100%; height:100%'></div>
<div id='map_data' style='display:none;'>) . join("^", @data) . qq(</div>
<img src='./Html/clear.gif' onload="create_google_map('map_canvas', 'map_data');">
</div>);
return $html;
}
# Ajax callback: return the HTML form asking the user to name a new collection
# for the metagenome set in the 'mg_set' CGI parameter; submitting calls
# set_mg_col via execute_ajax with the set and the entered name.
sub get_mg_col {
my ($self) = @_;
my $set = $self->app->cgi->param('mg_set') || "";
return qq(
<table><tr>
<td>Enter Collection Name:</td>
<td style='padding-left:10px'>
<input id='col_name' name='col_name' type='text' value='' /></td>
<td style='padding-left:10px'>
<button onclick="var name = document.getElementById('col_name').value; execute_ajax('set_mg_col','dResult2','mg_set=$set&col_name='+name);">Submit</button></td>
</tr></table>
);
}
# Ajax callback: create a named metagenome collection for the logged-in user.
# Reads 'mg_set' ('~'-separated metagenome ids) and 'col_name' from CGI.
# Each collection member is stored as a Preferences row with
# name='mgrast_collection' and value='<collection>|<job_id>'; already-existing
# rows are not duplicated. Returns a status message as HTML.
sub set_mg_col {
my ($self) = @_;
my $app = $self->application;
my $user = $app->session->user;
my $set = $app->cgi->param('mg_set') || "";
my $col = $app->cgi->param('col_name') || "";
my %ids = map { $_, 1 } split(/~/, $set);
if (! $user) {
return "<p><b style='color:red'>Must be logged in to create collection.</b></p>";
}
elsif ((! $set) || (scalar(keys %ids) == 0)) {
return "<p><b style='color:red'>No metagenomes selected for collection.</b></p>";
}
elsif (! $col) {
return "<p><b style='color:red'>No name entered for collection.</b></p>";
}
my $num = 0;
foreach my $id (keys %ids) {
# resolve metagenome id to a job object; skip ids the user cannot access
my $job = $app->data_handle('MGRAST')->Job->init({ metagenome_id => $id });
unless(ref($job)) {
next;
}
my $jid = $job->job_id;
# only count and create the preference if it does not exist yet
my $existing = $app->dbmaster->Preferences->get_objects( { application => $app->backend,
user => $user,
name => 'mgrast_collection',
value => $col."|".$jid } );
unless (scalar(@$existing)) {
$num += 1;
$app->dbmaster->Preferences->create( { application => $app->backend,
user => $user,
name => 'mgrast_collection',
value => $col."|".$jid } );
}
}
return "<p><b>Collection '$col' of $num metagenomes created.</b></p>";
}
### search functions: queries are all 'AND'
# $to_search = [ [column (string), text (string), equal (bool), has (bool)] ]
# Filter the cached job hash by every search term (terms are ANDed).
# eql==2 means text is a numeric range "low_high": has selects in-range,
# !has selects out-of-range. eql==1 is exact string compare, eql==0 is a
# case-insensitive substring (regex) match; has negates the match when false.
# @param to_search list of [column, text, eql, has] tuples
# @param jobs optional arrayref of job ids to restrict the search to
# @return hashref: job_id => job-info hash for jobs matching ALL terms
sub search_jobcache {
    my ($self, $to_search, $jobs) = @_;
    my %jdata = %{ $self->data('jobs') };
    # optionally restrict to a caller-supplied set of job ids
    if ($jobs && (@$jobs > 0)) {
        my %tmp = ();
        foreach my $j (@$jobs) {
            if (exists $jdata{$j}) { $tmp{$j} = $jdata{$j}; }
        }
        %jdata = %tmp;
    }
    foreach my $srch (@$to_search) {
        my ($cat, $text, $eql, $has) = @$srch;
        my %tmp = ();
        foreach my $j (keys %jdata) {
            unless (exists $jdata{$j}{$cat}) { next; }
            my $val = $jdata{$j}{$cat};
            next unless (defined($val) && ($val =~ /\S/));
            if ($eql == 2) {
                # numeric range: parse leading number out of the value
                my @rng = split(/_/, $text);
                if (($val =~ /^\s*([+-]?\d*\.?\d+)/) && (@rng == 2)) {
                    my $num = $1 * 1.0;
                    if ($has && ($rng[0] <= $num) && ($num <= $rng[1])) { $tmp{$j} = $jdata{$j}; }
                    # bugfix: 'not in range' must match values below OR above
                    # the range; the old '&&' test never matched values above it
                    elsif ((! $has) && (($num < $rng[0]) || ($num > $rng[1]))) { $tmp{$j} = $jdata{$j}; }
                }
            }
            elsif ($eql && $has && ($val eq $text)) { $tmp{$j} = $jdata{$j}; }
            elsif ($eql && (! $has) && ($val ne $text)) { $tmp{$j} = $jdata{$j}; }
            elsif ((! $eql) && $has && ($val =~ /$text/i)) { $tmp{$j} = $jdata{$j}; }
            elsif ((! $eql) && (! $has) && ($val !~ /$text/i)) { $tmp{$j} = $jdata{$j}; }
        }
        %jdata = %tmp;
    }
    return \%jdata;
}
# Search collection- and project-level metadata for the given terms and map
# the hits back onto jobs.
# Requires each job entry in $self->data('jobs') to carry the _id_project,
# _id_sample, _id_library and _id_ep fields (set up by get_user_jobs).
# @param to_search list of [column, text, eql, has] tuples (ORed via SQL)
# @param jobs optional arrayref of job ids to restrict the result to
# @return hashref: job_id => [ {tag=>..., value=>..., cat=>...} ]
sub search_metadata {
my ($self, $to_search, $jobs) = @_;
my $db_hdl = $self->app->data_handle('MGRAST')->db_handle;
my $all_jobs = $self->data('jobs');
my $data = {};
my $wstr = '';
if ($to_search && (@$to_search > 0)) {
# the same WHERE string is reused for both metadata tables below
my @where = map { $self->get_search_str('mysql', $_->[0], $_->[1], $_->[2], $_->[3]) } @$to_search;
$wstr = $self->get_where_str(\@where);
}
my $cdata = {};
my $pdata = {};
# collection-level hits, keyed by collection _id (category set per job below)
foreach my $row (@{$db_hdl->selectall_arrayref("SELECT collection, tag, value FROM MetaDataEntry".$wstr)}) {
next unless ($row->[2] && ($row->[2] =~ /\S/));
push @{ $cdata->{$row->[0]} }, { tag => $row->[1], value => $row->[2] };
}
# project-level hits, keyed by project _id
foreach my $row (@{$db_hdl->selectall_arrayref("SELECT project, tag, value FROM ProjectMD".$wstr)}) {
next unless ($row->[2] && ($row->[2] =~ /\S/));
push @{ $pdata->{$row->[0]} }, { tag => $row->[1], value => $row->[2], cat => 'project' };
}
my @search_jobs = ($jobs && (@$jobs > 0)) ? @$jobs : keys %$all_jobs;
foreach my $j (@search_jobs) {
if (exists $pdata->{$all_jobs->{$j}{_id_project}}) {
push @{ $data->{$j} }, @{ $pdata->{$all_jobs->{$j}{_id_project}} };
}
# collection hits get their category stamped based on which _id matched;
# note this mutates the shared $cdata entries in place
if (exists $cdata->{$all_jobs->{$j}{_id_sample}}) {
map { $_->{cat} = 'sample' } @{ $cdata->{$all_jobs->{$j}{_id_sample}} };
push @{ $data->{$j} }, @{ $cdata->{$all_jobs->{$j}{_id_sample}} };
}
if (exists $cdata->{$all_jobs->{$j}{_id_library}}) {
map { $_->{cat} = 'library' } @{ $cdata->{$all_jobs->{$j}{_id_library}} };
push @{ $data->{$j} }, @{ $cdata->{$all_jobs->{$j}{_id_library}} };
}
if (exists $cdata->{$all_jobs->{$j}{_id_ep}}) {
# NOTE(review): 'enviroment' [sic] is the emitted category value; confirm
# no consumer matches on this string before correcting the spelling
map { $_->{cat} = 'enviroment' } @{ $cdata->{$all_jobs->{$j}{_id_ep}} };
push @{ $data->{$j} }, @{ $cdata->{$all_jobs->{$j}{_id_ep}} };
}
}
return $data;
# hash: job_id => [ {tag=>'tag', value=>'value', cat=>'category'} ]
}
# Fetch annotation ids per job from the job table of the given type.
# Results honor the optional job filter and the global 'max_results' cap.
# @param type annotation type, used to pick the job table
# @param where pre-built SQL WHERE string (from get_where_str)
# @param jobs optional arrayref of job ids to restrict the result to
# @return hashref: job_id => [ annotation ids ]
sub get_jobdata {
    my ($self, $type, $where, $jobs) = @_;
    my $mgdb = $self->data('mgdb');
    my $max = $self->data('max_results');
    my $restrict = ($jobs && (@$jobs > 0)) ? 1 : 0;
    my %allowed = $restrict ? (map { $_ => 1 } @$jobs) : ();
    my $sql = "SELECT job, id FROM ".$mgdb->_jtbl->{$type}.$where." AND ".$mgdb->_qver;
    my $data = {};
    my $count = 0;
    for my $row (@{ $mgdb->_dbh->selectall_arrayref($sql) }) {
        next if $restrict && (! exists $allowed{$row->[0]});
        last if $count >= $max;
        push @{ $data->{$row->[0]} }, $row->[1];
        $count++;
    }
    return $data;
}
# Look up annotation _id => name pairs of the given type whose name matches
# the search terms (terms are ANDed by get_where_str).
sub search_annotation {
    my ($self, $type, $to_search) = @_;
    my $mgdb = $self->data('mgdb');
    my $where = [];
    if ($to_search && (@$to_search > 0)) {
        for my $term (@$to_search) {
            push @$where, $self->get_search_str('psql', @{$term}[0..3]);
        }
    }
    my $sql = "SELECT _id, name FROM ".$mgdb->_atbl->{$type}.$self->get_where_str($where);
    my $rows = $mgdb->_dbh->selectall_arrayref($sql);
    my $data = {};
    if ($rows && (@$rows > 0)) {
        $data->{ $_->[0] } = $_->[1] for @$rows;
    }
    return $data; # ann_id => ann_name
}
# Map organism _id => name at the requested taxonomy level for organisms
# matching the search terms; organisms without an NCBI tax id are excluded.
sub search_taxonomy {
    my ($self, $to_search, $tax) = @_;
    my $where = [];
    if ($to_search && (@$to_search > 0)) {
        for my $term (@$to_search) {
            push @$where, $self->get_search_str('psql', @{$term}[0..3]);
        }
    }
    push @$where, "ncbi_tax_id IS NOT NULL";
    my $sql = "SELECT _id, $tax FROM organisms_ncbi".$self->get_where_str($where);
    my $rows = $self->data('mgdb')->_dbh->selectall_arrayref($sql);
    my $data = {};
    if ($rows && (@$rows > 0)) {
        $data->{ $_->[0] } = $_->[1] for @$rows;
    }
    return $data; # org_id => tax_level_name
}
# Scan the ontology hierarchy of $type and collect func_id => node name for
# every node at level $lvl matching one of the search terms.
sub search_ontology {
    my ($self, $to_search, $lvl, $type) = @_;
    my %matches = ();
    my ($idx) = $lvl =~ /(\d)$/; ## index of level is level# - 1
    my $hierarchy = $self->data('mgdb')->get_hierarchy('ontology', $type, undef, 1);
    for my $term (@$to_search) {
        # first tuple element is the level name again; only text/eql/has matter
        my (undef, $text, $eql, $has) = @$term;
        # Subsystems level3 node names use '_' in place of whitespace
        if (($type eq 'Subsystems') && ($idx == 3)) { $text =~ s/\s+/_/g; }
        for my $id (keys %$hierarchy) {
            my $node = $hierarchy->{$id}->[$idx-1];
            if ($eql) {
                $matches{$id} = $node if ($has ? ($node eq $text) : ($node ne $text));
            }
            else {
                $matches{$id} = $node if ($has ? ($node =~ /$text/i) : ($node !~ /$text/i));
            }
        }
    }
    return \%matches; # func_id => node_level_name
}
### helper functions
# Collect all jobs viewable by the current user, keyed by job_id, and attach
# the project / sample / library / env-package collection _ids needed by
# search_metadata.
# @param mgs optional metagenome filter passed through to fetch_browsepage_viewable
# @return hashref: job_id => job-info hash (with _id_* fields added)
sub get_user_jobs {
    my ($self, $mgs) = @_;
    my $user = $self->app->session->user;
    my $mgrast = $self->app->data_handle('MGRAST');
    my $data = {};
    map { $data->{$_->{job_id}} = $_ } @{ $mgrast->Job->fetch_browsepage_viewable($user, $mgs) };
    # map sample collection _id => env-package collection _id
    my $sql = "SELECT parent, _id FROM MetaDataCollection WHERE type='ep' AND parent IS NOT NULL";
    my $tmp = $mgrast->db_handle->selectall_arrayref($sql);
    my %eps = map { $_->[0], $_->[1] } @$tmp;
    $sql = "SELECT job_id, primary_project, sample, library FROM Job WHERE job_id IN (".join(",", keys %$data).")";
    $tmp = $mgrast->db_handle->selectall_arrayref($sql);
    foreach my $row (@$tmp) {
        # bugfix: removed a stray 'last;' that made this loop body unreachable,
        # leaving every job without the _id_* fields search_metadata relies on
        $data->{$row->[0]}{_id_project} = $row->[1] ? $row->[1] : 0;
        $data->{$row->[0]}{_id_sample}  = $row->[2] ? $row->[2] : 0;
        $data->{$row->[0]}{_id_library} = $row->[3] ? $row->[3] : 0;
        $data->{$row->[0]}{_id_ep} = exists($eps{$data->{$row->[0]}{_id_sample}}) ? $eps{$data->{$row->[0]}{_id_sample}} : 0;
    }
    return $data;
}
# Collect the sorted set of distinct, non-empty values of one job attribute
# across all cached jobs.
sub get_unique_job_info {
    my ($self, $item) = @_;
    my $all_jobs = $self->data('jobs');
    my %uniq;
    for my $job_id (keys %$all_jobs) {
        next unless exists $all_jobs->{$job_id}{$item};
        my $val = $all_jobs->{$job_id}{$item};
        next unless defined($val) && ($val ne '');
        $uniq{$val} = 1;
    }
    return [ sort keys %uniq ];
}
# Assemble an SQL WHERE string by ANDing all non-blank clauses; unless
# $no_max is set, a LIMIT based on 'max_results' is appended. Returns ""
# for an empty clause list.
sub get_where_str {
    my ($self, $items, $no_max) = @_;
    return "" unless ($items && (@$items > 0));
    # keep only truthy clauses containing non-whitespace
    my @clauses = grep { $_ && ($_ =~ /\S/) } @$items;
    my $limit = " LIMIT " . $self->data('max_results');
    my $suffix = $no_max ? '' : $limit;
    return $suffix unless @clauses;
    return " WHERE " . join(" AND ", @clauses) . $suffix;
}
# Build a single SQL predicate for column $col and search text $txt.
# $eql: 1 = exact compare, 0 = substring (LIKE/ILIKE), 2 = numeric range
# where $txt is "low_high"; $has selects match vs negated match.
# $db selects the quoting handle: 'psql' or 'mysql'. Returns "" for
# unusable input.
sub get_search_str {
    my ($self, $db, $col, $txt, $eql, $has) = @_;
    # bugfix: allow the legitimate search text '0' (truth test rejected it)
    unless ($col && defined($txt) && length($txt)) { return ""; }
    $txt =~ s/\\//g;
    my $qtxt = '';
    if ($db eq 'psql') {
        $qtxt = $self->data('mgdb')->_dbh->quote($txt);
    } elsif ($db eq 'mysql') {
        $qtxt = $self->data('mgdb')->_jcache->quote($txt);
    } else {
        return "";
    }
    # strip the outer quotes added by quote(); they are re-added below
    if (length($qtxt)) { $qtxt = substr($qtxt, 1, length($qtxt) - 2); }
    if ($eql == 2) {
        my @rng = split(/_/, $txt);
        if (@rng != 2) { return ""; }
        # range bounds are interpolated unquoted into SQL: accept plain
        # numbers only to avoid broken or injectable statements
        for my $bound (@rng) {
            return "" unless $bound =~ /^[+-]?\d*\.?\d+$/;
        }
        if ($has) { return "$col BETWEEN $rng[0] AND $rng[1]"; }
        else { return "$col NOT BETWEEN $rng[0] AND $rng[1]"; }
    }
    elsif ($eql && $has) { return "$col = '$qtxt'"; }
    elsif ($eql && (! $has)) { return "$col != '$qtxt'"; }
    elsif ((! $eql) && $has) { return ($db eq 'psql') ? "$col ILIKE '$qtxt\%'" : "$col LIKE '\%$qtxt\%'"; }
    elsif ((! $eql) && (! $has)) { return ($db eq 'psql') ? "$col NOT ILIKE '$qtxt\%'" : "$col NOT LIKE '\%$qtxt\%'"; }
}
# Render one row of the advanced-search form as table cells: type selector,
# match selector, input widget, optional 'within' sub-category selector and
# the +/- row buttons. The input widget adapts to the match type: plain text,
# a low/high range pair, or a value dropdown.
# @param type currently selected search type (value from data('type'))
# @param match currently selected match mode
# @param text current search text (or "low_high" for ranges)
# @param extra currently selected sub-category
# @param extra_map type => extra => values map used for 'select' inputs
sub get_advanced_search {
my ($self, $type, $match, $text, $extra, $extra_map) = @_;
my $d_txt = $text || "";
my $style = "style='padding-left:10px'";
my $match_type = "text";
my $input_html = "<input name='txt_adv' type='text' value='$d_txt'/>";
my $extra_html = "";
my $extra_msg = "";
if ($type) {
# data('type') rows are [value, label, extra-list-name]; look up the
# extra list belonging to the chosen type
my @extras = map { $_->[2] } grep { $_->[0] eq $type } @{ $self->data('type') };
if ($extras[0] && (@extras == 1)) {
$extra_html = $self->build_select("sel_" . $extras[0], $self->data($extras[0]), $extra);
$extra_msg = "within";
if ($extra) {
# a 3-element extra row carries its own match type (e.g. 'range')
my @e = grep { $_->[0] eq $extra } @{ $self->data($extras[0]) };
if ((@e == 1) && (scalar(@{$e[0]}) == 3) && (exists $self->data('match')->{$e[0][2]})) {
$match_type = $e[0][2];
}
}
}
}
if ($text && ($match_type eq 'range')) {
# range input: two small fields prefilled from "low_high"
my @rngs = split(/_/, $text);
if (@rngs == 2) {
$input_html = "<input name='rng1_adv' type='text' value='$rngs[0]' size='7'/> to <input name='rng2_adv' type='text' value='$rngs[1]' size='7'/>";
}
} elsif ($extra && ($match_type eq 'select')) {
if (exists $extra_map->{$type}{$extra}) {
$input_html = $self->build_select("sel_adv", $extra_map->{$type}{$extra}, $text);
}
}
my $html = "<td>" . $self->build_select("sel_type", $self->data('type'), $type) . "</td>" .
"<td $style>" . $self->build_select("sel_match", $self->data('match')->{$match_type}, $match) . "</td>" .
"<td $style name='input_adv'>$input_html</td>" .
"<td $style>$extra_msg</td>" .
"<td $style name='extra'>$extra_html</td>" .
"<td $style><button name='but_add'><b>+</b></button></td>" .
"<td $style><button name='but_sub'><b>-</b></button></td>";
return $html;
}
# Render an HTML <select> named $name from $list; entries are either plain
# scalars (value == label) or [value, label, optional tooltip] arrayrefs.
# The entry whose value equals $sel is pre-selected.
sub build_select {
    my ($self, $name, $list, $sel) = @_;
    my @options;
    for my $entry (@$list) {
        my ($value, $label) = ref($entry) ? ($entry->[0], $entry->[1]) : ($entry, $entry);
        my $title = (ref($entry) && (@$entry > 2)) ? qq( title='$entry->[2]') : '';
        my $selected = ($sel && ($sel eq $value)) ? qq( selected='selected') : '';
        push @options, qq(<option value='$value'$title$selected>$label</option>);
    }
    return "<select name='$name'>" . join('', @options) . "</select>";
}
# Flatten a { category => [ [text, eql, has], ... ] } hash into an arrayref
# of [category, text, eql, has] tuples for the search helpers.
sub get_search_list {
    my ($self, $set) = @_;
    my @flat;
    while (my ($cat, $terms) = each %$set) {
        push @flat, map { [ $cat, @$_ ] } @$terms;
    }
    return \@flat;
}
# WebPage framework hook: stylesheet files this page needs loaded.
sub require_css {
return [ "$Conf::cgi_url/Html/bootstrap.min.css" ];
}
# WebPage framework hook: javascript files this page needs loaded.
sub require_javascript {
return [ "$Conf::cgi_url/Html/MetagenomeSearch.js" ];
}
| wilke/MG-RAST | src/MGRAST/lib/WebPage/MetagenomeSearch.pm | Perl | bsd-2-clause | 55,871 |
# Response wrapper for task-item listings returned by the Aspose.Tasks cloud
# API: a TaskItems payload plus the service status code/message.
package AsposeTasksCloud::Object::TaskItemsResponse;
require 5.6.0;
use strict;
use warnings;
use utf8;
use JSON qw(decode_json);
use Data::Dumper;
use Module::Runtime qw(use_module);
use Log::Any qw($log);
use Date::Parse;
use DateTime;
use base "AsposeTasksCloud::Object::BaseObject";
#
#
#
#NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually.
#
# attribute name => swagger type, used by BaseObject for (de)serialization
my $swagger_types = {
'Items' => 'TaskItems',
'Code' => 'string',
'Status' => 'string'
};
# perl attribute name => JSON field name
my $attribute_map = {
'Items' => 'Items',
'Code' => 'Code',
'Status' => 'Status'
};
# new object
# Constructor: accepts Items / Code / Status as named arguments.
sub new {
my ($class, %args) = @_;
my $self = {
#
'Items' => $args{'Items'},
#
'Code' => $args{'Code'},
#
'Status' => $args{'Status'}
};
return bless $self, $class;
}
# get swagger type of the attribute
sub get_swagger_types {
return $swagger_types;
}
# get attribute mappping
sub get_attribute_map {
return $attribute_map;
}
1;
| asposetasks/Aspose_Tasks_Cloud | SDKs/Aspose.Tasks-Cloud-SDK-for-Perl/lib/AsposeTasksCloud/Object/TaskItemsResponse.pm | Perl | mit | 1,033 |
#!/usr/bin/perl -w
# ======================================================================================
# check_lsi_raid: Nagios/Icinga plugin to check LSI Raid Controller status
# --------------------------------------------------------------------------------------
# Created as part of a semester project at the University of Applied Sciences Hagenberg
# (http://www.fh-ooe.at/en/hagenberg-campus/)
#
# Copyright (c) 2013-2014:
# Georg Schoenberger (gschoenberger@thomas-krenn.com)
# Grubhofer Martin (s1110239013@students.fh-hagenberg.at)
# Scheipner Alexander (s1110239032@students.fh-hagenberg.at)
# Werner Sebastian (s1110239038@students.fh-hagenberg.at)
# Jonas Meurer (jmeurer@inet.de)
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, see <http://www.gnu.org/licenses/>.
# ======================================================================================
use strict;
use warnings;
use Getopt::Long qw(:config no_ignore_case);
# Global runtime settings; most are overridden by command line options.
our $VERBOSITY = 0;
our $VERSION = "2.1";
our $NAME = "check_lsi_raid: Nagios/Icinga plugin to check LSI Raid Controller status";
# Default temperature thresholds (degrees C) for controller (ROC), physical
# disks, BBU and CacheVault; tunable via -Tw/-Tc, -PDTw/-PDTc, etc.
our $C_TEMP_WARNING = 80;
our $C_TEMP_CRITICAL = 90;
our $PD_TEMP_WARNING = 40;
our $PD_TEMP_CRITICAL = 45;
our $BBU_TEMP_WARNING = 50;
our $BBU_TEMP_CRITICAL = 60;
our $CV_TEMP_WARNING = 70;
our $CV_TEMP_CRITICAL = 85;
# Per-disk error-count warning thresholds: media, other, predictive-fail,
# shield and BBM counters (-Im/-Io/-Ip/-Is/-Ib)
our ($IGNERR_M, $IGNERR_O, $IGNERR_P, $IGNERR_S, $IGNERR_B) = (0, 0, 0, 0, 0);
our $NOENCLOSURES = 0;
our $CONTROLLER = 0;
our $criticality = "critical";
# Standard Nagios/Icinga plugin exit codes
use constant {
STATE_OK => 0,
STATE_WARNING => 1,
STATE_CRITICAL => 2,
STATE_UNKNOWN => 3,
};
# Header maps to parse logical and physical devices
our $LDMAP;
our @map_a = ('DG/VD','TYPE','State','Access','Consist','Cache','sCC','Size');
our @map_cc_a = ('DG/VD','TYPE','State','Access','Consist','Cache','Cac','sCC','Size');
our @pdmap_a = ('EID:Slt','DID','State','DG','Size','Intf','Med','SED','PI','SeSz','Model','Sp');
# Print command line usage to stdout.
# Threshold defaults shown in the text are interpolated from the globals
# above, so the help always reflects the compiled-in defaults.
sub displayUsage {
print "Usage: \n";
print " [ -h | --help ]
Display this help page\n";
print " [ -v | -vv | -vvv | --verbose ]
Sets the verbosity level.
No -v is the normal single line output for Nagios/Icinga, -v is a
more detailed version but still usable in Nagios. -vv is a
multiline output for debugging configuration errors or more
detailed information. -vvv is for plugin problem diagnosis.
For further information please visit:
http://nagiosplug.sourceforge.net/developer-guidelines.html#AEN39\n";
print " [ -V --version ]
Displays the plugin and, if available, the version if StorCLI.\n";
print " [ -C <num> | --controller <num> ]
Specifies a controller number, defaults to 0.\n";
print " [ -EID <ids> | --enclosure <ids> ]
Specifies one or more enclosure numbers, per default all enclosures. Takes either
an integer as additional argument or a commaseperated list,
e.g. '0,1,2'. With --noenclosures enclosures can be disabled.\n";
print " [ -LD <ids> | --logicaldevice <ids>]
Specifies one or more logical devices, defaults to all. Takes either an
integer as additional argument or a comma seperated list e.g. '0,1,2'.\n";
print " [ -PD <ids> | --physicaldevice <ids> ]
Specifies one or more physical devices, defaults to all. Takes either an
integer as additional argument or a comma seperated list e.g. '0,1,2'.\n";
print " [ -Tw <temp> | --temperature-warn <temp> ]
Specifies the RAID controller temperature warning threshold, the default
threshold is ${C_TEMP_WARNING}C.\n";
print " [ -Tc <temp> | --temperature-critical <temp> ]
Specifies the RAID controller temperature critical threshold, the default
threshold is ${C_TEMP_CRITICAL}C.\n";
print " [ -PDTw <temp> | --physicaldevicetemperature-warn <temp> ]
Specifies the disk temperature warning threshold, the default threshold
is ${PD_TEMP_WARNING}C.\n";
print " [ -PDTc <temp> | --physicaldevicetemperature-critical <temp> ]
Specifies the disk temperature critical threshold, the default threshold
is ${PD_TEMP_CRITICAL}C.\n";
print " [ -BBUTw <temp> | --bbutemperature-warning <temp> ]
Specifies the BBU temperature warning threshold, default threshold
is ${BBU_TEMP_WARNING}C.\n";
print " [ -BBUTc <temp> | --bbutemperature-critical <temp> ]
Specifies the BBU temperature critical threshold, default threshold
is ${BBU_TEMP_CRITICAL}C.\n";
print " [ -CVTw <temp> | --cvtemperature-warning <temp> ]
Specifies the CV temperature warning threshold, default threshold
is ${CV_TEMP_WARNING}C.\n";
print " [ -CVTc <temp> | --cvtemperature-critical <temp> ]
Specifies the CV temperature critical threshold, default threshold
is ${CV_TEMP_CRITICAL}C.\n";
print " [ -Im <count> | --ignore-media-errors <count> ]
Specifies the warning threshold for media errors per disk, the default
threshold is $IGNERR_M.\n";
print " [ -Io <count> | --ignore-other-errors <count> ]
Specifies the warning threshold for media errors per disk, the default
threshold is $IGNERR_O.\n";
print " [ -Ip <count> | --ignore-predictive-fail-count <count> ]
Specifies the warning threshold for media errors per disk, the default
threshold is $IGNERR_P.\n";
print " [ -Is <count> | --ignore-shield-counter <count> ]
Specifies the warning threshold for media errors per disk, the default
threshold is $IGNERR_S.\n";
print " [ -Ib <count> | --ignore-bbm-counter <count> ]
Specifies the warning threshold for bbm errors per disk, the default
threshold is $IGNERR_B.\n";
print " [ -p <path> | --path <path>]
Specifies the path to StorCLI, per default uses the tool 'which' to get
the StorCLI path.\n";
print " [ -z <criticality> | --criticality <criticality>]
Specifies the criticality alert level for this check, the default is
critical for this alert.\n";
print " [ -b <0/1> | --BBU <0/1> ]
Check if a BBU or a CacheVault module is present. One must be present unless
'-b 0' is defined. This ensures that for a given controller a BBU/CV must be
present per default.\n";
print " [ --noenclosures <0/1> ]
Specifies if enclosures are present or not. 0 means enclosures are
present (default), 1 states no enclosures are used (no 'eall' in
storcli commands).\n"
}
# Displays a short Help text for the user: plugin name, version, copyright,
# usage (via displayUsage) and contact/example information, then exits with
# STATE_OK.
sub displayHelp {
print $NAME."\n";
# typo fix in user-visible output: 'Pulgin' -> 'Plugin'
print "Plugin version: " . $VERSION ."\n";
print "Copyright (C) 2013-2014 Thomas-Krenn.AG\n";
print "Current updates available at
http://git.thomas-krenn.com/check_lsi_raid.git\n";
print "This Nagios/Icinga Plugin checks LSI RAID controllers for controller,
physical device, logical device, BBU and CV warnings and errors.\n";
print "In order for this plugin to work properly you need to add the nagios
user to your sudoers file (or create a new one in /etc/sudoers.d/).\n";
displayUsage();
print "Further information about this plugin can be found at:
http://www.thomas-krenn.com/de/wiki/LSI_RAID_Monitoring_Plugin and
http://www.thomas-krenn.com/de/wiki/LSI_RAID_Monitoring_Plugin
Please send an email to the tk-monitoring plugin-user mailing list:
tk-monitoring-plugins-user\@lists.thomas-krenn.com
if you have questions regarding use of this software, to submit patches, or
suggest improvements.
Example usage:
* check_lsi_raid -p /opt/MegaRAID/storcli/storcli64
* check_lsi_raid -p /opt/MegaRAID/storcli/storcli64 -C 1\n";
exit(STATE_OK);
}
# Prints the plugin version line(s) reported by StorCLI (when a path was
# given) and exits with STATE_OK.
# @param storcli The path to the storcli command utility, may be undef
sub displayVersion {
    my $storcli = shift;
    if (defined($storcli)) {
        my @version_lines = `$storcli -v`;
        for my $line (@version_lines) {
            # only the indented 'StorCli ...' line carries the version
            next unless $line =~ /^\s+StorCli.*/;
            $line =~ s/^\s+|\s+$//g;
            print $line;
        }
        print "\n";
    }
    exit(STATE_OK);
}
# Checks if a storcli call was successful, i.e. if the line 'Status = Success'
# is present in the command output. Decides on the FIRST 'Status' line found.
# @param output The output of the storcli command as array ref
# @return 1 on success, 0 if not (including when no Status line is present)
sub checkCommandStatus{
    my @output = @{(shift)};
    foreach my $line (@output){
        if($line =~ /^Status/){
            if($line eq "Status = Success\n"){
                return 1;
            }
            else{
                return 0;
            }
        }
    }
    # bugfix: previously fell off the loop (returning undef) when no 'Status'
    # line was present; treat missing status as failure explicitly
    return 0;
}
# Shows the time the controller is using. Can be used to check if the
# controller number is a correct one.
# @param storcli The path to storcli command utility, followed by the controller
# number, e.g. 'storcli64 /c0'.
# @return 1 on success, 0 if not
sub getControllerTime{
my $storcli = shift;
# an invalid controller number makes 'show time' fail its Status line
my @output = `$storcli show time`;
return (checkCommandStatus(\@output));
}
# Collect the controller information reported by 'adpallinfo' as a hash of
# 'label => value' pairs (one pair per colon-separated output line).
# Exits with STATE_UNKNOWN if the storcli call fails.
# @param storcli The path to storcli command utility, followed by the controller
# number, e.g. 'storcli64 /c0'.
# @param commands_a An array to push the used command to
# @return A hash ref, each key a value of the raid controller info
sub getControllerInfo{
    my $storcli = shift;
    my $commands_a = shift;
    my $command = '';
    # strip the trailing '/cN' part; adpallinfo uses the legacy 'aN' syntax
    $storcli =~ /^(.*)\/c[0-9]+/;
    $command = $1.'adpallinfo a'.$CONTROLLER;
    push @{$commands_a}, $command;
    my @output = `$command`;
    if($? >> 8 != 0){
        print "Invalid StorCLI command! ($command)\n";
        exit(STATE_UNKNOWN);
    }
    my %foundController_h;
    foreach my $line(@output){
        if($line =~ /\:/){
            # bugfix: split on the FIRST colon only (3-arg split), so values
            # containing colons themselves (e.g. times '12:30:45') survive
            my @lineVals = split(':', $line, 2);
            $lineVals[0] =~ s/^\s+|\s+$//g;
            $lineVals[1] =~ s/^\s+|\s+$//g;
            $foundController_h{$lineVals[0]} = $lineVals[1];
        }
    }
    return \%foundController_h;
}
# Checks the status of the raid controller
# @param statusLevel_a The status level array, elem 0 is a ref to the current
# status, elem 1 the warning sensors, elem 2 the critical sensors, elem 3 the
# verbose information hash for the sensors.
# @param foundController The hash of controller infos, created by getControllerInfo
sub getControllerStatus{
    my @statusLevel_a = @{(shift)};
    my %foundController = %{(shift)};
    my $status = 'OK';
    # bugfix: iterate the hash KEYS only; the old 'foreach (%hash)' walked
    # keys and values interleaved, doubling every iteration
    foreach my $key (keys %foundController){
        if($key eq 'ROC temperature'){
            # value looks like '80C ...'; extract the leading number
            $foundController{$key} =~ /^([0-9]+\.?[0-9]+).*$/;
            if(defined($1)){
                if(!(checkThreshs($1, $C_TEMP_CRITICAL))){
                    $status = getMaxStatus('Critical',$status);
                    push @{$statusLevel_a[2]}, 'ROC_Temperature';
                }
                elsif(!(checkThreshs($1, $C_TEMP_WARNING))){
                    $status = getMaxStatus('Warning',$status);
                    push @{$statusLevel_a[1]}, 'ROC_Temperature';
                }
                $statusLevel_a[3]->{'ROC_Temperature'} = $1;
            }
        }
        elsif($key eq 'Degraded'){
            if($foundController{$key} != 0){
                $status = getMaxStatus('Warning',$status);
                push @{$statusLevel_a[1]}, 'CTR_Degraded_drives';
                $statusLevel_a[3]->{'CTR_Degraded_drives'} = $foundController{$key};
            }
        }
        elsif($key eq 'Offline'){
            if($foundController{$key} != 0){
                $status = getMaxStatus('Warning',$status);
                push @{$statusLevel_a[1]}, 'CTR_Offline_drives';
                $statusLevel_a[3]->{'CTR_Offline_drives'} = $foundController{$key};
            }
        }
        elsif($key eq 'Critical Disks'){
            if($foundController{$key} != 0){
                $status = getMaxStatus('Critical',$status);
                push @{$statusLevel_a[2]}, 'CTR_Critical_disks';
                $statusLevel_a[3]->{'CTR_Critical_disks'} = $foundController{$key};
            }
        }
        elsif($key eq 'Failed Disks'){
            if($foundController{$key} != 0){
                $status = getMaxStatus('Critical',$status);
                push @{$statusLevel_a[2]}, 'CTR_Failed_disks';
                $statusLevel_a[3]->{'CTR_Failed_disks'} = $foundController{$key};
            }
        }
        elsif($key eq 'Memory Correctable Errors'){
            if($foundController{$key} != 0){
                $status = getMaxStatus('Warning',$status);
                push @{$statusLevel_a[1]}, 'CTR_Memory_correctable_errors';
                $statusLevel_a[3]->{'CTR_Memory_correctable_errors'} = $foundController{$key};
            }
        }
        elsif($key eq 'Memory Uncorrectable Errors'){
            if($foundController{$key} != 0){
                $status = getMaxStatus('Critical',$status);
                push @{$statusLevel_a[2]}, 'CTR_Memory_Uncorrectable_errors';
                $statusLevel_a[3]->{'CTR_Memory_Uncorrectable_errors'} = $foundController{$key};
            }
        }
    }
    $statusLevel_a[3]->{'CTR_Status'} = $status;
    ${$statusLevel_a[0]} = getMaxStatus(${$statusLevel_a[0]},$status);
}
# Checks which logical devices are present for the given controller and parses
# the logical devices to a list of hashes. Each hash represents a logical device
# with its values from the output.
# @param storcli The path to storcli command utility, followed by the controller
# number, e.g. 'storcli64 /c0'.
# @param logDevices If given, a list of desired logical device numbers
# @param action The storcli action to check, 'all' or 'init'
# @param commands_a An array to push the used command to
# @return A list of hashes, each hash is one logical device. Check ldmap_a for valid
# hash keys.
sub getLogicalDevices{
my $storcli = shift;
my @logDevices = @{(shift)};
my $action = shift;
my $commands_a = shift;
my $command = $storcli;
# build the vd selector: all, a single id, or a comma list
if(scalar(@logDevices) == 0) { $command .= "/vall"; }
elsif(scalar(@logDevices) == 1) { $command .= "/v$logDevices[0]"; }
else { $command .= "/v".join(",", @logDevices); }
$command .= " show $action";
push @{$commands_a}, $command;
my @output = `$command`;
my @foundDevs;
if(checkCommandStatus(\@output)) {
if($action eq "all") {
# stateful parse: remember the current 'cN/vM' section header, then map
# the following table rows onto the detected column header layout
my $currBlock;
foreach my $line(@output){
my @splittedLine;
if($line =~ /^\/(c[0-9]*\/v[0-9]*).*/){
$currBlock = $1;
next;
}
if(defined($currBlock)){
if($line =~ /^DG\/VD TYPE.*/){
# header row decides which column map applies: 9 columns
# without, 10 columns with the extra 'Cac' column
@splittedLine = split(' ', $line);
if(scalar(@splittedLine)== 9){
$LDMAP = \@map_a;
}
if(scalar(@splittedLine)== 10){
$LDMAP = \@map_cc_a;
}
}
if($line =~ /^\d+\/\d+\s+\w+\d\s+\w+.*/){
# data row: trim each whitespace-separated field
@splittedLine = map { s/^\s*//; s/\s*$//; $_; } split(/\s+/,$line);
my %lineValues_h;
# The current block is the c0/v0 name
$lineValues_h{'ld'} = $currBlock;
for(my $i = 0; $i < @{$LDMAP}; $i++){
$lineValues_h{$LDMAP->[$i]} = $splittedLine[$i];
}
push @foundDevs, \%lineValues_h;
}
}
}
}
elsif($action eq "init") {
# init progress listing: only report devices with a running init
foreach my $line(@output){
$line =~ s/^\s+|\s+$//g;#trim line
if($line =~ /^([0-9]+)\s+INIT.*$/){
my $vdNum = 'c'.$CONTROLLER.'/v'.$1;
if($line !~ /Not in progress/i){
my %lineValues_h;
my @vals = split('\s+',$line);
$lineValues_h{'ld'} = $vdNum;
$lineValues_h{'init'} = $vals[2];
push @foundDevs, \%lineValues_h;
}
}
}
}
}
else {
print "Invalid StorCLI command! ($command)\n";
exit(STATE_UNKNOWN);
}
return \@foundDevs;
}
# Checks the status of the logical devices.
# @param statusLevel_a The status level array, elem 0 is the current status,
# elem 1 the warning sensors, elem 2 the critical sensors, elem 3 the verbose
# information for the sensors.
# @param foundLDs The array of logical devices, created by getLogicalDevices
# Evaluates the health of the given logical devices and records warning and
# critical sensors in the status level array.
# @param statusLevel_a The status level array, elem 0 is the current status,
# elem 1 the warning sensors, elem 2 the critical sensors, elem 3 the verbose
# information for the sensors.
# @param foundLDs The array of logical devices, created by getLogicalDevices
sub getLDStatus{
    my ($statusLevel_ref, $foundLDs_ref) = @_;
    my @statusLevel_a = @{$statusLevel_ref};
    my $worst = 'OK';
    for my $dev (@{$foundLDs_ref}) {
        my $id = $dev->{'ld'};
        # Any state other than 'Optl' (optimal) is critical.
        if (exists($dev->{'State'}) && $dev->{'State'} ne 'Optl') {
            $worst = getMaxStatus('Critical', $worst);
            push @{$statusLevel_a[2]}, $id.'_State';
            $statusLevel_a[3]->{$id.'_State'} = $dev->{'State'};
        }
        # Inconsistent drives are a warning, except for CacheCade (Cac1) volumes.
        if (exists($dev->{'Consist'}) && $dev->{'Consist'} ne 'Yes' && $dev->{'TYPE'} ne 'Cac1') {
            $worst = getMaxStatus('Warning', $worst);
            push @{$statusLevel_a[1]}, $id.'_Consist';
            $statusLevel_a[3]->{$id.'_Consist'} = $dev->{'Consist'};
        }
        # A running initialization is reported as a warning.
        if (exists($dev->{'init'})) {
            $worst = getMaxStatus('Warning', $worst);
            push @{$statusLevel_a[1]}, $id.'_Init';
            $statusLevel_a[3]->{$id.'_Init'} = $dev->{'init'};
        }
    }
    # Merge with an LD_Status set by a previous invocation, if any.
    my $verbose_h = $statusLevel_a[3];
    $verbose_h->{'LD_Status'} = exists($verbose_h->{'LD_Status'})
        ? getMaxStatus($verbose_h->{'LD_Status'}, $worst)
        : $worst;
    ${$statusLevel_a[0]} = getMaxStatus(${$statusLevel_a[0]}, $worst);
}
# Checks which physical devices are present for the given controller and parses
# the physical devices to a list of hashes. Each hash represents a physical device
# with its values from the output.
# @param storcli The path to storcli command utility, followed by the controller
# number, e.g. 'storcli64 /c0'.
# @param physDevices If given, a list of desired physical device numbers
# @param action The storcli action to check, 'all', 'initialization' or 'rebuild'
# @param commands_a An array to push the used command to
# @return A list of hashes, each hash is one physical device. Check pdmap_a for valid
# hash keys.
sub getPhysicalDevices{
    my $storcli = shift;
    my @enclosures = @{(shift)};
    my @physDevices = @{(shift)};
    my $action = shift;
    my $commands_a = shift;
    my $command = $storcli;
    # Build the enclosure selector unless enclosures are disabled globally.
    if(!$NOENCLOSURES){
        if(scalar(@enclosures) == 0) { $command .= "/eall"; }
        elsif(scalar(@enclosures) == 1) { $command .= "/e$enclosures[0]"; }
        else { $command .= "/e".join(",", @enclosures); }
    }
    # Build the slot (physical drive) selector.
    if(scalar(@physDevices) == 0) { $command .= "/sall"; }
    elsif(scalar(@physDevices) == 1) { $command .= "/s$physDevices[0]"; }
    else { $command .= "/s".join(",", @physDevices); }
    $command .= " show $action";
    push @{$commands_a}, $command;
    my @output = `$command`;
    my @foundDevs;
    if(checkCommandStatus(\@output)){
        if($action eq "all") {
            # $currBlock is the drive name of the block being parsed,
            # $line_ref accumulates that drive's values across lines.
            my $currBlock;
            my $line_ref;
            foreach my $line(@output){
                my @splittedLine;
                if($line =~ /^Drive \/(c[0-9]*\/e[0-9]*\/s[0-9]*) \:$/){
                    $currBlock = $1;
                    $line_ref = {};
                    next;
                }
                if(defined($currBlock)){
                    # If a drive is not in a group, a - is at the DG column
                    if($line =~ /^\d+\:\d+\s+\d+\s+\w+\s+[0-9-F]+.*/){
                        @splittedLine = map { s/^\s*//; s/\s*$//; $_; } split(/\s+/,$line);
                        # The current block is the c0/e252/s0 name
                        $line_ref->{'pd'} = $currBlock;
                        # $j walks the split fields; it can advance by two for
                        # map entries whose value spans two fields.
                        my $j = 0;
                        for(my $i = 0; $i < @pdmap_a; $i++){
                            if($pdmap_a[$i] eq 'Size'){
                                # Size may be split as '<num> GB'/'<num> TB';
                                # if so, join the unit onto the number.
                                my $size = $splittedLine[$j];
                                if($splittedLine[$j+1] eq 'GB' || $splittedLine[$j+1] eq 'TB'){
                                    $size .= ''.$splittedLine[$j+1];
                                    $j++;
                                }
                                $line_ref->{$pdmap_a[$i]} = $size;
                                $j++;
                            }
                            elsif($pdmap_a[$i] eq 'Model'){
                                # Two-word model names occupy an extra field.
                                my $model = $splittedLine[$j];
                                # Model should be the next last element, j starts at 0
                                if(($j+2) != scalar(@splittedLine)){
                                    $model .= ' '.$splittedLine[$j+1];
                                    $j++;
                                }
                                $line_ref->{$pdmap_a[$i]} = $model;
                                $j++;
                            }
                            else{
                                $line_ref->{$pdmap_a[$i]} = $splittedLine[$j];
                                $j++;
                            }
                        }
                    }
                    # Collect the per-drive counter/temperature/SMART lines.
                    if($line =~ /^(Shield Counter|Media Error Count|Other Error Count|BBM Error Count|Drive Temperature|Predictive Failure Count|S\.M\.A\.R\.T alert flagged by drive)\s\=\s+(.*)$/){
                        $line_ref->{$1} = $2;
                    }
                    # If the last value is parsed, set up for the next device
                    if(exists($line_ref->{'S.M.A.R.T alert flagged by drive'})){
                        push @foundDevs, $line_ref;
                        undef $currBlock;
                        undef $line_ref;
                    }
                }
            }
        }
        elsif($action eq 'rebuild' || $action eq 'initialization') {
            # Progress listings: one line per drive, skipped unless an
            # operation is actually running.
            foreach my $line(@output){
                $line =~ s/^\s+|\s+$//g;#trim line
                if($line =~ /^\/c$CONTROLLER\/.*/){
                    if($line !~ /Not in progress/i){
                        my %lineValues_h;
                        my @vals = split('\s+',$line);
                        my $key;
                        if($action eq 'rebuild'){ $key = 'rebuild'; }
                        if($action eq 'initialization'){ $key = 'init'; }
                        # Strip the leading '/' from the drive name.
                        $lineValues_h{'pd'} = substr($vals[0], 1);
                        $lineValues_h{$key} = $vals[1];
                        push @foundDevs, \%lineValues_h;
                    }
                }
            }
        }
    }
    else {
        print "Invalid StorCLI command! ($command)\n";
        exit(STATE_UNKNOWN);
    }
    return \@foundDevs;
}
# Checks the status of the physical devices.
# @param statusLevel_a The status level array, elem 0 is the current status,
# elem 1 the warning sensors, elem 2 the critical sensors, elem 3 the verbose
# information for the sensors.
# @param foundPDs The array of physical devices, created by getPhysicalDevices
sub getPDStatus{
    my @statusLevel_a = @{(shift)};
    my @foundPDs = @{(shift)};
    my $status = 'OK';
    foreach my $PD (@foundPDs){
        if(exists($PD->{'State'})){
            # Acceptable states: online (Onln), unconfigured-good (UGood),
            # global hot spare (GHS); anything else is critical.
            if($PD->{'State'} ne 'Onln' && $PD->{'State'} ne 'UGood' && $PD->{'State'} ne 'GHS'){
                $status = getMaxStatus('Critical',$status);
                push @{$statusLevel_a[2]}, $PD->{'pd'}.'_State';
                $statusLevel_a[3]->{$PD->{'pd'}.'_State'} = $PD->{'State'};
            }
        }
        # Error counters warn once they exceed their configurable ignore
        # thresholds ($IGNERR_*).
        if(exists($PD->{'Shield Counter'})){
            if($PD->{'Shield Counter'} > $IGNERR_S){
                $status = getMaxStatus('Warning',$status);
                push @{$statusLevel_a[1]}, $PD->{'pd'}.'_Shield_counter';
                $statusLevel_a[3]->{$PD->{'pd'}.'_Shield_counter'} = $PD->{'Shield Counter'};
            }
        }
        if(exists($PD->{'Media Error Count'})){
            if($PD->{'Media Error Count'} > $IGNERR_M){
                $status = getMaxStatus('Warning',$status);
                push @{$statusLevel_a[1]}, $PD->{'pd'}.'_Media_error_count';
                $statusLevel_a[3]->{$PD->{'pd'}.'_Media_error_count'} = $PD->{'Media Error Count'};
            }
        }
        if(exists($PD->{'Other Error Count'})){
            if($PD->{'Other Error Count'} > $IGNERR_O){
                $status = getMaxStatus('Warning',$status);
                push @{$statusLevel_a[1]}, $PD->{'pd'}.'_Other_error_count';
                $statusLevel_a[3]->{$PD->{'pd'}.'_Other_error_count'} = $PD->{'Other Error Count'};
            }
        }
        if(exists($PD->{'BBM Error Count'})){
            if($PD->{'BBM Error Count'} > $IGNERR_B){
                $status = getMaxStatus('Warning',$status);
                push @{$statusLevel_a[1]}, $PD->{'pd'}.'_BBM_error_count';
                $statusLevel_a[3]->{$PD->{'pd'}.'_BBM_error_count'} = $PD->{'BBM Error Count'};
            }
        }
        if(exists($PD->{'Predictive Failure Count'})){
            if($PD->{'Predictive Failure Count'} > $IGNERR_P){
                $status = getMaxStatus('Warning',$status);
                push @{$statusLevel_a[1]}, $PD->{'pd'}.'_Predictive_failure_count';
                $statusLevel_a[3]->{$PD->{'pd'}.'_Predictive_failure_count'} = $PD->{'Predictive Failure Count'};
            }
        }
        if(exists($PD->{'S.M.A.R.T alert flagged by drive'})){
            if($PD->{'S.M.A.R.T alert flagged by drive'} ne 'No'){
                $status = getMaxStatus('Warning',$status);
                push @{$statusLevel_a[1]}, $PD->{'pd'}.'_SMART_flag';
                # Record the flag value so it appears in verbose output,
                # consistent with every other sensor (it was missing before).
                $statusLevel_a[3]->{$PD->{'pd'}.'_SMART_flag'} = $PD->{'S.M.A.R.T alert flagged by drive'};
            }
        }
        if(exists($PD->{'DG'})){
            # 'F' in the drive-group column marks a failed drive.
            if($PD->{'DG'} eq 'F'){
                $status = getMaxStatus('Warning',$status);
                push @{$statusLevel_a[1]}, $PD->{'pd'}.'_DG';
                $statusLevel_a[3]->{$PD->{'pd'}.'_DG'} = $PD->{'DG'};
            }
        }
        if(exists($PD->{'Drive Temperature'})){
            my $temp = $PD->{'Drive Temperature'};
            # '0C (32.00 F)' is treated like N/A (no usable sensor value).
            if($temp ne 'N/A' && $temp ne '0C (32.00 F)'){
                # Only check thresholds if the value actually parses;
                # previously a non-matching format silently reused a stale $1.
                if($temp =~ /^([0-9]+)C/){
                    my $temp_c = $1;
                    if(!(checkThreshs($temp_c, $PD_TEMP_CRITICAL))){
                        $status = getMaxStatus('Critical',$status);
                        push @{$statusLevel_a[2]}, $PD->{'pd'}.'_Drive_Temperature';
                    }
                    elsif(!(checkThreshs($temp_c, $PD_TEMP_WARNING))){
                        $status = getMaxStatus('Warning',$status);
                        push @{$statusLevel_a[1]}, $PD->{'pd'}.'_Drive_Temperature';
                    }
                    $statusLevel_a[3]->{$PD->{'pd'}.'_Drive_Temperature'} = $temp_c;
                }
            }
        }
        # Running initializations and rebuilds are reported as warnings.
        if(exists($PD->{'init'})){
            $status = getMaxStatus('Warning',$status);
            push @{$statusLevel_a[1]}, $PD->{'pd'}.'_Init';
            $statusLevel_a[3]->{$PD->{'pd'}.'_Init'} = $PD->{'init'};
        }
        if(exists($PD->{'rebuild'})){
            $status = getMaxStatus('Warning',$status);
            push @{$statusLevel_a[1]}, $PD->{'pd'}.'_Rebuild';
            $statusLevel_a[3]->{$PD->{'pd'}.'_Rebuild'} = $PD->{'rebuild'};
        }
    }
    # Merge with a PD_Status set by a previous invocation, if any.
    if(exists($statusLevel_a[3]->{'PD_Status'})) {
        $statusLevel_a[3]->{'PD_Status'} = getMaxStatus($statusLevel_a[3]->{'PD_Status'},$status );
    }
    else {
        $statusLevel_a[3]->{'PD_Status'} = $status;
    }
    ${$statusLevel_a[0]} = getMaxStatus(${$statusLevel_a[0]},$status);
}
# Checks the status of the BBU, parses 'bbu show status' for the given controller.
# @param storcli The path to storcli command utility, followed by the controller
# number, e.g. 'storcli64 /c0'.
# @param statusLevel_a The status level array, elem 0 is the current status,
# elem 1 the warning sensors, elem 2 the critical sensors, elem 3 the verbose
# information for the sensors.
# @param commands_a An array to push the used command to
sub getBBUStatus {
    my $storcli = shift;
    my @statusLevel_a = @{(shift)};
    my $commands_a = shift;
    my $command = "$storcli /bbu show status";
    push @{$commands_a}, $command;
    # Worst status seen over all parsed lines. Using getMaxStatus ensures a
    # later 'Warning' sensor can no longer downgrade an earlier 'Critical'
    # (the previous implementation assigned $status directly, which could).
    my $status = 'OK';
    my @output = `$command`;
    if(checkCommandStatus(\@output)) {
        # $currBlock tracks which output section the current line belongs to.
        my $currBlock;
        foreach my $line (@output) {
            if($line =~ /^(BBU_Info|BBU_Firmware_Status|GasGaugeStatus)/){
                $currBlock = $1;
                next;
            }
            next unless defined($currBlock);
            $line =~ s/^\s+|\s+$//g;#trim line
            if($currBlock eq 'BBU_Info'){
                if ($line =~ /^Battery State/){
                    # Any battery state other than 'Optimal' is a warning.
                    $line =~ /([a-zA-Z]*)$/;
                    if($1 ne 'Optimal'){
                        $status = getMaxStatus('Warning', $status);
                        push @{$statusLevel_a[1]}, 'BBU_State';
                        $statusLevel_a[3]->{'BBU_State'} = $1;
                    }
                }
                elsif($line =~ /^Temperature/){
                    $line =~ /([0-9]+) C$/;
                    if(!(checkThreshs($1, $BBU_TEMP_CRITICAL))){
                        $status = getMaxStatus('Critical', $status);
                        push @{$statusLevel_a[2]}, 'BBU_Temperature';
                    }
                    elsif(!(checkThreshs($1, $BBU_TEMP_WARNING))){
                        $status = getMaxStatus('Warning', $status);
                        push @{$statusLevel_a[1]}, 'BBU_Temperature';
                    }
                    $statusLevel_a[3]->{'BBU_Temperature'} = $1;
                }
            }
            elsif($currBlock eq 'BBU_Firmware_Status'){
                # Firmware flags: the last word of each line is the value.
                if($line =~ /^Temperature/){
                    $line =~ /([a-zA-Z]*)$/;
                    if($1 ne "OK") {
                        $status = getMaxStatus('Critical', $status);
                        push @{$statusLevel_a[2]},'BBU_Firmware_temperature';
                        $statusLevel_a[3]->{'BBU_Firmware_temperature'} = $1;
                    }
                }
                elsif($line =~ /^Voltage/){
                    $line =~ /([a-zA-Z]*)$/;
                    if($1 ne "OK") {
                        $status = getMaxStatus('Warning', $status);
                        push @{$statusLevel_a[1]},'BBU_Voltage';
                        $statusLevel_a[3]->{'BBU_Voltage'} = $1;
                    }
                }
                elsif($line =~ /^I2C Errors Detected/){
                    $line =~ /([a-zA-Z]*)$/;
                    if($1 ne "No") {
                        $status = getMaxStatus('Critical', $status);
                        # Sensor name and verbose key must be identical so the
                        # value can be looked up later; they previously
                        # differed in case (_errors vs _Errors).
                        push @{$statusLevel_a[2]},'BBU_Firmware_I2C_errors';
                        $statusLevel_a[3]->{'BBU_Firmware_I2C_errors'} = $1;
                    }
                }
                elsif($line =~ /^Battery Pack Missing/){
                    $line =~ /([a-zA-Z]*)$/;
                    if($1 ne "No") {
                        $status = getMaxStatus('Critical', $status);
                        push @{$statusLevel_a[2]},'BBU_Pack_missing';
                        $statusLevel_a[3]->{'BBU_Pack_missing'} = $1;
                    }
                }
                elsif($line =~ /^Replacement required/){
                    $line =~ /([a-zA-Z]*)$/;
                    if($1 ne "No") {
                        $status = getMaxStatus('Critical', $status);
                        push @{$statusLevel_a[2]},'BBU_Replacement_required';
                        $statusLevel_a[3]->{'BBU_Replacement_required'} = $1;
                    }
                }
                elsif($line =~ /^Remaining Capacity Low/){
                    $line =~ /([a-zA-Z]*)$/;
                    if($1 ne "No") {
                        $status = getMaxStatus('Warning', $status);
                        push @{$statusLevel_a[1]},'BBU_Remaining_capacity_low';
                        $statusLevel_a[3]->{'BBU_Remaining_capacity_low'} = $1;
                    }
                }
                elsif($line =~ /^Pack is about to fail \& should be replaced/){
                    $line =~ /([a-zA-Z]*)$/;
                    if($1 ne "No") {
                        $status = getMaxStatus('Critical', $status);
                        push @{$statusLevel_a[2]},'BBU_Should_be_replaced';
                        $statusLevel_a[3]->{'BBU_Should_be_replaced'} = $1;
                    }
                }
            }
            elsif($currBlock eq 'GasGaugeStatus'){
                if($line =~ /^Fully Discharged/){
                    $line =~ /([a-zA-Z]*)$/;
                    if($1 ne "No") {
                        $status = getMaxStatus('Critical', $status);
                        push @{$statusLevel_a[2]},'BBU_GasGauge_discharged';
                        $statusLevel_a[3]->{'BBU_GasGauge_discharged'} = $1;
                    }
                }
                elsif($line =~ /^Over Temperature/){
                    $line =~ /([a-zA-Z]*)$/;
                    if($1 ne "No") {
                        $status = getMaxStatus('Warning', $status);
                        push @{$statusLevel_a[1]},'BBU_GasGauge_over_temperature';
                        $statusLevel_a[3]->{'BBU_GasGauge_over_temperature'} = $1;
                    }
                }
                elsif($line =~ /^Over Charged/){
                    $line =~ /([a-zA-Z]*)$/;
                    if($1 ne "No") {
                        $status = getMaxStatus('Critical', $status);
                        push @{$statusLevel_a[2]},'BBU_GasGauge_over_charged';
                        $statusLevel_a[3]->{'BBU_GasGauge_over_charged'} = $1;
                    }
                }
            }
        }
        # Publish the aggregated BBU status once after parsing (this used to
        # run redundantly inside the per-line loop), matching the merge style
        # of the other get*Status subs.
        $statusLevel_a[3]->{'BBU_Status'} = $status;
        ${$statusLevel_a[0]} = getMaxStatus(${$statusLevel_a[0]}, $status);
    }
    else {
        print "Invalid StorCLI command! ($command)\n";
        exit(STATE_UNKNOWN);
    }
}
# Checks the status of the CV module, parses 'cv show status' for the given
# controller.
# @param storcli The path to storcli command utility, followed by the controller
# number, e.g. 'storcli64 /c0'.
# @param statusLevel_a The status level array, elem 0 is the current status,
# elem 1 the warning sensors, elem 2 the critical sensors, elem 3 the verbose
# information for the sensors.
# @param commands_a An array to push the used command to
sub getCVStatus {
    my $storcli = shift;
    my @statusLevel_a = @{(shift)};
    my $commands_a = shift;
    my $command = "$storcli /cv show status";
    push @{$commands_a}, $command;
    my $status = 'OK';
    my @output = `$command`;
    if(checkCommandStatus(\@output)) {
        # $currBlock tracks which output section ('Cachevault_Info' or
        # 'Firmware_Status') the current line belongs to.
        my $currBlock;
        foreach my $line (@output) {
            if($line =~ /^(Cachevault_Info|Firmware_Status)/){
                $currBlock = $1;
                next;
            }
            if(defined($currBlock)){
                $line =~ s/^\s+|\s+$//g;#trim line
                # Any cachevault state other than 'Optimal' is a warning.
                if($currBlock eq 'Cachevault_Info' && $line =~ /^State/){
                    my @vals = split('\s{2,}',$line);
                    if($vals[1] ne "Optimal") {
                        $status = getMaxStatus('Warning', $status);
                        push @{$statusLevel_a[1]}, 'CV_State';
                        $statusLevel_a[3]->{'CV_State'} = $vals[1]
                    }
                }
                elsif($currBlock eq 'Cachevault_Info' && $line =~ /^Temperature/){
                    # NOTE(review): if the line does not end in '<n> C', $1
                    # keeps its value from a previous match -- confirm the
                    # output format is stable across storcli versions.
                    $line =~ /([0-9]+) C$/;
                    if(!(checkThreshs($1, $CV_TEMP_CRITICAL))){
                        $status = getMaxStatus('Critical', $status);
                        push @{$statusLevel_a[2]}, 'CV_Temperature';
                    }
                    elsif(!(checkThreshs($1, $CV_TEMP_WARNING))){
                        $status = getMaxStatus('Warning', $status);
                        push @{$statusLevel_a[1]}, 'CV_Temperature';
                    }
                    $statusLevel_a[3]->{'CV_Temperature'} = $1;
                }
                elsif($currBlock eq 'Firmware_Status' && $line =~ /^Replacement required/){
                    $line =~ /([a-zA-Z0-9]*)$/;
                    if($1 ne "No") {
                        $status = getMaxStatus('Critical', $status);
                        push @{$statusLevel_a[2]},'CV_Replacement_required';
                    }
                    # The verbose value is recorded even when the answer is 'No'.
                    $statusLevel_a[3]->{'CV_Replacement_required'} = $1;
                }
            }
        }
        $statusLevel_a[3]->{'CV_Status'} = $status;
        ${$statusLevel_a[0]} = getMaxStatus(${$statusLevel_a[0]},$status);
    }
    else {
        print "Invalid StorCLI command! ($command)\n";
        exit(STATE_UNKNOWN);
    }
}
# Checks whether a BBU or a CV module is present
# @param storcli The path to storcli command utility, followed by the controller
# number, e.g. 'storcli64 /c0'.
# @return A tuple, e.g. (0,0), where 0 means module is not present, 1 present
# Determines whether a BBU and/or a CV module is attached to the controller
# by probing '/bbu show' and '/cv show'.
# @param storcli The path to storcli command utility, followed by the controller
# number, e.g. 'storcli64 /c0'.
# @return A two-element list (bbu, cv); each element is 1 if the module is
# present, 0 otherwise
sub checkBBUorCVIsPresent{
    my $storcli = shift;
    my @bbu_output = `$storcli /bbu show`;
    my $bbu = checkCommandStatus(\@bbu_output) ? 1 : 0;
    my @cv_output = `$storcli /cv show`;
    my $cv = checkCommandStatus(\@cv_output) ? 1 : 0;
    return ($bbu, $cv);
}
# Checks if a given value is in a specified range, the range must follow the
# nagios development guidelines:
# http://nagiosplug.sourceforge.net/developer-guidelines.html#THRESHOLDFORMAT
# @param value The given value to check the pattern for
# @param pattern The pattern specifying the threshold range, e.g. '10:', '@10:20'
# @return 0 if the value is outside the range, 1 if the value satisfies the range
# Checks if a given value is in a specified range, the range must follow the
# nagios development guidelines:
# http://nagiosplug.sourceforge.net/developer-guidelines.html#THRESHOLDFORMAT
# @param value The given value to check the pattern for
# @param pattern The pattern specifying the threshold range, e.g. '10:', '@10:20'
# @return 0 if the value is outside the range, 1 if the value satisfies the range
sub checkThreshs{
    my ($value, $pattern) = @_;
    if($pattern =~ /(^[0-9]+$)/){
        # Plain 'N': alert when outside [0, N].
        return 0 if $value < 0 || $value > $1;
    }
    elsif($pattern =~ /(^[0-9]+)\:$/){
        # 'N:': alert when below N.
        return 0 if $value < $1;
    }
    elsif($pattern =~ /^\~\:([0-9]+)$/){
        # '~:N': alert when above N.
        return 0 if $value > $1;
    }
    elsif($pattern =~ /^([0-9]+)\:([0-9]+)$/){
        # 'N:M': alert when outside [N, M].
        return 0 if $value < $1 || $value > $2;
    }
    elsif($pattern =~ /^\@([0-9]+)\:([0-9]+)$/){
        # '@N:M': inverted range, alert when inside [N, M].
        return 0 if $value >= $1 && $value <= $2;
    }
    else{
        print "Invalid temperature parameter! ($pattern)\n";
        exit(STATE_UNKNOWN);
    }
    return 1;
}
# Get the status string as plugin output
# @param level The desired level to get the status string for. Either 'Warning'
# or 'Critical'.
# @param statusLevel_a The status level array, elem 0 is the current status,
# elem 1 the warning sensors, elem 2 the critical sensors, elem 3 the verbose
# information for the sensors, elem 4 the used storcli commands.
# @return The created status string
sub getStatusString{
    my $level = shift;
    my @statusLevel_a = @{(shift)};
    my @sensors_a;
    my $status_str = "";
    # Pick the sensor list that matches the requested level.
    if($level eq "Warning"){
        @sensors_a = @{$statusLevel_a[1]};
    }
    if($level eq "Critical"){
        @sensors_a = @{$statusLevel_a[2]};
    }
    # Add the controller parts only once
    my $parts = '';
    # level comes from the method call, not the real status level
    if($level eq "Critical"){
        my @keys = ('CTR_Status','LD_Status','PD_Status','BBU_Status','CV_Status');
        # Check which parts were checked
        foreach my $key (@keys){
            $key =~ /^([A-Z]+)\_.*$/;
            my $part = $1;
            # If the overall status is OK, list the checked-and-OK parts;
            # otherwise list only the failing parts with an abbreviated
            # (4-character) status.
            if(${$statusLevel_a[0]} eq 'OK'){
                if(exists($statusLevel_a[3]->{$key}) && $statusLevel_a[3]->{$key} eq 'OK'){
                    $parts .= ", " unless $parts eq '';
                    $parts .= $part;
                }
            }
            else{
                if(exists($statusLevel_a[3]->{$key}) && $statusLevel_a[3]->{$key} ne 'OK'){
                    $parts .= ", " unless $parts eq '';
                    $parts .= $part;
                    $parts .= ' '.substr($statusLevel_a[3]->{$key}, 0, 4);
                }
            }
        }
        $status_str.= '(';
        $status_str .= $parts unless !defined($parts);
        $status_str.= ')';
    }
    # Separate the parts summary from the sensor list, but only once: the
    # Warning call adds the blank only if no critical sensors already did.
    if($level eq 'Critical'){
        $status_str.= ' ' unless !(@sensors_a);
    }
    if($level eq 'Warning' && !@{$statusLevel_a[2]}){
        $status_str.= ' ' unless !(@sensors_a);
    }
    if($level eq "Warning" || $level eq "Critical"){
        if(@sensors_a){
            # Print which sensors are Warn or Crit
            foreach my $sensor (@sensors_a){
                $status_str .= "[".$sensor." = ".$level;
                # With -v, append the verbose value for the sensor if known.
                if($VERBOSITY){
                    if(exists($statusLevel_a[3]->{$sensor})){
                        $status_str .= " (".$statusLevel_a[3]->{$sensor}.")";
                    }
                }
                $status_str .= "]";
            }
        }
    }
    return $status_str;
}
# Get the verbose string if a higher verbose level is used
# @param statusLevel_a The status level array, elem 0 is the current status,
# elem 1 the warning sensors, elem 2 the critical sensors, elem 3 the verbose
# information for the sensors, elem 4 the used storcli commands.
# @param controllerToCheck Controller parsed by getControllerInfo
# @param LDDevicesToCheck LDs parsed by getLogicalDevices
# @param LDInitToCheck LDs parsed by getLogicalDevices init
# @param PDDevicesToCheck PDs parsed by getPhysicalDevices
# @param PDInitToCheck PDs parsed by getPhysicalDevices init
# @param PDRebuildToCheck PDs parsed by getPhysicalDevices rebuild
# @return The created verbosity string
sub getVerboseString{
    my @statusLevel_a = @{(shift)};
    my %controllerToCheck = %{(shift)};
    my @LDDevicesToCheck = @{(shift)};
    my @LDInitToCheck = @{(shift)};
    my @PDDevicesToCheck = @{(shift)};
    my @PDInitToCheck = @{(shift)};
    my @PDRebuildToCheck = @{(shift)};
    # Initialize to '' so the first .= does not concatenate onto undef.
    my $verb_str = '';
    $verb_str .= "Used storcli commands:\n";
    foreach my $cmd (@{$statusLevel_a[4]}){
        $verb_str .= '- '.$cmd."\n";
    }
    if(${$statusLevel_a[0]} eq 'Critical'){
        $verb_str .= "Critical sensors:\n";
        foreach my $sensor (@{$statusLevel_a[2]}){
            $verb_str .= "\t- ".$sensor;
            if(exists($statusLevel_a[3]->{$sensor})){
                $verb_str .= ' ('.$statusLevel_a[3]->{$sensor}.')';
            }
            $verb_str .= "\n";
        }
    }
    if( ${$statusLevel_a[0]} ne 'OK'){
        $verb_str .= "Warning sensors:\n";
        foreach my $sensor (@{$statusLevel_a[1]}){
            $verb_str .= "\t- ".$sensor;
            if(exists($statusLevel_a[3]->{$sensor})){
                $verb_str .= ' ('.$statusLevel_a[3]->{$sensor}.')';
            }
            $verb_str .= "\n";
        }
    }
    if($VERBOSITY == 3){
        $verb_str .= "CTR information:\n";
        $verb_str .= "\t- ".$controllerToCheck{'Product Name'}.":\n";
        $verb_str .= "\t\t- ".'Serial No='.$controllerToCheck{'Serial No'}."\n";
        $verb_str .= "\t\t- ".'FW Package Build='.$controllerToCheck{'FW Package Build'}."\n";
        $verb_str .= "\t\t- ".'Mfg. Date='.$controllerToCheck{'Mfg. Date'}."\n";
        $verb_str .= "\t\t- ".'Revision No='.$controllerToCheck{'Revision No'}."\n";
        $verb_str .= "\t\t- ".'BIOS Version='.$controllerToCheck{'BIOS Version'}."\n";
        $verb_str .= "\t\t- ".'FW Version='.$controllerToCheck{'FW Version'}."\n";
        $verb_str .= "\t\t- ".'ROC temperature='.$controllerToCheck{'ROC temperature'}."\n";
        $verb_str .= "LD information:\n";
        foreach my $LD (@LDDevicesToCheck){
            $verb_str .= "\t- ".$LD->{'ld'}.":\n";
            # keys() on a hash reference (autoderef) was experimental and is
            # fatal since Perl 5.24 -- dereference explicitly.
            foreach my $key (sort (keys(%{$LD}))){
                $verb_str .= "\t\t- ".$key.'='.$LD->{$key}."\n";
            }
            foreach my $LDinit (@LDInitToCheck){
                if($LDinit->{'ld'} eq $LD->{'ld'}){
                    $verb_str .= "\t\t- init=".$LDinit->{'init'}."\n";
                }
            }
        }
        $verb_str .= "PD information:\n";
        foreach my $PD (@PDDevicesToCheck){
            $verb_str .= "\t- ".$PD->{'pd'}.":\n";
            foreach my $key (sort (keys(%{$PD}))){
                $verb_str .= "\t\t- ".$key.'='.$PD->{$key}."\n";
            }
            foreach my $PDinit (@PDInitToCheck){
                if($PDinit->{'pd'} eq $PD->{'pd'}){
                    $verb_str .= "\t\t- init=".$PDinit->{'init'}."\n";
                }
            }
            foreach my $PDrebuild (@PDRebuildToCheck){
                if($PDrebuild->{'pd'} eq $PD->{'pd'}){
                    $verb_str .= "\t\t- rebuild=".$PDrebuild->{'rebuild'}."\n";
                }
            }
        }
        # Print every BBU_*/CV_* verbose value if the module was checked.
        my @keys = ('BBU_Status','CV_Status');
        foreach my $key(@keys){
            if(exists($statusLevel_a[3]->{$key})){
                $key =~ /^(\w+)_\w+$/;
                my $type = $1;
                $verb_str .= $type." information:\n";
                foreach my $stat (sort (keys(%{$statusLevel_a[3]}))){
                    if($stat =~ /^$type.+$/){
                        $verb_str .= "\t\t- $stat=".$statusLevel_a[3]->{$stat}."\n";
                    }
                }
            }
        }
    }
    return $verb_str;
}
# Get the performance string for the current check. The values are taken from
# the verbose hash in the status level array.
# @param statusLevel_a The current status level array
# @return The created performance string
# Builds the performance data string for the current check. The values are
# taken from the verbose hash in the status level array; every key containing
# 'temperature' becomes one 'key=value' datum, with warning/critical
# thresholds appended for the known temperature sensors.
# @param statusLevel_a The current status level array
# @return The created performance string, or undef if no value qualified
sub getPerfString{
    my @statusLevel_a = @{(shift)};
    my %verbose_h = %{$statusLevel_a[3]};
    my @parts;
    foreach my $sensor (sort (keys(%verbose_h))){
        next unless $sensor =~ /temperature/i;
        my $datum = $sensor.'='.$verbose_h{$sensor};
        if($sensor =~ /ROC_Temperature$/){
            $datum .= ';'.$C_TEMP_WARNING.';'.$C_TEMP_CRITICAL;
        }
        elsif($sensor =~ /Drive_Temperature$/){
            $datum .= ';'.$PD_TEMP_WARNING.';'.$PD_TEMP_CRITICAL;
        }
        elsif($sensor eq 'BBU_Temperature'){
            $datum .= ';'.$BBU_TEMP_WARNING.';'.$BBU_TEMP_CRITICAL;
        }
        elsif($sensor eq 'CV_Temperature'){
            $datum .= ';'.$CV_TEMP_WARNING.';'.$CV_TEMP_CRITICAL;
        }
        push @parts, $datum;
    }
    return @parts ? join(' ', @parts) : undef;
}
# Get max Status for two status value: Critical > Warning > OK
# Returns the more severe of two status values, using the ordering
# Critical > Warning > OK. Any unrecognized value is treated as OK.
# @param Status1 The first status string
# @param Status2 The second status string
# @return 'Critical', 'Warning' or 'OK'
sub getMaxStatus {
    my ($first, $second) = @_;
    foreach my $level ('Critical', 'Warning') {
        return $level if $first eq $level || $second eq $level;
    }
    return 'OK';
}
MAIN: {
    my ($storcli, $sudo, $noSudo, $version, $exitCode);
    # Create default sensor arrays and push them to status level
    my @statusLevel_a ;
    my $status_str = 'OK';
    my $warnings_a = [];
    my $criticals_a = [];
    my $verboseValues_h = {};
    my $verboseCommands_a = [];
    # Status level layout: [0] ref to the overall status string, [1] warning
    # sensor names, [2] critical sensor names, [3] verbose values keyed by
    # sensor name, [4] the storcli commands that were executed.
    push @statusLevel_a, \$status_str;
    push @statusLevel_a, $warnings_a;
    push @statusLevel_a, $criticals_a;
    push @statusLevel_a, $verboseValues_h;
    push @statusLevel_a, $verboseCommands_a;
    # Per default use a BBU
    my $bbu = 1;
    my @enclosures;
    my @logDevices;
    my @physDevices;
    my $platform = $^O;
    # Check storcli tool
    $storcli = `which storcli64 2>/dev/null`;
    chomp($storcli);
    if( !(GetOptions(
        'h|help' => sub {displayHelp();},
        'v|verbose' => sub {$VERBOSITY = 1 },
        'vv' => sub {$VERBOSITY = 2},
        'vvv' => sub {$VERBOSITY = 3},
        'V|version' => \$version,
        'C|controller=i' => \$CONTROLLER,
        'EID|enclosure=s' => \@enclosures,
        'LD|logicaldevice=s' => \@logDevices,
        'PD|physicaldevice=s' => \@physDevices,
        'Tw|temperature-warn=s' => \$C_TEMP_WARNING,
        'Tc|temperature-critical=s' => \$C_TEMP_CRITICAL,
        'PDTw|physicaldevicetemperature-warn=s' => \$PD_TEMP_WARNING,
        'PDTc|physicaldevicetemperature-critical=s' => \$PD_TEMP_CRITICAL,
        'BBUTw|bbutemperature-warning=s' => \$BBU_TEMP_WARNING,
        'BBUTc|bbutemperature-critical=s' => \$BBU_TEMP_CRITICAL,
        'CVTw|cvtemperature-warning=s' => \$CV_TEMP_WARNING,
        'CVTc|cvtemperature-critical=s' => \$CV_TEMP_CRITICAL,
        'Im|ignore-media-errors=i' => \$IGNERR_M,
        'Io|ignore-other-errors=i' => \$IGNERR_O,
        'Ip|ignore-predictive-fail-count=i' => \$IGNERR_P,
        'Is|ignore-shield-counter=i' => \$IGNERR_S,
        'Ib|ignore-bbm-counter=i' => \$IGNERR_B,
        'p|path=s' => \$storcli,
        'z|criticality=s' => \$criticality,
        'b|BBU=i' => \$bbu,
        'noenclosures=i' => \$NOENCLOSURES,
        'nosudo' => \$noSudo,
    ))){
        print $NAME . " Version: " . $VERSION ."\n";
        displayUsage();
        exit(STATE_UNKNOWN);
    }
    if(defined($version)){ print $NAME . "\nVersion: ". $VERSION . "\n"; }
    if($storcli eq ""){
        print "Error: cannot find storcli executable.\n";
        print "Ensure storcli is in your path, or use the '-p <storcli path>' switch!\n";
        exit(STATE_UNKNOWN);
    }
    if($platform eq 'linux') {
        if(!defined($noSudo)){
            # NOTE(review): this lexical $sudo shadows the one declared at the
            # top of MAIN, and `which sudo` yields an empty but *defined*
            # string when sudo is missing, so the defined() check below can
            # never fail -- confirm whether a length check was intended.
            my $sudo;
            chomp($sudo = `which sudo`);
            if(!defined($sudo)){
                print "Error: cannot find sudo executable.\n";
                exit(STATE_UNKNOWN);
            }
            $storcli = $sudo.' '.$storcli;
        }
    }
    # Print storcli version if available
    if(defined($version)){ displayVersion($storcli) }
    # Prepare storcli command
    $storcli .= " /c$CONTROLLER";
    # Check if the controller number can be used
    if(!getControllerTime($storcli)){
        print "Error: invalid controller number, controller not found!\n";
        exit(STATE_UNKNOWN);
    }
    # Prepare command line arrays; allows both repeated options and comma
    # separated lists for enclosures/LDs/PDs.
    @enclosures = split(/,/,join(',', @enclosures));
    @logDevices = split(/,/,join(',', @logDevices));
    @physDevices = split(/,/,join(',', @physDevices));
    # Check if the BBU param is correct
    if(($bbu != 1) && ($bbu != 0)) {
        print "Error: invalid BBU/CV parameter, must be 0 or 1!\n";
        exit(STATE_UNKNOWN);
    }
    my ($bbuPresent,$cvPresent) = (0,0);
    if($bbu == 1){
        ($bbuPresent,$cvPresent) = checkBBUorCVIsPresent($storcli);
        # Checking was requested but neither module is present -> critical.
        if($bbuPresent == 0 && $cvPresent == 0){
            ${$statusLevel_a[0]} = 'Critical';
            push @{$criticals_a}, 'BBU/CV_Present';
            $statusLevel_a[3]->{'BBU_Status'} = 'Critical';
            $statusLevel_a[3]->{'CV_Status'} = 'Critical';
        }
    }
    if($bbuPresent == 1){getBBUStatus($storcli, \@statusLevel_a, $verboseCommands_a); }
    if($cvPresent == 1){ getCVStatus($storcli, \@statusLevel_a, $verboseCommands_a); }
    # Gather controller, logical and physical device information, then
    # evaluate everything against the thresholds.
    my $controllerToCheck = getControllerInfo($storcli, $verboseCommands_a);
    my $LDDevicesToCheck = getLogicalDevices($storcli, \@logDevices, 'all', $verboseCommands_a);
    my $LDInitToCheck = getLogicalDevices($storcli, \@logDevices, 'init', $verboseCommands_a);
    my $PDDevicesToCheck = getPhysicalDevices($storcli, \@enclosures, \@physDevices, 'all', $verboseCommands_a);
    my $PDInitToCheck = getPhysicalDevices($storcli, \@enclosures, \@physDevices, 'initialization', $verboseCommands_a);
    my $PDRebuildToCheck = getPhysicalDevices($storcli, \@enclosures, \@physDevices, 'rebuild', $verboseCommands_a);
    getControllerStatus(\@statusLevel_a, $controllerToCheck);
    getLDStatus(\@statusLevel_a, $LDDevicesToCheck);
    getLDStatus(\@statusLevel_a, $LDInitToCheck);
    getPDStatus(\@statusLevel_a, $PDDevicesToCheck);
    getPDStatus(\@statusLevel_a, $PDInitToCheck);
    getPDStatus(\@statusLevel_a, $PDRebuildToCheck);
    # Emit plugin output: overall status, sensor details, perfdata and, with
    # -vv/-vvv, the verbose report.
    print ${$statusLevel_a[0]}." ";
    print getStatusString("Critical",\@statusLevel_a);
    print getStatusString("Warning",\@statusLevel_a);
    my $perf_str = getPerfString(\@statusLevel_a);
    if($perf_str){
        print "|".$perf_str;
    }
    if($VERBOSITY == 2 || $VERBOSITY == 3){
        print "\n".getVerboseString(\@statusLevel_a, $controllerToCheck, $LDDevicesToCheck, $LDInitToCheck,
            $PDDevicesToCheck, $PDInitToCheck, $PDRebuildToCheck)
    }
    # Map the textual status to the Nagios exit code; with -z set to anything
    # other than 'critical', critical states are reported as warnings.
    if(${$statusLevel_a[0]} eq "Critical" && ($criticality eq "critical")){
        $exitCode = STATE_CRITICAL;
    }
    elsif (${$statusLevel_a[0]} eq "OK") {
        $exitCode = STATE_OK;
    }
    else {
        $exitCode = STATE_WARNING;
    }
    exit($exitCode);
}
| blueboxgroup/ursula-monitoring | sensu/plugins/check-storcli.pl | Perl | apache-2.0 | 47,842 |
# Response class for the AWS Service Catalog AcceptPortfolioShare call.
# NOTE(review): appears to be an auto-generated Paws output class -- it
# carries no payload attributes beyond the request id.
package Paws::ServiceCatalog::AcceptPortfolioShareOutput;
use Moose;
# AWS request id returned with the response; useful for correlating calls
# in logs or AWS support cases.
has _request_id => (is => 'ro', isa => 'Str');
### main pod documentation begin ###
=head1 NAME
Paws::ServiceCatalog::AcceptPortfolioShareOutput
=head1 ATTRIBUTES
=head2 _request_id => Str
=cut
1;
package RSP::Extension::JSONEncoder;
use Moose;
use namespace::autoclean;
with qw(RSP::Role::Extension RSP::Role::Extension::JSInstanceManipulation);
use Try::Tiny;
use JSON::XS;
# Two shared JSON::XS instances: index 0 produces compact UTF-8 output,
# index 1 pretty-printed UTF-8 output. Callers select one by index.
my $encoders = [
    JSON::XS->new->utf8,
    JSON::XS->new->utf8->pretty
];
# Registers the json.encode and json.decode functions with the JavaScript
# instance by binding closures over this extension object.
sub bind {
    my ($self) = @_;
    $self->bind_extension({
        json => {
            encode => $self->generate_js_closure('json_encode'),
            decode => $self->generate_js_closure('json_decode'),
        },
    });
}
# Serializes a Perl data structure to a JSON string.
# @param data    the structure to encode
# @param encoder index into $encoders (0 = compact, 1 = pretty)
# On failure re-dies with the JSON::XS error; the trailing newline keeps
# Perl from appending its own "at <file> line <n>" suffix.
sub json_encode {
    my ($self, $data, $encoder) = @_;
    try {
        return $encoders->[$encoder]->encode($data);
    } catch {
        die "$_\n";
    };
}
# Parses a JSON string into a Perl data structure.
# @param data    the JSON text to decode
# @param encoder index into $encoders (0 = compact, 1 = pretty)
sub json_decode {
    my ($self, $data, $encoder) = @_;
    try {
        return $encoders->[$encoder]->decode($data);
    } catch {
        die "$_\n";
    };
}
__PACKAGE__->meta->make_immutable;
1;
| sanyaade-mobiledev/smart-platform | lib/RSP/Extension/JSONEncoder.pm | Perl | mit | 853 |
=pod
=head1 NAME
ocsp - Online Certificate Status Protocol utility
=head1 SYNOPSIS
B<openssl> B<ocsp>
[B<-help>]
[B<-out file>]
[B<-issuer file>]
[B<-cert file>]
[B<-serial n>]
[B<-signer file>]
[B<-signkey file>]
[B<-sign_other file>]
[B<-no_certs>]
[B<-req_text>]
[B<-resp_text>]
[B<-text>]
[B<-reqout file>]
[B<-respout file>]
[B<-reqin file>]
[B<-respin file>]
[B<-nonce>]
[B<-no_nonce>]
[B<-url URL>]
[B<-host host:port>]
[B<-header>]
[B<-path>]
[B<-CApath dir>]
[B<-CAfile file>]
[B<-no-CAfile>]
[B<-no-CApath>]
[B<-attime timestamp>]
[B<-check_ss_sig>]
[B<-crl_check>]
[B<-crl_check_all>]
[B<-explicit_policy>]
[B<-extended_crl>]
[B<-ignore_critical>]
[B<-inhibit_any>]
[B<-inhibit_map>]
[B<-no_check_time>]
[B<-partial_chain>]
[B<-policy arg>]
[B<-policy_check>]
[B<-policy_print>]
[B<-purpose purpose>]
[B<-suiteB_128>]
[B<-suiteB_128_only>]
[B<-suiteB_192>]
[B<-trusted_first>]
[B<-no_alt_chains>]
[B<-use_deltas>]
[B<-auth_level num>]
[B<-verify_depth num>]
[B<-verify_email email>]
[B<-verify_hostname hostname>]
[B<-verify_ip ip>]
[B<-verify_name name>]
[B<-x509_strict>]
[B<-VAfile file>]
[B<-validity_period n>]
[B<-status_age n>]
[B<-noverify>]
[B<-verify_other file>]
[B<-trust_other>]
[B<-no_intern>]
[B<-no_signature_verify>]
[B<-no_cert_verify>]
[B<-no_chain>]
[B<-no_cert_checks>]
[B<-no_explicit>]
[B<-port num>]
[B<-index file>]
[B<-CA file>]
[B<-rsigner file>]
[B<-rkey file>]
[B<-rother file>]
[B<-resp_no_certs>]
[B<-nmin n>]
[B<-ndays n>]
[B<-resp_key_id>]
[B<-nrequest n>]
[B<-md5|-sha1|...>]
=head1 DESCRIPTION
The Online Certificate Status Protocol (OCSP) enables applications to
determine the (revocation) state of an identified certificate (RFC 2560).
The B<ocsp> command performs many common OCSP tasks. It can be used
to print out requests and responses, create requests and send queries
to an OCSP responder and behave like a mini OCSP server itself.
=head1 COMMAND OPTIONS
This command operates as either a client or a server.
The options are described below, divided into those two modes.
=head2 OCSP Client Options
=over 4
=item B<-help>
Print out a usage message.
=item B<-out filename>
specify output filename, default is standard output.
=item B<-issuer filename>
This specifies the current issuer certificate. This option can be used
multiple times. The certificate specified in B<filename> must be in
PEM format. This option B<MUST> come before any B<-cert> options.
=item B<-cert filename>
Add the certificate B<filename> to the request. The issuer certificate
is taken from the previous B<issuer> option, or an error occurs if no
issuer certificate is specified.
=item B<-serial num>
Same as the B<cert> option except the certificate with serial number
B<num> is added to the request. The serial number is interpreted as a
decimal integer unless preceded by B<0x>. Negative integers can also
be specified by preceding the value by a B<-> sign.
=item B<-signer filename>, B<-signkey filename>
Sign the OCSP request using the certificate specified in the B<signer>
option and the private key specified by the B<signkey> option. If
the B<signkey> option is not present then the private key is read
from the same file as the certificate. If neither option is specified then
the OCSP request is not signed.
=item B<-sign_other filename>
Additional certificates to include in the signed request.
=item B<-nonce>, B<-no_nonce>
Add an OCSP nonce extension to a request or disable OCSP nonce addition.
Normally if an OCSP request is input using the B<reqin> option no
nonce is added: using the B<nonce> option will force addition of a nonce.
If an OCSP request is being created (using B<cert> and B<serial> options)
a nonce is automatically added specifying B<no_nonce> overrides this.
=item B<-req_text>, B<-resp_text>, B<-text>
print out the text form of the OCSP request, response or both respectively.
=item B<-reqout file>, B<-respout file>
write out the DER encoded certificate request or response to B<file>.
=item B<-reqin file>, B<-respin file>
read OCSP request or response file from B<file>. These options are ignored
if OCSP request or response creation is implied by other options (for example
with B<serial>, B<cert> and B<host> options).
=item B<-url responder_url>
specify the responder URL. Both HTTP and HTTPS (SSL/TLS) URLs can be specified.
=item B<-host hostname:port>, B<-path pathname>
if the B<host> option is present then the OCSP request is sent to the host
B<hostname> on port B<port>. B<path> specifies the HTTP path name to use
or "/" by default. This is equivalent to specifying B<-url> with scheme
http:// and the given hostname, port, and pathname.
=item B<-header name=value>
Adds the header B<name> with the specified B<value> to the OCSP request
that is sent to the responder.
This may be repeated.
=item B<-timeout seconds>
connection timeout to the OCSP responder in seconds
=item B<-CAfile file>, B<-CApath pathname>
file or pathname containing trusted CA certificates. These are used to verify
the signature on the OCSP response.
=item B<-no-CAfile>
Do not load the trusted CA certificates from the default file location
=item B<-no-CApath>
Do not load the trusted CA certificates from the default directory location
=item B<-attime>, B<-check_ss_sig>, B<-crl_check>, B<-crl_check_all>,
B<-explicit_policy>, B<-extended_crl>, B<-ignore_critical>, B<-inhibit_any>,
B<-inhibit_map>, B<-no_alt_chains>, B<-no_check_time>, B<-partial_chain>, B<-policy>,
B<-policy_check>, B<-policy_print>, B<-purpose>, B<-suiteB_128>,
B<-suiteB_128_only>, B<-suiteB_192>, B<-trusted_first>, B<-use_deltas>,
B<-auth_level>, B<-verify_depth>, B<-verify_email>, B<-verify_hostname>,
B<-verify_ip>, B<-verify_name>, B<-x509_strict>
Set different certificate verification options.
See L<verify(1)> manual page for details.
=item B<-verify_other file>
file containing additional certificates to search when attempting to locate
the OCSP response signing certificate. Some responders omit the actual signer's
certificate from the response: this option can be used to supply the necessary
certificate in such cases.
=item B<-trust_other>
the certificates specified by the B<-verify_other> option should be explicitly
trusted and no additional checks will be performed on them. This is useful
when the complete responder certificate chain is not available or trusting a
root CA is not appropriate.
=item B<-VAfile file>
file containing explicitly trusted responder certificates. Equivalent to the
B<-verify_other> and B<-trust_other> options.
=item B<-noverify>
don't attempt to verify the OCSP response signature or the nonce values. This
option will normally only be used for debugging since it disables all verification
of the responders certificate.
=item B<-no_intern>
ignore certificates contained in the OCSP response when searching for the
signers certificate. With this option the signers certificate must be specified
with either the B<-verify_other> or B<-VAfile> options.
=item B<-no_signature_verify>
don't check the signature on the OCSP response. Since this option tolerates invalid
signatures on OCSP responses it will normally only be used for testing purposes.
=item B<-no_cert_verify>
don't verify the OCSP response signers certificate at all. Since this option allows
the OCSP response to be signed by any certificate it should only be used for
testing purposes.
=item B<-no_chain>
do not use certificates in the response as additional untrusted CA
certificates.
=item B<-no_explicit>
do not explicitly trust the root CA if it is set to be trusted for OCSP signing.
=item B<-no_cert_checks>
don't perform any additional checks on the OCSP response signers certificate.
That is do not make any checks to see if the signers certificate is authorised
to provide the necessary status information: as a result this option should
only be used for testing purposes.
=item B<-validity_period nsec>, B<-status_age age>
these options specify the range of times, in seconds, which will be tolerated
in an OCSP response. Each certificate status response includes a B<notBefore>
time and an optional B<notAfter> time. The current time should fall between
these two values, but the interval between the two times may be only a few
seconds. In practice the OCSP responder and clients clocks may not be precisely
synchronised and so such a check may fail. To avoid this the
B<-validity_period> option can be used to specify an acceptable error range in
seconds, the default value is 5 minutes.
If the B<notAfter> time is omitted from a response then this means that new
status information is immediately available. In this case the age of the
B<notBefore> field is checked to see it is not older than B<age> seconds old.
By default this additional check is not performed.
=item B<-[digest]>
this option sets digest algorithm to use for certificate identification in the
OCSP request. Any digest supported by the OpenSSL B<dgst> command can be used.
The default is SHA-1. This option may be used multiple times to specify the
digest used by subsequent certificate identifiers.
=back
=head2 OCSP Server Options
=over 4
=item B<-index indexfile>
B<indexfile> is a text index file in B<ca> format containing certificate revocation
information.
If the B<index> option is specified the B<ocsp> utility is in responder mode, otherwise
it is in client mode. The request(s) the responder processes can be either specified on
the command line (using B<issuer> and B<serial> options), supplied in a file (using the
B<reqin> option) or via external OCSP clients (if B<port> or B<url> is specified).
If the B<index> option is present then the B<CA> and B<rsigner> options must also be
present.
=item B<-CA file>
CA certificate corresponding to the revocation information in B<indexfile>.
=item B<-rsigner file>
The certificate to sign OCSP responses with.
=item B<-rother file>
Additional certificates to include in the OCSP response.
=item B<-resp_no_certs>
Don't include any certificates in the OCSP response.
=item B<-resp_key_id>
Identify the signer certificate using the key ID, default is to use the subject name.
=item B<-rkey file>
The private key to sign OCSP responses with: if not present the file specified in the
B<rsigner> option is used.
=item B<-port portnum>
Port to listen for OCSP requests on. The port may also be specified using the B<url>
option.
=item B<-nrequest number>
The OCSP server will exit after receiving B<number> requests, default unlimited.
=item B<-nmin minutes>, B<-ndays days>
Number of minutes or days when fresh revocation information is available: used in the
B<nextUpdate> field. If neither option is present then the B<nextUpdate> field
is omitted meaning fresh revocation information is immediately available.
=back
=head1 OCSP Response verification.
OCSP Response follows the rules specified in RFC2560.
Initially the OCSP responder certificate is located and the signature on
the OCSP request checked using the responder certificate's public key.
Then a normal certificate verify is performed on the OCSP responder certificate
building up a certificate chain in the process. The locations of the trusted
certificates used to build the chain can be specified by the B<CAfile>
and B<CApath> options or they will be looked for in the standard OpenSSL
certificates directory.
If the initial verify fails then the OCSP verify process halts with an
error.
Otherwise the issuing CA certificate in the request is compared to the OCSP
responder certificate: if there is a match then the OCSP verify succeeds.
Otherwise the OCSP responder certificate's CA is checked against the issuing
CA certificate in the request. If there is a match and the OCSPSigning
extended key usage is present in the OCSP responder certificate then the
OCSP verify succeeds.
Otherwise, if B<-no_explicit> is B<not> set the root CA of the OCSP responders
CA is checked to see if it is trusted for OCSP signing. If it is the OCSP
verify succeeds.
If none of these checks is successful then the OCSP verify fails.
What this effectively means is that if the OCSP responder certificate is
authorised directly by the CA it is issuing revocation information about
(and it is correctly configured) then verification will succeed.
If the OCSP responder is a "global responder" which can give details about
multiple CAs and has its own separate certificate chain then its root
CA can be trusted for OCSP signing. For example:
openssl x509 -in ocspCA.pem -addtrust OCSPSigning -out trustedCA.pem
Alternatively the responder certificate itself can be explicitly trusted
with the B<-VAfile> option.
=head1 NOTES
As noted, most of the verify options are for testing or debugging purposes.
Normally only the B<-CApath>, B<-CAfile> and (if the responder is a 'global
VA') B<-VAfile> options need to be used.
The OCSP server is only useful for test and demonstration purposes: it is
not really usable as a full OCSP responder. It contains only a very
simple HTTP request handling and can only handle the POST form of OCSP
queries. It also handles requests serially meaning it cannot respond to
new requests until it has processed the current one. The text index file
format of revocation is also inefficient for large quantities of revocation
data.
It is possible to run the B<ocsp> application in responder mode via a CGI
script using the B<reqin> and B<respout> options.
=head1 EXAMPLES
Create an OCSP request and write it to a file:
openssl ocsp -issuer issuer.pem -cert c1.pem -cert c2.pem -reqout req.der
Send a query to an OCSP responder with URL http://ocsp.myhost.com/ save the
response to a file, print it out in text form, and verify the response:
openssl ocsp -issuer issuer.pem -cert c1.pem -cert c2.pem \
-url http://ocsp.myhost.com/ -resp_text -respout resp.der
Read in an OCSP response and print out text form:
openssl ocsp -respin resp.der -text -noverify
OCSP server on port 8888 using a standard B<ca> configuration, and a separate
responder certificate. All requests and responses are printed to a file.
openssl ocsp -index demoCA/index.txt -port 8888 -rsigner rcert.pem -CA demoCA/cacert.pem
-text -out log.txt
As above but exit after processing one request:
openssl ocsp -index demoCA/index.txt -port 8888 -rsigner rcert.pem -CA demoCA/cacert.pem
-nrequest 1
Query status information using an internally generated request:
openssl ocsp -index demoCA/index.txt -rsigner rcert.pem -CA demoCA/cacert.pem
-issuer demoCA/cacert.pem -serial 1
Query status information using request read from a file, and write the response
to a second file.
openssl ocsp -index demoCA/index.txt -rsigner rcert.pem -CA demoCA/cacert.pem
-reqin req.der -respout resp.der
=head1 HISTORY
The -no_alt_chains option was first added to OpenSSL 1.1.0.
=head1 COPYRIGHT
Copyright 2001-2016 The OpenSSL Project Authors. All Rights Reserved.
Licensed under the OpenSSL license (the "License"). You may not use
this file except in compliance with the License. You can obtain a copy
in the file LICENSE in the source distribution or at
L<https://www.openssl.org/source/license.html>.
=cut
| GaloisInc/hacrypto | src/C/openssl/openssl-1.1.0b/doc/apps/ocsp.pod | Perl | bsd-3-clause | 15,185 |
#! /usr/bin/env perl
# For every file named on the command line, duplicate each
# TESTCLASS(name,quick) and TESTCLASS(name,long) line with a matching
# TESTCLASS(name,full) line. The original file is kept as "<name>.old"
# and the rewritten content replaces "<name>".
use strict;
use warnings;

foreach my $name (@ARGV) {
    print "Process full class on $name\n";
    my $new = $name . ".new";

    # Lexical filehandles and 3-arg open (the old code used bareword
    # handles and 2-arg open).
    open my $old_fh, '<', $name or die "Can not open $name:$!\n";
    open my $new_fh, '>', $new  or die "Can not open $new:$!\n";

    while (my $line = <$old_fh>) {
        chomp $line;
        # Every input line is copied through unchanged...
        print {$new_fh} "$line\n";
        # ...and quick/long test declarations additionally get a "full"
        # variant (the two original branches were identical except for
        # the keyword, so they are merged into one alternation).
        if ($line =~ /^TESTCLASS\((.*),(?:quick|long)\)$/) {
            print {$new_fh} "TESTCLASS($1,full)\n";
        }
    }

    close $old_fh;
    # Check close on the write handle: buffered write errors surface here.
    close $new_fh or die "Can not close $new:$!\n";
    system("mv $name $name.old");
    system("mv $name.new $name");
}
| djw8605/htcondor | src/condor_tests/x_addfullclass.pl | Perl | apache-2.0 | 725 |
package CJDNS;
use Bencode qw(bencode bdecode);
use Carp qw/croak/;
use Digest::SHA qw(sha256_hex);
use IO::Socket;
# buffer size for reading from teh sawkets.
use constant BUFFER_SIZE => 8192;
our @ISA = qw();
our $VERSION = '0.01';
# turn on autoflush for this class.
our $| = 1;
# Construct a CJDNS admin client bound to udp://$addr:$port.
# Dies if the UDP socket cannot be created or if the admin interface
# does not answer a ping; on success, discovers the server's available
# functions and installs them as methods on this class.
sub new {
    my ($class, $addr, $port, $password) = @_;

    my $self = bless({
        connection => "$addr:$port",
        password   => $password,
    }, $class);

    # Fix: the original never checked the socket constructor, so a bad
    # address surfaced later as "print() on undefined value".
    $self->{s} = IO::Socket::INET->new(
        PeerAddr => $addr,
        PeerPort => $port,
        Proto    => 'udp',
        Type     => SOCK_DGRAM
    ) or die "Can't create UDP socket to $addr:$port: $!\n";

    unless ($self->_ping) {
        die "Can't ping cjdns admin interface at udp://$addr:$port\n";
    }
    $self->_make_methods;
    return $self;
}
# Query Admin_availableFunctions (paged) from the daemon and install one
# closure per reported function into the CJDNS package, so each admin
# function becomes callable as a method. Also records every prototype in
# $self->{capabilities} for capabilities().
sub _make_methods {
    my ($self) = @_;
    my $s = $self->s;
    my $availableFunctions;
    my $page = 0;

    # Page through the function list until the daemon stops sending "more".
    while (1) {
        my $to_decode;
        print $s "d1:q24:Admin_availableFunctions4:argsd4:pagei$page" . "eee";
        # grab the data and rock it out!
        recv($s, $to_decode, BUFFER_SIZE, undef);
        my $decoded = bdecode($to_decode);
        # copy the hash!
        foreach my $key (keys %{$decoded->{availableFunctions}}) {
            $availableFunctions->{$key} = $decoded->{availableFunctions}->{$key};
        }
        last unless exists $decoded->{more};
        # get the next page.
        $page++;
    }

    # Install each discovered function as a method in the CJDNS namespace.
    foreach my $method_name (keys %$availableFunctions) {
        my $prototype = $availableFunctions->{$method_name};
        $self->{capabilities}->{$method_name} = $prototype;

        # This closure performs the cookie/auth handshake and issues the call.
        my $method = sub {
            my ($self, %args) = @_;
            my $s = $self->s;

            # Ask the daemon for a fresh auth cookie.
            # Fix: the original re-declared "my $to_decode" / "my $dec"
            # later in this same scope, masking these declarations.
            my $cookie_raw;
            print $s "d1:q6:cookiee";
            recv($s, $cookie_raw, BUFFER_SIZE, undef);
            my $cookie = bdecode($cookie_raw)->{cookie};

            my $req = {
                q      => 'auth',
                aq     => $method_name,
                hash   => sha256_hex($self->{password} . $cookie),
                # NOTE(review): leading space before $cookie preserved from
                # the original — confirm against the cjdns admin protocol.
                cookie => " $cookie",
                args   => \%args,
            };

            # replace $req->{hash} with a hash of the bencoded request,
            # then re-encode with the final hash in place.
            my $req_benc = bencode($req);
            $req->{hash} = sha256_hex($req_benc);
            $req_benc = bencode($req);
            print $s $req_benc;

            my $resp_raw;
            recv($s, $resp_raw, BUFFER_SIZE, undef);
            my $dec = bdecode($resp_raw);
            if (ref($dec)) {
                # { error: "none" } is a success case
                if ($dec->{error} && $dec->{error} ne 'none') {
                    croak "[error] CJDNS method '$method_name': $dec->{error}";
                }
            }
            return $dec;
        };

        # and now it's a method! (symbolic glob assignment; the file runs
        # without "use strict", but guard anyway for future strictness)
        my $full_name = "CJDNS::$method_name";
        no strict 'refs';
        *{$full_name} = $method;
    }
}
# Render a human-readable report of every admin function discovered at
# connect time: one block per function listing each argument, its type,
# and whether it is required. Returns the report as a single string.
sub capabilities {
    my ($self) = @_;

    my @out = (
        "Cjdns Administration Protocol Capabilities\n",
        "------------------------------------------\n",
    );

    foreach my $func (keys %{$self->{capabilities}}) {
        push @out, "  $func\n";
        my $proto = $self->{capabilities}->{$func};
        foreach my $attr (keys %$proto) {
            my $line = "    + $attr: $proto->{$attr}->{type} ";
            $line .= "[required]" if $proto->{$attr}->{required};
            push @out, "$line\n";
        }
        push @out, "\n";
    }

    return join('', @out);
}
# Send the bencoded admin "ping" query and expect the literal "pong"
# reply. Returns 1 on success, undef otherwise.
sub _ping {
    my ($self) = @_;
    my $sock = $self->s;

    my $reply;
    print $sock "d1:q4:pinge";
    recv($sock, $reply, BUFFER_SIZE, undef);

    return $reply eq "d1:q4:ponge" ? 1 : undef;
}
# Accessor for the underlying UDP socket handle.
sub s { return $_[0]->{s}; }
1;
__END__
=head1 NAME
Cjdns - Perl interface to the Cjdns Administration Interface
=head1 SYNOPSIS
use CJDNS;
my $cjdns = CJDNS->new('localhost', '12345', 'abc123');
printf("Cjdns' routing table is using %d bytes of memory!\n", $cjdns->memory->{bytes});
=head1 DESCRIPTION
Perl interface to the cjdns Administration system
=head1 SEE ALSO
https://github.com/cjdelisle/cjdns
=head1 AUTHOR
Michael Gregorowicz, E<lt>mikei@mg2.orgE<gt>
=head1 COPYRIGHT AND LICENSE
Copyright (C) 2012 by Michael Gregorowicz
This library is free software; you can redistribute it and/or modify
it under the same terms as Perl itself, either Perl version 5.14.2 or,
at your option, any later version of Perl 5 you may have available.
=cut
| krattai/noo-ebs | ref_code/cjdns/contrib/perl/CJDNS/lib/CJDNS.pm | Perl | bsd-2-clause | 4,741 |
%query: f(i,o).
/* due to Naomi Lindenstrauss */
% less(X,Y): X < Y over successor-encoded naturals (0, s(0), s(s(0)), ...).
% Note the two recursive clauses overlap, giving multiple proof paths.
less(0,s(0)).
less(s(X),s(Y)) :- less(X,Y).
less(X,s(Y)) :- less(X,Y).
% add(X,Y,Z): Z = X + Y; the symmetric recursive clauses again make the
% relation nondeterministic in its derivations.
add(0,0,0).
add(s(X),Y,s(N)) :- add(X,Y,N).
add(X,s(Y),s(N)) :- add(X,Y,N).
% f(X,N): first clause decrements X (N + 1 = X) when X > 2;
% second clause recurses on X + 2 when X < 3 — a termination test case.
f(X,N) :- less(s(s(0)),X),add(N,s(0),X).
f(X,N) :- less(X,s(s(s(0)))),add(X,s(s(0)),S),f(S,N1),f(N1,N).
| ComputationWithBoundedResources/ara-inference | doc/tpdb_trs/Logic_Programming/SGST06/baby91.pl | Perl | mit | 305 |
# RoboForm 6.xx HTML export converter
#
# Copyright 2015 Mike Cappella (mike@cappella.us)
package Converters::Roboform 1.01;
our @ISA = qw(Exporter);
our @EXPORT = qw(do_init do_import do_export);
our @EXPORT_OK = qw();
use v5.14;
use utf8;
use strict;
use warnings;
#use diagnostics;
binmode STDOUT, ":utf8";
binmode STDERR, ":utf8";
use Utils::PIF;
use Utils::Utils;
use Utils::Normalize;
use HTML::Entities;
# Roboform does not use standardized Username and Password fields - rather, it uses the names of the
# fields in the HTML form data for the web page login, and those strings get stored in the exported
# HTML. The tables below are used to define the regular expression patterns to match Usernames and
# Passwords. If you find you have unmatched Usernames or Passwords in your Login entries, you can add
# new entries to the appropriate tables below. You'll notice any such unmatched # key / value pairs
# in the Notes section of the entry, or as custom fields if you are using the option --addfields.
#
# Add new patterns to the relevant tables below - these are regular expressions, and will automatically
# be created as fully anchored patterns (beginning and end) of the string. Place more specific entries
# at the top of the tables (to be tried first) and less specific entries later. This helps provide the
# best match opportunities. The patterns below have been developed empirically over time, as reported
# by users.
#
# Note: the pattern matcher will reject username tokens with a variant of 'password' in them, to avoid
# the situation where, for example, 'login_password' when matched as a username (the 'login' term could
# match a username, but 'password' suggests its a password).
# Username and Password regular expression patterns tables.
my %userpass_REs = (
username_reject => [
qr/autologin/i,
qr/passwo?r?d/i,
],
username => [
qr/^(?:user|login)(?:[-_\s]*(?:name|id))$/i,
qr/^webusername$/i,
qr/.*_login_username$/i,
qr/^login(?:name|user)[-_]?id$/i,
qr/^user_?name_\w+/i,
qr/^\w+_?user_?name$/i,
qr/^user$/i,
qr/^txtuserid$/i,
qr/^txtuser$/i,
qr/^login$/i,
qr/^[-_\w]*log[oi]nid$/i,
qr/^loginuser$/i,
qr/^login-user-name$/i,
qr/^\w+[-_]?login$/i,
qr/^\w+-login-id$/i,
qr/^loginInput$/i,
qr/^user-field$/i,
qr/^\w+_usernamefrm$/i,
qr/^usr_name_home$/i,
qr/^login-(appleid|account)$/i,
qr/^appleid$/i,
qr/^login_field$/i,
qr/^onlineid\d$/i,
qr/^txtloginname$/i,
qr/^(signinemailaddress|emailaddress)$/i,
qr/^(?:user|login)[-_]?email$/i,
qr/^login-email-address$/i,
qr/^inputemailhandle$/i,
qr/^adslaccount$/i,
qr/^\w*email[-_](?:\w+)$/i,
qr/^[-_\w-]*email(?:[-_]?id)?$/i,
qr/^e?mail$/i,
qr/^pwuser$/i,
qr/^uid$/i,
qr/^\w+_uid$/i,
qr/^loginquestion\w*$/i,
qr/^clientpin$/i,
qr/^\w*accountname$/i,
qr/^[-_\w]*userid[-_\w]*$/i,
qr/^wireless_num$/i,
qr/^membernametext$/i,
qr/^\w*username[-_\w]*$/i,
qr/^txtlogin$/i,
qr/^name$/i,
qr/^usernm$/i,
qr/^log$/i,
qr/^\w+_uname$/i,
qr/^[-_\w]*login-name[-_\w]*$/i,
qr/^address$/i,
qr/^memberid$/i,
qr/^[-_\w]*accountnumber$/i,
qr/^inusid$/i,
qr/^handle$/i,
qr/^\w*identifier$/i,
qr/use?r_name/i,
qr/loginemail/i,
qr/signinid/i,
qr/logincontrol/i,
qr/^(?:usr|id|ns|ui|u)$/i,
],
password_reject => [
qr/_hint$/i,
],
password => [
qr/^passwo?r?d$/i,
qr/.*login[-_]password$/i,
qr/^loginpassword$/i,
qr/^webupassword$/i,
qr/^loginpwd?$/i,
qr/^passwd_login?$/i,
qr/^userpwd$/i,
qr/^pwpwd$/i,
qr/^txtpwd$/i,
qr/^\w+-logon-password$/i,
qr/^passwordInput$/i,
qr/^(?:user|login)[-_]?pass$/i,
qr/^\w*loginpwd$/i,
qr/^(?:pass|txtpass|pword|pswd|pwd)$/i,
qr/^(?:log|u)pass$/i,
qr/password[_]?\w+/i,
qr/^[-_\w-]*password$/i,
qr/^password-field$/i,
qr/^\w+_password$/i,
qr/^usr_password_home$/i,
qr/^\w+_passwordfrm$/i,
qr/^passcode\d$/i,
qr/^adslpw$/i,
qr/^clientpw$/i,
qr/^\w*accountpw$/i,
qr/^[-_\w]*password[-_\w]*$/i,
qr/^\w+_psd$/i,
qr/^inpswd$/i,
qr/^\w*userpwd$/i,
qr/^[-_\w]*txtpin$/i,
qr/pass_word/i,
qr/^(?:pw|ps|up|p)$/i,
],
);
=cut
=cut
# Language-specificmy strings, as found in the HTML
my %category_strings = (
logins => [
'Rob­oForm Logins List', # English
'Liste der Rob­oForm-An­meldungen', # German
'Rob­oForm - Loginy', # Polish
],
safenotes => [
'Rob­oForm Saf­enotes List', # English
],
identities => [
'Rob­oForm Ide­ntities List', # English
],
);
=cut
consider how to handle these.
_fieldformat$1#j_username##textfield-1015-inputEl:
_fieldformat$1#j_password##textfield-1016-inputEl:
_fieldformat$1#loginfmt##i0116:
_fieldformat$1#passwd##i0118:
_fieldformat$1#login##i0116:
_fieldformat$1#passwd##i0118:
_fieldformat$1#email##field:
_fieldformat$1#password##field:
_fieldformat$1#loginname##text1:
_fieldformat$1#loginpassword##text2:
=cut
# Callback used by %card_field_specs to classify a captured HTML form
# field key/value pair as a username or password.
#
# $kvpair - [ field_token, field_value ] from the parsed export
# $key    - 'username' or 'password': selects which %userpass_REs table
#           (and its companion "<key>_reject" table) to try
#
# Returns (token, pattern_index + 1) when a pattern in the table matches,
# or the empty list when the pair is rejected or nothing matches.
sub userpass_matcher {
    my ($kvpair, $key) = @_;
    my $key_reject = $key . '_reject';

    if ($key eq 'password' or $key eq 'username') {
        my $token = $kvpair->[0];

        # when the HTML contains fields named such as loginform[usernameemail], pull out the inner component
        if ($token =~ /^[^[]+\[([^]]+)\]$/) {
            $token = $1;
        }
        # otherwise, split the components and use the last on
        else {
            $token = (split /[#.:\$]/, $token)[-1];
        }

        debug " attempting to match field token '$token' as a $key";

        # a token that is nothing but a bracketed group has no usable name
        if ($token =~ /^\[[^\]]+\]$/) {
            debug " rejecting bracketed token '$token' as a $key";
            return ();
        }
        # placeholder values ('*', '0', '1') cannot be real credentials
        if ($kvpair->[1] eq '*') {
            debug " rejecting token '$token' as a $key because its value is '*'";
            return ();
        }
        if ($kvpair->[1] =~ /^([01])$/) {
            debug " rejecting token '$token' as a $key because its value is '$1'";
            return ();
        }

        # rejection tests first
        for (my $i = 0; $i < @{$userpass_REs{$key_reject}}; $i++) {
            debug " $i: pattern reject test: $userpass_REs{$key_reject}[$i]";
            if ($token =~ $userpass_REs{$key_reject}[$i]) {
                debug "\t\t pattern reject $i matched";
                debug " rejecting token '$token' as a $key because its in the reject list";
                return ();
            }
        }

        # acceptance patterns are ordered most-specific first; the returned
        # index (1-based) reflects which pattern won
        for (my $i = 0; $i < @{$userpass_REs{$key}}; $i++) {
            debug " $i: pattern test: $userpass_REs{$key}[$i]";
            if ($token =~ $userpass_REs{$key}[$i]) {
                debug "\t\t pattern $i matched";
                return ($token, $i + 1);
            }
        }
        debug " NO match on field token '$token' as a $key";
    }
    return ();
}
my %card_field_specs = (
address => { textname => 'Address', fields => [
[ 'address1', 0, qr/^Address Line 1$/, ],
[ 'address2', 0, qr/^Address Line 2$/, ],
[ 'city', 0, qr/^City$/, ],
[ 'state_zip', 0, qr/^State Zip$/, ],
[ 'county', 0, qr/^County$/, ],
[ 'country', 0, qr/^Country$/, ],
]},
authentication => { textname => 'Authentication', type_out => 'note', fields => [
[ 'favuserid', 0, qr/^Favorite User ID$/, { custfield => [ $Utils::PIF::sn_main, $Utils::PIF::k_string, 'favorite user id' ] } ],
[ 'favpassword', 0, qr/^Favorite Password$/, { custfield => [ $Utils::PIF::sn_main, $Utils::PIF::k_concealed, 'favorite password', 'generate'=>'off' ] } ],
[ 'password_q', 0, qr/^Password Question$/, { custfield => [ $Utils::PIF::sn_main, $Utils::PIF::k_string, 'password question' ] } ],
[ 'password_a', 0, qr/^Password Answer$/, { custfield => [ $Utils::PIF::sn_main, $Utils::PIF::k_concealed, 'password answer', 'generate'=>'off' ] } ],
]},
bankacct => { textname => 'Bank Account', fields => [
[ 'bankName', 0, qr/^Bank Name$/, ],
[ 'accountNo', 0, qr/^Account Number$/, ],
[ 'accountType', 0, qr/^Account Type$/, ],
[ 'routingNo', 0, qr/^Routing Number$/, ],
[ '_branch', 0, qr/^Bank Branch$/, { custfield => [ $Utils::PIF::sn_branchInfo, $Utils::PIF::k_string, 'branch' ] } ],
[ 'branchPhone', 0, qr/^Bank Phone$/, ],
[ 'branchAddress', 0, qr/^Bank Address$/, ],
[ 'swift', 0, qr/^SWIFT$/, ],
[ '_rate', 0, qr/^Interest Rate$/, { custfield => [ $Utils::PIF::sn_extra, $Utils::PIF::k_string, 'interest rate' ] } ],
[ 'owner', 0, qr/^Account Owner$/, ],
[ 'telephonePin', 0, qr/^Bank PIN Code$/, ],
]},
business => { textname => 'Business', fields => [
[ 'name', 0, qr/^Company Name$/, ],
[ 'department', 0, qr/^Department$/, ],
[ 'phone', 0, qr/^Toll Free Phone$/, ],
[ 'website', 0, qr/^Web Site$/, ],
[ 'biztype', 0, qr/^Business Type$/, ],
[ 'employerid', 0, qr/^Employer Id$/, ],
[ 'stocksym', 0, qr/^Stock Symbol$/, ],
]},
car => { textname => 'Car', type_out => 'note', fields => [
[ 'plate', 0, qr/^Plate$/, ],
[ 'make', 0, qr/^Make$/, ],
[ 'model', 0, qr/^Model$/, ],
[ 'year', 0, qr/^Year$/, ],
[ 'vin', 0, qr/^VIN$/, ],
]},
creditcard => { textname => 'Credit Card', fields => [
[ 'type', 0, qr/^Card Type$/, ],
[ 'ccnum', 0, qr/^Card Number$/, ],
[ 'cvv', 0, qr/^Validation Code$/, ],
[ '_expiry', 0, qr/^Card Expires$/, ],
[ '_validFrom', 0, qr/^Valid From$/, ],
[ 'cardholder', 0, qr/^Card User Name$/, ],
[ 'bank', 0, qr/^Issuing Bank$/, ],
[ 'phoneTollFree', 0, qr/^Cust Svc Phone $/, ], # note trailing space
[ 'phoneIntl', 0, qr/^Intl Svc Phone $/, ], # note trailing space
[ 'pin', 0, qr/^PIN Number$/, ],
[ 'creditLimit', 0, qr/^Credit Limit$/, ],
[ 'interest', 0, qr/^Interest Rate$/, ],
]},
custom => { textname => 'Custom', type_out => 'note', fields => [
]},
person => { textname => 'Person', type_out => 'identity', fields => [
[ '_title', 0, qr/^Title$/, ],
[ '_name', 0, qr/^Name$/, ],
[ 'jobtitle', 0, qr/^Job Title$/, ],
[ 'defphone', 0, qr/^Phone$/, ],
[ 'homephone', 0, qr/^Home Tel$/, ],
[ 'busphone', 0, qr/^Work Tel$/, ],
[ 'cellphone', 0, qr/^Cell Tel$/, ],
[ '_pager', 0, qr/^Pager$/, ],
[ '_fax', 0, qr/^Fax$/, ],
[ 'email', 0, qr/^Email$/, ],
[ 'yahoo', 0, qr/^Yahoo ID$/, ],
[ 'msn', 0, qr/^MSN ID$/, ],
[ 'aim', 0, qr/^AOL Name$/, ],
[ 'icq', 0, qr/^ICQ No$/, ],
[ 'skype', 0, qr/^Skype ID$/, ],
[ 'sex', 0, qr/^Sex$/, ],
[ '_age', 0, qr/^Age$/, ],
[ '_birthdate', 0, qr/^Birth Date$/, ],
[ '_birthplace', 0, qr/^Birth Place$/, ],
[ '_income', 0, qr/^Income$/, ],
[ 'number', 0, qr/^Soc Sec No$/, { type_out => 'socialsecurity' } ],
[ 'number', 0, qr/^Driver License$/, { type_out => 'driverslicense' } ], # see DL FIXUP
[ 'state', 0, qr/^Driver License State$/, { type_out => 'driverslicense' } ], # see DL FIXUP
[ '_expiry_date', 0, qr/^License Expires$/, { type_out => 'driverslicense' } ], # see DL FIXUP
]},
login => { textname => undef, fields => [
[ 'url', 1, qr/url/i, ],
[ 'password', 1, \&userpass_matcher, ],
[ 'username', 1, \&userpass_matcher, ],
]},
passport => { textname => 'Passport', fields => [
[ 'number', 0, qr/^Passport Number$/, ],
[ '_issue_date', 0, qr/^Passport Issue Date$/, ],
[ '_expiry_date', 0, qr/^Passport Expiration Date$/, ],
]},
);
$DB::single = 1; # triggers breakpoint when debugging
# Converter bootstrap: hand the driver this converter's field specs, the
# (unused) import-type restriction slot, and its extra CLI option.
sub do_init {
    my %init = (
        'specs'    => \%card_field_specs,
        'imptypes' => undef,
        'opts'     => [
            [ q{ --windows # export file is from Windows Roboform },
              'windows' ],
        ],
    );
    return \%init;
}
# Strip RoboForm's HTML markup from one exported value and decode HTML
# entities, returning plain text (or undef for undef input).
# NOTE(review): the third substitution deletes a literal soft hyphen
# (U+00AD) and the nbsp substitutions contain literal U+00A0 characters
# inside the character classes — these bytes must be preserved exactly.
sub clean {
    local $_ = shift;
    return undef if not defined $_;

    s/<WBR>//gi;     # drop soft word-break tags
    s/<BR>/\n/gi;    # hard breaks become newlines
    s/­//g;
    s/(?: )+$//g; # rf mac sometimes adds trailing nbsp
    s/(?: )+/ /g; # rf mac inserts multiple nbsp chars as spaces
    return decode_entities $_;
}
my %rfinfo;
my %identity_title_re = (
mac => qr#\A\s*<TR align=left><TD class="caption" colspan=\d+>(.+?)</TD></TR>#ms,
winv6 => qr#\A<TR align=left>\s*<TD class=caption(?: colSpan=\d+)?>(.+?)<\/TD><\/TR>\s*#ms,
winv7 => qr#\A<DIV class=caption style="WIDTH: 100%; WORD-BREAK: break-all; CLEAR: both">(.*?)</DIV>\s*#ms,
);
my %entry_re = (
mac => qr#\A.*?<TABLE width="100%">\s*(.*?)\s*<\/TABLE>\s*</TD></TR>\s*#ms,
winv6 => qr#\A.*?<TABLE width="100%">\s*(.*?)\s*<\/TABLE>\s*</TD>(?:</TR>)?\s*#ms,
winv7 => qr#\A.*?<DIV class="floatdiv orph">(.+?)</TABLE>(?:</DIV>){1,2}\s*#ms,
);
my %url_re = (
mac => qr#^.*?<TR align=left><TD class="subcaption" colspan=\d+>(.+?)</TD></TR>#ms,
winv6 => qr#^.*?<TR align=left>\s*<TD class=subcaption colSpan=\d+>(.+?)</TD></TR>\s*#ms,
winv7 => qr#.*?<TD class=subcaption style="WORD-BREAK: break-all; COLOR: gray" colSpan=\d+>(.*?)</TD></TR>#msi,
);
my %re_fvpair = (
mac => qr#<TR><TD class=field align=left valign=top width="40%">(.+?)</TD><TD></TD><TD class=wordbreakfield align=left valign=top width="55%">(.+?)</TD></TR>\s*#,
winv6 => qr#<TR>\s*<TD class=field[^>]*>(.*?)</TD>.*?<TD class=wordbreakfield [^>]*>(.*?)</TD></TR>\s*#msi,
winv7 => qr#<TR width="100%">\s*<TD width=.*? class=field[^>]*>(.*?):?</TD>.*?<TD width=.*? class=field[^>]*>(.*?)</TD></TR>\s*#msi,
);
my %re_entry_type = (
mac => qr#\A<TR align=left><TD class="subcaption" colspan=3>(.+?)</TD></TR>\s*#ms,
winv6 => qr#\A.*?<TR align=left>\s*<TD class=subcaption colSpan=\d+>(.+?)</TD></TR>\s*#ms,
winv7 => qr#\A.*?<TR align=left width="100%">\s*<TD class=idsubcaption style="WORD-BREAK: break-all" colSpan=3>(.+?)</TD></TR>\s*#ms,
);
# Patterns used to detect the export type, per-plaform, per-Roboform release
my %type_pats = (
winv7 => {
logins => '<P style="FONT-SIZE:.*; TEXT-ALIGN: center">__TYPE_STRING__</P>',
safenotes => '<P style="FONT-SIZE:.*; TEXT-ALIGN: center">__TYPE_STRING__</P>',
identities => '<P style="FONT-SIZE:.*; TEXT-ALIGN: center">__TYPE_STRING__</P>\s*<DIV class=preline>\s*',
},
winv6 => {
logins => qr#<HEAD><TITLE>RoboForm Passcards List#,
safenotes => qr#<HEAD><TITLE>RoboForm Safenotes List#,
identities => qr#<HTML><HEAD><TITLE>RoboForm Identities List.*?<TBODY>\s*#ms,
},
mac => {
safenotes => sub { ! grep(/class="subcaption"/, $_[0]) },
logins => sub { m#<TR align=left><TD class="caption" colspan=3>.+?</TD></TR>\s+<TR align=left><TD class="subcaption" colspan=3>.+?</TD></TR>#m },
identities => sub { m#^<TR align=left><TD class="caption" colspan=1>.*?</TD></TR>\s+<TR><TD style="border-left: 0 solid darkgray; border-right-width: 0;" valign=top align=left width="100%">\s+<TABLE width="100%">\s+<TR align=left><TD class="subcaption" colspan=3>Person#m },
}
);
# Parse one or more RoboForm HTML export files and convert every entry to
# the normalized card structure used by the exporter.
#
# $files    - a single path or an array ref of paths
# $imptypes - optional hash ref whitelisting which card types to keep
#
# Returns a hash ref mapping card type => array of converted cards.
# Relies on %rfinfo being populated by get_export_file_info() to know the
# export flavor ('logins', 'safenotes', 'identities') and platform version.
sub do_import {
    my ($files, $imptypes) = @_;
    my %Cards;
    my $n = 1;
    my $entry_re;

    # for debugging Windows exports on OS X
    $^O = 'MSWin32' if exists $main::opts{'windows'};

    for my $file (ref($files) eq 'ARRAY' ? @$files : $files) {
        # Windows exports are UTF-16LE; everything else UTF-8.
        $_ = slurp_file($file, $^O eq 'MSWin32' ? 'UTF-16LE' : 'UTF-8');
        s/^\x{FEFF}//; # remove BOM
        get_export_file_info($_);

        my $identity_name; # identity entry blocks are preceeded by the identity name
        while (my $entry = get_next_entry(\$_)) {
            my (%cmeta, @fieldlist);
            my ($title, $label, $value);
            my $identity_type;

            # Safenotes carry only a title and free-form notes.
            if ($rfinfo{'type'} eq 'safenotes') {
                my $notes;
                ($title, $notes) = get_notes($entry);
                $title ||= 'Untitled';
                if ($notes) {
                    debug "\tnotes => unfold_and_chop $notes";
                    $cmeta{'notes'} = $notes;
                }
            }
            else {
                if ($rfinfo{'type'} eq 'logins') {
                    $title = get_title($entry);
                    debug "\tfull title: $title";
                    if (my $url = get_url($entry)) {
                        debug "\tfield(url) => $url";
                        push @fieldlist, [ url => $url ];
                    }
                    if (my $notes = get_notes($entry)) {
                        debug "\tnotes => unfold_and_chop $notes";
                        $cmeta{'notes'} = $notes;
                    }
                }
                elsif ($rfinfo{'type'} eq 'identities') {
                    # A scalar-ref "entry" is the identity's name header,
                    # not an entry itself; remember it for later titles.
                    if (ref $entry eq 'SCALAR') {
                        $identity_name = $$entry;
                        debug "**** Identity items for ", $identity_name;
                        next;
                    }
                    my @a = split / - /, get_identity_entry_type($entry);
                    $identity_type = shift @a;
                    debug "\t**Identity subtype: ", $identity_type;
                    $title = myjoin ' - ', $identity_name, $identity_type, @a;
                }

                # Pull label/value pairs until the entry is exhausted.
                while (1) {
                    ($label, $value) = get_fv_pair($entry);
                    last if !defined $label;

                    # Notes from login's print list on darwin
                    if ($rfinfo{'version'} ne 'winv7' and $label eq 'Note$') {
                        $cmeta{'notes'} = clean $value;
                        debug "\tnotes => ", unfold_and_chop $cmeta{'notes'};
                        next;
                    }
                    next if not defined $value or $value eq '';

                    my $fvref = undef;
                    if ($rfinfo{'type'} eq 'identities') {
                        # Custom identity labels arrive quoted; strip quotes.
                        $label =~ s/^"(.*)"$/$1/ if $identity_type eq 'Custom';
                        # DL FIXUPs: RoboForm folds driver-license state,
                        # number and expiry into one value (see do_dl_fixup).
                        if ($identity_type eq 'Person') {
                            if ($label eq '') {
                                $label = 'License Expires'
                            }
                            elsif ($label eq 'Driver License') {
                                $fvref = do_dl_fixup($label,$value);
                            }
                        }
                    }
                    if ($fvref) {
                        # the fixup expanded one pair into several fields
                        for (keys %$fvref) {
                            debug "\tfield($_) => $fvref->{$_}";
                            push @fieldlist, [ $_ => $fvref->{$_} ];
                        }
                    }
                    else {
                        debug "\tfield($label) => $value";
                        push @fieldlist, [ $label => $value ];
                    }
                }
            }

            # Titles embed the folder path; last component is the card
            # title, the rest become tags/folders.
            if ($title) {
                my @title = split $rfinfo{'pathsep_re'}, clean $title;
                $cmeta{'title'} = pop @title;
                debug "\ttitle => $cmeta{'title'}";
                if (@title) {
                    $cmeta{'tags'} = join '::', @title;
                    $cmeta{'folder'} = [ @title ];
                    debug "\ttags => $cmeta{tags}";
                }
            }

            my $itype = find_card_type(\@fieldlist, $identity_type);

            # skip all types not specifically included in a supplied import types list
            next if defined $imptypes and (! exists $imptypes->{$itype});

            prioritize_fieldlist($itype, \@fieldlist) if $itype eq 'login';

            my $normalized = normalize_card_data(\%card_field_specs, $itype, \@fieldlist, \%cmeta);
            my $cardlist = explode_normalized($itype, $normalized);
            for (keys %$cardlist) {
                print_record($cardlist->{$_});
                push @{$Cards{$_}}, $cardlist->{$_};
            }
            $n++;
        }
    }

    summarize_import('item', $n - 1);
    return \%Cards;
}
# Split a raw driver's-license value into its component fields.
# Raw examples seen in exports:
#   mac:   "CA N21828857Expires02/31/2019"
#   winv7: "CA N21-828857"  (may contain soft hyphens)
# Returns a hashref of field-name => value on success, undef otherwise.
# An expiration of '//' means "no date entered" and is dropped.
sub do_dl_fixup {
my ($label, $value) = @_;
$value =~ /^(?:(?<state>.+?) )?(?<num>.+?)(?:Expires(?<expires>.+))?$/
or return undef;
my %fields = ( 'Driver License' => $+{num} );
$fields{'Driver License State'} = $+{state}
if exists $+{state};
$fields{'License Expires'} = $+{expires}
if exists $+{expires} and $+{expires} ne '//';
return \%fields;
}
# Export entry point: register any custom fields discovered during
# import, then write the 1PIF file.  All arguments are passed through
# to create_pif_file() unchanged.
sub do_export {
add_custom_fields(\%card_field_specs);
create_pif_file(@_);
}
# Determine the 1Password card type for an entry.  For identity entries
# ($identity_type set), the type is matched against each spec's
# 'textname'.  Otherwise each field in $fieldlist is tested against the
# specs' match patterns (either a CODE ref or a compiled regex).
# Falls back to 'note' when nothing matches.
sub find_card_type {
my $fieldlist = shift;
my $identity_type = shift;
for my $type (keys %card_field_specs) {
# for identity sub types, match the textname
if ($identity_type) {
if (defined $card_field_specs{$type}{'textname'} and $identity_type eq $card_field_specs{$type}{'textname'}) {
debug "type detected as '$type'";
return $type;
}
next;
}
else {
for my $cfs (@{$card_field_specs{$type}{'fields'}}) {
for (@$fieldlist) {
if ($cfs->[CFS_TYPEHINT]) {
# CODE matchers return (token, pattern-index) on success;
# a match only counts when the field's value is non-empty.
if (my @ret = $cfs->[CFS_MATCHSTR]->($_, $cfs->[0]) and $_->[1] ne '') {
debug "type detected as '$type' (pattern $ret[1], token='$ret[0]' key='$_->[0]')";
return $type;
}
}
elsif ($_->[0] =~ $cfs->[CFS_MATCHSTR]) {
debug "type detected as '$type' (key='$_->[0]')";
return $type;
}
}
}
}
}
debug "\t\ttype defaulting to 'note'";
return 'note';
}
# For 'login' types, need to munge the fieldlist to downgrade fields that would result in duplicate matches
# for a given key. For example, for the 'username' key, the HTML might contain form strings that result in
# both of the tokens 'username' and 'email', each of which would match the 'username' patterns.
# This will be done by modifying the field names of the lower priority entries such that they wont match in
# the normalize_card_data() routine.
#
# For 'login' cards, several raw fields can match the same spec key
# (e.g. both 'username' and 'email' tokens match the username key).
# This tags each matching fieldlist entry with (cfs key, weight),
# keeps the best-weighted entry per key, and renames the rest as
# "[name]" so normalize_card_data() will no longer match them.
# $fieldlist entries are [name, value] pairs, modified in place.
sub prioritize_fieldlist {
my ($type, $fieldlist) = @_;
for my $cfs (@{$card_field_specs{$type}{'fields'}}) {
debug "\tprioritizing key '$cfs->[0]'";
# Add the cfs key and weight to the fieldlist entry. A weight of 0 is added for simple single-RE matches
for (@$fieldlist) {
if (ref $cfs->[CFS_MATCHSTR] eq 'CODE') {
if (my @ret = $cfs->[CFS_MATCHSTR]->($_, $cfs->[0]) and $_->[1] ne '') {
debug "\tCODE match (pattern \#$ret[1] token='$ret[0]', key='$_->[0]')";
push @$_, ($cfs->[0], $ret[1]); # add cfs key and weight
next;
}
}
elsif ($_->[0] =~ $cfs->[CFS_MATCHSTR]) {
debug "\tRE match (pattern $cfs->[CFS_MATCHSTR], key='$_->[0]')";
push @$_, ($cfs->[0], 0); # add cfs key and weight=0
next;
}
}
}
my (%field_key_done, @newlist);
for (@$fieldlist) {
# fieldlist entries that don't match - nothing to do
if (not defined $_->[2]) {
push @newlist, $_;
next;
}
# skip already processed cfs keys
next if exists $field_key_done{$_->[2]};
# look for all fieldlist entries with the current key
my $key = $_->[2];
my @found = grep { defined $_->[2] and $_->[2] eq $key } @$fieldlist;
# single entries just go to the new list
if (@found == 1) {
push @newlist, @found;
}
# adjust the field name of 1..N-th entries so they will not match later in normalize_card_data()
else {
# Lowest weight wins; later entries are bracketed out.
@found = sort {$a->[3] <=> $b->[3] } @found;
for (my $i = 1; $i < @found; $i++) {
$found[$i][0] = '[' . $found[$i][0] . ']';
}
push @newlist, @found;
}
$field_key_done{$_->[2]}++; # done with this cfs key
}
# Done with the original fieldlist, now create a new one with the (possibly modified) field, value pairs
@$fieldlist = ();
push @$fieldlist, [ $_->[0], $_->[1] ] for @newlist;
}
# Localize the REs used to detect the export type. See the %category_strings table above. Only valid currently for winv7.
# Substitute the localized category strings into the winv7 type
# detection patterns and compile them.  No-op for other versions.
# NOTE(review): the sub name is misspelled ("langauage"), but the call
# site in get_export_file_info uses the same spelling, so renaming it
# would break the caller; left as-is.
sub patch_langauage_strings {
my $vers = $rfinfo{'version'};
return unless $vers eq 'winv7';
for my $type (keys %{$type_pats{$vers}}) {
# __TYPE_STRING__ becomes an alternation of the localized names.
$type_pats{$vers}{$type} =~ s/__TYPE_STRING__/join '|', map { "(?:$_)" } @{$category_strings{$type}}/e;
$type_pats{$vers}{$type} = qr/$type_pats{$vers}{$type}/;
}
}
# Inspect (and partially consume) the export text in $_[0] to set
# %rfinfo: 'version' (mac/winv6/winv7), 'pathsep_re' (folder
# separator), and 'type' (safenotes/logins/identities).  Modifies the
# caller's string in place via the @_ alias.  Bails on unknown format.
sub get_export_file_info {
if ($_[0] =~ /^<html>/) { $rfinfo{'version'} = 'mac'; }
elsif ($_[0] =~ /^<HTML>/) { $rfinfo{'version'} = 'winv6'; }
elsif ($_[0] =~ s/^<!DOCTYPE html>.*?<BODY oncontextmenu="return false">\s*//ms) { $rfinfo{'version'} = 'winv7'; }
else {
bail 'Unexpected RoboForm print list format; please report your platform and version of RoboForm';
}
if ($rfinfo{'version'} eq 'winv6') {
#$_[0] =~ s/^.*?<TABLE width="100%">//ms;
$rfinfo{'pathsep_re'} = qr/\\/; # Win v6 uses \ as the folder separator
}
else {
$_[0] =~ s/^.*?<body>//ms;
$rfinfo{'pathsep_re'} = qr/\//; # OS X and Win v7 use / as the folder separator
patch_langauage_strings();
}
# Try each type pattern for the detected version; patterns are either
# predicates (CODE) or regexes that also strip the matched header.
for my $key (keys %{$type_pats{$rfinfo{'version'}}}) {
if (ref $type_pats{$rfinfo{'version'}}{$key} eq 'CODE') {
# NOTE(review): unlike the regex branch, this branch does not
# 'last' on a match, so a later key could overwrite 'type' —
# confirm whether that is intentional.
$rfinfo{'type'} = $key if &{$type_pats{$rfinfo{'version'}}{$key}}($_[0]);
}
elsif ($_[0] =~ s#$type_pats{$rfinfo{'version'}}{$key}##) {
$rfinfo{'type'} = $key;
last;
}
}
exists $rfinfo{'type'} or
bail "Failed to detect file's type from Roboform $rfinfo{'version'} export file";
debug "RoboForm export version: $rfinfo{'version'}; type: $rfinfo{'type'}";
}
# Remove and return the next entry from the export text referenced by
# $sref.  In 'identities' exports an identity-name header may precede
# its entries; that case returns a SCALAR ref (to the cleaned name) so
# the caller can distinguish it from a normal entry string.  Returns
# undef when no further entries remain.
sub get_next_entry {
my ($sref) = @_;
if ($rfinfo{'type'} eq 'identities' and $$sref =~ s#$identity_title_re{$rfinfo{'version'}}##) {
# A REF return flags "this is an identity title, not an entry".
return \(my $title = clean $1);
}
if ($$sref =~ s#$entry_re{$rfinfo{'version'}}##) {
return $1;
}
return undef;
}
# Extract (and strip) an entry's title from the HTML in $_[0],
# using the caption markup appropriate to the detected export version.
# Modifies the caller's string in place via the @_ alias; returns the
# title string, or undef when no title markup is found.
sub get_title {
if ($rfinfo{'version'} eq 'mac') {
if ($_[0] =~ s#^.*?<TR align=left><TD class="caption" colspan=\d+>(.+?)</TD></TR>\s*##ms) {
return clean $1;
}
}
else {
# Windows variants only carry titles on login entries.
if ($rfinfo{'type'} eq 'logins') {
if ($rfinfo{'version'} eq 'winv6') {
if ($_[0] =~ s#\A.*?<TR align=left>\s*<TD class=caption colSpan=\d+>(.+?)</TD></TR>\s*##ms) {
return $1;
}
}
else {
if ($_[0] =~ s#^.*<TD style="WORD-BREAK: break-all"><SPAN class=caption style="VERTICAL-ALIGN: middle; WORD-BREAK: break-all">(.*?)</SPAN></TD></TR>\s*##ms) {
return $1;
}
}
}
}
return undef;
}
# Extract (and strip) the entry's URL from $_[0] using the per-version
# pattern in %url_re; returns the lower-cased, cleaned URL or undef.
# NOTE(review): the lexical $ret is never used.
sub get_url {
my $ret;
if ($_[0] =~ s#$url_re{$rfinfo{'version'}}##) {
return lc clean $1;
}
return undef
}
# Remove the next field/value pair from the entry text in $_[0] using
# the per-version pattern in %re_fvpair.  Returns the cleaned
# (label, value) pair, or (undef, undef) when the entry is exhausted.
sub get_fv_pair {
if ($_[0] =~ s#$re_fvpair{$rfinfo{'version'}}##) {
return (clean($1), clean($2));
}
return (undef, undef);
}
# Extract (and strip) note text from the entry HTML in $_[0], with
# per-version, per-type markup.  For login/bookmark entries a single
# notes string is returned; for safenotes a (title, notes) pair is
# returned.  Returns undef when nothing matches.
sub get_notes {
if ($rfinfo{'version'} eq 'mac') {
# bookmarks
if ($rfinfo{'type'} eq 'logins' and $_[0] =~ s#<TR><TD class=wordbreakfield align=left valign=top width="100%">(.+?)</TD></TR>##m) {
return clean $1;
}
# safenotes: pull title and note
if ($_[0] =~ s#<TR align=left><TD class="caption" colspan=\d+>(.*?)</TD></TR>\s*<TR><TD class=wordbreakfield [^>]+>(.*?)</TD></TR>##ms) {
return (clean($1), clean($2));
}
}
elsif ($rfinfo{'version'} eq 'winv7') {
if ($rfinfo{'type'} eq 'logins') {
# passcards/logins
if ($_[0] =~ s#<TR align=left width="100%">\s*<TD class=field[^>]*>Note:</TD>.*?<TD class=field[^>]*>(.*?)</TD></TR>\s*##msi) {
return clean $1;
}
# bookmarks
elsif ($_[0] =~ s#\s*<TD width="100%" align=left class=field vAlign=top>(.*?)</TD></TR></TBODY></TABLE></TD></TR></TBODY>##ms) {
return clean $1;
}
}
elsif ($rfinfo{'type'} eq 'safenotes') {
# NOTE(review): unlike the branches above, the substitution result
# is not checked here — if it fails, $1/$2 hold stale captures
# from an earlier match; confirm inputs always match.
$_[0] =~ s#^.*<DIV class=caption style="WIDTH: \d+%; WORD-BREAK: break-all; CLEAR: both">(.*?)</DIV></TD></TR>.*<TD width="\d+%".*class=field[^>]*>(.*?)</TD></TR>##ms;
return (clean($1), clean($2));
}
}
elsif ($rfinfo{'version'} eq 'winv6') {
if ($rfinfo{'type'} eq 'logins') {
# bookmarks
if ($_[0] =~ s#^<TD class=wordbreakfield vAlign=top width="100%" align=left>(.+?)</TD></TR></TBODY>##m) {
return clean $1;
}
}
elsif ($rfinfo{'type'} eq 'safenotes') {
# NOTE(review): substitution result not checked (see winv7 note).
$_[0] =~ s#^.*?<TBODY>\s*<TR align=left>\s*<TD class=caption colSpan=\d+>(.+?)</TD></TR>\s*<TR>\s*<TD class=wordbreakfield vAlign=top width="100%" align=left>(.+?)</TD></TR></TBODY>\s*##ms;
return (clean($1), clean($2));
}
}
return undef;
}
# Extract (and strip) the identity entry's sub-type string (e.g.
# "Person - ...") from the entry HTML in $_[0] using the per-version
# pattern in %re_entry_type; returns the cleaned string or undef.
sub get_identity_entry_type {
if ($_[0] =~ s#$re_entry_type{$rfinfo{'version'}}##ms) {
return clean $1;
}
return undef;
}
1;
| mikecappella/onepassword-utilities | convert_to_1p4/Converters/Roboform.pm | Perl | mit | 27,055 |
package #
Date::Manip::Offset::off363;
# Copyright (c) 2008-2015 Sullivan Beck. All rights reserved.
# This program is free software; you can redistribute it and/or modify it
# under the same terms as Perl itself.
# This file was automatically generated. Any changes to this file will
# be lost the next time 'tzdata' is run.
# Generated on: Wed Nov 25 11:44:44 EST 2015
# Data version: tzdata2015g
# Code version: tzcode2015g
# This module contains data from the zoneinfo time zone database. The original
# data was obtained from the URL:
# ftp://ftp.iana.org/tz
use strict;
use warnings;
require 5.010000;
our ($VERSION);
$VERSION='6.52';
END { undef $VERSION; }
our ($Offset,%Offset);
END {
undef $Offset;
undef %Offset;
}
# Auto-generated tzdata mapping for the -06:00:00 UTC offset.
# NOTE(review): per Date::Manip::Offset convention the keys appear to
# be 0 => zones observing -06 as standard time and 1 => zones where
# -06 is the DST offset — confirm against Date::Manip docs before
# relying on this; this file is regenerated from tzdata and any edits
# here will be lost.
$Offset = '-06:00:00';
%Offset = (
0 => [
'america/chicago',
'america/indiana/knox',
'america/indiana/tell_city',
'america/menominee',
'america/winnipeg',
'america/rainy_river',
'america/belize',
'america/guatemala',
'america/costa_rica',
'america/el_salvador',
'america/tegucigalpa',
'america/cancun',
'america/matamoros',
'america/merida',
'america/monterrey',
'america/bahia_banderas',
'america/mexico_city',
'america/managua',
'america/rankin_inlet',
'america/regina',
'america/swift_current',
'pacific/easter',
'pacific/galapagos',
'america/north_dakota/center',
'america/north_dakota/new_salem',
'america/north_dakota/beulah',
'etc/gmt-6',
'f',
'america/indiana/petersburg',
'america/indiana/vincennes',
'america/indiana/winamac',
'america/resolute',
'america/cambridge_bay',
'america/kentucky/monticello',
'america/iqaluit',
'america/pangnirtung',
'america/chihuahua',
'america/ojinaga',
'america/indiana/marengo',
'america/kentucky/louisville',
'america/indiana/indianapolis',
'america/indiana/vevay',
'america/hermosillo',
'america/mazatlan',
'america/atikokan',
'america/detroit',
'america/thunder_bay',
],
1 => [
'america/denver',
'america/edmonton',
'america/boise',
'america/cambridge_bay',
'america/yellowknife',
'america/inuvik',
'america/mazatlan',
'america/chihuahua',
'america/ojinaga',
'america/north_dakota/beulah',
'america/bahia_banderas',
'america/north_dakota/new_salem',
'america/hermosillo',
'america/north_dakota/center',
'pacific/easter',
'america/phoenix',
'america/swift_current',
'america/regina',
],
);
1;
| jkb78/extrajnm | local/lib/perl5/Date/Manip/Offset/off363.pm | Perl | mit | 2,715 |
package Admin;
use strict;
use warnings;
use FileHandle;
use JSON;
use Data::Dumper;
use Scalar::Util qw(looks_like_number);
use List::MoreUtils 'firstidx';
my $json = JSON->new->convert_blessed;
# Dispatch table for the admin console: maps the first input token to
# a handler; run() calls the handler as ($self, @remaining_tokens).
# NOTE(review): 'del' points at \&cmd_delete, which is not defined in
# this file — invoking 'del' would die at call time; confirm.
my %command = (
dec => \&cmd_decr,
del => \&cmd_delete,
e => \&cmd_eval,
inc => \&cmd_incr,
games => \&cmd_games,
give => \&cmd_give,
keys => \&cmd_keys,
p => \&cmd_print,
players => \&cmd_players,
r => \&cmd_registers,
set => \&cmd_set,
use => \&cmd_use,
x => \&cmd_dump,
);
# Construct an admin console session bound to socket/handle $fh and
# the server's $game registry.  If any game instances already exist,
# the first one (arbitrary hash order) is pre-selected and announced.
# Always writes an initial "> " prompt.
sub new {
my ( $pkg, $fh, $game ) = @_;
my $self = bless { fh => $fh, game => $game }, $pkg;
my @games = keys %{ $game->{game} };
if (@games) {
$self->{instance} = $games[0];
syswrite $fh, "using $self->{instance}\n";
}
syswrite $fh, "> ";
return $self;
}
# Return the hashref for the currently selected game instance, or
# print the available game names and return nothing when no usable
# instance is selected.
# BUGFIX: the original condition was
#   !$self->{instance} && $self->{game}{game}{ $self->{insance} }
# — note the 'insance' typo and the '&&' — which made the error path
# effectively unreachable, so a missing/invalid selection silently
# returned undef with no message.
sub get_game {
my $self = shift;
if ( !$self->{instance} or !exists $self->{game}{game}{ $self->{instance} } ) {
syswrite $self->{fh}, "No game chosen\nGames available: ";
syswrite $self->{fh}, join( ", ", keys %{ $self->{game}{game} } );
syswrite $self->{fh}, "\n";
return;
}
return $self->{game}{game}{ $self->{instance} };
}
# Handle one line of admin input: strip trailing whitespace, split into
# a command word plus at most two argument tokens, and dispatch through
# %command.  Unknown commands get an error plus the command list.
# Always re-issues the "> " prompt.
sub run {
my ( $self, $input ) = @_;
$input =~ s/\s+$//;
# Limit 3 keeps everything after the second token in one argument
# (e.g. the expression for 'e <path> <code>').
my ( $cmd, @toks ) = split /\s+/, $input, 3;
if ( exists $command{$cmd} ) {
$command{$cmd}->( $self, @toks );
}
else {
syswrite $self->{fh}, "No command '$cmd'\n";
syswrite $self->{fh}, Dumper [ keys %command ];
}
syswrite $self->{fh}, "> ";
}
# Parse an admin path expression into ($base, $expr) for later string
# eval.  Syntax: "[:playername]/key/subkey/..." — a leading :name
# selects the first player whose public name pattern-matches it
# (otherwise $base is the game itself); the remaining path is turned
# into a "{key}{subkey}" hash-subscript string.
# NOTE(review): the s#\+# # replaces only the FIRST '+' (no /g), so
# only one space can be encoded per path — confirm intent.
sub lookup {
my ( $game, $path ) = @_;
my $base = $game;
my ( $name, $loc ) = $path =~ m#^\s*(?::([^/]+))?(/?.*?)/?\s*$#;
if ($name) {
for my $p ( values %{ $game->{player} } ) {
if ( $p->{public}{name} =~ /$name/ ) {
$base = $p;
last;
}
}
}
$loc =~ s#^/?#/#;
$loc =~ s#\+# #;
$loc =~ s#/([^/]+)#{$1}#g;
return ( $base, $loc );
}
# Admin command 'dec': decrement the numeric value at $path, then
# print the new value.  Uses string eval on admin-supplied input —
# acceptable only because this is a trusted debug console.
sub cmd_decr {
my ( $self, $path ) = @_;
my $game = $self->get_game || return;
my ( $base, $expr ) = lookup( $game, $path );
eval "\$base->$expr--";
$self->cmd_print($path);
}
# Admin command 'e': apply arbitrary Perl code $cmd to the value at
# $path (e.g. "e /foo = 42"), then dump the result.  String eval of
# admin input — trusted debug console only.
sub cmd_eval {
my ( $self, $path, $cmd ) = @_;
my $game = $self->get_game || return;
my ( $base, $expr ) = lookup( $game, $path );
eval "\$base->$expr $cmd";
$self->cmd_dump($path);
}
# Admin command 'inc': increment the numeric value at $path, then
# print the new value.  Mirror image of cmd_decr.
sub cmd_incr {
my ( $self, $path ) = @_;
my $game = $self->get_game || return;
my ( $base, $expr ) = lookup( $game, $path );
eval "\$base->$expr++";
$self->cmd_print($path);
}
# Admin command 'x': Data::Dumper dump of the structure at $path.
# Seen() registrations collapse sockets, the shared player tables, the
# map, and the game itself into named placeholders so the dump stays
# readable and doesn't recurse through the whole world.
sub cmd_dump {
my ( $self, $path ) = @_;
return unless $path;
my $game = $self->get_game || return;
my ( $base, $expr ) = lookup( $game, $path );
eval "\$base = \$base->$expr";
my $d = Data::Dumper->new( [$base], ['x'] );
$d->Sortkeys(1);
for my $p ( values %{ $game->{player} } ) {
$d->Seen( { '*sock', $p->{sock} } );
}
$d->Seen(
{ '*private' => $game->{private}{player},
'*public' => $game->{public}{player},
'*map' => $game->{map}
}
);
# Don't alias the game to itself when the game IS the dump target.
$d->Seen( { '*game' => $game } ) if !( ref($base) && $game == $base );
syswrite $self->{fh}, $d->Dump;
}
# Admin command 'p': print the scalar value at $path (refs print as
# their stringified form).
sub cmd_print {
my ( $self, $path ) = @_;
my $game = $self->get_game || return;
my ( $base, $expr ) = lookup( $game, $path );
eval "\$base = \$base->$expr";
syswrite $self->{fh}, scalar($base) . "\n";
}
# Admin command 'games': dump the names of all known game instances.
sub cmd_games {
my ($self) = @_;
my @names = keys %{ $self->{game}{game} };
syswrite $self->{fh}, Dumper( \@names );
}
# Return the first player whose public name matches $name as a
# case-insensitive regex, or nothing when no player matches.
# NOTE(review): $name is interpolated unescaped into the pattern, so
# regex metacharacters in admin input are live — fine for a trusted
# console, but quotemeta it if this ever takes untrusted input.
sub find_player {
my ($game, $name) = @_;
for my $p ( values %{ $game->{player} } ) {
if ( $p->{public}{name} =~ /$name/i ) {
return $p;
}
}
return;
}
# Find the first option card whose name matches $card (case-insensitive
# pattern match), remove it from the game's deck, and return it.
# Returns nothing when no card matches.
sub get_option {
my ( $game, $card ) = @_;
my $cards = $game->{options}{cards};
for my $i ( 0 .. $#{$cards} ) {
if ( $cards->[$i]{name} =~ /$card/i ) {
# splice both removes the card from the deck and yields it
return splice @$cards, $i, 1;
}
}
return;
}
# Admin command 'give <player> <card>': move an option card from the
# game's deck to the matched player, echo the player's option names
# back to the admin, and broadcast the updated options to all clients.
# Silently does nothing when the player or card is not found.
sub cmd_give {
my ($self, $name, $card) = @_;
if (!$name) {
syswrite $self->{fh}, "Missing name\n";
return;
}
if (!$card) {
syswrite $self->{fh}, "Missing card\n";
return;
}
my $game = $self->get_game || return;
my $p = find_player($game, $name);
return unless $p;
my $o = get_option($game, $card);
return unless defined $o;
$p->{public}{options}{$o->{name}} = $o;
for $o (values %{$p->{public}{options}}) {
syswrite $self->{fh}, "$o->{name}\n";
}
$game->broadcast(
{ cmd => 'options',
player => $p->{id},
options => $p->{public}{options}
}
);
}
# Admin command 'keys': dump the sorted hash keys of the structure at
# $path (assumes the path resolves to a hashref).
sub cmd_keys {
my ( $self, $path ) = @_;
return unless $path;
my $game = $self->get_game || return;
my ( $base, $expr ) = lookup( $game, $path );
eval "\$base = \$base->$expr";
syswrite $self->{fh}, Dumper [ sort keys %$base ];
}
# Admin command 'players'.
# NOTE(review): this looks unfinished — the instance branch is empty
# and the dump lists GAME names, not players; likely intended to dump
# keys %{ $game->{player} } for the selected instance.  TODO: confirm.
sub cmd_players {
my $self = shift;
if ( $self->{instance} ) {
}
syswrite $self->{fh}, Dumper [ keys %{ $self->{game}{game} } ];
}
# Pad $text with spaces so it is centered in a $width-column field;
# when the padding is odd, the extra space goes on the left.
sub center {
my ( $text, $width ) = @_;
my $right = int( ( $width - length $text ) / 2 );
my $left  = $width - $right;
return sprintf '%*s%*s', $left, $text, $right, '';
}
# Admin command 'r': render every player's five program registers as a
# column-aligned table ("(card:priority)" per programmed card), using
# center() for padding, and write it to the admin socket.
# NOTE(review): header row is built from $p->{name} while the rest of
# the file reads $p->{public}{name}; also, rows for players with fewer
# than 5 registers leave trailing columns undef, so length() below
# would warn — confirm registers are always fully populated.
sub cmd_registers {
my ($self) = @_;
my $game = $self->get_game || return;
my @rows = [qw/Player 1 2 3 4 5/];
for my $p ( values %{ $game->{player} } ) {
my @row = $p->{name};
for my $r ( @{ $p->{public}{registers} } ) {
push @row,
join( ' ', map {"($_->{name}:$_->{priority})"} @{ $r->{program} } );
}
push @rows, \@row;
}
# First pass finds each column's max width; second pass pads cells.
my @w;
for my $i (0..5) {
$w[$i] = 0;
for my $r (@rows) {
my $l = length($r->[$i]);
$w[$i] = $l if $w[$i] < $l;
}
for my $r (@rows) {
$r->[$i] = center($r->[$i], $w[$i]);
}
}
push @rows, [];
syswrite $self->{fh}, join("\n", map { join(" | ", @$_) } @rows);
}
# Admin command 'set'.
# NOTE(review): unfinished stub — it numifies $value when it looks
# like a number but never writes anything to $ref.  TODO: implement or
# remove from %command.
sub cmd_set {
my ( $self, $ref, $value ) = @_;
if ( looks_like_number($value) ) {
$value += 0;
}
}
# Admin command 'use <name>': select the game instance that subsequent
# commands operate on.  Unknown names leave the selection unchanged
# and report the valid instance names.
sub cmd_use {
my ( $self, $name ) = @_;
if ( $self->{game}{game}{$name} ) {
$self->{instance} = $name;
}
else {
syswrite $self->{fh}, "No game named '$name'\n";
syswrite $self->{fh}, Dumper( [ keys %{ $self->{game}{game} } ] );
}
}
1;
| bpa/cyborg-rally | lib/Admin.pm | Perl | mit | 6,477 |
#!/usr/bin/env perl
# Needleman-Wunsch global alignment of the two sequences given on the
# command line; prints the two aligned strings ('-' marks a gap).
#
# FIX: added `use strict; use warnings;` — the script already declares
# every variable with `my`, so this only adds compile-time safety.
use strict;
use warnings;

# usage statement
die "usage: $0 <sequence 1> <sequence 2>\n" unless @ARGV == 2;

# get sequences from command line
my ($seq1, $seq2) = @ARGV;

# scoring scheme
my $MATCH    =  1; # +1 for letters that match
my $MISMATCH = -1; # -1 for letters that mismatch
my $GAP      = -1; # -1 for any gap

# initialization: row 0 / column 0 are pure-gap prefixes, with pointers
# aimed back at the origin so the trace-back knows where to stop.
my @matrix;
$matrix[0][0]{score}   = 0;
$matrix[0][0]{pointer} = "none";
for (my $j = 1; $j <= length($seq1); $j++) {
    $matrix[0][$j]{score}   = $GAP * $j;
    $matrix[0][$j]{pointer} = "left";
}
for (my $i = 1; $i <= length($seq2); $i++) {
    $matrix[$i][0]{score}   = $GAP * $i;
    $matrix[$i][0]{pointer} = "up";
}

# fill: each cell takes the best of the diagonal (match/mismatch), up
# and left (gap) moves, recording which move won for the trace-back.
for (my $i = 1; $i <= length($seq2); $i++) {
    for (my $j = 1; $j <= length($seq1); $j++) {
        my ($diagonal_score, $left_score, $up_score);

        # calculate match score
        my $letter1 = substr($seq1, $j-1, 1);
        my $letter2 = substr($seq2, $i-1, 1);
        if ($letter1 eq $letter2) {
            $diagonal_score = $matrix[$i-1][$j-1]{score} + $MATCH;
        }
        else {
            $diagonal_score = $matrix[$i-1][$j-1]{score} + $MISMATCH;
        }

        # calculate gap scores
        $up_score   = $matrix[$i-1][$j]{score} + $GAP;
        $left_score = $matrix[$i][$j-1]{score} + $GAP;

        # choose best score; ties prefer diagonal, then up
        if ($diagonal_score >= $up_score) {
            if ($diagonal_score >= $left_score) {
                $matrix[$i][$j]{score}   = $diagonal_score;
                $matrix[$i][$j]{pointer} = "diagonal";
            }
            else {
                $matrix[$i][$j]{score}   = $left_score;
                $matrix[$i][$j]{pointer} = "left";
            }
        }
        else {
            if ($up_score >= $left_score) {
                $matrix[$i][$j]{score}   = $up_score;
                $matrix[$i][$j]{pointer} = "up";
            }
            else {
                $matrix[$i][$j]{score}   = $left_score;
                $matrix[$i][$j]{pointer} = "left";
            }
        }
    }
}

# trace-back: follow the pointers from the bottom-right corner,
# building both alignment strings in reverse.
my $align1 = "";
my $align2 = "";

# start at last cell of matrix
my $j = length($seq1);
my $i = length($seq2);
while (1) {
    last if $matrix[$i][$j]{pointer} eq "none"; # ends at first cell of matrix

    if ($matrix[$i][$j]{pointer} eq "diagonal") {
        $align1 .= substr($seq1, $j-1, 1);
        $align2 .= substr($seq2, $i-1, 1);
        $i--;
        $j--;
    }
    elsif ($matrix[$i][$j]{pointer} eq "left") {
        $align1 .= substr($seq1, $j-1, 1);
        $align2 .= "-";
        $j--;
    }
    elsif ($matrix[$i][$j]{pointer} eq "up") {
        $align1 .= "-";
        $align2 .= substr($seq2, $i-1, 1);
        $i--;
    }
}

$align1 = reverse $align1;
$align2 = reverse $align2;
print "$align1\n";
print "$align2\n";
#!/usr/bin/perl -w
######################################################################
#
# process product file (MAI or CRB) and create CSV files for each section.
#
######################################################################
#
use strict;
#
use Carp;
use Getopt::Std;
use File::Find;
use File::Path qw(mkpath);
use File::Basename;
use File::Path 'rmtree';
use DBI;
#
######################################################################
#
# logical constants
#
use constant TRUE => 1;
use constant FALSE => 0;
#
use constant SUCCESS => 1;
use constant FAIL => 0;
#
# section names
#
use constant INDEX => '[Index]';
use constant INFORMATION => '[Information]';
use constant LOTNAMES => '[LotNames]';
#
# verbose levels
#
use constant NOVERBOSE => 0;
use constant MINVERBOSE => 1;
use constant MIDVERBOSE => 2;
use constant MAXVERBOSE => 3;
#
# section types
#
use constant SECTION_UNKNWON => 0;
use constant SECTION_NAME_VALUE => 1;
use constant SECTION_LIST => 2;
#
# file types
#
use constant FILE_TYPE_CRB => 0;
use constant FILE_TYPE_MAI => 1;
use constant FILE_TYPE_UNKNOWN => 2; # always last
#
my %file_types =
(
'cerberus' => FILE_TYPE_CRB(),
'maihime2' => FILE_TYPE_MAI()
);
#
######################################################################
#
# globals
#
my $cmd = $0;
my $log_fh = *STDOUT;
my $dbh = undef;
#
# cmd line options
#
my $logfile = '';
my $verbose = NOVERBOSE;
my $csv_dir = './CSV';
my $rmv_prod_dir = FALSE;
my $delimiter = "\t";
my $combine_lot_files = FALSE;
my $save_to_db = FALSE;
#
my %verbose_levels =
(
off => NOVERBOSE(),
min => MINVERBOSE(),
mid => MIDVERBOSE(),
max => MAXVERBOSE()
);
#
######################################################################
#
# miscellaneous functions
#
# Print the command-line usage/help text to the global $log_fh.
# $arg0 is the program name (callers pass $0).  The heredoc body is
# runtime output and interpolates $arg0; left byte-for-byte as-is.
sub usage
{
my ($arg0) = @_;
print $log_fh <<EOF;
usage: $arg0 [-?] [-h] \\
[-w | -W |-v level] \\
[-l logfile] \\
[-p path] \\
[-d delimiter] \\
[-R] [-L] [-D] \\
CRB or MAI file ...
where:
-? or -h - print this usage.
-w - enable warning (level=min=1)
-W - enable warning and trace (level=mid=2)
-v - verbose level: 0=off,1=min,2=mid,3=max
-l logfile - log file path
-p path - csv and db directory, defaults to './CSV'.
-d delimiter - CSV delimiter characer. default is a tab.
-R - remove old CRB or MAI directories (off by default).
-L - combine separate LOT files into one file keyed by LOT.
-D - export to SQLite DB
EOF
}
#
######################################################################
#
# db functions
#
# Return TRUE when table $table_name exists in the database behind
# $dbh (queried via DBI table_info), FALSE otherwise.
# NOTE(review): DBI's table_info already returns a prepared/executed
# statement handle; the extra $sth->execute here is redundant and its
# effect is driver-dependent — confirm against the DBD in use.  The
# hard-coded 'public' schema also looks PostgreSQL-flavored even
# though this script targets SQLite; verify.
sub table_exists
{
my ($dbh, $table_name) = @_;
my $sth = $dbh->table_info(undef, 'public', $table_name, 'TABLE');
$sth->execute;
my @info = $sth->fetchrow_array;
if (scalar(@info) > 0)
{
return TRUE;
}
else
{
return FALSE;
}
}
#
######################################################################
#
# load name-value or list section
#
# Parse a "name = value" section out of the raw file lines into
# $pprod_db->{$section}->{data} (a key => value hash), marking
# found_data/section_type bookkeeping.  $$pirec is advanced by the
# number of lines consumed.  Returns SUCCESS, or FAIL when the
# section body is empty.
# The section is located with a flip-flop grep from the "[Name]"
# header line to the first blank line.
sub load_name_value
{
my ($praw_data, $section, $pirec, $max_rec, $pprod_db) = @_;
#
$pprod_db->{found_data}->{$section} = FALSE;
$pprod_db->{section_type}->{$section} = SECTION_NAME_VALUE;
#
# Escape the leading '[' so the section name matches literally.
my $re_section = '\\' . $section;
my @section_data =
grep /^${re_section}\s*$/ .. /^\s*$/, @{$praw_data};
#
printf $log_fh "%d: <%s>\n",
__LINE__,
join("\n", @section_data)
if ($verbose >= MAXVERBOSE);
#
$$pirec += scalar(@section_data);
#
if (scalar(@section_data) <= 2)
{
$pprod_db->{$section} = {};
printf $log_fh "\t\t%d: NO NAME-VALUE DATA FOUND IN SECTION %s. Lines read: %d\n",
__LINE__, $section, scalar(@section_data);
return FAIL;
}
#
shift @section_data; # remove section name
pop @section_data; # remove end-of-section null-length line
#
# Split each "name = value" on the first '=' only (limit 2).
%{$pprod_db->{$section}->{data}} =
map { split /\s*=\s*/, $_, 2 } @section_data;
#
$pprod_db->{found_data}->{$section} = TRUE;
#
printf $log_fh "\t\t%d: Number of key-value pairs: %d\n",
__LINE__,
scalar(keys %{$pprod_db->{$section}->{data}})
if ($verbose >= MINVERBOSE);
printf $log_fh "\t\t%d: Lines read: %d\n",
__LINE__,
scalar(@section_data)
if ($verbose >= MINVERBOSE);
#
return SUCCESS;
}
#
# Tokenize $record on single spaces while treating double-quoted runs
# as literal text (quotes are stripped, embedded spaces preserved).
# Consecutive separators yield empty tokens; a trailing empty token
# (record ending in a space) is dropped.
sub split_quoted_string
{
    my ($record) = @_;
    #
    my @fields = ();
    my $field  = '';
    my $quoted = 0;
    #
    for my $ch ( split //, $record )
    {
        if ($quoted)
        {
            # inside quotes: a quote closes the run, anything else is literal
            if ( $ch eq '"' ) { $quoted = 0; }
            else              { $field .= $ch; }
        }
        elsif ( $ch eq '"' )
        {
            $quoted = 1;
        }
        elsif ( $ch eq ' ' )
        {
            push @fields, $field;
            $field = '';
        }
        else
        {
            $field .= $ch;
        }
    }
    #
    push @fields, $field if length $field;
    #
    return @fields;
}
#
# Parse a columnar list section: first body line is a space-separated
# header naming the columns; each following line is tokenized with
# split_quoted_string and stored as a column => value hashref in
# $pprod_db->{$section}->{data}.  Records whose token count differs
# from the column count are skipped with a log message.  $$pirec is
# advanced by the lines consumed.  Always returns SUCCESS.
# NOTE(review): records are unshift-ed, so {data} ends up in REVERSE
# file order — confirm downstream consumers expect that.
sub load_list
{
my ($praw_data, $section, $pirec, $max_rec, $pprod_db) = @_;
#
$pprod_db->{found_data}->{$section} = FALSE;
$pprod_db->{section_type}->{$section} = SECTION_LIST;
#
my $re_section = '\\' . $section;
my @section_data =
grep /^${re_section}\s*$/ .. /^\s*$/, @{$praw_data};
#
printf $log_fh "%d: <%s>\n", __LINE__, join("\n", @section_data) if ($verbose >= MAXVERBOSE);
#
$$pirec += scalar(@section_data);
#
# Need at least header + column line + terminator to have any rows.
if (scalar(@section_data) <= 3)
{
$pprod_db->{$section} = {};
printf $log_fh "\t\t\t%d: NO LIST DATA FOUND IN SECTION %s. Lines read: %d\n",
__LINE__,
$section, scalar(@section_data)
if ($verbose >= MINVERBOSE);
return SUCCESS;
}
#
shift @section_data; # remove section name
pop @section_data; # remove end-of-section null-length line
#
$pprod_db->{$section}->{header} = shift @section_data;
@{$pprod_db->{$section}->{column_names}} =
split / /, $pprod_db->{$section}->{header};
my $number_columns = scalar(@{$pprod_db->{$section}->{column_names}});
#
@{$pprod_db->{$section}->{data}} = ();
#
printf $log_fh "\t\t\t%d: Number of Columns: %d\n",
__LINE__,
$number_columns
if ($verbose >= MINVERBOSE);
#
foreach my $record (@section_data)
{
my @tokens = split_quoted_string($record);
my $number_tokens = scalar(@tokens);
#
printf $log_fh "\t\t\t%d: Number of tokens in record: %d\n", __LINE__, $number_tokens if ($verbose >= MAXVERBOSE);
#
if ($number_tokens == $number_columns)
{
my %data = ();
# Hash slice pairs each column name with its token.
@data{@{$pprod_db->{$section}->{column_names}}} = @tokens;
#
unshift @{$pprod_db->{$section}->{data}}, \%data;
printf $log_fh "\t\t\t%d: Current Number of Records: %d\n", __LINE__, scalar(@{$pprod_db->{$section}->{data}}) if ($verbose >= MAXVERBOSE);
}
else
{
printf $log_fh "\t\t\t%d: SKIPPING RECORD - NUMBER TOKENS (%d) != NUMBER COLUMNS (%d)\n", __LINE__, $number_tokens, $number_columns;
}
}
#
$pprod_db->{found_data}->{$section} = TRUE;
#
return SUCCESS;
}
#
######################################################################
#
# load and process product files, either CRB or MAI
#
# Slurp $prod_file into @{$praw_data}, one chomped line per element.
# Returns SUCCESS, or FAIL when the file is unreadable or cannot be
# opened; progress and errors are logged to the global $log_fh.
sub read_file
{
    my ($prod_file, $praw_data) = @_;
    #
    printf $log_fh "\t%d: Reading Product file: %s\n",
        __LINE__, $prod_file;
    #
    if ( ! -r $prod_file )
    {
        printf $log_fh "\t%d: ERROR: file $prod_file is NOT readable\n\n", __LINE__;
        return FAIL;
    }
    #
    # BUGFIX: use a lexical filehandle with 3-arg open.  The original
    # 2-arg open(INFD, $prod_file) would honor mode characters
    # ('>', '|', leading '<') embedded in the filename and leaked a
    # global bareword handle.
    my $infd;
    unless (open($infd, '<', $prod_file))
    {
        printf $log_fh "\t%d: ERROR: unable to open $prod_file.\n\n", __LINE__;
        return FAIL;
    }
    @{$praw_data} = <$infd>;
    close($infd);
    #
    chomp(@{$praw_data});
    printf $log_fh "\t\t%d: Lines read: %d\n", __LINE__, scalar(@{$praw_data}) if ($verbose >= MINVERBOSE);
    #
    return SUCCESS;
}
#
# Walk the raw file lines, find "[Section]" headers, and dispatch each
# section body to load_name_value (first body line contains '=') or
# load_list (columnar).  Sections whose first body line is blank are
# skipped as empty.  The loaders advance $irec via a reference.
# Always returns SUCCESS.
sub process_data
{
my ($prod_file, $praw_data, $pprod_db) = @_;
#
printf $log_fh "\t%d: Processing product data: %s\n",
__LINE__, $prod_file;
#
my $max_rec = scalar(@{$praw_data});
my $sec_no = 0;
#
for (my $irec=0; $irec<$max_rec; )
{
my $rec = $praw_data->[$irec];
#
if ($rec =~ m/^(\[[^\]]*\])/)
{
my $section = ${1};
#
printf $log_fh "\t\t%d: Section %03d: %s\n",
__LINE__, ++$sec_no, $section
if ($verbose >= MINVERBOSE);
#
# Peek at the first body line to classify the section.
$rec = $praw_data->[${irec}+1];
#
if ($rec =~ m/^\s*$/)
{
$irec += 2;
printf $log_fh "\t\t%d: Empty section\n", __LINE__;
}
elsif ($rec =~ m/.*=.*/)
{
load_name_value($praw_data,
$section,
\$irec,
$max_rec,
$pprod_db);
}
else
{
load_list($praw_data,
$section,
\$irec,
$max_rec,
$pprod_db);
}
}
else
{
$irec += 1;
}
}
#
return SUCCESS;
}
#
# Write a list-type section to a CSV file under $prod_dir, using the
# global $delimiter.  Files are opened append ("+>>") so repeated runs
# accumulate; the header row is only written when the file is new.
# With -L ($combine_lot_files) set, per-lot sections "Name<NNN>" are
# merged into one "Name.csv" with a leading lotno column; otherwise
# each becomes its own "Name_NNN.csv".
sub export_list_to_csv
{
my ($prod_file, $pprod_db, $prod_dir, $section, $ftype) = @_;
#
if (($combine_lot_files == FALSE) ||
($section !~ m/<([0-9]+)>/))
{
# Per-section file: "[Name<NNN>]" -> "Name_NNN.csv".
my $csv_file = $section;
$csv_file =~ s/[\[\]]//g;
$csv_file =~ s/<([0-9]+)>/_$1/g;
#
my $outnm = $prod_dir . '/' . $csv_file . ".csv";
#
my $print_cols = FALSE;
$print_cols = TRUE if ( ! -r $outnm );
#
open(my $outfh, "+>>" , $outnm) || die $!;
#
my $pcols = $pprod_db->{$section}->{column_names};
if ($print_cols == TRUE)
{
my $comma = "";
foreach my $col (@{$pcols})
{
printf $outfh "%s%s", $comma, $col;
$comma = $delimiter;
}
printf $outfh "\n";
}
#
foreach my $prow (@{$pprod_db->{$section}->{data}})
{
my $comma = "";
foreach my $col (@{$pcols})
{
printf $outfh "%s%s", $comma, $prow->{$col};
$comma = $delimiter;
}
printf $outfh "\n";
}
#
close($outfh);
}
else
{
# Combined per-lot file: "[Name<NNN>]" -> "Name.csv" keyed by lotno.
my $csv_file = $section;
$csv_file =~ s/[\[\]]//g;
$csv_file =~ s/<([0-9]+)>//g;
my $lotno = $1;
#
my $outnm = $prod_dir . '/' . $csv_file . ".csv";
#
my $print_cols = FALSE;
$print_cols = TRUE if ( ! -r $outnm );
#
open(my $outfh, "+>>" , $outnm) || die $!;
#
my $pcols = $pprod_db->{$section}->{column_names};
if ($print_cols == TRUE)
{
printf $outfh "lotno";
foreach my $col (@{$pcols})
{
printf $outfh "%s%s", $delimiter, $col;
}
printf $outfh "\n";
}
#
foreach my $prow (@{$pprod_db->{$section}->{data}})
{
printf $outfh "%s", $lotno;
foreach my $col (@{$pcols})
{
printf $outfh "%s%s", $delimiter, $prow->{$col};
}
printf $outfh "\n";
}
#
close($outfh);
}
}
#
# Write a name-value section to CSV as NAME/VALUE rows (or
# LOTNO/NAME/VALUE when -L combines per-lot sections), mirroring the
# file-naming and append/header behavior of export_list_to_csv.
sub export_name_value_to_csv
{
my ($prod_file, $pprod_db, $prod_dir, $section, $ftype) = @_;
#
if (($combine_lot_files == FALSE) ||
($section !~ m/<([0-9]+)>/))
{
# Per-section file: "[Name<NNN>]" -> "Name_NNN.csv".
my $csv_file = $section;
$csv_file =~ s/[\[\]]//g;
$csv_file =~ s/<([0-9]+)>/_$1/g;
#
my $outnm = $prod_dir . '/' . $csv_file . ".csv";
#
my $print_cols = FALSE;
$print_cols = TRUE if ( ! -r $outnm );
#
open(my $outfh, "+>>" , $outnm) || die $!;
#
if ($print_cols == TRUE)
{
printf $outfh "NAME%sVALUE\n", $delimiter;
}
#
foreach my $key (keys %{$pprod_db->{$section}->{data}})
{
printf $outfh "%s%s%s\n",
$key,
$delimiter,
$pprod_db->{$section}->{data}->{$key};
}
#
close($outfh);
}
else
{
# Combined per-lot file: "[Name<NNN>]" -> "Name.csv" keyed by lotno.
my $csv_file = $section;
$csv_file =~ s/[\[\]]//g;
$csv_file =~ s/<([0-9]+)>//g;
my $lotno = $1;
#
my $outnm = $prod_dir . '/' . $csv_file . ".csv";
#
my $print_cols = FALSE;
$print_cols = TRUE if ( ! -r $outnm );
#
open(my $outfh, "+>>" , $outnm) || die $!;
#
if ($print_cols == TRUE)
{
printf $outfh "LOTNO%sNAME%sVALUE\n", $delimiter, $delimiter;
}
#
foreach my $key (keys %{$pprod_db->{$section}->{data}})
{
printf $outfh "%s%s%s%s%s\n",
$lotno,
$delimiter,
$key,
$delimiter,
$pprod_db->{$section}->{data}->{$key};
}
#
close($outfh);
}
}
#
# Classify the loaded product data by matching the [Index] section's
# Format value against the %file_types patterns (literal,
# case-insensitive via \Q).  Returns one of the FILE_TYPE_* constants,
# FILE_TYPE_UNKNOWN when Format is absent or unrecognized.
sub get_file_type
{
my ($pprod_db) = @_;
#
my $file_type = FILE_TYPE_UNKNOWN;
if (exists($pprod_db->{'[Index]'}->{data}->{Format}))
{
my $format = $pprod_db->{'[Index]'}->{data}->{Format};
foreach my $re (keys %file_types)
{
if ($format =~ m/\Q$re/i)
{
$file_type = $file_types{$re};
printf $log_fh "\t\t%d: File type: %s\n",
__LINE__, $re;
last;
}
}
}
if ($file_type == FILE_TYPE_UNKNOWN)
{
printf $log_fh "\t\t%d: File type: UNKNOWN\n", __LINE__;
}
return $file_type;
}
#
# Export every parsed section of one product file to CSV files under
# "$csv_dir/CSV_<PRODNAME>/" (directory recreated when -R is set),
# dispatching on the recorded section_type.  Sections with no data are
# skipped.  Returns SUCCESS.
sub export_to_csv
{
my ($prod_file, $pprod_db) = @_;
#
printf $log_fh "\t%d: Writing product data to CSV: %s\n",
__LINE__, $prod_file;
#
# Output directory is derived from the upper-cased file basename.
my $prod_name = basename($prod_file);
$prod_name =~ tr/a-z/A-Z/;
my $prod_csv_dir = $csv_dir . '/CSV_' . $prod_name;
#
rmtree($prod_csv_dir) if ($rmv_prod_dir == TRUE);
( mkpath($prod_csv_dir) || die $! ) unless ( -d $prod_csv_dir );
#
printf $log_fh "\t\t%d: product %s CSV directory: %s\n",
__LINE__, $prod_name, $prod_csv_dir;
#
my $file_type = get_file_type($pprod_db);
#
foreach my $section (sort keys %{$pprod_db->{found_data}})
{
if ($pprod_db->{found_data}->{$section} != TRUE)
{
printf $log_fh "\t\t%d: No data for section %s. Skipping it.\n",
__LINE__, $section if ($verbose >= MINVERBOSE);
}
elsif ($pprod_db->{section_type}->{$section} == SECTION_NAME_VALUE)
{
printf $log_fh "\t\t%d: Name-Value Section: %s\n",
__LINE__, $section;
export_name_value_to_csv($prod_file,
$pprod_db,
$prod_csv_dir,
$section,
$file_type);
}
elsif ($pprod_db->{section_type}->{$section} == SECTION_LIST)
{
printf $log_fh "\t\t%d: List Section: %s\n",
__LINE__, $section;
export_list_to_csv($prod_file,
$pprod_db,
$prod_csv_dir,
$section,
$file_type);
}
else
{
printf $log_fh "\t\t%d: Unknown type Section: %s\n",
__LINE__, $section;
}
}
#
return SUCCESS;
}
#
# Export one list-type section to the SQLite database.
#
# NOT YET IMPLEMENTED.  The original body was a bare "return;"
# followed by an unreachable copy-paste of export_list_to_csv; that
# dead code has been removed.  The early return (empty list in list
# context, undef in scalar context) is preserved so existing callers
# behave exactly as before.
sub export_list_to_db
{
    my ($prod_file, $pprod_db, $prod_dir, $section, $ftype) = @_;
    #
    # TODO: create a per-section table from {column_names} and insert
    # the rows in {data}, mirroring export_name_value_to_db below.
    return;
}
#
sub export_name_value_to_db
{
    my ($prod_file, $pprod_db, $prod_dir, $section, $ftype) = @_;
    #
    # Write a name-value section into its own SQLite table
    # (Section_<section>), creating the table on first use.  DB errors
    # die via the connection's RaiseError => 1 attribute.
    #
    # generate table name and verify if table exists. if table
    # does not exist, then create the table.
    #
    my $table_name = "Section_" . $section;
    $table_name =~ s/[\[\]]//g;
    $table_name =~ s/<([0-9]+)>/_$1/g;
    #
    my $dbh = $pprod_db->{sqlite}->{dbh};
    #
    if (table_exists($dbh, $table_name) == TRUE)
    {
        printf $log_fh "\t\t\t%d: Table %s already exists\n", __LINE__, $table_name;
    }
    else
    {
        printf $log_fh "\t\t\t%d: Creating table %s\n", __LINE__, $table_name;
        my $create_tbl_sql = sprintf <<'END_SQL', ${table_name};
create table %s (
    name varchar(100) primary key,
    value varchar(100) not null
)
END_SQL
        #
        $dbh->do($create_tbl_sql);
    }
    #
    # prepare the insert once and execute it per row; the original
    # called $dbh->do() in the loop, which re-prepares the same
    # statement for every key.
    #
    my $insert_sql = "insert into ${table_name} (name, value) values (?, ?)";
    my $sth = $dbh->prepare($insert_sql);
    #
    foreach my $key (keys %{$pprod_db->{$section}->{data}})
    {
        $sth->execute($key, $pprod_db->{$section}->{data}->{$key});
    }
    #
    # NOTE: an unreachable block of leftover CSV-export code (a copy
    # of export_name_value_to_csv) used to follow an unconditional
    # "return" here; it was dead code and has been removed.
    #
    return;
}
#
sub export_to_db
{
    my ($prod_file, $pprod_db) = @_;
    #
    # Export every populated section of a parsed product file into a
    # per-product SQLite database file (<csv_dir>/DB_<PRODUCT>).
    # Does nothing unless DB output was requested (-D / $save_to_db).
    # Always returns SUCCESS.
    #
    return SUCCESS unless ($save_to_db == TRUE);
    #
    printf $log_fh "\t%d: Writing product data to DB: %s\n",
        __LINE__, $prod_file;
    #
    # upper-cased base name of the product file names the DB file
    my $prod_name = basename($prod_file);
    $prod_name =~ tr/a-z/A-Z/;
    my $prod_db_path = $csv_dir . '/DB_' . $prod_name;
    #
    # optionally remove any previous database (-R)
    unlink($prod_db_path) if ($rmv_prod_dir == TRUE);
    #
    printf $log_fh "\t\t%d: product %s DB file: %s\n",
        __LINE__, $prod_name, $prod_db_path;
    #
    printf $log_fh "\t\t%d: Creating DB: %s\n",
        __LINE__, $prod_db_path;
    #
    # open (and implicitly create) the SQLite DB; RaiseError => 1
    # means any subsequent DB error dies rather than returning undef
    $pprod_db->{sqlite}->{dsn} = "dbi:SQLite:dbname=${prod_db_path}";
    $pprod_db->{sqlite}->{user} = "";
    $pprod_db->{sqlite}->{password} = "";
    $pprod_db->{sqlite}->{dbh} =
        DBI->connect(
            $pprod_db->{sqlite}->{dsn},
            $pprod_db->{sqlite}->{user},
            $pprod_db->{sqlite}->{password},
            {
                PrintError => 0,
                RaiseError => 1,
                AutoCommit => 1,
                FetchHashKeyName => 'NAME_lc'
            });
    #
    my $file_type = get_file_type($pprod_db);
    #
    # dispatch each section with data to the exporter matching its
    # layout type
    foreach my $section (sort keys %{$pprod_db->{found_data}})
    {
        if ($pprod_db->{found_data}->{$section} != TRUE)
        {
            printf $log_fh "\t\t%d: No data for section %s. Skipping it.\n",
                __LINE__, $section if ($verbose >= MINVERBOSE);
        }
        elsif ($pprod_db->{section_type}->{$section} == SECTION_NAME_VALUE)
        {
            printf $log_fh "\t\t%d: Name-Value Section: %s\n",
                __LINE__, $section;
            export_name_value_to_db($prod_file,
                                    $pprod_db,
                                    $prod_db_path,
                                    $section,
                                    $file_type);
        }
        elsif ($pprod_db->{section_type}->{$section} == SECTION_LIST)
        {
            printf $log_fh "\t\t%d: List Section: %s\n",
                __LINE__, $section;
            export_list_to_db($prod_file,
                              $pprod_db,
                              $prod_db_path,
                              $section,
                              $file_type);
        }
        else
        {
            printf $log_fh "\t\t%d: Unknown type Section: %s\n",
                __LINE__, $section;
        }
    }
    #
    $pprod_db->{sqlite}->{dbh}->disconnect;
    #
    return SUCCESS;
}
#
sub process_file
{
    my ($prod_file) = @_;
    #
    # Run one product file through the full pipeline: read, parse,
    # CSV export, DB export.  Stops at the first failing stage and
    # logs a stage-specific error.  Returns SUCCESS or FAIL.
    #
    printf $log_fh "\n%d: Processing product File: %s\n",
        __LINE__, $prod_file;
    #
    my @raw_data = ();
    my %prod_db = ();
    #
    # each stage: [ description used in the error message, action ]
    my @stages =
    (
        [ 'Reading product file',
          sub { read_file($prod_file, \@raw_data) } ],
        [ 'Processing product file',
          sub { process_data($prod_file, \@raw_data, \%prod_db) } ],
        [ 'Exporting product file to CSV',
          sub { export_to_csv($prod_file, \%prod_db) } ],
        [ 'Exporting product file to DB',
          sub { export_to_db($prod_file, \%prod_db) } ],
    );
    #
    foreach my $stage (@stages)
    {
        my ($what, $action) = @{$stage};
        #
        if ($action->() != SUCCESS)
        {
            printf $log_fh "\t%d: ERROR %s: %s\n",
                __LINE__, $what, $prod_file;
            return FAIL;
        }
    }
    #
    printf $log_fh "\t%d: Success processing product file: %s\n",
        __LINE__, $prod_file;
    return SUCCESS;
}
#
######################################################################
#
#
# parse command-line options, then process each product file named
# on the command line.
#
my %opts;
if (getopts('?hwWv:p:l:d:RLD', \%opts) != 1)
{
    usage($cmd);
    exit 2;
}
#
# BUG FIX: the original iterated "foreach my $opt (%opts)", which
# flattens the hash into a (key, value, key, value, ...) list, so
# option *values* were also tested against option letters -- e.g.
# "-d h" would match the 'h' branch and incorrectly print the usage
# and exit.  Iterate over the keys only.
#
foreach my $opt (keys %opts)
{
    if (($opt eq 'h') or ($opt eq '?'))
    {
        usage($cmd);
        exit 0;
    }
    elsif ($opt eq 'R')
    {
        # remove any existing per-product output before exporting
        $rmv_prod_dir = TRUE;
    }
    elsif ($opt eq 'D')
    {
        # also export to SQLite DB
        $save_to_db = TRUE;
    }
    elsif ($opt eq 'L')
    {
        # combine per-lot sections into one file with a lotno column
        $combine_lot_files = TRUE;
    }
    elsif ($opt eq 'w')
    {
        $verbose = MINVERBOSE;
    }
    elsif ($opt eq 'W')
    {
        $verbose = MIDVERBOSE;
    }
    elsif ($opt eq 'v')
    {
        # accept either a numeric level 0-3 or a named level
        if ($opts{$opt} =~ m/^[0123]$/)
        {
            $verbose = $opts{$opt};
        }
        elsif (exists($verbose_levels{$opts{$opt}}))
        {
            $verbose = $verbose_levels{$opts{$opt}};
        }
        else
        {
            printf $log_fh "\n%d: Invalid verbose level: $opts{$opt}\n", __LINE__;
            usage($cmd);
            exit 2;
        }
    }
    elsif ($opt eq 'l')
    {
        # redirect all logging to the given file.  Use a lexical
        # filehandle instead of the original bareword "local *FH"
        # glob (same behavior, no package-global handle).
        $logfile = $opts{$opt};
        open(my $logfh, '>', $logfile) or die $!;
        $log_fh = $logfh;
        printf $log_fh "\n%d: Log File: %s\n", __LINE__, $logfile;
    }
    elsif ($opt eq 'p')
    {
        # output directory for CSV/DB files; created if missing
        $csv_dir = $opts{$opt} . '/';
        ( mkpath($csv_dir) || die $! ) unless ( -d $csv_dir );
        printf $log_fh "\n%d: CSV directory: %s\n", __LINE__, $csv_dir;
    }
    elsif ($opt eq 'd')
    {
        # CSV field delimiter; an empty value means TAB
        $delimiter = $opts{$opt};
        $delimiter = "\t" if ( $delimiter =~ /^$/ );
    }
}
#
if (scalar(@ARGV) == 0)
{
    printf $log_fh "%d: No product files given.\n", __LINE__;
    usage($cmd);
    exit 2;
}
#
foreach my $prod_file (@ARGV)
{
    process_file($prod_file);
}
#
exit 0;
# #
# # process LNB data files, u01, u03, mpr and write data
# # out as csv files.
# #
# # NOTES:
# #
# # z_cass or feeder table no = FADD/10000
# #
# ######################################################################
# #
# use strict;
# #
# use Carp;
# use Getopt::Std;
# use File::Find;
# use File::Path qw(mkpath);
# #
# use Memory::Usage;
# #
# ######################################################################
# #
# # logical constants
# #
# use constant TRUE => 1;
# use constant FALSE => 0;
# #
# # output types
# #
# use constant PROD_COMPLETE => 3;
# use constant PROD_COMPLETE_LATER => 4;
# use constant DETECT_CHANGE => 5;
# use constant MANUAL_CLEAR => 11;
# use constant TIMER_NOT_RUNNING => 12;
# use constant AUTO_CLEAR => 13;
# #
# # processing states
# #
# use constant RESET => 'reset';
# use constant BASELINE => 'baseline';
# use constant DELTA => 'delta';
# #
# # common sections for all files types: u01, u03, mpr
# #
# use constant INDEX => '[Index]';
# use constant INFORMATION => '[Information]';
# #
# # sections specific to u01
# #
# use constant TIME => '[Time]';
# use constant CYCLETIME => '[CycleTime]';
# use constant COUNT => '[Count]';
# use constant DISPENSER => '[Dispenser]';
# use constant MOUNTPICKUPFEEDER => '[MountPickupFeeder]';
# use constant MOUNTPICKUPNOZZLE => '[MountPickupNozzle]';
# use constant INSPECTIONDATA => '[InspectionData]';
# #
# # sections specific to u03
# #
# use constant BRECG => '[BRecg]';
# use constant BRECGCALC => '[BRecgCalc]';
# use constant ELAPSETIMERECOG => '[ElapseTimeRecog]';
# use constant SBOARD => '[SBoard]';
# use constant HEIGHTCORRECT => '[HeightCorrect]';
# use constant MOUNTQUALITYTRACE => '[MountQualityTrace]';
# use constant MOUNTLATESTREEL => '[MountLatestReel]';
# use constant MOUNTEXCHANGEREEL => '[MountExchangeReel]';
# #
# # sections specific to mpr
# #
# use constant TIMEDATASP => '[TimeDataSP]';
# use constant COUNTDATASP => '[CountDataSP]';
# use constant COUNTDATASP2 => '[CountDataSP2]';
# use constant TRACEDATASP => '[TraceDataSP]';
# use constant TRACEDATASP_2 => '[TraceDataSP_2]';
# use constant ISPINFODATA => '[ISPInfoData]';
# use constant MASKISPINFODATA => '[MaskISPInfoData]';
# #
# # files types
# #
# use constant LNB_U01_FILE_TYPE => 'u01';
# use constant LNB_U03_FILE_TYPE => 'u03';
# use constant LNB_MPR_FILE_TYPE => 'mpr';
# #
# # verbose levels
# #
# use constant NOVERBOSE => 0;
# use constant MINVERBOSE => 1;
# use constant MIDVERBOSE => 2;
# use constant MAXVERBOSE => 3;
# #
# # processing options
# #
# use constant PROC_OPT_NONE => 0;
# use constant PROC_OPT_IGNRESET12 => 1;
# use constant PROC_OPT_IGNALL12 => 2;
# use constant PROC_OPT_USENEGDELTS => 4;
# use constant PROC_OPT_USEOLDNZ => 8;
# #
# # nozzle key names
# #
# use constant NZ_KEY_HEAD => 'Head';
# use constant NZ_KEY_NHADD => 'NHAdd';
# use constant NZ_KEY_NCADD => 'NCAdd';
# #
# use constant NZ_LABEL_NHADD_NCADD => 'nhadd_ncadd';
# use constant NZ_LABEL_HEAD_NHADD => 'head_nhadd';
# use constant NZ_LABEL_HEAD_NCADD => 'head_ncadd';
# #
# ######################################################################
# #
# # globals
# #
# my $cmd = $0;
# my $log_fh = *STDOUT;
# my $mu = Memory::Usage->new();
# #
# # cmd line options
# #
# my $logfile = '';
# my $verbose = NOVERBOSE;
# my $file_type = "all";
# my $export_dir = '/tmp/';
# my $proc_options = PROC_OPT_NONE;
# my $remove_mount = FALSE;
# #
# my %verbose_levels =
# (
# off => NOVERBOSE(),
# min => MINVERBOSE(),
# mid => MIDVERBOSE(),
# max => MAXVERBOSE()
# );
# #
# my %allowed_proc_options =
# (
# NONE => PROC_OPT_NONE(),
# IGNRESET12 => PROC_OPT_IGNRESET12(),
# IGNALL12 => PROC_OPT_IGNALL12(),
# USENEGDELTS => PROC_OPT_USENEGDELTS(),
# USEOLDNZ => PROC_OPT_USEOLDNZ()
# );
# #
# # fields to ignore for output=12 files if enabled.
# #
# my %ignored_output12_fields =
# (
# 'TPICKUP' => 1,
# 'TPMISS' => 1,
# 'TRMISS' => 1,
# 'TDMISS' => 1,
# 'TMMISS' => 1,
# 'THMISS' => 1,
# 'CPERR' => 1,
# 'CRERR' => 1,
# 'CDERR' => 1,
# 'CMERR' => 1,
# 'CTERR' => 1
# );
# #
# # summary tables.
# #
# my %totals = ();
# #
# # list of columns to export
# #
# my @mount_quality_trace_export_cols =
# (
# { name => 'B', format => '%s' },
# { name => 'IDNUM', format => '%s' },
# { name => 'TURN', format => '%s' },
# { name => 'MS', format => '%s' },
# { name => 'TS', format => '%s' },
# { name => 'FAdd', format => '%s' },
# { name => 'FSAdd', format => '%s' },
# { name => 'FBLKCode', format => '%s' },
# { name => 'FBLKSerial', format => '%s' },
# { name => 'NHAdd', format => '%s' },
# { name => 'NCAdd', format => '%s' },
# { name => 'NBLKCode', format => '%s' },
# { name => 'NBLKSerial', format => '%s' },
# { name => 'ReelID', format => '%s' },
# { name => 'F', format => '%s' },
# { name => 'RCGX', format => '%s' },
# { name => 'RCGY', format => '%s' },
# { name => 'RCGA', format => '%s' },
# { name => 'TCX', format => '%s' },
# { name => 'TCY', format => '%s' },
# { name => 'MPosiRecX', format => '%s' },
# { name => 'MPosiRecY', format => '%s' },
# { name => 'MPosiRecA', format => '%s' },
# { name => 'MPosiRecZ', format => '%s' },
# { name => 'THMAX', format => '%s' },
# { name => 'THAVE', format => '%s' },
# { name => 'MNTCX', format => '%s' },
# { name => 'MNTCY', format => '%s' },
# { name => 'MNTCA', format => '%s' },
# { name => 'TLX', format => '%s' },
# { name => 'TLY', format => '%s' },
# { name => 'InspectArea', format => '%s' },
# { name => 'DIDNUM', format => '%s' },
# { name => 'DS', format => '%s' },
# { name => 'DispenseID', format => '%s' },
# { name => 'PARTS', format => '%s' },
# { name => 'WarpZ', format => '%s' }
# );
#
# my @feeder_export_cols =
# (
# { name => 'Machine', format => '%s' },
# { name => 'Lane', format => ',%s' },
# { name => 'Stage', format => ',%s' },
# { name => 'FAdd', format => ',%s' },
# { name => 'FSAdd', format => ',%s' },
# { name => 'ReelID', format => ',%s' },
# { name => 'Pickup', format => ',%s' },
# { name => 'PMiss', format => ',%s' },
# { name => 'RMiss', format => ',%s' },
# { name => 'DMiss', format => ',%s' },
# { name => 'MMiss', format => ',%s' },
# { name => 'HMiss', format => ',%s' },
# { name => 'TRSMiss', format => ',%s' },
# { name => 'Mount', format => ',%s' }
# );
# #
# my @feeder_export_cols2 =
# (
# { name => 'Machine', format => '%s' },
# { name => 'Lane', format => ',%s' },
# { name => 'Stage', format => ',%s' },
# { name => 'FAdd', format => ',%s' },
# { name => 'FSAdd', format => ',%s' },
# { name => 'Pickup', format => ',%s' },
# { name => 'PMiss', format => ',%s' },
# { name => 'RMiss', format => ',%s' },
# { name => 'DMiss', format => ',%s' },
# { name => 'MMiss', format => ',%s' },
# { name => 'HMiss', format => ',%s' },
# { name => 'TRSMiss', format => ',%s' },
# { name => 'Mount', format => ',%s' }
# );
# #
# my @feeder_export_cols3 =
# (
# { name => 'Machine', format => '%s' },
# { name => 'Lane', format => ',%s' },
# { name => 'Stage', format => ',%s' },
# { name => 'TableNo', format => ',%s' },
# { name => 'Pickup', format => ',%s' },
# { name => 'PMiss', format => ',%s' },
# { name => 'RMiss', format => ',%s' },
# { name => 'DMiss', format => ',%s' },
# { name => 'MMiss', format => ',%s' },
# { name => 'HMiss', format => ',%s' },
# { name => 'TRSMiss', format => ',%s' },
# { name => 'Mount', format => ',%s' }
# );
# #
# my @feeder_count_cols =
# (
# 'Pickup',
# 'PMiss',
# 'RMiss',
# 'DMiss',
# 'MMiss',
# 'HMiss',
# 'TRSMiss',
# 'Mount'
# );
# #
# my @nozzle_export_cols =
# (
# { name => 'Machine', format => '%s' },
# { name => 'Lane', format => ',%s' },
# { name => 'Stage', format => ',%s' },
# { name => 'NHAdd', format => ',%s' },
# { name => 'NCAdd', format => ',%s' },
# { name => 'Blkserial', format => ',%s' },
# { name => 'Pickup', format => ',%s' },
# { name => 'PMiss', format => ',%s' },
# { name => 'RMiss', format => ',%s' },
# { name => 'DMiss', format => ',%s' },
# { name => 'MMiss', format => ',%s' },
# { name => 'HMiss', format => ',%s' },
# { name => 'TRSMiss', format => ',%s' },
# { name => 'Mount', format => ',%s' }
# );
# #
# my @nozzle_export_cols2 =
# (
# { name => 'Machine', format => '%s' },
# { name => 'Lane', format => ',%s' },
# { name => 'Stage', format => ',%s' },
# { name => 'NHAdd', format => ',%s' },
# { name => 'NCAdd', format => ',%s' },
# { name => 'Pickup', format => ',%s' },
# { name => 'PMiss', format => ',%s' },
# { name => 'RMiss', format => ',%s' },
# { name => 'DMiss', format => ',%s' },
# { name => 'MMiss', format => ',%s' },
# { name => 'HMiss', format => ',%s' },
# { name => 'TRSMiss', format => ',%s' },
# { name => 'Mount', format => ',%s' }
# );
# #
# my %nozzle_export_cols_new =
# (
# NZ_LABEL_NHADD_NCADD() => [
# { name => 'Machine', format => '%s' },
# { name => 'Lane', format => ',%s' },
# { name => 'Stage', format => ',%s' },
# { name => NZ_KEY_NHADD(), format => ',%s' },
# { name => NZ_KEY_NCADD(), format => ',%s' },
# { name => 'Blkserial', format => ',%s' },
# { name => 'Pickup', format => ',%s' },
# { name => 'PMiss', format => ',%s' },
# { name => 'RMiss', format => ',%s' },
# { name => 'DMiss', format => ',%s' },
# { name => 'MMiss', format => ',%s' },
# { name => 'HMiss', format => ',%s' },
# { name => 'TRSMiss', format => ',%s' },
# { name => 'Mount', format => ',%s' }
# ],
# NZ_LABEL_HEAD_NHADD() => [
# { name => 'Machine', format => '%s' },
# { name => 'Lane', format => ',%s' },
# { name => 'Stage', format => ',%s' },
# { name => NZ_KEY_HEAD(), format => ',%s' },
# { name => NZ_KEY_NHADD(), format => ',%s' },
# { name => 'Blkserial', format => ',%s' },
# { name => 'Pickup', format => ',%s' },
# { name => 'PMiss', format => ',%s' },
# { name => 'RMiss', format => ',%s' },
# { name => 'DMiss', format => ',%s' },
# { name => 'MMiss', format => ',%s' },
# { name => 'HMiss', format => ',%s' },
# { name => 'TRSMiss', format => ',%s' },
# { name => 'Mount', format => ',%s' }
# ],
# NZ_LABEL_HEAD_NCADD() => [
# { name => 'Machine', format => '%s' },
# { name => 'Lane', format => ',%s' },
# { name => 'Stage', format => ',%s' },
# { name => NZ_KEY_HEAD(), format => ',%s' },
# { name => NZ_KEY_NCADD(), format => ',%s' },
# { name => 'Blkserial', format => ',%s' },
# { name => 'Pickup', format => ',%s' },
# { name => 'PMiss', format => ',%s' },
# { name => 'RMiss', format => ',%s' },
# { name => 'DMiss', format => ',%s' },
# { name => 'MMiss', format => ',%s' },
# { name => 'HMiss', format => ',%s' },
# { name => 'TRSMiss', format => ',%s' },
# { name => 'Mount', format => ',%s' }
# ]
# );
# #
# my %nozzle_export_cols2_new =
# (
# NZ_LABEL_NHADD_NCADD() => [
# { name => 'Machine', format => '%s' },
# { name => 'Lane', format => ',%s' },
# { name => 'Stage', format => ',%s' },
# { name => NZ_KEY_NHADD(), format => ',%s' },
# { name => NZ_KEY_NCADD(), format => ',%s' },
# { name => 'Pickup', format => ',%s' },
# { name => 'PMiss', format => ',%s' },
# { name => 'RMiss', format => ',%s' },
# { name => 'DMiss', format => ',%s' },
# { name => 'MMiss', format => ',%s' },
# { name => 'HMiss', format => ',%s' },
# { name => 'TRSMiss', format => ',%s' },
# { name => 'Mount', format => ',%s' }
# ],
# NZ_LABEL_HEAD_NHADD() => [
# { name => 'Machine', format => '%s' },
# { name => 'Lane', format => ',%s' },
# { name => 'Stage', format => ',%s' },
# { name => NZ_KEY_HEAD(), format => ',%s' },
# { name => NZ_KEY_NHADD(), format => ',%s' },
# { name => 'Pickup', format => ',%s' },
# { name => 'PMiss', format => ',%s' },
# { name => 'RMiss', format => ',%s' },
# { name => 'DMiss', format => ',%s' },
# { name => 'MMiss', format => ',%s' },
# { name => 'HMiss', format => ',%s' },
# { name => 'TRSMiss', format => ',%s' },
# { name => 'Mount', format => ',%s' }
# ],
# NZ_LABEL_HEAD_NCADD() => [
# { name => 'Machine', format => '%s' },
# { name => 'Lane', format => ',%s' },
# { name => 'Stage', format => ',%s' },
# { name => NZ_KEY_HEAD(), format => ',%s' },
# { name => NZ_KEY_NCADD(), format => ',%s' },
# { name => 'Pickup', format => ',%s' },
# { name => 'PMiss', format => ',%s' },
# { name => 'RMiss', format => ',%s' },
# { name => 'DMiss', format => ',%s' },
# { name => 'MMiss', format => ',%s' },
# { name => 'HMiss', format => ',%s' },
# { name => 'TRSMiss', format => ',%s' },
# { name => 'Mount', format => ',%s' }
# ]
# );
# #
# my @nozzle_count_cols =
# (
# 'Pickup',
# 'PMiss',
# 'RMiss',
# 'DMiss',
# 'MMiss',
# 'HMiss',
# 'TRSMiss',
# 'Mount'
# );
# #
# ########################################################################
# ########################################################################
# #
# # miscellaneous functions
# #
# sub short_usage
# {
# my ($arg0) = @_;
# print $log_fh <<EOF;
#
# usage: $arg0 [-?] [-h] [-H] [-M] \\
# [-w | -W |-v level] \\
# [-t u10|u03|mpr] \\
# [-l logfile] \\
# [-o option] \\
# [-d path] \\
# directory ...
#
# where:
# -? or -h - print this usage.
# -H - print long usage and description.
# -M - remove Mount fields (not in older files).
# -w - enable warning (level=min=1)
# -W - enable warning and trace (level=mid=2)
# -v - verbose level: 0=off,1=min,2=mid,3=max
# -t file-type = type of file to process: u01, u03, mpr.
# default is all files.
# [-l logfile] \\
# -l logfile - log file path
# -o option - enable a processing option:
# ignreset12 - ignore resetable output=12 fields.
# ignall12 - ignore all output=12 files.
# usenegdelts - use negative deltas in calculations.
# useoldnz - use old nozzle processing.
# -d path - export directory, defaults to '/tmp'.
#
# EOF
# }
# sub long_usage
# {
# my ($arg0) = @_;
# print $log_fh <<EOF;
#
# usage: $arg0 [-?] [-h] [-H] [-M] \\
# [-w | -W |-v level] \\
# [-t u10|u03|mpr] \\
# [-l logfile] \\
# [-o option] \\
# [-d path] \\
# directory ...
#
# where:
# -? or -h - print this usage.
# -H - print long usage and description.
# -M - remove Mount fields (not in older files).
# -w - enable warning (level=min=1)
# -W - enable warning and trace (level=mid=2)
# -v - verbose level: 0=off,1=min,2=mid,3=max
# -t file-type = type of file to process: u01, u03, mpr.
# default is all files.
# -l logfile - log file path
# -o option - enable a processing option:
# ignreset12 - ignore resetable output=12 fields.
# ignall12 - ignore all output=12 files.
# usenegdelts - use negative deltas in calculations.
# useoldnz - use old nozzle processing.
# -d path - export directory, defaults to '/tmp'.
#
# Description:
#
# The script scans the list of given directories for U01, U03 and
# MPR files, then it processes the files.
#
# For U01 files, the data in the following sections are tabulated
# and reported in CSV files:
#
# [Time]
# [Count]
# [MountPickupFeeder]
# [MountPickupNozzle]
#
# The CSV files are list below. The names indicate how the data
# were grouped, that is, what keys were used:
#
# TIME_BY_MACHINE.csv
# TIME_BY_MACHINE_LANE.csv
# TIME_BY_PRODUCT_MACHINE.csv
# TIME_BY_PRODUCT_MACHINE_LANE.csv
# TIME_TOTALS_BY_PRODUCT.csv
# TIME_TOTALS.csv
#
# COUNT_BY_MACHINE.csv
# COUNT_BY_MACHINE_LANE.csv
# COUNT_BY_PRODUCT_MACHINE.csv
# COUNT_BY_PRODUCT_MACHINE_LANE.csv
# COUNT_TOTALS_BY_PRODUCT.csv
# COUNT_TOTALS.csv
#
# FEEDER_BY_MACHINE_LANE_STAGE_FADD_FSADD.csv
# FEEDER_BY_MACHINE_LANE_STAGE_FADD_FSADD_REELID.csv
# FEEDER_BY_MACHINE_LANE_STAGE_TABLE_NO.csv
# FEEDER_BY_PRODUCT_MACHINE_LANE_STAGE_FADD_FSADD.csv
# FEEDER_BY_PRODUCT_MACHINE_LANE_STAGE_FADD_FSADD_REELID.csv
# FEEDER_BY_PRODUCT_MACHINE_LANE_STAGE_TABLE_NO.csv
#
# NOZZLE_BY_MACHINE_LANE_STAGE_HEAD_NCADD_BLKSERIAL.csv
# NOZZLE_BY_MACHINE_LANE_STAGE_HEAD_NCADD.csv
# NOZZLE_BY_MACHINE_LANE_STAGE_HEAD_NHADD_BLKSERIAL.csv
# NOZZLE_BY_MACHINE_LANE_STAGE_HEAD_NHADD.csv
# NOZZLE_BY_MACHINE_LANE_STAGE_NHADD_NCADD_BLKSERIAL.csv
# NOZZLE_BY_MACHINE_LANE_STAGE_NHADD_NCADD.csv
# NOZZLE_BY_PRODUCT_MACHINE_LANE_STAGE_HEAD_NCADD_BLKSERIAL.csv
# NOZZLE_BY_PRODUCT_MACHINE_LANE_STAGE_HEAD_NCADD.csv
# NOZZLE_BY_PRODUCT_MACHINE_LANE_STAGE_HEAD_NHADD_BLKSERIAL.csv
# NOZZLE_BY_PRODUCT_MACHINE_LANE_STAGE_HEAD_NHADD.csv
# NOZZLE_BY_PRODUCT_MACHINE_LANE_STAGE_NHADD_NCADD_BLKSERIAL.csv
# NOZZLE_BY_PRODUCT_MACHINE_LANE_STAGE_NHADD_NCADD.csv
#
# The U01 file raw data are written to separate files by section. The
# following list of files is generated:
#
# TIME_BY_MACHINE_LANE_STAGE_FILENAME.csv
# TIME_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
# CYCLE_TIME_BY_MACHINE_LANE_STAGE_FILENAME.csv
# CYCLE_TIME_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
# COUNT_BY_MACHINE_LANE_STAGE_FILENAME.csv
# COUNT_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
# DISPENSER_BY_MACHINE_LANE_STAGE_FILENAME.csv
# DISPENSER_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
# MOUNT_PICKUP_FEEDER_BY_MACHINE_LANE_STAGE_FILENAME.csv
# MOUNT_PICKUP_FEEDER_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
# MOUNT_PICKUP_NOZZLE_BY_MACHINE_LANE_STAGE_FILENAME.csv
# MOUNT_PICKUP_NOZZLE_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
# INSPECTION_DATA_BY_MACHINE_LANE_STAGE_FILENAME.csv
# INSPECTION_DATA_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
#
# The U03 file raw data are written to separate files by section. The
# following list of files is generated:
#
# MOUNT_QUALITY_TRACE_BY_MACHINE_LANE_STAGE_FILENAME.csv
# MOUNT_QUALITY_TRACE_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
# MOUNT_LATEST_REEL_BY_MACHINE_LANE_STAGE_FILENAME.csv
# MOUNT_LATEST_REEL_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
# MOUNT_EXCHANGE_REEL_BY_MACHINE_LANE_STAGE_FILENAME.csv
# MOUNT_EXCHANGE_REEL_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
#
# The MPR file raw data are written to separate files by section. The
# following list of files is generated:
#
# TIME_DATA_SP_BY_MACHINE_LANE_STAGE_FILENAME.csv
# TIME_DATA_SP_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
# COUNT_DATA_SP_BY_MACHINE_LANE_STAGE_FILENAME.csv
# COUNT_DATA_SP_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
# COUNT_DATA_SP2_BY_MACHINE_LANE_STAGE_FILENAME.csv
# COUNT_DATA_SP2_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
# TRACE_DATA_SP_BY_MACHINE_LANE_STAGE_FILENAME.csv
# TRACE_DATA_SP_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
# TRACE_DATA_SP_2_BY_MACHINE_LANE_STAGE_FILENAME.csv
# TRACE_DATA_SP_2_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
# ISP_INFO_DATA_BY_MACHINE_LANE_STAGE_FILENAME.csv
# ISP_INFO_DATA_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
# MASK_ISP_INFO_DATA_BY_MACHINE_LANE_STAGE_FILENAME.csv
# MASK_ISP_INFO_DATA_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv
#
# The command line options '-?' and '-h' list a short version of the
# usage message. This is the default usage message. Option '-H'
# prints out a more detailed version of the usage. This one.
#
# The option '-M' is a hack. Since older U01 and MPR files may not
# support the 'Mount' column, this option removes any reference
# to the 'Mount' field during processing. If you do not use it on
# older files, you will run into "undefined field" errors.
#
# There are four verbose levels:
#
# 1) The default value is 0 which prints out no warnings. Only error
# messages are printed when the script exits because of a fatal error.
#
# 2) Level 1 which is set either with '-w' or '-v 1' prints out non-fatal
# warnings. This includes warning for negative deltas, changes in
# blkserial or reel id, change overs, etc. The warnings highlight
# events which may be of interest. I usually run with this warning
# level when debugging.
#
# 3) Level 2 which is set with '-W' or '-v 2' includes all the data
# include with level 1 and 0, and additional messages for tracing. It
# can generate a lot of messages.
#
# 4) Level 3 which is set with '-v 3' generates the most messages. It
# will list the data which are read in, etc. It is *very* verbose.
#
# If you wish to limit the file processing to only one type of
# file, then use the the '-t' option and choose the type: u01, u03,
# or mpr. The default is all types of files if the file type is found.
#
# You can set the output file name using the '-l' option. You give
# it the name of the file. By default all output goes to STDOUT.
#
# The CSV files are written by default in /tmp. If you wish to
# use a different directory, then use the '-d' option and give
# the path as the option argument.
#
# The '-o' option allows you to change how the U01 tabulation is
# performed. The following options are available:
#
# ignreset12 - ignore resetable output=12 fields. This options causes
# the data in the [Count] section of a U01, output=12 file to be
# completely ignored.
#
# ignall12 - ignore all output=12 files. This option causes all
# U01, output=12 files to be ignored in all tabulations.
#
# usenegdelts - use negative deltas in calculations. This option
# causes all negative deltas to be used in tabulations. The default
# is to set any negative delta to zero.
#
# useoldnz - use old nozzle processing. This is strictly for testing.
# Do not use.
#
# EOF
# }
# #
# sub remove_mount_fields
# {
# #
# # it's a hack. since some older U01 and MPR files do not
# # have the Mount column, we have to remove any reference
# # to it in any internal data.
# #
# @feeder_export_cols =
# grep { $_->{name} ne 'Mount' } @feeder_export_cols;
# @feeder_export_cols2 =
# grep { $_->{name} ne 'Mount' } @feeder_export_cols2;
# @feeder_export_cols3 =
# grep { $_->{name} ne 'Mount' } @feeder_export_cols3;
# @feeder_count_cols =
# grep { $_ ne 'Mount' } @feeder_count_cols;
# #
# @nozzle_export_cols =
# grep { $_->{name} ne 'Mount' } @nozzle_export_cols;
# @nozzle_export_cols2 =
# grep { $_->{name} ne 'Mount' } @nozzle_export_cols2;
# @nozzle_count_cols =
# grep { $_ ne 'Mount' } @nozzle_count_cols;
# #
# foreach my $key (keys %nozzle_export_cols_new)
# {
# @{$nozzle_export_cols_new{$key}} =
# grep { $_->{name} ne 'Mount' }
# @{$nozzle_export_cols_new{$key}};
# }
# foreach my $key (keys %nozzle_export_cols2_new)
# {
# @{$nozzle_export_cols2_new{$key}} =
# grep { $_->{name} ne 'Mount' }
# @{$nozzle_export_cols2_new{$key}};
# }
# }
# #
# sub set_name_value_section_column_names
# {
# my ($file_type, $pfile, $section) = @_;
# #
# if ( ! exists($pfile->{$section}))
# {
# printf $log_fh "%d: No column data for %s %s.\n", __LINE__, $file_type, $section if ($verbose >= MAXVERBOSE);
# }
# elsif ( ! exists($totals{column_names}{$file_type}{$section}) )
# {
# @{$totals{column_names}{$file_type}{$section}} =
# (sort keys %{$pfile->{$section}->{data}});
# #
# printf $log_fh "\n%d: Setting column names %s %s: %s\n", __LINE__, $file_type, $section, join(' ', @{$totals{column_names}{$file_type}{$section}});
# }
# }
# #
# sub set_list_section_column_names
# {
# my ($file_type, $pfile, $section) = @_;
# #
# if ( ! exists($pfile->{$section}))
# {
# printf $log_fh "%d: No column data for %s %s.\n", __LINE__, $file_type, $section if ($verbose >= MAXVERBOSE);
# }
# elsif ( ! exists($totals{column_names}{$file_type}{$section}) )
# {
# my $pcols = $pfile->{$section}->{column_names};
# $totals{column_names}{$file_type}{$section} = $pcols;
# #
# printf $log_fh "\n%d: Setting column names %s %s: %s\n", __LINE__, $file_type, $section, join(' ', @{$totals{column_names}{$file_type}{$section}});
# }
# }
# #
# sub export_list_section_as_csv
# {
# my ($section, $file_type, $file_name, $machine_label, $do_product) = @_;
# #
# if ( ! exists($totals{$section}))
# {
# printf $log_fh "\n%d: Section %s does NOT exist\n", __LINE__, $section;
# return;
# }
# #
# ###############################################################
# #
# printf $log_fh "\n%d: Export Data For %s by Machine, Lane, Stage, Filename:\n", __LINE__, $section;
# #
# my $outfnm = "${export_dir}/${file_name}_BY_MACHINE_LANE_STAGE_FILENAME.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open(my $outfh, ">" , $outfnm) || die $!;
# #
# my $pcols = $totals{column_names}{$file_type}{$section};
# #
# printf $outfh "${machine_label},lane,stage,filename";
# foreach my $col (@{$pcols})
# {
# printf $outfh ",%s", $col;
# }
# printf $outfh "\n";
# #
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_filename}})
# {
# foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_filename}{$machine}})
# {
# foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_filename}{$machine}{$lane}})
# {
# foreach my $filename (sort keys %{$totals{$section}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}})
# {
# foreach my $prow (@{$totals{$section}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}{$filename}{data}})
# {
# printf $outfh "%s,%s,%s,%s", $machine, $lane, $stage, $filename;
# foreach my $col (@{$pcols})
# {
# printf $outfh ",%s", $prow->{$col};
# }
# printf $outfh "\n";
# }
# }
# }
# }
# }
# close($outfh);
# #
# return unless ($do_product == TRUE);
# #
# ###############################################################
# #
# printf $log_fh "\n%d: Export Data For %s by Product, Machine, Lane, Stage, Filename:\n", __LINE__, $section;
# #
# $outfnm = "${export_dir}/${file_name}_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# #
# printf $outfh "product,${machine_label},lane,stage,filename";
# foreach my $col (@{$pcols})
# {
# printf $outfh ",%s", $col;
# }
# printf $outfh "\n";
# #
# foreach my $product (sort keys %{$totals{$section}{by_product}})
# {
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}})
# {
# foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}{$machine}})
# {
# foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}{$machine}{$lane}})
# {
# foreach my $filename (sort keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}})
# {
# foreach my $prow (@{$totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}{$filename}{data}})
# {
# printf $outfh "%s,%s,%s,%s,%s", $product, $machine, $lane, $stage, $filename;
# foreach my $col (@{$pcols})
# {
# printf $outfh ",%s", $prow->{$col};
# }
# printf $outfh "\n";
# }
# }
# }
# }
# }
# }
# close($outfh);
# }
# #
# sub export_name_value_section_as_csv
# {
# my ($section, $file_type, $file_name, $machine_label, $do_product) = @_;
# #
# if ( ! exists($totals{$section}))
# {
# printf $log_fh "\n%d: Section %s does NOT exist\n", __LINE__, $section;
# return;
# }
# #
# ###############################################################
# #
# printf $log_fh "\n%d: Export Data For %s by Machine, Lane, Stage, Filename:\n", __LINE__, $section;
# #
# my $outfnm = "${export_dir}/${file_name}_BY_MACHINE_LANE_STAGE_FILENAME.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open(my $outfh, ">" , $outfnm) || die $!;
# #
# my $pcols = $totals{column_names}{$file_type}{$section};
# #
# printf $outfh "${machine_label},lane,stage,filename";
# foreach my $col (@{$pcols})
# {
# printf $outfh ",%s", $col;
# }
# printf $outfh "\n";
# #
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_filename}})
# {
# foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_filename}{$machine}})
# {
# foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_filename}{$machine}{$lane}})
# {
# foreach my $filename (sort keys %{$totals{$section}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}})
# {
# printf $outfh "%s,%s,%s,%s", $machine, $lane, $stage, $filename;
# foreach my $col (@{$pcols})
# {
# printf $outfh ",%s", $totals{$section}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}{$filename}{data}{$col};
# }
# printf $outfh "\n";
# }
# }
# }
# }
# close($outfh);
# #
# return unless ($do_product == TRUE);
# #
# ###############################################################
# #
# printf $log_fh "\n%d: Export Data For %s by Product, Machine, Lane, Stage, Filename:\n", __LINE__, $section;
# #
# $outfnm = "${export_dir}/${file_name}_BY_PRODUCT_MACHINE_LANE_STAGE_FILENAME.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# #
# printf $outfh "product,${machine_label},lane,stage,filename";
# foreach my $col (@{$pcols})
# {
# printf $outfh ",%s", $col;
# }
# printf $outfh "\n";
# #
# foreach my $product (sort keys %{$totals{$section}{by_product}})
# {
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}})
# {
# foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}{$machine}})
# {
# foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}{$machine}{$lane}})
# {
# foreach my $filename (sort keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}})
# {
# printf $outfh "%s,%s,%s,%s,%s", $product, $machine, $lane, $stage, $filename;
# foreach my $col (@{$pcols})
# {
# printf $outfh ",%s", $totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}{$filename}{data}{$col};
# }
# printf $outfh "\n";
# }
# }
# }
# }
# }
# close($outfh);
# }
# #
# sub tabulate_list_section
# {
# my ($pdb, $pfile, $file_type, $section, $do_product) = @_;
# #
# my $filename = $pfile->{file_name};
# my $machine = $pfile->{mach_no};
# my $lane = $pfile->{lane};
# my $stage = $pfile->{stage};
# my $output_no = $pfile->{output_no};
# #
# #
# if ( ! exists($pfile->{$section}))
# {
# printf $log_fh "%d: WARNING: Section %s does NOT exist in file %s\n", __LINE__, $section, $filename if ($verbose >= MINVERBOSE);
# return;
# }
# #
# @{$totals{$section}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}{$filename}{data}} = ();
# #
# foreach my $prow (@{$pfile->{$section}->{data}})
# {
# unshift @{$totals{$section}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}{$filename}{data}}, $prow;
# }
# #
# return unless ($do_product == TRUE);
# #
# my $product = $pdb->{product}{$file_type}{$machine}{$lane}{$stage};
# #
# @{$totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}{$filename}{data}} = ();
# #
# foreach my $prow (@{$pfile->{$section}->{data}})
# {
# unshift @{$totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}{$filename}{data}}, $prow;
# }
# }
# #
# sub prepare_list_section
# {
# my ($pdb, $pfile, $file_type, $section, $do_product) = @_;
# #
# if ($pfile->{found_data}->{$section} == FALSE)
# {
# printf $log_fh "%d: WARNING: No data for %s.\n", __LINE__, $section if ($verbose >= MIDVERBOSE);
# return;
# }
# #
# my $machine = $pfile->{mach_no};
# my $lane = $pfile->{lane};
# my $stage = $pfile->{stage};
# my $output_no = $pfile->{output_no};
# my $filename = $pfile->{file_name};
# #
# set_list_section_column_names($file_type, $pfile, $section);
# #
# printf $log_fh "\n%d: SECTION : %s\n", __LINE__, $section
# if ($verbose >= MAXVERBOSE);
# #
# if ($verbose >= MAXVERBOSE)
# {
# printf $log_fh "%d: MACHINE : %s\n", __LINE__, $machine;
# printf $log_fh "%d: LANE : %d\n", __LINE__, $lane;
# printf $log_fh "%d: STAGE : %d\n", __LINE__, $stage;
# printf $log_fh "%d: OUTPUT NO: %s\n", __LINE__, $output_no;
# printf $log_fh "%d: FILE RECS : %d\n", __LINE__, scalar(@{$pfile->{data}});
# printf $log_fh "%d: %s RECS: %d\n", __LINE__, $section, scalar(@{$pfile->{$section}->{data}}) if (defined($pfile->{$section}->{data}));
# }
# #
# tabulate_list_section($pdb, $pfile, $file_type, $section, $do_product);
# #
# return;
# }
# #
# sub tabulate_name_value_section
# {
# my ($pdb, $pfile, $file_type, $section, $do_product) = @_;
# #
# my $filename = $pfile->{file_name};
# my $machine = $pfile->{mach_no};
# my $lane = $pfile->{lane};
# my $stage = $pfile->{stage};
# my $output_no = $pfile->{output_no};
# #
# if ( ! exists($pfile->{$section}))
# {
# printf $log_fh "%d: WARNING: Section %s does NOT exist in file %s\n", __LINE__, $section, $filename if ($verbose >= MINVERBOSE);
# return;
# }
# #
# foreach my $key (keys %{$pfile->{$section}->{data}})
# {
# $totals{$section}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}{$filename}{data}{$key} = $pfile->{$section}->{data}->{$key};
# }
# #
# return unless ($do_product == TRUE);
# #
# my $product = $pdb->{product}{$file_type}{$machine}{$lane}{$stage};
# #
# foreach my $key (keys %{$pfile->{$section}->{data}})
# {
# $totals{$section}{by_product}{$product}{by_machine_lane_stage_filename}{$machine}{$lane}{$stage}{$filename}{data}{$key} = $pfile->{$section}->{data}->{$key};
# }
# }
# #
# sub prepare_name_value_section
# {
# my ($pdb, $pfile, $file_type, $section, $do_product) = @_;
# #
# if ($pfile->{found_data}->{$section} == FALSE)
# {
# printf $log_fh "%d: WARNING: No data for %s.\n", __LINE__, $section if ($verbose >= MIDVERBOSE);
# return;
# }
# #
# my $machine = $pfile->{mach_no};
# my $lane = $pfile->{lane};
# my $stage = $pfile->{stage};
# my $output_no = $pfile->{output_no};
# my $filename = $pfile->{file_name};
# #
# set_name_value_section_column_names($file_type, $pfile, $section);
# #
# printf $log_fh "\n%d: SECTION : %s\n", __LINE__, $section
# if ($verbose >= MAXVERBOSE);
# #
# if ($verbose >= MAXVERBOSE)
# {
# printf $log_fh "%d: MACHINE : %s\n", __LINE__, $machine;
# printf $log_fh "%d: LANE : %d\n", __LINE__, $lane;
# printf $log_fh "%d: STAGE : %d\n", __LINE__, $stage;
# printf $log_fh "%d: OUTPUT NO: %s\n", __LINE__, $output_no;
# printf $log_fh "%d: FILE RECS : %d\n", __LINE__, scalar(@{$pfile->{data}});
# printf $log_fh "%d: %s RECS: %d\n", __LINE__, $section, scalar(keys %{$pfile->{$section}->{data}}) if (defined(keys %{$pfile->{$section}->{data}}));
# }
# #
# tabulate_name_value_section($pdb, $pfile, $file_type, $section, $do_product);
# #
# return;
# }
# #
# ########################################################################
# ########################################################################
# #
# # current product functions
# #
# sub get_product_info
# {
# my ($pdata, $pmjsid, $plotname, $plotnumber) = @_;
# #
# my $section = INDEX;
# $$pmjsid = $pdata->{$section}->{data}->{MJSID};
# $$pmjsid = $1 if ($$pmjsid =~ m/"([^"]*)"/);
# #
# $section = INFORMATION;
# $$plotname = $pdata->{$section}->{data}->{LotName};
# $$plotname = $1 if ($$plotname =~ m/"([^"]*)"/);
# $$plotnumber = $pdata->{$section}->{data}->{LotNumber};
# }
# #
# sub set_product_info
# {
# my ($pdb, $pfile, $ftype) = @_;
# #
# my $filename = $pfile->{file_name};
# #
# my $machine = $pfile->{mach_no};
# my $lane = $pfile->{lane};
# my $stage = $pfile->{stage};
# my $output_no = $pfile->{output_no};
# #
# my $mjsid = 'UNKNOWN';
# my $lotname = 'UNKNOWN';
# my $lotnumber = 0;
# #
# if ( ! exists($pdb->{product}{$ftype}{$machine}{$lane}{$stage}))
# {
# $pdb->{product}{$ftype}{$machine}{$lane}{$stage} = "${mjsid}_${lotname}_${lotnumber}";
# $pdb->{change_over}{$ftype}{$machine}{$lane}{$stage} = FALSE;
# }
# elsif (($output_no == PROD_COMPLETE) ||
# ($output_no == PROD_COMPLETE_LATER))
# {
# get_product_info($pfile, \$mjsid, \$lotname, \$lotnumber);
# #
# if (($pdb->{product}{$ftype}{$machine}{$lane}{$stage} ne "${mjsid}_${lotname}_${lotnumber}") &&
# ($pdb->{product}{$ftype}{$machine}{$lane}{$stage} ne "UNKNOWN_UNKNOWN_0"))
# {
# $pdb->{change_over}{$ftype}{$machine}{$lane}{$stage} = TRUE;
# }
# else
# {
# $pdb->{change_over}{$ftype}{$machine}{$lane}{$stage} = FALSE;
# }
# #
# $pdb->{product}{$ftype}{$machine}{$lane}{$stage} = "${mjsid}_${lotname}_${lotnumber}";
# }
# else
# {
# # clear this flag.
# $pdb->{change_over}{$ftype}{$machine}{$lane}{$stage} = FALSE;
# }
# #
# printf $log_fh "%d: Product %s: %s, Change Over: %d\n", __LINE__, $ftype, $pdb->{product}{$ftype}{$machine}{$lane}{$stage}, $pdb->{change_over}{$ftype}{$machine}{$lane}{$stage} if ($verbose >= MIDVERBOSE);
# }
# #
# ########################################################################
# ########################################################################
# #
# # scan directories for U01, U03 and MPR files.
# #
# my %all_list = ();
# my $one_type = '';
# #
# sub want_one_type
# {
# if ($_ =~ m/^.*\.${one_type}$/)
# {
# printf $log_fh "%d: FOUND %s FILE: %s\n", __LINE__, $one_type, $File::Find::name
# if ($verbose >= MAXVERBOSE);
# #
# my $file_name = $_;
# #
# my $date = '';
# my $mach_no = '';
# my $stage = '';
# my $lane = '';
# my $pcb_serial = '';
# my $pcb_id = '';
# my $output_no = '';
# my $pcb_id_lot_no = '';
# #
# my @parts = split('\+-\+', $file_name);
# if (scalar(@parts) >= 9)
# {
# $date = $parts[0];
# $mach_no = $parts[1];
# $stage = $parts[2];
# $lane = $parts[3];
# $pcb_serial = $parts[4];
# $pcb_id = $parts[5];
# $output_no = $parts[6];
# $pcb_id_lot_no = $parts[7];
# }
# else
# {
# @parts = split('-', $file_name);
# if (scalar(@parts) >= 9)
# {
# $date = $parts[0];
# $mach_no = $parts[1];
# $stage = $parts[2];
# $lane = $parts[3];
# $pcb_serial = $parts[4];
# $pcb_id = $parts[5];
# $output_no = $parts[6];
# $pcb_id_lot_no = $parts[7];
# }
# }
# #
# unshift @{$all_list{$one_type}},
# {
# 'file_name' => $file_name,
# 'full_path' => $File::Find::name,
# 'directory' => $File::Find::dir,
# 'date' => $date,
# 'mach_no' => $mach_no,
# 'stage' => $stage,
# 'lane' => $lane,
# 'pcb_serial' => $pcb_serial,
# 'pcb_id' => $pcb_id,
# 'output_no' => $output_no,
# 'pcb_id_lot_no' => $pcb_id_lot_no
# };
# }
# }
# #
# sub want_all_types
# {
# my $dt = '';
# #
# if ($_ =~ m/^.*\.u01$/)
# {
# printf $log_fh "%d: FOUND u01 FILE: %s\n", __LINE__, $File::Find::name
# if ($verbose >= MAXVERBOSE);
# $dt = 'u01';
# }
# elsif ($_ =~ m/^.*\.u03$/)
# {
# printf $log_fh "%d: FOUND u03 FILE: %s\n", __LINE__, $File::Find::name
# if ($verbose >= MAXVERBOSE);
# $dt = 'u03';
# }
# elsif ($_ =~ m/^.*\.mpr$/)
# {
# printf $log_fh "%d: FOUND mpr FILE: %s\n", __LINE__, $File::Find::name
# if ($verbose >= MAXVERBOSE);
# $dt = 'mpr';
# }
# #
# if ($dt ne '')
# {
# my $file_name = $_;
# #
# my $date = '';
# my $mach_no = '';
# my $stage = '';
# my $lane = '';
# my $pcb_serial = '';
# my $pcb_id = '';
# my $output_no = '';
# my $pcb_id_lot_no = '';
# #
# my @parts = split('\+-\+', $file_name);
# if (scalar(@parts) >= 9)
# {
# $date = $parts[0];
# $mach_no = $parts[1];
# $stage = $parts[2];
# $lane = $parts[3];
# $pcb_serial = $parts[4];
# $pcb_id = $parts[5];
# $output_no = $parts[6];
# $pcb_id_lot_no = $parts[7];
# }
# else
# {
# @parts = split('-', $file_name);
# if (scalar(@parts) >= 9)
# {
# $date = $parts[0];
# $mach_no = $parts[1];
# $stage = $parts[2];
# $lane = $parts[3];
# $pcb_serial = $parts[4];
# $pcb_id = $parts[5];
# $output_no = $parts[6];
# $pcb_id_lot_no = $parts[7];
# }
# }
# #
# unshift @{$all_list{$dt}},
# {
# 'file_name' => $file_name,
# 'full_path' => $File::Find::name,
# 'directory' => $File::Find::dir,
# 'date' => $date,
# 'mach_no' => $mach_no,
# 'stage' => $stage,
# 'lane' => $lane,
# 'pcb_serial' => $pcb_serial,
# 'pcb_id' => $pcb_id,
# 'output_no' => $output_no,
# 'pcb_id_lot_no' => $pcb_id_lot_no
# };
# }
# }
# #
# sub get_all_files
# {
# my ($ftype, $pargv, $pu01, $pu03, $pmpr) = @_;
# #
# # optimize for file type
# #
# if ($ftype eq 'u01')
# {
# $one_type = $ftype;
# $all_list{$one_type} = $pu01;
# #
# find(\&want_one_type, @{$pargv});
# #
# @{$pu01} = sort { $a->{file_name} cmp $b->{file_name} } @{$pu01};
# }
# elsif ($ftype eq 'u03')
# {
# $one_type = $ftype;
# $all_list{$one_type} = $pu03;
# #
# find(\&want_one_type, @{$pargv});
# #
# @{$pu03} = sort { $a->{file_name} cmp $b->{file_name} } @{$pu03};
# }
# elsif ($ftype eq 'mpr')
# {
# $one_type = $ftype;
# $all_list{$one_type} = $pmpr;
# #
# find(\&want_one_type, @{$pargv});
# #
# @{$pmpr} = sort { $a->{file_name} cmp $b->{file_name} } @{$pmpr};
# }
# else
# {
# $all_list{u01} = $pu01;
# $all_list{u03} = $pu03;
# $all_list{mpr} = $pmpr;
# #
# find(\&want_all_types, @{$pargv});
# #
# @{$pu01} = sort { $a->{file_name} cmp $b->{file_name} } @{$pu01};
# @{$pu03} = sort { $a->{file_name} cmp $b->{file_name} } @{$pu03};
# @{$pmpr} = sort { $a->{file_name} cmp $b->{file_name} } @{$pmpr};
# }
# }
# #
# ######################################################################
# ######################################################################
# #
# # read in data file and load all sections
# #
# sub load
# {
# my ($pdata) = @_;
# #
# my $path = $pdata->{full_path};
# #
# if ( ! -r $path )
# {
# printf $log_fh "\n%d: ERROR: file $path is NOT readable\n\n", __LINE__;
# return 0;
# }
# #
# unless (open(INFD, $path))
# {
# printf $log_fh "\n%d: ERROR: unable to open $path.\n\n", __LINE__;
# return 0;
# }
# @{$pdata->{data}} = <INFD>;
# close(INFD);
# #
# # remove newlines
# #
# chomp(@{$pdata->{data}});
# printf $log_fh "%d: Lines read: %d\n", __LINE__, scalar(@{$pdata->{data}})
# if ($verbose >= MAXVERBOSE);
# #
# return 1;
# }
# #
# sub load_name_value
# {
# my ($pdata, $section) = @_;
# #
# $pdata->{found_data}->{$section} = FALSE;
# #
# printf $log_fh "\n%d: Loading Name-Value Section: %s\n", __LINE__, $section
# if ($verbose >= MAXVERBOSE);
# #
# my $re_section = '\\' . $section;
# @{$pdata->{raw}->{$section}} =
# grep /^${re_section}\s*$/ .. /^\s*$/, @{$pdata->{data}};
# #
# # printf $log_fh "<%s>\n", join("\n", @{$pdata->{raw}->{$section}});
# #
# if (scalar(@{$pdata->{raw}->{$section}}) <= 2)
# {
# # $pdata->{$section} = {};
# delete $pdata->{$section};
# printf $log_fh "%d: No data found.\n", __LINE__
# if ($verbose >= MAXVERBOSE);
# return 0;
# }
# #
# shift @{$pdata->{raw}->{$section}};
# pop @{$pdata->{raw}->{$section}};
# #
# printf $log_fh "%d: Section Lines: %d\n", __LINE__, scalar(@{$pdata->{raw}->{$section}})
# if ($verbose >= MAXVERBOSE);
# #
# %{$pdata->{$section}->{data}} =
# map { split /\s*=\s*/, $_, 2 } @{$pdata->{raw}->{$section}};
# printf $log_fh "%d: Number of Keys: %d\n", __LINE__, scalar(keys %{$pdata->{$section}->{data}})
# if ($verbose >= MAXVERBOSE);
# #
# $pdata->{found_data}->{$section} = TRUE;
# #
# return 1;
# }
# #
# sub split_quoted_string
# {
# my $rec = shift;
# #
# my $rec_len = length($rec);
# #
# my $istart = -1;
# my $iend = -1;
# my $in_string = 0;
# #
# my @tokens = ();
# my $token = "";
# #
# for (my $i=0; $i<$rec_len; $i++)
# {
# my $c = substr($rec, $i, 1);
# #
# if ($in_string == 1)
# {
# if ($c eq '"')
# {
# $in_string = 0;
# }
# else
# {
# $token .= $c;
# }
# }
# elsif ($c eq '"')
# {
# $in_string = 1;
# }
# elsif ($c eq ' ')
# {
# # printf $log_fh "Token ... <%s>\n", $token;
# push (@tokens, $token);
# $token = '';
# }
# else
# {
# $token .= $c;
# }
# }
# #
# if (length($token) > 0)
# {
# # printf $log_fh "Token ... <%s>\n", $token;
# push (@tokens, $token);
# $token = '';
# }
# #
# # printf $log_fh "Tokens: \n%s\n", join("\n",@tokens);
# #
# return @tokens;
# }
# #
# sub load_list
# {
# my ($pdata, $section) = @_;
# #
# printf $log_fh "\n%d: Loading List Section: %s\n", __LINE__, $section
# if ($verbose >= MAXVERBOSE);
# #
# $pdata->{found_data}->{$section} = FALSE;
# #
# my $re_section = '\\' . $section;
# @{$pdata->{raw}->{$section}} =
# grep /^${re_section}\s*$/ .. /^\s*$/, @{$pdata->{data}};
# #
# # printf $log_fh "<%s>\n", join("\n", @{$pdata->{raw}->{$section}});
# #
# if (scalar(@{$pdata->{raw}->{$section}}) <= 3)
# {
# # $pdata->{$section} = {};
# delete $pdata->{$section};
# printf $log_fh "%d: No data found.\n", __LINE__
# if ($verbose >= MAXVERBOSE);
# return 0;
# }
# shift @{$pdata->{raw}->{$section}};
# pop @{$pdata->{raw}->{$section}};
# $pdata->{$section}->{header} = shift @{$pdata->{raw}->{$section}};
# @{$pdata->{$section}->{column_names}} =
# split / /, $pdata->{$section}->{header};
# my $number_columns = scalar(@{$pdata->{$section}->{column_names}});
# #
# @{$pdata->{$section}->{data}} = ();
# #
# printf $log_fh "%d: Section Lines: %d\n", __LINE__, scalar(@{$pdata->{raw}->{$section}})
# if ($verbose >= MAXVERBOSE);
# # printf $log_fh "Column Names: %d\n", $number_columns;
# foreach my $record (@{$pdata->{raw}->{$section}})
# {
# # printf $log_fh "\nRECORD: %s\n", $record;
# #
# # printf $log_fh "\nRECORD (original): %s\n", $record;
# # $record =~ s/"\s+"\s/"" /g;
# # $record =~ s/"\s+"\s*$/""/g;
# # printf $log_fh "\nRECORD (final): %s\n", $record;
# # my @tokens = split / /, $record;
# #
# my @tokens = split_quoted_string($record);
# my $number_tokens = scalar(@tokens);
# printf $log_fh "%d: Number of tokens in record: %d\n", __LINE__, $number_tokens
# if ($verbose >= MAXVERBOSE);
# #
# if ($number_tokens == $number_columns)
# {
# my %data = ();
# @data{@{$pdata->{$section}->{column_names}}} = @tokens;
# my $data_size = scalar(keys %data);
# # printf $log_fh "Current Data Size: %d\n", $data_size;
# unshift @{$pdata->{$section}->{data}}, \%data;
# printf $log_fh "%d: Current Number of Records: %d\n", __LINE__, scalar(@{$pdata->{$section}->{data}})
# if ($verbose >= MAXVERBOSE);
# }
# else
# {
# printf $log_fh "%d: SKIPPING RECORD - NUMBER TOKENS (%d) != NUMBER COLUMNS (%d)\n", __LINE__, $number_tokens, $number_columns;
# }
# }
# #
# $pdata->{found_data}->{$section} = TRUE;
# #
# return 1;
# }
# #
# sub backfill_list
# {
# my ($pdata, $section, $pcols) = @_;
# #
# foreach my $prow (@{$pdata->{$section}->{data}})
# {
# foreach my $col (@{$pcols})
# {
# # $prow->{$col} = 0 unless (defined($prow->{$col}));
# if (( ! exists($prow->{$col})) ||
# ( ! defined($prow->{$col})))
# {
# # printf "%d: WARNING - assigning ZERO to undefined column %s %s\n", __LINE__, $section, $col;
# $prow->{$col} = 0;
# }
# }
# }
# }
# #
# ########################################################################
# ########################################################################
# #
# # process U01 files.
# #
# sub export_u01_count_data
# {
# my ($pdb) = @_;
# #
# ###############################################################
# #
# my $section = COUNT;
# #
# printf $log_fh "\n%d: Export Total Data For %s:\n", __LINE__, $section;
# #
# my $first_time = TRUE;
# #
# my $outfnm = "${export_dir}/COUNT_TOTALS.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open(my $outfh, ">" , $outfnm) || die $!;
# #
# foreach my $key (sort keys %{$totals{$section}{totals}})
# {
# if ($first_time == TRUE)
# {
# printf $outfh "%s", $key;
# $first_time = FALSE;
# }
# else
# {
# printf $outfh ",%s", $key;
# }
# }
# printf $outfh "\n";
# #
# $first_time = TRUE;
# foreach my $key (sort keys %{$totals{$section}{totals}})
# {
# if ($first_time == TRUE)
# {
# printf $outfh "%d", $totals{$section}{totals}{$key};
# $first_time = FALSE;
# }
# else
# {
# printf $outfh ",%d", $totals{$section}{totals}{$key};
# }
# }
# printf $outfh "\n";
# close($outfh);
# #
# $section = COUNT;
# #
# printf $log_fh "\n%d: Export Data For %s by Machine:\n", __LINE__, $section;
# #
# $first_time = TRUE;
# #
# $outfnm = "${export_dir}/COUNT_BY_MACHINE.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_machine}})
# {
# if ($first_time == TRUE)
# {
# printf $outfh "machine";
# foreach my $key (sort keys %{$totals{$section}{by_machine}{$machine}})
# {
# printf $outfh ",%s", $key;
# }
# printf $outfh "\n";
# $first_time = FALSE;
# }
# #
# printf $outfh "%s", $machine;
# foreach my $key (sort keys %{$totals{$section}{by_machine}{$machine}})
# {
# printf $outfh ",%d", $totals{$section}{by_machine}{$machine}{$key};
# }
# printf $outfh "\n";
# }
# close($outfh);
# #
# $section = COUNT;
# #
# printf $log_fh "\n%d: Export Data For %s by Machine and Lane:\n", __LINE__, $section;
# #
# $first_time = TRUE;
# $outfnm = "${export_dir}/COUNT_BY_MACHINE_LANE.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane}})
# {
# foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane}{$machine}})
# {
# if ($first_time == TRUE)
# {
# printf $outfh "machine,lane";
# foreach my $key (sort keys %{$totals{$section}{by_machine_lane}{$machine}{$lane}})
# {
# printf $outfh ",%s", $key;
# }
# printf $outfh "\n";
# $first_time = FALSE;
# }
# #
# printf $outfh "%s,%s", $machine, $lane;
# foreach my $key (sort keys %{$totals{$section}{by_machine_lane}{$machine}{$lane}})
# {
# printf $outfh ",%d", $totals{$section}{by_machine_lane}{$machine}{$lane}{$key};
# }
# printf $outfh "\n";
# }
# }
# close($outfh);
# #
# ###############################################################
# #
# $section = COUNT;
# #
# printf $log_fh "\n%d: Export Total Data For %s by Product:\n", __LINE__, $section;
# #
# $first_time = TRUE;
# $outfnm = "${export_dir}/COUNT_TOTALS_BY_PRODUCT.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# foreach my $product (sort keys %{$totals{$section}{by_product}})
# {
# if ($first_time == TRUE)
# {
# printf $outfh "product";
# foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{totals}})
# {
# printf $outfh ",%s", $key;
# }
# printf $outfh "\n";
# $first_time = FALSE;
# }
# #
# printf $outfh "%s", $product;
# foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{totals}})
# {
# printf $outfh ",%d", $totals{$section}{by_product}{$product}{totals}{$key};
# }
# printf $outfh "\n";
# }
# close($outfh);
# #
# $section = COUNT;
# #
# printf $log_fh "\n%d: Export Data For %s by Product and Machine:\n", __LINE__, $section;
# #
# $first_time = TRUE;
# $outfnm = "${export_dir}/COUNT_BY_PRODUCT_MACHINE.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# foreach my $product (sort keys %{$totals{$section}{by_product}})
# {
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine}})
# {
# if ($first_time == TRUE)
# {
# printf $outfh "product,machine";
# foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{by_machine}{$machine}})
# {
# printf $outfh ",%s", $key;
# }
# printf $outfh "\n";
# $first_time = FALSE;
# }
# #
# printf $outfh "%s,%s", $product, $machine;
# foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{by_machine}{$machine}})
# {
# printf $outfh ",%d", $totals{$section}{by_product}{$product}{by_machine}{$machine}{$key};
# }
# printf $outfh "\n";
# }
# }
# close($outfh);
# #
# $section = COUNT;
# #
# printf $log_fh "\n%d: Export Data For %s by Product, Machine and Lane:\n", __LINE__, $section;
# #
# $first_time = TRUE;
# $outfnm = "${export_dir}/COUNT_BY_PRODUCT_MACHINE_LANE.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# foreach my $product (sort keys %{$totals{$section}{by_product}})
# {
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane}})
# {
# foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane}{$machine}})
# {
# if ($first_time == TRUE)
# {
# printf $outfh "product,machine,lane";
# foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{by_machine_lane}{$machine}{$lane}})
# {
# printf $outfh ",%s", $key;
# }
# printf $outfh "\n";
# $first_time = FALSE;
# }
# #
# printf $outfh "%s,%s,%s", $product, $machine, $lane;
# foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{by_machine_lane}{$machine}{$lane}})
# {
# printf $outfh ",%d", $totals{$section}{by_product}{$product}{by_machine_lane}{$machine}{$lane}{$key};
# }
# printf $outfh "\n";
# }
# }
# }
# close($outfh);
# }
# #
# sub export_u01_time_data
# {
# my ($pdb) = @_;
# #
# ###############################################################
# #
# my $section = TIME;
# #
# printf $log_fh "\n%d: Export Total Data For %s:\n", __LINE__, $section;
# #
# my $first_time = TRUE;
# #
# my $outfnm = "${export_dir}/TIME_TOTALS.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open(my $outfh, ">" , $outfnm) || die $!;
# #
# foreach my $key (sort keys %{$totals{$section}{totals}})
# {
# if ($first_time == TRUE)
# {
# printf $outfh "%s", $key;
# $first_time = FALSE;
# }
# else
# {
# printf $outfh ",%s", $key;
# }
# }
# printf $outfh "\n";
# #
# $first_time = TRUE;
# foreach my $key (sort keys %{$totals{$section}{totals}})
# {
# if ($first_time == TRUE)
# {
# printf $outfh "%s", $totals{$section}{totals}{$key};
# $first_time = FALSE;
# }
# else
# {
# printf $outfh ",%s", $totals{$section}{totals}{$key};
# }
# }
# printf $outfh "\n";
# close($outfh);
# #
# ###############################################################
# #
# $section = TIME;
# #
# printf $log_fh "\n%d: Export Data For %s by Machine:\n", __LINE__, $section;
# #
# $first_time = TRUE;
# #
# $outfnm = "${export_dir}/TIME_BY_MACHINE.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_machine}})
# {
# if ($first_time == TRUE)
# {
# printf $outfh "machine";
# foreach my $key (sort keys %{$totals{$section}{by_machine}{$machine}})
# {
# printf $outfh ",%s", $key;
# }
# printf $outfh "\n";
# $first_time = FALSE;
# }
# #
# printf $outfh "%s", $machine;
# foreach my $key (sort keys %{$totals{$section}{by_machine}{$machine}})
# {
# printf $outfh ",%s", $totals{$section}{by_machine}{$machine}{$key};
# }
# printf $outfh "\n";
# }
# close($outfh);
# #
# $section = TIME;
# #
# printf $log_fh "\n%d: Export Data For %s by Machine and Lane:\n", __LINE__, $section;
# #
# $first_time = TRUE;
# $outfnm = "${export_dir}/TIME_BY_MACHINE_LANE.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane}})
# {
# foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane}{$machine}})
# {
# if ($first_time == TRUE)
# {
# printf $outfh "machine,lane";
# foreach my $key (sort keys %{$totals{$section}{by_machine_lane}{$machine}{$lane}})
# {
# printf $outfh ",%s", $key;
# }
# printf $outfh "\n";
# $first_time = FALSE;
# }
# #
# printf $outfh "%s,%s", $machine, $lane;
# foreach my $key (sort keys %{$totals{$section}{by_machine_lane}{$machine}{$lane}})
# {
# printf $outfh ",%s", $totals{$section}{by_machine_lane}{$machine}{$lane}{$key};
# }
# printf $outfh "\n";
# }
# }
# close($outfh);
# #
# ###############################################################
# #
# $section = TIME;
# #
# printf $log_fh "\n%d: Export Total Data For %s by Product:\n", __LINE__, $section;
# #
# $first_time = TRUE;
# $outfnm = "${export_dir}/TIME_TOTALS_BY_PRODUCT.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# foreach my $product (sort keys %{$totals{$section}{by_product}})
# {
# if ($first_time == TRUE)
# {
# printf $outfh "product";
# foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{totals}})
# {
# printf $outfh ",%s", $key;
# }
# printf $outfh "\n";
# $first_time = FALSE;
# }
# #
# printf $outfh "%s", $product;
# foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{totals}})
# {
# printf $outfh ",%s", $totals{$section}{by_product}{$product}{totals}{$key};
# }
# printf $outfh "\n";
# }
# close($outfh);
# #
# ###############################################################
# #
# $section = TIME;
# #
# printf $log_fh "\n%d: Export Data For %s by Product and Machine:\n", __LINE__, $section;
# #
# $first_time = TRUE;
# $outfnm = "${export_dir}/TIME_BY_PRODUCT_MACHINE.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# foreach my $product (sort keys %{$totals{$section}{by_product}})
# {
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine}})
# {
# if ($first_time == TRUE)
# {
# printf $outfh "product,machine";
# foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{by_machine}{$machine}})
# {
# printf $outfh ",%s", $key;
# }
# printf $outfh "\n";
# $first_time = FALSE;
# }
# #
# printf $outfh "%s,%s", $product, $machine;
# foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{by_machine}{$machine}})
# {
# printf $outfh ",%s", $totals{$section}{by_product}{$product}{by_machine}{$machine}{$key};
# }
# printf $outfh "\n";
# }
# }
# close($outfh);
# #
# printf $log_fh "\n%d: Export Data For %s by Product, Machine and Lane:\n", __LINE__, $section;
# #
# $first_time = TRUE;
# $outfnm = "${export_dir}/TIME_BY_PRODUCT_MACHINE_LANE.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# foreach my $product (sort keys %{$totals{$section}{by_product}})
# {
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane}})
# {
# foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane}{$machine}})
# {
# if ($first_time == TRUE)
# {
# printf $outfh "product,machine,lane";
# foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{by_machine_lane}{$machine}{$lane}})
# {
# printf $outfh ",%s", $key;
# }
# printf $outfh "\n";
# $first_time = FALSE;
# }
# #
#                   printf $outfh "%s,%s,%s", $product, $machine, $lane;
# foreach my $key (sort keys %{$totals{$section}{by_product}{$product}{by_machine_lane}{$machine}{$lane}})
# {
# printf $outfh ",%s", $totals{$section}{by_product}{$product}{by_machine_lane}{$machine}{$lane}{$key};
# }
# printf $outfh "\n";
# }
# }
# }
# close($outfh);
# }
# #
# sub export_u01_feeder_data
# {
# my ($pdb) = @_;
# #
# ###############################################################
# #
# my $section = MOUNTPICKUPFEEDER;
# #
# printf $log_fh "\n%d: Export Data For %s by Machine, Lane, Stage, FAdd, FSAdd, ReelID:\n", __LINE__, $section;
# #
# my $outfnm = "${export_dir}/FEEDER_BY_MACHINE_LANE_STAGE_FADD_FSADD_REELID.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open(my $outfh, ">" , $outfnm) || die $!;
# foreach my $pcol (@feeder_export_cols)
# {
# printf $outfh $pcol->{format}, $pcol->{name};
# }
# printf $outfh "\n";
# #
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_fadd_fsadd_reelid}})
# {
# foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}})
# {
# foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}})
# {
# foreach my $fadd (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}})
# {
# foreach my $fsadd (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}{$fadd}})
# {
# foreach my $reelid (sort keys %{$totals{$section}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}{$fadd}{$fsadd}})
# {
# printf $outfh "%s,%s,%s,%s,%s,%s", $machine, $lane, $stage, $fadd, $fsadd, $reelid;
# foreach my $col (@feeder_count_cols)
# {
# printf $outfh ",%d", $totals{$section}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}{$fadd}{$fsadd}{$reelid}{$col};
# }
# printf $outfh "\n";
# }
# }
# }
# }
# }
# }
# close($outfh);
# #
# printf $log_fh "\n%d: Export Data For %s by Machine, Lane, Stage, FAdd, FSAdd:\n", __LINE__, $section;
# #
# $outfnm = "${export_dir}/FEEDER_BY_MACHINE_LANE_STAGE_FADD_FSADD.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# foreach my $pcol (@feeder_export_cols2)
# {
# printf $outfh $pcol->{format}, $pcol->{name};
# }
# printf $outfh "\n";
# #
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_fadd_fsadd}})
# {
# foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_fadd_fsadd}{$machine}})
# {
# foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}})
# {
# foreach my $fadd (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}{$stage}})
# {
# foreach my $fsadd (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}{$stage}{$fadd}})
# {
# printf $outfh "%s,%s,%s,%s,%s", $machine, $lane, $stage, $fadd, $fsadd;
# foreach my $col (@feeder_count_cols)
# {
# printf $outfh ",%d", $totals{$section}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}{$stage}{$fadd}{$fsadd}{$col};
# }
# printf $outfh "\n";
# }
# }
# }
# }
# }
# close($outfh);
# #
# printf $log_fh "\n%d: Export Data For %s by Machine, Lane, Stage, TableNo:\n", __LINE__, $section;
# #
# $outfnm = "${export_dir}/FEEDER_BY_MACHINE_LANE_STAGE_TABLE_NO.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# foreach my $pcol (@feeder_export_cols3)
# {
# printf $outfh $pcol->{format}, $pcol->{name};
# }
# printf $outfh "\n";
# #
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_table_no}})
# {
# foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_table_no}{$machine}})
# {
# foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_table_no}{$machine}{$lane}})
# {
# foreach my $table_no (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_table_no}{$machine}{$lane}{$stage}})
# {
# printf $outfh "%s,%s,%s,%s", $machine, $lane, $stage, $table_no;
# foreach my $col (@feeder_count_cols)
# {
# printf $outfh ",%d", $totals{$section}{by_machine_lane_stage_table_no}{$machine}{$lane}{$stage}{$table_no}{$col};
# }
# printf $outfh "\n";
# }
# }
# }
# }
# close($outfh);
# #
# ###############################################################
# #
# $section = MOUNTPICKUPFEEDER;
# #
# printf $log_fh "\n%d: Export Data For %s by Product, Machine, Lane, Stage, FAdd, FSAdd, ReelID:\n", __LINE__, $section;
# #
# $outfnm = "${export_dir}/FEEDER_BY_PRODUCT_MACHINE_LANE_STAGE_FADD_FSADD_REELID.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# printf $outfh "product,";
# foreach my $pcol (@feeder_export_cols)
# {
# printf $outfh $pcol->{format}, $pcol->{name};
# }
# printf $outfh "\n";
# #
# foreach my $product (sort keys %{$totals{$section}{by_product}})
# {
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd_reelid}})
# {
# foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}})
# {
# foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}})
# {
# foreach my $fadd (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}})
# {
# foreach my $fsadd (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}{$fadd}})
# {
# foreach my $reelid (sort keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}{$fadd}{$fsadd}})
# {
# printf $outfh "%s,%s,%s,%s,%s,%s,%s", $product, $machine, $lane, $stage, $fadd, $fsadd, $reelid;
# foreach my $col (@feeder_count_cols)
# {
# printf $outfh ",%d", $totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}{$fadd}{$fsadd}{$reelid}{$col};
# }
# printf $outfh "\n";
# }
# }
# }
# }
# }
# }
# }
# close($outfh);
# #
# printf $log_fh "\n%d: Export Data For %s by Product, Machine, Lane, Stage, FAdd, FSAdd:\n", __LINE__, $section;
# #
# $outfnm = "${export_dir}/FEEDER_BY_PRODUCT_MACHINE_LANE_STAGE_FADD_FSADD.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# printf $outfh "product,";
# foreach my $pcol (@feeder_export_cols2)
# {
# printf $outfh $pcol->{format}, $pcol->{name};
# }
# printf $outfh "\n";
# #
# foreach my $product (sort keys %{$totals{$section}{by_product}})
# {
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd}})
# {
# foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd}{$machine}})
# {
# foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}})
# {
# foreach my $fadd (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}{$stage}})
# {
# foreach my $fsadd (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}{$stage}{$fadd}})
# {
# printf $outfh "%s,%s,%s,%s,%s,%s", $product, $machine, $lane, $stage, $fadd, $fsadd;
# foreach my $col (@feeder_count_cols)
# {
# printf $outfh ",%d", $totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}{$stage}{$fadd}{$fsadd}{$col};
# }
# printf $outfh "\n";
# }
# }
# }
# }
# }
# }
# close($outfh);
# #
# printf $log_fh "\n%d: Export Data For %s by Product, Machine, Lane, Stage, TableNo:\n", __LINE__, $section;
# #
# $outfnm = "${export_dir}/FEEDER_BY_PRODUCT_MACHINE_LANE_STAGE_TABLE_NO.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# printf $outfh "product,";
# foreach my $pcol (@feeder_export_cols3)
# {
# printf $outfh $pcol->{format}, $pcol->{name};
# }
# printf $outfh "\n";
# #
# foreach my $product (sort keys %{$totals{$section}{by_product}})
# {
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_table_no}})
# {
# foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_table_no}{$machine}})
# {
# foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_table_no}{$machine}{$lane}})
# {
# foreach my $table_no (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_table_no}{$machine}{$lane}{$stage}})
# {
# printf $outfh "%s,%s,%s,%s,%s", $product, $machine, $lane, $stage, $table_no;
# foreach my $col (@feeder_count_cols)
# {
# printf $outfh ",%d", $totals{$section}{by_product}{$product}{by_machine_lane_stage_table_no}{$machine}{$lane}{$stage}{$table_no}{$col};
# }
# printf $outfh "\n";
# }
# }
# }
# }
# }
# close($outfh);
# }
# #
# sub export_u01_nozzle_data
# {
# my ($pdb) = @_;
# #
# ###############################################################
# #
# my $section = MOUNTPICKUPNOZZLE;
# #
# printf $log_fh "\n%d: Export Data For %s by Machine, Lane, Stage, NHAdd, NCAdd, Blkserial:\n", __LINE__, $section;
# #
# my $outfnm = "${export_dir}/NOZZLE_BY_MACHINE_LANE_STAGE_NHADD_NCADD_BLKSERIAL.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open(my $outfh, ">" , $outfnm) || die $!;
# foreach my $pcol (@nozzle_export_cols)
# {
# printf $outfh $pcol->{format}, $pcol->{name};
# }
# printf $outfh "\n";
# #
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_nhadd_ncadd_blkserial}})
# {
# foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}})
# {
# foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}})
# {
# foreach my $nhadd (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}})
# {
# foreach my $ncadd (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}{$nhadd}})
# {
# foreach my $blkserial (sort keys %{$totals{$section}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}})
# {
# printf $outfh "%s,%s,%s,%s,%s,%s",
# $machine, $lane, $stage, $nhadd, $ncadd, $blkserial;
# foreach my $col (@nozzle_count_cols)
# {
# printf $outfh ",%d",
# $totals{$section}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}{$blkserial}{$col};
# }
# printf $outfh "\n";
# }
# }
# }
# }
# }
# }
# close($outfh);
# #
# printf $log_fh "\n%d: Export Data For %s by Machine, Lane, Stage, NHAdd, NCAdd:\n", __LINE__, $section;
# #
# $outfnm = "${export_dir}/NOZZLE_BY_MACHINE_LANE_STAGE_NHADD_NCADD.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# foreach my $pcol (@nozzle_export_cols2)
# {
# printf $outfh $pcol->{format}, $pcol->{name};
# }
# printf $outfh "\n";
# #
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_nhadd_ncadd}})
# {
# foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_nhadd_ncadd}{$machine}})
# {
# foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}})
# {
# foreach my $nhadd (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}{$stage}})
# {
# foreach my $ncadd (sort { $a <=> $b } keys %{$totals{$section}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}{$stage}{$nhadd}})
# {
# printf $outfh "%s,%s,%s,%s,%s", $machine, $lane, $stage, $nhadd, $ncadd;
# foreach my $col (@nozzle_count_cols)
# {
# printf $outfh ",%d", $totals{$section}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}{$col};
# }
# printf $outfh "\n";
# }
# }
# }
# }
# }
# close($outfh);
# #
# ###############################################################
# #
# $section = MOUNTPICKUPNOZZLE;
# #
# printf $log_fh "\n%d: Export Data For %s by Product, Machine, Lane, Stage, NHAdd, NCAdd, Blkserial:\n", __LINE__, $section;
# #
# $outfnm = "${export_dir}/NOZZLE_BY_PRODUCT_MACHINE_LANE_STAGE_NHADD_NCADD_BLKSERIAL.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# printf $outfh "product,";
# foreach my $pcol (@nozzle_export_cols)
# {
# printf $outfh $pcol->{format}, $pcol->{name};
# }
# printf $outfh "\n";
# #
# foreach my $product (sort keys %{$totals{$section}{by_product}})
# {
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd_blkserial}})
# {
# foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}})
# {
# foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}})
# {
# foreach my $nhadd (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}})
# {
# foreach my $ncadd (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}{$nhadd}})
# {
# foreach my $blkserial (sort keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}})
# {
# printf $outfh "%s,%s,%s,%s,%s,%s,%s", $product, $machine, $lane, $stage, $nhadd, $ncadd, $blkserial;
# foreach my $col (@nozzle_count_cols)
# {
# printf $outfh ",%d", $totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}{$blkserial}{$col};
# }
# printf $outfh "\n";
# }
# }
# }
# }
# }
# }
# }
# close($outfh);
# #
# printf $log_fh "\n%d: Export Data For %s by Product, Machine, Lane, Stage, NHAdd, NCAdd:\n", __LINE__, $section;
# #
# $outfnm = "${export_dir}/NOZZLE_BY_PRODUCT_MACHINE_LANE_STAGE_NHADD_NCADD.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# printf $outfh "product,";
# foreach my $pcol (@nozzle_export_cols2)
# {
# printf $outfh $pcol->{format}, $pcol->{name};
# }
# printf $outfh "\n";
# #
# foreach my $product (sort keys %{$totals{$section}{by_product}})
# {
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd}})
# {
# foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd}{$machine}})
# {
# foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}})
# {
# foreach my $nhadd (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}{$stage}})
# {
# foreach my $ncadd (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}{$stage}{$nhadd}})
# {
# printf $outfh "%s,%s,%s,%s,%s,%s",
# $product, $machine, $lane, $stage, $nhadd, $ncadd;
# foreach my $col (@nozzle_count_cols)
# {
# printf $outfh ",%d", $totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}{$col};
# }
# printf $outfh "\n";
# }
# }
# }
# }
# }
# }
# close($outfh);
# }
# #
# sub export_u01_nozzle_data_keys
# {
# my ($pdb, $nmkey1, $nmkey2, $label) = @_;
# #
#     my $NMKEY1 = $nmkey1;
#     $NMKEY1 =~ tr/a-z/A-Z/;
#     my $NMKEY2 = $nmkey2;
#     $NMKEY2 =~ tr/a-z/A-Z/;
#     my $LABEL = $label;
#     $LABEL =~ tr/a-z/A-Z/;
# #
# ###############################################################
# #
# my $section = MOUNTPICKUPNOZZLE;
# #
# printf $log_fh "\n%d: Export Data For %s by Machine, Lane, Stage, $nmkey1, $nmkey2, Blkserial:\n", __LINE__, $section;
# #
# my $outfnm = "${export_dir}/NOZZLE_BY_MACHINE_LANE_STAGE_${NMKEY1}_${NMKEY2}_BLKSERIAL.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open(my $outfh, ">" , $outfnm) || die $!;
# foreach my $pcol (@{$nozzle_export_cols_new{$label}})
# {
# printf $outfh $pcol->{format}, $pcol->{name};
# }
# printf $outfh "\n";
# #
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{$label}{by_machine_lane_stage_key1_key2_blkserial}})
# {
# foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}})
# {
# foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}})
# {
# foreach my $key1 (sort { $a <=> $b } keys %{$totals{$section}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}})
# {
# foreach my $key2 (sort { $a <=> $b } keys %{$totals{$section}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}{$key1}})
# {
# foreach my $blkserial (sort keys %{$totals{$section}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}{$key1}{$key2}})
# {
# printf $outfh "%s,%s,%s,%s,%s,%s",
# $machine, $lane, $stage, $key1, $key2, $blkserial;
# foreach my $col (@nozzle_count_cols)
# {
# printf $outfh ",%d",
# $totals{$section}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}{$key1}{$key2}{$blkserial}{$col};
# }
# printf $outfh "\n";
# }
# }
# }
# }
# }
# }
# close($outfh);
# #
# printf $log_fh "\n%d: Export Data For %s by Machine, Lane, Stage, $nmkey1, $nmkey2:\n", __LINE__, $section;
# #
# $outfnm = "${export_dir}/NOZZLE_BY_MACHINE_LANE_STAGE_${NMKEY1}_${NMKEY2}.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# foreach my $pcol (@{$nozzle_export_cols2_new{$label}})
# {
# printf $outfh $pcol->{format}, $pcol->{name};
# }
# printf $outfh "\n";
# #
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{$label}{by_machine_lane_stage_key1_key2}})
# {
# foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{$label}{by_machine_lane_stage_key1_key2}{$machine}})
# {
# foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}})
# {
# foreach my $key1 (sort { $a <=> $b } keys %{$totals{$section}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}{$stage}})
# {
# foreach my $key2 (sort { $a <=> $b } keys %{$totals{$section}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}{$stage}{$key1}})
# {
# printf $outfh "%s,%s,%s,%s,%s", $machine, $lane, $stage, $key1, $key2;
# foreach my $col (@nozzle_count_cols)
# {
# printf $outfh ",%d", $totals{$section}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}{$stage}{$key1}{$key2}{$col};
# }
# printf $outfh "\n";
# }
# }
# }
# }
# }
# close($outfh);
# #
# ###############################################################
# #
# $section = MOUNTPICKUPNOZZLE;
# #
# printf $log_fh "\n%d: Export Data For %s by Product, Machine, Lane, Stage, $nmkey1, $nmkey2, Blkserial:\n", __LINE__, $section;
# #
# $outfnm = "${export_dir}/NOZZLE_BY_PRODUCT_MACHINE_LANE_STAGE_${NMKEY1}_${NMKEY2}_BLKSERIAL.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# printf $outfh "product,";
# foreach my $pcol (@{$nozzle_export_cols_new{$label}})
# {
# printf $outfh $pcol->{format}, $pcol->{name};
# }
# printf $outfh "\n";
# #
# foreach my $product (sort keys %{$totals{$section}{by_product}})
# {
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2_blkserial}})
# {
# foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}})
# {
# foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}})
# {
# foreach my $key1 (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}})
# {
# foreach my $key2 (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}{$key1}})
# {
# foreach my $blkserial (sort keys %{$totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}{$key1}{$key2}})
# {
# printf $outfh "%s,%s,%s,%s,%s,%s,%s", $product, $machine, $lane, $stage, $key1, $key2, $blkserial;
# foreach my $col (@nozzle_count_cols)
# {
# printf $outfh ",%d", $totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}{$key1}{$key2}{$blkserial}{$col};
# }
# printf $outfh "\n";
# }
# }
# }
# }
# }
# }
# }
# close($outfh);
# #
# printf $log_fh "\n%d: Export Data For %s by Product, Machine, Lane, Stage, ${nmkey1}, ${nmkey2}:\n", __LINE__, $section;
# #
# $outfnm = "${export_dir}/NOZZLE_BY_PRODUCT_MACHINE_LANE_STAGE_${NMKEY1}_${NMKEY2}.csv";
# printf $log_fh "%d: File %s already exists\n", __LINE__, $outfnm if ( -e $outfnm);
# open($outfh, ">" , $outfnm) || die $!;
# printf $outfh "product,";
# foreach my $pcol (@{$nozzle_export_cols2_new{$label}})
# {
# printf $outfh $pcol->{format}, $pcol->{name};
# }
# printf $outfh "\n";
# #
# foreach my $product (sort keys %{$totals{$section}{by_product}})
# {
# foreach my $machine (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2}})
# {
# foreach my $lane (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2}{$machine}})
# {
# foreach my $stage (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}})
# {
# foreach my $key1 (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}{$stage}})
# {
# foreach my $key2 (sort { $a <=> $b } keys %{$totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}{$stage}{$key1}})
# {
# printf $outfh "%s,%s,%s,%s,%s,%s",
# $product, $machine, $lane, $stage, $key1, $key2;
# foreach my $col (@nozzle_count_cols)
# {
# printf $outfh ",%d", $totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}{$stage}{$key1}{$key2}{$col};
# }
# printf $outfh "\n";
# }
# }
# }
# }
# }
# }
# close($outfh);
# }
# #
# sub export_u01_nozzle_data_new
# {
# my ($pdb) = @_;
# #
# export_u01_nozzle_data_keys($pdb,
# NZ_KEY_NHADD,
# NZ_KEY_NCADD,
# NZ_LABEL_NHADD_NCADD);
# export_u01_nozzle_data_keys($pdb,
# NZ_KEY_HEAD,
# NZ_KEY_NHADD,
# NZ_LABEL_HEAD_NHADD);
# export_u01_nozzle_data_keys($pdb,
# NZ_KEY_HEAD,
# NZ_KEY_NCADD,
# NZ_LABEL_HEAD_NCADD);
# }
# #
# sub export_u01_data
# {
# my ($pdb) = @_;
# #
# export_u01_count_data($pdb);
# export_u01_time_data($pdb);
# export_u01_feeder_data($pdb);
# if (($proc_options & PROC_OPT_USEOLDNZ) != 0)
# {
# export_u01_nozzle_data($pdb);
# }
# else
# {
# export_u01_nozzle_data_new($pdb);
# }
# }
# #
# ######################################################################
# #
# # high-level u01 file audit functions
# #
# sub calculate_u01_name_value_delta
# {
# my ($pdb, $pu01, $section) = @_;
# #
# my $filename = $pu01->{file_name};
# #
# my $machine = $pu01->{mach_no};
# my $lane = $pu01->{lane};
# my $stage = $pu01->{stage};
# my $output_no = $pu01->{output_no};
# #
# foreach my $key (keys %{$pu01->{$section}->{data}})
# {
# my $delta = 0;
# #
# if (exists($pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$key}))
# {
# $delta =
# $pu01->{$section}->{data}->{$key} -
# $pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$key};
# #
# if ($delta >= 0)
# {
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key} = $delta;
# }
# elsif (($proc_options & PROC_OPT_USENEGDELTS) != 0)
# {
# printf $log_fh "%d: WARNING: [%s] using NEGATIVE delta for %s key %s: %d\n", __LINE__, $filename, $section, $key, $delta if ($verbose >= MINVERBOSE);
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key} = $delta;
# }
# else
# {
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key} = 0;
# printf $log_fh "%d: WARNING: [%s] setting NEGATIVE delta (%d) for %s key %s to ZERO\n", __LINE__, $filename, $delta, $section, $key if ($verbose >= MINVERBOSE);
# }
# #
# printf $log_fh "%d: %s: %s = %d\n", __LINE__, $section, $key, $delta if ($verbose >= MAXVERBOSE);
# }
# else
# {
# printf $log_fh "%d: ERROR: [%s] %s key %s NOT found in cache. Ignoring counts (%d).\n", __LINE__, $filename, $section, $key, $pu01->{$section}->{data}->{$key};
#             die "ERROR: [$filename] $section key $key NOT found in cache. Stopped";
# }
# }
# }
# #
# sub copy_u01_name_value_cache
# {
# my ($pdb, $pu01, $section) = @_;
# #
# my $filename = $pu01->{file_name};
# #
# my $machine = $pu01->{mach_no};
# my $lane = $pu01->{lane};
# my $stage = $pu01->{stage};
# my $output_no = $pu01->{output_no};
# #
# foreach my $key (keys %{$pu01->{$section}->{data}})
# {
# $pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$key} =
# $pu01->{$section}->{data}->{$key};
# }
# }
# #
# sub copy_u01_name_value_delta
# {
# my ($pdb, $pu01, $section) = @_;
# #
# my $filename = $pu01->{file_name};
# #
# my $machine = $pu01->{mach_no};
# my $lane = $pu01->{lane};
# my $stage = $pu01->{stage};
# my $output_no = $pu01->{output_no};
# #
# delete $pdb->{$section}->{$machine}{$lane}{$stage}{delta};
# #
# foreach my $key (keys %{$pu01->{$section}->{data}})
# {
# my $delta = $pu01->{$section}->{data}->{$key};
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key} = $delta;
# printf $log_fh "%d: %s: %s = %d\n", __LINE__, $section, $key, $delta
# if ($verbose >= MAXVERBOSE);
# }
# }
# #
# sub tabulate_u01_name_value_delta
# {
# my ($pdb, $pu01, $section) = @_;
# #
# my $filename = $pu01->{file_name};
# #
# my $machine = $pu01->{mach_no};
# my $lane = $pu01->{lane};
# my $stage = $pu01->{stage};
# my $output_no = $pu01->{output_no};
# #
# my $product = $pdb->{product}{u01}{$machine}{$lane}{$stage};
# #
# foreach my $key (keys %{$pu01->{$section}->{data}})
# {
# #
# # product dependent totals
# #
# if (exists($totals{$section}{by_product}{$product}{totals}{$key}))
# {
# $totals{$section}{by_product}{$product}{totals}{$key} +=
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key};
# }
# else
# {
# $totals{$section}{by_product}{$product}{totals}{$key} =
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key};
# }
# printf $log_fh "%d: %s %s %s total=%d\n", __LINE__, $product, $section, $key, $totals{$section}{by_product}{$product}{totals}{$key} if ($verbose >= MAXVERBOSE);
# #
# if (exists($totals{$section}{by_product}{$product}{by_machine}{$machine}{$key}))
# {
# $totals{$section}{by_product}{$product}{by_machine}{$machine}{$key} +=
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key};
# }
# else
# {
# $totals{$section}{by_product}{$product}{by_machine}{$machine}{$key} =
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key};
# }
# printf $log_fh "%d: %s %s %s %s total=%d\n", __LINE__, $product, $section, $machine, $key, $totals{$section}{by_product}{$product}{by_machine}{$machine}{$key} if ($verbose >= MAXVERBOSE);
# #
# if (exists($totals{$section}{by_product}{$product}{by_machine_lane}{$machine}{$lane}{$key}))
# {
# $totals{$section}{by_product}{$product}{by_machine_lane}{$machine}{$lane}{$key} +=
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key};
# }
# else
# {
# $totals{$section}{by_product}{$product}{by_machine_lane}{$machine}{$lane}{$key} =
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key};
# }
# printf $log_fh "%d: %s %s %s %s %s total=%d\n", __LINE__, $product, $section, $machine, $lane, $key, $totals{$section}{by_product}{$product}{by_machine_lane}{$machine}{$lane}{$key} if ($verbose >= MAXVERBOSE);
# #
# if (exists($totals{$section}{by_product}{$product}{by_machine_lane_stage}{$machine}{$lane}{$stage}{$key}))
# {
# $totals{$section}{by_product}{$product}{by_machine_lane_stage}{$machine}{$lane}{$stage}{$key} +=
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key};
# }
# else
# {
# $totals{$section}{by_product}{$product}{by_machine_lane_stage}{$machine}{$lane}{$stage}{$key} =
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key};
# }
# printf $log_fh "%d: %s %s %s %s %s %s total=%d\n", __LINE__, $product, $section, $machine, $lane, $stage, $key, $totals{$section}{by_product}{$product}{by_machine_lane_stage}{$machine}{$lane}{$stage}{$key} if ($verbose >= MAXVERBOSE);
# #
# # product independent totals
# #
# if (exists($totals{$section}{totals}{$key}))
# {
# $totals{$section}{totals}{$key} +=
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key};
# }
# else
# {
# $totals{$section}{totals}{$key} =
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key};
# }
# #
# if (exists($totals{$section}{by_machine}{$machine}{$key}))
# {
# $totals{$section}{by_machine}{$machine}{$key} +=
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key};
# }
# else
# {
# $totals{$section}{by_machine}{$machine}{$key} =
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key};
# }
# #
# if (exists($totals{$section}{by_machine_lane}{$machine}{$lane}{$key}))
# {
# $totals{$section}{by_machine_lane}{$machine}{$lane}{$key} +=
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key};
# }
# else
# {
# $totals{$section}{by_machine_lane}{$machine}{$lane}{$key} =
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key};
# }
# #
# if (exists($totals{$section}{by_machine_lane_stage}{$machine}{$lane}{$stage}{$key}))
# {
# $totals{$section}{by_machine_lane_stage}{$machine}{$lane}{$stage}{$key} +=
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key};
# }
# else
# {
# $totals{$section}{by_machine_lane_stage}{$machine}{$lane}{$stage}{$key} =
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$key};
# }
# }
# }
# #
# sub audit_u01_name_value
# {
# my ($pdb, $pu01, $section) = @_;
# #
# my $filename = $pu01->{file_name};
# my $machine = $pu01->{mach_no};
# my $lane = $pu01->{lane};
# my $stage = $pu01->{stage};
# my $output_no = $pu01->{output_no};
# #
# return if ((($proc_options & PROC_OPT_IGNRESET12) != 0) &&
# ($output_no == TIMER_NOT_RUNNING) &&
# ($section eq COUNT));
# #
# my $mjsid = '';
# my $lotname = '';
# my $lotnumber = 0;
# #
# my $change_over = $pdb->{change_over}{u01}{$machine}{$lane}{$stage};
# printf $log_fh "%d: Change Over: %s\n", __LINE__, $change_over if ($verbose >= MAXVERBOSE);
# #
# get_product_info($pu01, \$mjsid, \$lotname, \$lotnumber);
# #
# printf $log_fh "\n%d: SECTION : %s\n", __LINE__, $section
# if ($verbose >= MAXVERBOSE);
# #
# if ($verbose >= MAXVERBOSE)
# {
# printf $log_fh "%d: MACHINE : %s\n", __LINE__, $machine;
# printf $log_fh "%d: LANE : %d\n", __LINE__, $lane;
# printf $log_fh "%d: STAGE : %d\n", __LINE__, $stage;
# printf $log_fh "%d: OUTPUT NO: %s\n", __LINE__, $output_no;
# printf $log_fh "%d: FILE RECS : %d\n", __LINE__, scalar(@{$pu01->{data}});
# printf $log_fh "%d: %s RECS: %d\n", __LINE__, $section, scalar(keys %{$pu01->{$section}->{data}});
# }
# #
# # output 3,4,5,12 U01 files have both Time and Count sections.
# # these output types can all be treated the same.
# #
# if (($output_no == PROD_COMPLETE) ||
# ($output_no == PROD_COMPLETE_LATER) ||
# ($output_no == DETECT_CHANGE) ||
# ($output_no == TIMER_NOT_RUNNING))
# {
# if ( ! exists($pdb->{$section}->{$machine}{$lane}{$stage}{state}))
# {
# #
# # first file of any of these types to be processed.
# #
# printf $log_fh "%d: ENTRY STATE: UNKNOWN\n", __LINE__
# if ($verbose >= MAXVERBOSE);
# #
# delete $pdb->{$section}->{$machine}{$lane}{$stage}{cache};
# copy_u01_name_value_cache($pdb, $pu01, $section);
# #
# $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
# }
# else
# {
# my $state = $pdb->{$section}->{$machine}{$lane}{$stage}{state};
# #
# printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__, $state if ($verbose >= MAXVERBOSE);
# #
# if ($change_over == TRUE)
# {
# copy_u01_name_value_delta($pdb, $pu01, $section);
# tabulate_u01_name_value_delta($pdb, $pu01, $section);
# copy_u01_name_value_cache($pdb, $pu01, $section);
# #
# $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
# }
# elsif ($state eq DELTA)
# {
# calculate_u01_name_value_delta($pdb, $pu01, $section);
# tabulate_u01_name_value_delta($pdb, $pu01, $section);
# copy_u01_name_value_cache($pdb, $pu01, $section);
# #
# $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
# }
# elsif ($state eq RESET)
# {
# copy_u01_name_value_delta($pdb, $pu01, $section);
# tabulate_u01_name_value_delta($pdb, $pu01, $section);
# copy_u01_name_value_cache($pdb, $pu01, $section);
# #
# $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
# }
# elsif ($state eq BASELINE)
# {
# delete $pdb->{$section}->{$machine}{$lane}{$stage}{cache};
# copy_u01_name_value_cache($pdb, $pu01, $section);
# #
# $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
# }
# else
# {
# die "ERROR: unknown $section state: $state. Stopped";
# }
# printf $log_fh "%d: EXIT STATE: %s\n", __LINE__, $state if ($verbose >= MAXVERBOSE);
# }
# }
# elsif (($output_no == MANUAL_CLEAR) ||
# ($output_no == AUTO_CLEAR))
# {
# #
# # reset files have no data. they indicate the machine
# # and counters were all reset to zero.
# #
# my $state = $pdb->{$section}->{$machine}{$lane}{$stage}{state};
# printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__, $state if ($verbose >= MAXVERBOSE);
# #
# delete $pdb->{$section}->{$machine}{$lane}{$stage}{cache};
# #
# $pdb->{$section}->{$machine}{$lane}{$stage}{state} = RESET;
# printf $log_fh "%d: EXIT STATE: %s\n", __LINE__, $state if ($verbose >= MAXVERBOSE);
# }
# else
# {
# die "ERROR: unknown output type: $output_no. Stopped";
# }
# #
# return;
# }
# #
# ######################################################################
# #
# # routines for feeder section
# #
# sub calculate_u01_feeder_delta
# {
# my ($pdb, $pu01) = @_;
# #
# my $section = MOUNTPICKUPFEEDER;
# #
# my $filename = $pu01->{file_name};
# my $machine = $pu01->{mach_no};
# my $lane = $pu01->{lane};
# my $stage = $pu01->{stage};
# my $output_no = $pu01->{output_no};
# #
# my $pcols = $pu01->{$section}->{column_names};
# #
# delete $pdb->{$section}->{$machine}{$lane}{$stage}{delta};
# #
# foreach my $prow (@{$pu01->{$section}->{data}})
# {
# my $fadd = $prow->{FAdd};
# my $fsadd = $prow->{FSAdd};
# my $reelid = $prow->{ReelID};
# #
# my $is_tray = substr($fadd, -4, 2);
# if ($is_tray > 0)
# {
# $is_tray = TRUE;
# printf $log_fh "%d: [%s] %s IS tray part (%s) fadd: %s, fsadd: %s\n", __LINE__, $filename, $section, $is_tray, $fadd, $fsadd
# if ($verbose >= MAXVERBOSE);
# }
# else
# {
# $is_tray = FALSE;
# printf $log_fh "%d: [%s] %s IS NOT tray part (%s) fadd: %s, fsadd: %s\n", __LINE__, $filename, $section, $is_tray, $fadd, $fsadd
# if ($verbose >= MAXVERBOSE);
# }
# #
# if ( ! exists($pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$fadd}{$fsadd}{data}))
# {
# #
# # unlike name-value (count,time) sections, it is possible
# # to get new entries which have not been seen before. for
# # example, new reelids or new feeders may not be in the
# # previous u01 file, but appear as new. in those cases,
# # take the counts as is.
# #
# printf $log_fh "%d: WARNING: [%s] %s FAdd %s, FSAdd %s NOT found in cache. Taking all counts as is.\n", __LINE__, $filename, $section, $fadd, $fsadd if ($verbose >= MINVERBOSE);
# foreach my $col (@{$pcols})
# {
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col} = $prow->{$col};
# }
# }
# else
# {
# my $cache_reelid = $pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$fadd}{$fsadd}{data}{ReelID};
# my $cache_filename = $pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$fadd}{$fsadd}{file_name};
# if (($reelid eq $cache_reelid) || ($is_tray == TRUE))
# {
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{ReelID} = $reelid;
# #
# foreach my $col (@feeder_count_cols)
# {
# my $u01_value = $prow->{$col};
# my $cache_value = $pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$fadd}{$fsadd}{data}{$col};
# #
# my $delta = $u01_value - $cache_value;
# #
# if ($delta >= 0)
# {
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col} = $delta;
# }
# elsif (($proc_options & PROC_OPT_USENEGDELTS) != 0)
# {
# printf $log_fh "%d: WARNING: [%s] [%s] %s FAdd %s, FSAdd %s using NEGATIVE delta for key %s: %d\n", __LINE__, $filename, $cache_filename, $section, $fadd, $fsadd, $col, $delta if ($verbose >= MINVERBOSE);
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col} = $delta;
# }
# else
# {
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col} = 0;
# printf $log_fh "%d: WARNING: [%s] [%s] %s FAdd %s, FSAdd %s setting NEGATIVE delta (%d) for key %s to ZERO; current value %d, cache value %d\n", __LINE__, $filename, $cache_filename, $section, $fadd, $fsadd, $delta, $col, $u01_value, $cache_value if ($verbose >= MINVERBOSE);
# }
# }
# }
# else
# {
# printf $log_fh "%d: WARNING: [%s] %s FAdd %s, FSAdd %s REELID CHANGED: CACHED %s, CURRENT U01 %s\n", __LINE__, $filename, $section, $fadd, $fsadd, $cache_reelid, $reelid if ($verbose >= MINVERBOSE);
# #
# delete $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data};
# #
# foreach my $col (@{$pcols})
# {
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col} = $prow->{$col};
# }
# }
# }
# }
# }
# #
# sub copy_u01_feeder_cache
# {
# my ($pdb, $pu01, $state) = @_;
# #
# my $section = MOUNTPICKUPFEEDER;
# #
# my $filename = $pu01->{file_name};
# my $machine = $pu01->{mach_no};
# my $lane = $pu01->{lane};
# my $stage = $pu01->{stage};
# my $output_no = $pu01->{output_no};
# #
# my $pcols = $pu01->{$section}->{column_names};
# #
# foreach my $prow (@{$pu01->{$section}->{data}})
# {
# my $fadd = $prow->{FAdd};
# my $fsadd = $prow->{FSAdd};
# #
# foreach my $col (@{$pcols})
# {
# $pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$fadd}{$fsadd}{data}{$col} = $prow->{$col};
# }
# #
# $pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$fadd}{$fsadd}{state} = $state;
# $pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$fadd}{$fsadd}{file_name} = $filename;
# }
# }
# #
# sub copy_u01_feeder_delta
# {
# my ($pdb, $pu01) = @_;
# #
# my $section = MOUNTPICKUPFEEDER;
# #
# my $filename = $pu01->{file_name};
# my $machine = $pu01->{mach_no};
# my $lane = $pu01->{lane};
# my $stage = $pu01->{stage};
# my $output_no = $pu01->{output_no};
# #
# my $pcols = $pu01->{$section}->{column_names};
# #
# delete $pdb->{$section}->{$machine}{$lane}{$stage}{delta};
# #
# foreach my $prow (@{$pu01->{$section}->{data}})
# {
# my $fadd = $prow->{FAdd};
# my $fsadd = $prow->{FSAdd};
# #
# foreach my $col (@{$pcols})
# {
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col} = $prow->{$col};
# }
# }
# }
# #
# sub tabulate_u01_feeder_delta
# {
# my ($pdb, $pu01) = @_;
# #
# my $filename = $pu01->{file_name};
# #
# my $machine = $pu01->{mach_no};
# my $lane = $pu01->{lane};
# my $stage = $pu01->{stage};
# my $output_no = $pu01->{output_no};
# my $section = MOUNTPICKUPFEEDER;
# #
# my $product = $pdb->{product}{u01}{$machine}{$lane}{$stage};
# #
# foreach my $fadd (sort { $a <=> $b } keys %{$pdb->{$section}{$machine}{$lane}{$stage}{delta}})
# {
# my $table_no = int($fadd/10000); # truncate
# #
# foreach my $fsadd (sort { $a <=> $b } keys %{$pdb->{$section}{$machine}{$lane}{$stage}{delta}{$fadd}})
# {
# my $reelid = $pdb->{$section}{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{ReelID};
# #
# # product-independent totals
# #
# # by_machine_lane_stage_fadd_fsadd_reelid
# #
# if (exists($totals{$section}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}{$fadd}{$fsadd}{$reelid}))
# {
# foreach my $col (@feeder_count_cols)
# {
# $totals{$section}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}{$fadd}{$fsadd}{$reelid}{$col} += $pdb->{$section}{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col};
# }
# }
# else
# {
# foreach my $col (@feeder_count_cols)
# {
# $totals{$section}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}{$fadd}{$fsadd}{$reelid}{$col} = $pdb->{$section}{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col};
# }
# }
# #
# # by_machine_lane_stage_fadd_fsadd
# #
# if (exists($totals{$section}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}{$stage}{$fadd}{$fsadd}))
# {
# foreach my $col (@feeder_count_cols)
# {
# $totals{$section}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}{$stage}{$fadd}{$fsadd}{$col} += $pdb->{$section}{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col};
# }
# }
# else
# {
# foreach my $col (@feeder_count_cols)
# {
# $totals{$section}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}{$stage}{$fadd}{$fsadd}{$col} = $pdb->{$section}{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col};
# }
# }
# #
# # by_machine_lane_stage_table_no
# #
# if (exists($totals{$section}{by_machine_lane_stage_table_no}{$machine}{$lane}{$stage}{$table_no}))
# {
# foreach my $col (@feeder_count_cols)
# {
# $totals{$section}{by_machine_lane_stage_table_no}{$machine}{$lane}{$stage}{$table_no}{$col} += $pdb->{$section}{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col};
# }
# }
# else
# {
# foreach my $col (@feeder_count_cols)
# {
# $totals{$section}{by_machine_lane_stage_table_no}{$machine}{$lane}{$stage}{$table_no}{$col} = $pdb->{$section}{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col};
# }
# }
# #
# # product-dependent totals
# #
# # by_product by_machine_lane_stage_fadd_fsadd_reelid
# #
# if (exists($totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}{$fadd}{$fsadd}{$reelid}))
# {
# foreach my $col (@feeder_count_cols)
# {
# $totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}{$fadd}{$fsadd}{$reelid}{$col} += $pdb->{$section}{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col};
# }
# }
# else
# {
# foreach my $col (@feeder_count_cols)
# {
# $totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd_reelid}{$machine}{$lane}{$stage}{$fadd}{$fsadd}{$reelid}{$col} = $pdb->{$section}{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col};
# }
# }
# #
# # by_product by_machine_lane_stage_fadd_fsadd
# #
# if (exists($totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}{$stage}{$fadd}{$fsadd}))
# {
# foreach my $col (@feeder_count_cols)
# {
# $totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}{$stage}{$fadd}{$fsadd}{$col} += $pdb->{$section}{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col};
# }
# }
# else
# {
# foreach my $col (@feeder_count_cols)
# {
# $totals{$section}{by_product}{$product}{by_machine_lane_stage_fadd_fsadd}{$machine}{$lane}{$stage}{$fadd}{$fsadd}{$col} = $pdb->{$section}{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col};
# }
# }
# #
# # by_product by_machine_lane_stage_table_no
# #
# if (exists($totals{$section}{by_product}{$product}{by_machine_lane_stage_table_no}{$machine}{$lane}{$stage}{$table_no}))
# {
# foreach my $col (@feeder_count_cols)
# {
# $totals{$section}{by_product}{$product}{by_machine_lane_stage_table_no}{$machine}{$lane}{$stage}{$table_no}{$col} += $pdb->{$section}{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col};
# }
# }
# else
# {
# foreach my $col (@feeder_count_cols)
# {
# $totals{$section}{by_product}{$product}{by_machine_lane_stage_table_no}{$machine}{$lane}{$stage}{$table_no}{$col} = $pdb->{$section}{$machine}{$lane}{$stage}{delta}{$fadd}{$fsadd}{data}{$col};
# }
# }
# }
# }
# }
# #
# sub audit_u01_feeders
# {
# my ($pdb, $pu01) = @_;
# #
# my $machine = $pu01->{mach_no};
# my $lane = $pu01->{lane};
# my $stage = $pu01->{stage};
# my $output_no = $pu01->{output_no};
# my $section = MOUNTPICKUPFEEDER;
# my $filename = $pu01->{file_name};
# #
# set_list_section_column_names(LNB_U01_FILE_TYPE, $pu01, $section);
# #
# printf $log_fh "\n%d: SECTION : %s\n", __LINE__, $section
# if ($verbose >= MAXVERBOSE);
# #
# if ($verbose >= MAXVERBOSE)
# {
# printf $log_fh "%d: MACHINE : %s\n", __LINE__, $machine;
# printf $log_fh "%d: LANE : %d\n", __LINE__, $lane;
# printf $log_fh "%d: STAGE : %d\n", __LINE__, $stage;
# printf $log_fh "%d: OUTPUT NO: %s\n", __LINE__, $output_no;
# printf $log_fh "%d: FILE RECS : %d\n", __LINE__, scalar(@{$pu01->{data}});
# printf $log_fh "%d: %s RECS: %d\n", __LINE__, $section, scalar(@{$pu01->{$section}->{data}}) if (defined($pu01->{$section}->{data}));
# }
# #
# # check if the file has a feeder data section.
# #
# if ($output_no == TIMER_NOT_RUNNING)
# {
# printf $log_fh "%d: No Feeder data in Output=%d U01 files. Skipping.\n", __LINE__, $output_no if ($verbose >= MAXVERBOSE);
# return;
# }
# elsif (($output_no == PROD_COMPLETE) ||
# ($output_no == PROD_COMPLETE_LATER))
# {
# if ( ! exists($pdb->{$section}->{$machine}{$lane}{$stage}{state}))
# {
# printf $log_fh "%d: ENTRY STATE: UNKNOWN\n", __LINE__
# if ($verbose >= MAXVERBOSE);
# #
# delete $pdb->{$section}->{$machine}{$lane}{$stage}{cache};
# copy_u01_feeder_cache($pdb, $pu01, DELTA);
# #
# $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
# }
# elsif ($pdb->{$section}->{$machine}{$lane}{$stage}{state} eq RESET)
# {
# printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
# $pdb->{$section}->{$machine}{$lane}{$stage}{state}
# if ($verbose >= MAXVERBOSE);
# copy_u01_feeder_delta($pdb, $pu01);
# tabulate_u01_feeder_delta($pdb, $pu01);
# copy_u01_feeder_cache($pdb, $pu01, DELTA);
# #
# $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
# }
# elsif ($pdb->{$section}->{$machine}{$lane}{$stage}{state} eq DELTA)
# {
# printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
# $pdb->{$section}->{$machine}{$lane}{$stage}{state}
# if ($verbose >= MAXVERBOSE);
# calculate_u01_feeder_delta($pdb, $pu01);
# tabulate_u01_feeder_delta($pdb, $pu01);
# copy_u01_feeder_cache($pdb, $pu01, DELTA);
# #
# $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
# }
# elsif ($pdb->{$section}->{$machine}{$lane}{$stage}{state} eq BASELINE)
# {
# printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
# $pdb->{$section}->{$machine}{$lane}{$stage}{state}
# if ($verbose >= MAXVERBOSE);
# delete $pdb->{$section}->{$machine}{$lane}{$stage}{cache};
# #
# copy_u01_feeder_cache($pdb, $pu01, DELTA);
# #
# $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
# }
# else
# {
# my $state = $pdb->{$section}->{$machine}{$lane}{$stage}{state};
# die "ERROR: unknown $section state: $state. Stopped";
# }
# }
# elsif ($output_no == DETECT_CHANGE)
# {
# if ( ! exists($pdb->{$section}->{$machine}{$lane}{$stage}{state}))
# {
# printf $log_fh "%d: ENTRY STATE: UNKNOWN\n", __LINE__,
# if ($verbose >= MAXVERBOSE);
# #
# copy_u01_feeder_cache($pdb, $pu01, DELTA);
# $pdb->{$section}->{$machine}{$lane}{$stage}{state} = BASELINE;
# }
# elsif ($pdb->{$section}->{$machine}{$lane}{$stage}{state} eq RESET)
# {
# printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
# $pdb->{$section}->{$machine}{$lane}{$stage}{state}
# if ($verbose >= MAXVERBOSE);
# copy_u01_feeder_delta($pdb, $pu01);
# tabulate_u01_feeder_delta($pdb, $pu01);
# copy_u01_feeder_cache($pdb, $pu01, DELTA);
# #
# $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
# }
# elsif ($pdb->{$section}->{$machine}{$lane}{$stage}{state} eq DELTA)
# {
# printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
# $pdb->{$section}->{$machine}{$lane}{$stage}{state}
# if ($verbose >= MAXVERBOSE);
# calculate_u01_feeder_delta($pdb, $pu01);
# tabulate_u01_feeder_delta($pdb, $pu01);
# copy_u01_feeder_cache($pdb, $pu01, DELTA);
# #
# $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
# }
# elsif ($pdb->{$section}->{$machine}{$lane}{$stage}{state} eq BASELINE)
# {
# printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
# $pdb->{$section}->{$machine}{$lane}{$stage}{state}
# if ($verbose >= MAXVERBOSE);
# #
# copy_u01_feeder_cache($pdb, $pu01, DELTA);
# $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
# }
# else
# {
# my $state = $pdb->{$section}->{$machine}{$lane}{$stage}{state};
# die "ERROR: unknown $section state: $state. Stopped";
# }
# }
# elsif (($output_no == MANUAL_CLEAR) ||
# ($output_no == AUTO_CLEAR))
# {
# printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
# $pdb->{$section}->{$machine}{$lane}{$stage}{state}
# if ($verbose >= MAXVERBOSE);
# $pdb->{$section}->{$machine}{$lane}{$stage}{state} = RESET;
# delete $pdb->{$section}->{$machine}{$lane}{$stage}{cache};
# }
# else
# {
# die "ERROR: unknown $section output type: $output_no. Stopped";
# }
# #
# printf $log_fh "%d: EXIT STATE: %s\n", __LINE__,
# $pdb->{$section}->{$machine}{$lane}{$stage}{state}
# if ($verbose >= MAXVERBOSE);
# #
# return;
# }
# #
# ######################################################################
# #
# # routines for nozzle section
# #
# sub calculate_u01_nozzle_delta
# {
# my ($pdb, $pu01) = @_;
# #
# my $section = MOUNTPICKUPNOZZLE;
# #
# my $filename = $pu01->{file_name};
# my $machine = $pu01->{mach_no};
# my $lane = $pu01->{lane};
# my $stage = $pu01->{stage};
# my $output_no = $pu01->{output_no};
# #
# my $pcols = $pu01->{$section}->{column_names};
# #
# delete $pdb->{$section}->{$machine}{$lane}{$stage}{delta};
# #
# foreach my $prow (@{$pu01->{$section}->{data}})
# {
# my $nhadd = $prow->{NHAdd};
# my $ncadd = $prow->{NCAdd};
# my $blkserial = $prow->{BLKSerial};
# #
# if ( ! exists($pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$nhadd}{$ncadd}{data}))
# {
# printf $log_fh "%d: WARNING: [%s] %s NHAdd %s, NCAdd %s NOT found in cache. Taking all counts as is.\n", __LINE__, $filename, $section, $nhadd, $ncadd if ($verbose >= MINVERBOSE);
# foreach my $col (@{$pcols})
# {
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data}{$col} = $prow->{$col};
# }
# }
# else
# {
# my $cache_blkserial = $pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$nhadd}{$ncadd}{data}{BLKSerial};
# if ($blkserial eq $cache_blkserial)
# {
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data}{BLKSerial} = $blkserial;
# #
# foreach my $col (@nozzle_count_cols)
# {
# my $u01_value = $prow->{$col};
# my $cache_value = $pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$nhadd}{$ncadd}{data}{$col};
# #
# my $delta = $u01_value - $cache_value;
# #
# if ($delta >= 0)
# {
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data}{$col} = $delta;
# }
# elsif (($proc_options & PROC_OPT_USENEGDELTS) != 0)
# {
# printf $log_fh "%d: WARNING: [%s] %s NHAdd %s, NCAdd %s using NEGATIVE delta for key %s: %d\n", __LINE__, $filename, $section, $nhadd, $ncadd, $col, $delta if ($verbose >= MINVERBOSE);
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data}{$col} = $delta;
# }
# else
# {
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data}{$col} = 0;
# printf $log_fh "%d: WARNING: [%s] %s NHAdd %s, NCAdd %s setting NEGATIVE delta (%d) for key %s to ZERO\n", __LINE__, $filename, $section, $nhadd, $ncadd, $delta, $col if ($verbose >= MINVERBOSE);
# }
# }
# }
# else
# {
# printf $log_fh "%d: WARNING: [%s] %s NHAdd %s, NCAdd %s BLKSERIAL CHANGED: CACHED %s, CURRENT U01 %s\n", __LINE__, $filename, $section, $nhadd, $ncadd, $cache_blkserial, $blkserial if ($verbose >= MINVERBOSE);
# #
# delete $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data};
# #
# foreach my $col (@{$pcols})
# {
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data}{$col} = $prow->{$col};
# }
# }
# }
# }
# }
# #
# sub copy_u01_nozzle_cache
# {
# my ($pdb, $pu01, $state) = @_;
# #
# my $section = MOUNTPICKUPNOZZLE;
# #
# my $filename = $pu01->{file_name};
# my $machine = $pu01->{mach_no};
# my $lane = $pu01->{lane};
# my $stage = $pu01->{stage};
# my $output_no = $pu01->{output_no};
# #
# my $pcols = $pu01->{$section}->{column_names};
# #
# foreach my $prow (@{$pu01->{$section}->{data}})
# {
# my $nhadd = $prow->{NHAdd};
# my $ncadd = $prow->{NCAdd};
# #
# foreach my $col (@{$pcols})
# {
# $pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$nhadd}{$ncadd}{data}{$col} = $prow->{$col};
# }
# #
# $pdb->{$section}->{$machine}{$lane}{$stage}{cache}{$nhadd}{$ncadd}{state} = $state;
# }
# }
# #
# sub copy_u01_nozzle_delta
# {
# my ($pdb, $pu01) = @_;
# #
# my $section = MOUNTPICKUPNOZZLE;
# #
# my $filename = $pu01->{file_name};
# my $machine = $pu01->{mach_no};
# my $lane = $pu01->{lane};
# my $stage = $pu01->{stage};
# my $output_no = $pu01->{output_no};
# #
# my $pcols = $pu01->{$section}->{column_names};
# #
# delete $pdb->{$section}->{$machine}{$lane}{$stage}{delta};
# #
# foreach my $prow (@{$pu01->{$section}->{data}})
# {
# my $nhadd = $prow->{NHAdd};
# my $ncadd = $prow->{NCAdd};
# #
# foreach my $col (@{$pcols})
# {
# $pdb->{$section}->{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data}{$col} = $prow->{$col};
# }
# }
# }
# #
# sub tabulate_u01_nozzle_delta
# {
# my ($pdb, $pu01) = @_;
# #
# my $filename = $pu01->{file_name};
# #
# my $machine = $pu01->{mach_no};
# my $lane = $pu01->{lane};
# my $stage = $pu01->{stage};
# my $output_no = $pu01->{output_no};
# my $section = MOUNTPICKUPNOZZLE;
# #
# my $product = $pdb->{product}{u01}{$machine}{$lane}{$stage};
# #
# foreach my $nhadd (sort { $a <=> $b } keys %{$pdb->{$section}{$machine}{$lane}{$stage}{delta}})
# {
# foreach my $ncadd (sort { $a <=> $b } keys %{$pdb->{$section}{$machine}{$lane}{$stage}{delta}{$nhadd}})
# {
# my $blkserial = $pdb->{$section}{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data}{BLKSerial};
# #
# # product-independent totals
# #
# if (exists($totals{$section}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}{$blkserial}))
# {
# foreach my $col (@nozzle_count_cols)
# {
# $totals{$section}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}{$blkserial}{$col} += $pdb->{$section}{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data}{$col};
# }
# }
# else
# {
# foreach my $col (@nozzle_count_cols)
# {
# $totals{$section}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}{$blkserial}{$col} = $pdb->{$section}{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data}{$col};
# }
# }
# #
# if (exists($totals{$section}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}))
# {
# foreach my $col (@nozzle_count_cols)
# {
# $totals{$section}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}{$col} += $pdb->{$section}{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data}{$col};
# }
# }
# else
# {
# foreach my $col (@nozzle_count_cols)
# {
# $totals{$section}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}{$col} = $pdb->{$section}{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data}{$col};
# }
# }
# #
# # product-dependent totals
# #
# if (exists($totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}{$blkserial}))
# {
# foreach my $col (@nozzle_count_cols)
# {
# $totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}{$blkserial}{$col} += $pdb->{$section}{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data}{$col};
# }
# }
# else
# {
# foreach my $col (@nozzle_count_cols)
# {
# $totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd_blkserial}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}{$blkserial}{$col} = $pdb->{$section}{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data}{$col};
# }
# }
# #
# if (exists($totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}))
# {
# foreach my $col (@nozzle_count_cols)
# {
# $totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}{$col} += $pdb->{$section}{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data}{$col};
# }
# }
# else
# {
# foreach my $col (@nozzle_count_cols)
# {
# $totals{$section}{by_product}{$product}{by_machine_lane_stage_nhadd_ncadd}{$machine}{$lane}{$stage}{$nhadd}{$ncadd}{$col} = $pdb->{$section}{$machine}{$lane}{$stage}{delta}{$nhadd}{$ncadd}{data}{$col};
# }
# }
# }
# }
# }
# #
# sub audit_u01_nozzles
# {
# my ($pdb, $pu01) = @_;
# #
# my $machine = $pu01->{mach_no};
# my $lane = $pu01->{lane};
# my $stage = $pu01->{stage};
# my $output_no = $pu01->{output_no};
# my $section = MOUNTPICKUPNOZZLE;
# my $filename = $pu01->{file_name};
# #
# set_list_section_column_names(LNB_U01_FILE_TYPE, $pu01, $section);
# #
# printf $log_fh "\n%d: SECTION : %s\n", __LINE__, $section
# if ($verbose >= MAXVERBOSE);
# #
# if ($verbose >= MAXVERBOSE)
# {
# printf $log_fh "%d: MACHINE : %s\n", __LINE__, $machine;
# printf $log_fh "%d: LANE : %d\n", __LINE__, $lane;
# printf $log_fh "%d: STAGE : %d\n", __LINE__, $stage;
# printf $log_fh "%d: OUTPUT NO: %s\n", __LINE__, $output_no;
# printf $log_fh "%d: FILE RECS : %d\n", __LINE__, scalar(@{$pu01->{data}});
# printf $log_fh "%d: %s RECS: %d\n", __LINE__, $section, scalar(@{$pu01->{$section}->{data}}) if (defined($pu01->{$section}->{data}));
# }
# #
# # check if the file has a nozzle data section.
# #
# if (($output_no == DETECT_CHANGE) ||
# ($output_no == TIMER_NOT_RUNNING))
# {
# printf $log_fh "%d: No Nozzle data in Output=%d U01 files. Skipping.\n", __LINE__, $output_no if ($verbose >= MAXVERBOSE);
# return;
# }
# elsif (($output_no == PROD_COMPLETE) ||
# ($output_no == PROD_COMPLETE_LATER))
# {
# if ( ! exists($pdb->{$section}->{$machine}{$lane}{$stage}{state}))
# {
# printf $log_fh "%d: ENTRY STATE: UNKNOWN\n", __LINE__,
# if ($verbose >= MAXVERBOSE);
# #
# delete $pdb->{$section}->{$machine}{$lane}{$stage}{cache};
# copy_u01_nozzle_cache($pdb, $pu01, DELTA);
# #
# $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
# }
# elsif ($pdb->{$section}->{$machine}{$lane}{$stage}{state} eq RESET)
# {
# printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
# $pdb->{$section}->{$machine}{$lane}{$stage}{state}
# if ($verbose >= MAXVERBOSE);
# copy_u01_nozzle_delta($pdb, $pu01);
# tabulate_u01_nozzle_delta($pdb, $pu01);
# copy_u01_nozzle_cache($pdb, $pu01, DELTA);
# #
# $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
# }
# elsif ($pdb->{$section}->{$machine}{$lane}{$stage}{state} eq DELTA)
# {
# printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
# $pdb->{$section}->{$machine}{$lane}{$stage}{state}
# if ($verbose >= MAXVERBOSE);
# calculate_u01_nozzle_delta($pdb, $pu01);
# tabulate_u01_nozzle_delta($pdb, $pu01);
# copy_u01_nozzle_cache($pdb, $pu01, DELTA);
# #
# $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
# }
# elsif ($pdb->{$section}->{$machine}{$lane}{$stage}{state} eq BASELINE)
# {
# printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
# $pdb->{$section}->{$machine}{$lane}{$stage}{state}
# if ($verbose >= MAXVERBOSE);
# #
# delete $pdb->{$section}->{$machine}{$lane}{$stage}{cache};
# copy_u01_nozzle_cache($pdb, $pu01, DELTA);
# #
# $pdb->{$section}->{$machine}{$lane}{$stage}{state} = DELTA;
# }
# else
# {
# my $state = $pdb->{$section}->{$machine}{$lane}{$stage}{state};
# die "ERROR: unknown $section state: $state. Stopped";
# }
# }
# elsif (($output_no == MANUAL_CLEAR) ||
# ($output_no == AUTO_CLEAR))
# {
# printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
# $pdb->{$section}->{$machine}{$lane}{$stage}{state}
# if ($verbose >= MAXVERBOSE);
# $pdb->{$section}->{$machine}{$lane}{$stage}{state} = RESET;
# delete $pdb->{$section}->{$machine}{$lane}{$stage}{cache};
# }
# else
# {
# die "ERROR: unknown $section output type: $output_no. Stopped";
# }
# #
# printf $log_fh "%d: EXIT STATE: %s\n", __LINE__,
# $pdb->{$section}->{$machine}{$lane}{$stage}{state}
# if ($verbose >= MAXVERBOSE);
# #
# return;
# }
# #
# ######################################################################
# #
# # routines for nozzle section
# #
# sub calculate_u01_nozzle_delta_keys
# {
# my ($pdb, $pu01, $nmkey1, $nmkey2, $label) = @_;
# #
# my $section = MOUNTPICKUPNOZZLE;
# #
# my $filename = $pu01->{file_name};
# my $machine = $pu01->{mach_no};
# my $lane = $pu01->{lane};
# my $stage = $pu01->{stage};
# my $output_no = $pu01->{output_no};
# #
# my $pcols = $pu01->{$section}->{column_names};
# #
# delete $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta};
# #
# foreach my $prow (@{$pu01->{$section}->{data}})
# {
# my $key1 = $prow->{$nmkey1};
# my $key2 = $prow->{$nmkey2};
# my $blkserial = $prow->{BLKSerial};
# #
# if ( ! exists($pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{cache}{$key1}{$key2}{data}))
# {
# printf $log_fh "%d: WARNING: [%s] %s $nmkey1 %s, $nmkey2 %s NOT found in cache. Taking all counts as is.\n", __LINE__, $filename, $section, $key1, $key2 if ($verbose >= MINVERBOSE);
# foreach my $col (@{$pcols})
# {
# $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data}{$col} = $prow->{$col};
# }
# }
# else
# {
# my $cache_blkserial = $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{cache}{$key1}{$key2}{data}{BLKSerial};
# if ($blkserial eq $cache_blkserial)
# {
# $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data}{BLKSerial} = $blkserial;
# #
# foreach my $col (@nozzle_count_cols)
# {
# my $u01_value = $prow->{$col};
# my $cache_value = $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{cache}{$key1}{$key2}{data}{$col};
# #
# my $delta = $u01_value - $cache_value;
# #
# if ($delta >= 0)
# {
# $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data}{$col} = $delta;
# }
# elsif (($proc_options & PROC_OPT_USENEGDELTS) != 0)
# {
# printf $log_fh "%d: WARNING: [%s] %s $nmkey1 %s, $nmkey2 %s using NEGATIVE delta for key %s: %d\n", __LINE__, $filename, $section, $key1, $key2, $col, $delta if ($verbose >= MINVERBOSE);
# $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data}{$col} = $delta;
# }
# else
# {
# $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data}{$col} = 0;
# printf $log_fh "%d: WARNING: [%s] %s $nmkey1 %s, $nmkey2 %s setting NEGATIVE delta (%d) for key %s to ZERO\n", __LINE__, $filename, $section, $key1, $key2, $delta, $col if ($verbose >= MINVERBOSE);
# }
# }
# }
# else
# {
# printf $log_fh "%d: WARNING: [%s] %s $nmkey1 %s, $nmkey2 %s BLKSERIAL CHANGED: CACHED %s, CURRENT U01 %s\n", __LINE__, $filename, $section, $key1, $key2, $cache_blkserial, $blkserial if ($verbose >= MINVERBOSE);
# #
# delete $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data};
# #
# foreach my $col (@{$pcols})
# {
# $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data}{$col} = $prow->{$col};
# }
# }
# }
# }
# }
# #
# sub copy_u01_nozzle_cache_keys
# {
# my ($pdb, $pu01, $state, $nmkey1, $nmkey2, $label) = @_;
# #
# my $section = MOUNTPICKUPNOZZLE;
# #
# my $filename = $pu01->{file_name};
# my $machine = $pu01->{mach_no};
# my $lane = $pu01->{lane};
# my $stage = $pu01->{stage};
# my $output_no = $pu01->{output_no};
# #
# my $pcols = $pu01->{$section}->{column_names};
# #
# foreach my $prow (@{$pu01->{$section}->{data}})
# {
# my $key1 = $prow->{$nmkey1};
# my $key2 = $prow->{$nmkey2};
# # printf $log_fh "%d: $label $nmkey1 %d $nmkey2 %d\n", __LINE__, $key1, $key2;
# #
# foreach my $col (@{$pcols})
# {
# $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{cache}{$key1}{$key2}{data}{$col} = $prow->{$col};
# # printf $log_fh "%d: $label $nmkey1 %d $nmkey2 %d $col %s\n", __LINE__, $key1, $key2, $prow->{$col}
# }
# #
# $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{cache}{$key1}{$key2}{state} = $state;
# }
# }
# #
# sub copy_u01_nozzle_delta_keys
# {
# my ($pdb, $pu01, $nmkey1, $nmkey2, $label) = @_;
# #
# my $section = MOUNTPICKUPNOZZLE;
# #
# my $filename = $pu01->{file_name};
# my $machine = $pu01->{mach_no};
# my $lane = $pu01->{lane};
# my $stage = $pu01->{stage};
# my $output_no = $pu01->{output_no};
# #
# my $pcols = $pu01->{$section}->{column_names};
# #
# delete $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta};
# #
# foreach my $prow (@{$pu01->{$section}->{data}})
# {
# my $key1 = $prow->{$nmkey1};
# my $key2 = $prow->{$nmkey2};
# #
# foreach my $col (@{$pcols})
# {
# $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data}{$col} = $prow->{$col};
# }
# }
# }
# #
# sub tabulate_u01_nozzle_delta_keys
# {
# my ($pdb, $pu01, $nmkey1, $nmkey2, $label) = @_;
# #
# my $filename = $pu01->{file_name};
# #
# my $machine = $pu01->{mach_no};
# my $lane = $pu01->{lane};
# my $stage = $pu01->{stage};
# my $output_no = $pu01->{output_no};
# my $section = MOUNTPICKUPNOZZLE;
# #
# my $product = $pdb->{product}{u01}{$machine}{$lane}{$stage};
# #
# foreach my $key1 (sort { $a <=> $b } keys %{$pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}})
# {
# foreach my $key2 (sort { $a <=> $b } keys %{$pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}})
# {
# my $blkserial = $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data}{BLKSerial};
# #
# # product-independent totals
# #
# if (exists($totals{$section}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}{$key1}{$key2}{$blkserial}))
# {
# foreach my $col (@nozzle_count_cols)
# {
# $totals{$section}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}{$key1}{$key2}{$blkserial}{$col} += $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data}{$col};
# }
# }
# else
# {
# foreach my $col (@nozzle_count_cols)
# {
# $totals{$section}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}{$key1}{$key2}{$blkserial}{$col} = $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data}{$col};
# }
# }
# #
# if (exists($totals{$section}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}{$stage}{$key1}{$key2}))
# {
# foreach my $col (@nozzle_count_cols)
# {
# $totals{$section}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}{$stage}{$key1}{$key2}{$col} += $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data}{$col};
# }
# }
# else
# {
# foreach my $col (@nozzle_count_cols)
# {
# $totals{$section}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}{$stage}{$key1}{$key2}{$col} = $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data}{$col};
# }
# }
# #
# # product-dependent totals
# #
# if (exists($totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}{$key1}{$key2}{$blkserial}))
# {
# foreach my $col (@nozzle_count_cols)
# {
# $totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}{$key1}{$key2}{$blkserial}{$col} += $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data}{$col};
# }
# }
# else
# {
# foreach my $col (@nozzle_count_cols)
# {
# $totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2_blkserial}{$machine}{$lane}{$stage}{$key1}{$key2}{$blkserial}{$col} = $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data}{$col};
# }
# }
# #
# if (exists($totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}{$stage}{$key1}{$key2}))
# {
# foreach my $col (@nozzle_count_cols)
# {
# $totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}{$stage}{$key1}{$key2}{$col} += $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data}{$col};
# }
# }
# else
# {
# foreach my $col (@nozzle_count_cols)
# {
# $totals{$section}{by_product}{$product}{$label}{by_machine_lane_stage_key1_key2}{$machine}{$lane}{$stage}{$key1}{$key2}{$col} = $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{delta}{$key1}{$key2}{data}{$col};
# }
# }
# }
# }
# }
# #
# sub audit_u01_nozzles_keys
# {
# my ($pdb, $pu01, $nmkey1, $nmkey2, $label) = @_;
# #
# my $machine = $pu01->{mach_no};
# my $lane = $pu01->{lane};
# my $stage = $pu01->{stage};
# my $output_no = $pu01->{output_no};
# my $section = MOUNTPICKUPNOZZLE;
# my $filename = $pu01->{file_name};
# #
# printf $log_fh "\n%d: SECTION : %s\n", __LINE__, $section
# if ($verbose >= MAXVERBOSE);
# #
# if ($verbose >= MAXVERBOSE)
# {
# printf $log_fh "%d: MACHINE : %s\n", __LINE__, $machine;
# printf $log_fh "%d: LANE : %d\n", __LINE__, $lane;
# printf $log_fh "%d: STAGE : %d\n", __LINE__, $stage;
# printf $log_fh "%d: OUTPUT NO: %s\n", __LINE__, $output_no;
# printf $log_fh "%d: FILE RECS : %d\n", __LINE__, scalar(@{$pu01->{data}});
# printf $log_fh "%d: %s RECS: %d\n", __LINE__, $section, scalar(@{$pu01->{$section}->{data}}) if (defined(@{$pu01->{$section}->{data}}));
# }
# #
# # check if the file has a nozzle data section.
# #
# if (($output_no == DETECT_CHANGE) ||
# ($output_no == TIMER_NOT_RUNNING))
# {
# printf $log_fh "%d: No Nozzle data in Output=%d U01 files. Skipping.\n", __LINE__, $output_no if ($verbose >= MAXVERBOSE);
# return;
# }
# elsif (($output_no == PROD_COMPLETE) ||
# ($output_no == PROD_COMPLETE_LATER))
# {
# if ( ! exists($pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state}))
# {
# printf $log_fh "%d: ENTRY STATE: UNKNOWN\n", __LINE__
# if ($verbose >= MAXVERBOSE);
# #
# delete $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{cache};
# copy_u01_nozzle_cache_keys(
# $pdb, $pu01, DELTA, $nmkey1, $nmkey2, $label);
# #
# $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state} = DELTA;
# }
# elsif ($pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state} eq RESET)
# {
# printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
# $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state}
# if ($verbose >= MAXVERBOSE);
# copy_u01_nozzle_delta_keys(
# $pdb, $pu01, $nmkey1, $nmkey2, $label);
# tabulate_u01_nozzle_delta_keys(
# $pdb, $pu01, $nmkey1, $nmkey2, $label);
# copy_u01_nozzle_cache_keys(
# $pdb, $pu01, DELTA, $nmkey1, $nmkey2, $label);
# #
# $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state} = DELTA;
# }
# elsif ($pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state} eq DELTA)
# {
# printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
# $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state}
# if ($verbose >= MAXVERBOSE);
# calculate_u01_nozzle_delta_keys(
# $pdb, $pu01, $nmkey1, $nmkey2, $label);
# tabulate_u01_nozzle_delta_keys(
# $pdb, $pu01, $nmkey1, $nmkey2, $label);
# copy_u01_nozzle_cache_keys(
# $pdb, $pu01, DELTA, $nmkey1, $nmkey2, $label);
# #
# $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state} = DELTA;
# }
# elsif ($pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state} eq BASELINE)
# {
# printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
# $pdb->{$section}->{$machine}{$lane}{$stage}{state}
# if ($verbose >= MAXVERBOSE);
# #
# delete $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{cache};
# copy_u01_nozzle_cache_keys(
# $pdb, $pu01, DELTA, $nmkey1, $nmkey2, $label);
# #
# $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state} = DELTA;
# }
# else
# {
# my $state = $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state};
# die "ERROR: unknown $section state: $state. Stopped";
# }
# }
# elsif (($output_no == MANUAL_CLEAR) ||
# ($output_no == AUTO_CLEAR))
# {
# printf $log_fh "%d: ENTRY STATE: %s\n", __LINE__,
# $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state}
# if ($verbose >= MAXVERBOSE);
# $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state} = RESET;
# delete $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{cache};
# }
# else
# {
# die "ERROR: unknown $section output type: $output_no. Stopped";
# }
# #
# printf $log_fh "%d: EXIT STATE: %s\n", __LINE__,
# $pdb->{$section}->{$label}->{$machine}{$lane}{$stage}{state}
# if ($verbose >= MAXVERBOSE);
# #
# return;
# }
# #
# sub audit_u01_nozzles_new
# {
# my ($pdb, $pu01) = @_;
# #
# audit_u01_nozzles_keys($pdb, $pu01,
# NZ_KEY_NHADD,
# NZ_KEY_NCADD,
# NZ_LABEL_NHADD_NCADD);
# audit_u01_nozzles_keys($pdb, $pu01,
# NZ_KEY_HEAD,
# NZ_KEY_NHADD,
# NZ_LABEL_HEAD_NHADD);
# audit_u01_nozzles_keys($pdb, $pu01,
# NZ_KEY_HEAD,
# NZ_KEY_NCADD,
# NZ_LABEL_HEAD_NCADD);
# }
# #
# #####################################################################
# #
# # high-level audit functions for u01 files.
# #
# sub audit_u01_file
# {
# my ($pdb, $pu01) = @_;
# #
# my $output_no = $pu01->{output_no};
# #
# return if (($output_no == TIMER_NOT_RUNNING) &&
# (($proc_options & PROC_OPT_IGNALL12) != 0));
# #
# set_product_info($pdb, $pu01, LNB_U01_FILE_TYPE);
# #
# audit_u01_name_value($pdb, $pu01, COUNT);
# audit_u01_name_value($pdb, $pu01, TIME);
# audit_u01_feeders($pdb, $pu01);
# #
# if (($proc_options & PROC_OPT_USEOLDNZ) != 0)
# {
# audit_u01_nozzles($pdb, $pu01);
# }
# else
# {
# audit_u01_nozzles_new($pdb, $pu01);
# }
# #
# return;
# }
# #
# sub load_u01_sections
# {
# my ($pu01) = @_;
# #
# load_name_value($pu01, INDEX);
# load_name_value($pu01, INFORMATION);
# #
# load_name_value($pu01, TIME);
# load_name_value($pu01, CYCLETIME);
# load_name_value($pu01, COUNT);
# load_list($pu01, DISPENSER);
# load_list($pu01, MOUNTPICKUPFEEDER);
# # backfill_list($pu01, MOUNTPICKUPFEEDER, \@feeder_count_cols);
# load_list($pu01, MOUNTPICKUPNOZZLE);
# # backfill_list($pu01, MOUNTPICKUPNOZZLE, \@nozzle_count_cols);
# load_name_value($pu01, INSPECTIONDATA);
# }
# #
# sub audit_u01_files
# {
# my ($pu01s, $pdb) = @_;
# #
# printf $log_fh "\n%d: Audit U01 files:\n", __LINE__;
# #
# foreach my $pu01 (@{$pu01s})
# {
# printf $log_fh "\n%d: Audit U01: %s\n", __LINE__, $pu01->{file_name} if ($verbose >= MIDVERBOSE);
# #
# next unless (load($pu01) != 0);
# #
# load_u01_sections($pu01);
# #
# audit_u01_file($pdb, $pu01);
# }
# #
# return;
# }
# #
# ########################################################################
# ########################################################################
# #
# # process U01 files for csv export.
# #
# sub export_u01_data_as_csv
# {
# my ($pdb) = @_;
# #
# export_name_value_section_as_csv(TIME,
# LNB_U01_FILE_TYPE,
# 'TIME',
# 'machine',
# TRUE);
# export_name_value_section_as_csv(CYCLETIME,
# LNB_U01_FILE_TYPE,
# 'CYCLE_TIME',
# 'machine',
# TRUE);
# export_name_value_section_as_csv(COUNT,
# LNB_U01_FILE_TYPE,
# 'COUNT',
# 'machine',
# TRUE);
# #
# export_list_section_as_csv(DISPENSER,
# LNB_U01_FILE_TYPE,
# 'DISPENSER',
# 'machine',
# TRUE);
# export_list_section_as_csv(MOUNTPICKUPFEEDER,
# LNB_U01_FILE_TYPE,
# 'MOUNT_PICKUP_FEEDER',
# 'machine',
# TRUE);
# export_list_section_as_csv(MOUNTPICKUPNOZZLE,
# LNB_U01_FILE_TYPE,
# 'MOUNT_PICKUP_NOZZLE',
# 'machine',
# TRUE);
# export_name_value_section_as_csv(INSPECTIONDATA,
# LNB_U01_FILE_TYPE,
# 'INSPECTION_DATA',
# 'machine',
# TRUE);
# }
# #
# sub prepare_u01_file
# {
# my ($pdb, $pu01) = @_;
# #
# set_product_info($pdb, $pu01, LNB_U01_FILE_TYPE);
# #
# prepare_name_value_section($pdb,
# $pu01,
# LNB_U01_FILE_TYPE,
# TIME,
# TRUE);
# prepare_name_value_section($pdb,
# $pu01,
# LNB_U01_FILE_TYPE,
# CYCLETIME,
# TRUE);
# prepare_name_value_section($pdb,
# $pu01,
# LNB_U01_FILE_TYPE,
# COUNT,
# TRUE);
# prepare_list_section($pdb,
# $pu01,
# LNB_U01_FILE_TYPE,
# DISPENSER,
# TRUE);
# prepare_list_section($pdb,
# $pu01,
# LNB_U01_FILE_TYPE,
# MOUNTPICKUPFEEDER,
# TRUE);
# prepare_list_section($pdb,
# $pu01,
# LNB_U01_FILE_TYPE,
# MOUNTPICKUPNOZZLE,
# TRUE);
# prepare_name_value_section($pdb,
# $pu01,
# LNB_U01_FILE_TYPE,
# INSPECTIONDATA,
# TRUE);
# #
# return;
# }
# #
# sub prepare_u01_files
# {
# my ($pu01s, $pdb) = @_;
# #
# printf $log_fh "\n%d: Audit U01 files:\n", __LINE__;
# #
# foreach my $pu01 (@{$pu01s})
# {
# printf $log_fh "\n%d: Audit u01: %s\n", __LINE__, $pu01->{file_name}
# if ($verbose >= MIDVERBOSE);
# #
# next unless (load($pu01) != 0);
# #
# load_u01_sections($pu01);
# #
# prepare_u01_file($pdb, $pu01);
# }
# #
# return;
# }
# #
# sub process_u01_files
# {
# my ($pu01s) = @_;
# #
# # any files to process?
# #
# if (scalar(@{$pu01s}) <= 0)
# {
# printf $log_fh "%d: No U01 files to process. Returning.\n\n", __LINE__;
# return;
# }
# #
# my %db = ();
# audit_u01_files($pu01s, \%db);
# export_u01_data(\%db);
# #
# my %csv_db = ();
# prepare_u01_files($pu01s, \%csv_db);
# export_u01_data_as_csv(\%csv_db);
# #
# return;
# }
# #
# ########################################################################
# ########################################################################
# #
# # process U03 files.
# #
# sub export_u03_data_as_csv
# {
# my ($pdb) = @_;
# #
# export_list_section_as_csv(MOUNTQUALITYTRACE,
# LNB_U03_FILE_TYPE(),
# 'MOUNT_QUALITY_TRACE',
# 'machine',
# TRUE);
# export_list_section_as_csv(MOUNTLATESTREEL,
# LNB_U03_FILE_TYPE(),
# 'MOUNT_LATEST_REEL',
# 'machine',
# TRUE);
# export_list_section_as_csv(MOUNTEXCHANGEREEL,
# LNB_U03_FILE_TYPE(),
# 'MOUNT_EXCHANGE_REEL',
# 'machine',
# TRUE);
# }
# #
# sub prepare_u03_file
# {
# my ($pdb, $pu03) = @_;
# #
# set_product_info($pdb, $pu03, LNB_U03_FILE_TYPE);
# #
# prepare_list_section($pdb,
# $pu03,
# LNB_U03_FILE_TYPE,
# MOUNTQUALITYTRACE,
# TRUE);
# prepare_list_section($pdb,
# $pu03,
# LNB_U03_FILE_TYPE,
# MOUNTLATESTREEL,
# TRUE);
# prepare_list_section($pdb,
# $pu03,
# LNB_U03_FILE_TYPE,
# MOUNTEXCHANGEREEL,
# TRUE);
# #
# return;
# }
# #
# sub load_u03_sections
# {
# my ($pu03) = @_;
# #
# load_name_value($pu03, INDEX);
# load_name_value($pu03, INFORMATION);
# #
# load_list($pu03, BRECG);
# load_list($pu03, BRECGCALC);
# load_list($pu03, ELAPSETIMERECOG);
# load_list($pu03, SBOARD);
# load_list($pu03, HEIGHTCORRECT);
# load_list($pu03, MOUNTQUALITYTRACE);
# load_list($pu03, MOUNTLATESTREEL);
# load_list($pu03, MOUNTEXCHANGEREEL);
# }
# #
# sub prepare_u03_files
# {
# my ($pu03s, $pdb) = @_;
# #
# printf $log_fh "\n%d: Audit U03 files:\n", __LINE__;
# #
# foreach my $pu03 (@{$pu03s})
# {
# printf $log_fh "\n%d: Audit u03: %s\n", __LINE__, $pu03->{file_name}
# if ($verbose >= MIDVERBOSE);
# #
# next unless (load($pu03) != 0);
# #
# load_u03_sections($pu03);
# #
# prepare_u03_file($pdb, $pu03);
# }
# #
# return;
# }
# #
# sub process_u03_files
# {
# my ($pu03s) = @_;
# #
# # any files to process?
# #
# if (scalar(@{$pu03s}) <= 0)
# {
# printf $log_fh "\n%d: No U03 files to process. Returning.\n\n", __LINE__;
# return;
# }
# #
# my %csv_db = ();
# prepare_u03_files($pu03s, \%csv_db);
# export_u03_data_as_csv(\%csv_db);
# #
# return;
# }
# #
# ########################################################################
# ########################################################################
# #
# # process MPR files.
# #
# sub export_mpr_data_as_csv
# {
# my ($pdb) = @_;
# #
# export_list_section_as_csv(TIMEDATASP,
# LNB_MPR_FILE_TYPE(),
# 'TIME_DATA_SP',
# 'sp',
# FALSE);
# export_list_section_as_csv(COUNTDATASP,
# LNB_MPR_FILE_TYPE(),
# 'COUNT_DATA_SP',
# 'sp',
# FALSE);
# export_list_section_as_csv(COUNTDATASP2,
# LNB_MPR_FILE_TYPE(),
# 'COUNT_DATA_SP2',
# 'sp',
# FALSE);
# export_list_section_as_csv(TRACEDATASP,
# LNB_MPR_FILE_TYPE(),
# 'TRACE_DATA_SP',
# 'sp',
# FALSE);
# export_list_section_as_csv(TRACEDATASP_2,
# LNB_MPR_FILE_TYPE(),
# 'TRACE_DATA_SP_2',
# 'sp',
# FALSE);
# export_list_section_as_csv(ISPINFODATA,
# LNB_MPR_FILE_TYPE(),
# 'ISP_INFO_DATA',
# 'sp',
# FALSE);
# export_list_section_as_csv(MASKISPINFODATA,
# LNB_MPR_FILE_TYPE(),
# 'MASK_ISP_INFO_DATA',
# 'sp',
# FALSE);
# }
# #
# sub prepare_mpr_file
# {
# my ($pdb, $pmpr) = @_;
# #
# prepare_list_section($pdb,
# $pmpr,
# LNB_MPR_FILE_TYPE,
# TIMEDATASP,
# FALSE);
# prepare_list_section($pdb,
# $pmpr,
# LNB_MPR_FILE_TYPE,
# COUNTDATASP,
# FALSE);
# prepare_list_section($pdb,
# $pmpr,
# LNB_MPR_FILE_TYPE,
# COUNTDATASP2,
# FALSE);
# prepare_list_section($pdb,
# $pmpr,
# LNB_MPR_FILE_TYPE,
# TRACEDATASP,
# FALSE);
# prepare_list_section($pdb,
# $pmpr,
# LNB_MPR_FILE_TYPE,
# TRACEDATASP_2,
# FALSE);
# prepare_list_section($pdb,
# $pmpr,
# LNB_MPR_FILE_TYPE,
# ISPINFODATA,
# FALSE);
# prepare_list_section($pdb,
# $pmpr,
# LNB_MPR_FILE_TYPE,
# MASKISPINFODATA,
# FALSE);
# #
# return;
# }
# #
# sub load_mpr_sections
# {
# my ($pmpr) = @_;
# #
# load_name_value($pmpr, INDEX());
# load_name_value($pmpr, INFORMATION());
# #
# load_list($pmpr, TIMEDATASP());
# load_list($pmpr, COUNTDATASP());
# load_list($pmpr, COUNTDATASP2());
# load_list($pmpr, TRACEDATASP());
# load_list($pmpr, TRACEDATASP_2());
# load_list($pmpr, ISPINFODATA());
# load_list($pmpr, MASKISPINFODATA());
# }
# #
# sub prepare_mpr_files
# {
# my ($pmprs, $pdb) = @_;
# #
# printf $log_fh "\n%d: Audit MPR files:\n", __LINE__;
# #
# foreach my $pmpr (@{$pmprs})
# {
# printf $log_fh "\n%d: Audit mpr: %s\n", __LINE__, $pmpr->{file_name}
# if ($verbose >= MIDVERBOSE);
# #
# next unless (load($pmpr) != 0);
# #
# load_mpr_sections($pmpr);
# #
# prepare_mpr_file($pdb, $pmpr);
# }
# #
# return;
# }
# #
# sub process_mpr_files
# {
# my ($pmprs) = @_;
# #
# # any files to process?
# #
# if (scalar(@{$pmprs}) <= 0)
# {
# printf $log_fh "\n%d: No MPR files to process. Returning.\n\n", __LINE__;
# return;
# }
# #
# my %csv_db = ();
# prepare_mpr_files($pmprs, \%csv_db);
# export_mpr_data_as_csv(\%csv_db);
# #
# return;
# }
# #
# ########################################################################
# ########################################################################
# #
# # start main execution.
# #
# $mu->record("Start of script ...");
# #
# my %opts;
# if (getopts('?MHhwWv:t:l:o:d:', \%opts) != 1)
# {
# short_usage($cmd);
# exit 2;
# }
# #
# foreach my $opt (%opts)
# {
# if (($opt eq "h") or ($opt eq "?"))
# {
# short_usage($cmd);
# exit 0;
# }
# elsif ($opt eq "H")
# {
# long_usage($cmd);
# exit 0;
# }
# elsif ($opt eq "M")
# {
# $remove_mount = TRUE;
# }
# elsif ($opt eq "w")
# {
# $verbose = MINVERBOSE;
# }
# elsif ($opt eq "W")
# {
# $verbose = MIDVERBOSE;
# }
# elsif ($opt eq "v")
# {
# if ($opts{$opt} =~ m/^[0123]$/)
# {
# $verbose = $opts{$opt};
# }
# elsif (exists($verbose_levels{$opts{$opt}}))
# {
# $verbose = $verbose_levels{$opts{$opt}};
# }
# else
# {
# printf $log_fh "\n%d: Invalid verbose level: $opts{$opt}\n", __LINE__;
# short_usage($cmd);
# exit 2;
# }
# }
# elsif ($opt eq "t")
# {
# $file_type = $opts{$opt};
# $file_type =~ tr/[A-Z]/[a-z]/;
# if ($file_type !~ m/^(u01|u03|mpr)$/i)
# {
# printf $log_fh "\n%d: Invalid file type: $opts{$opt}\n", __LINE__;
# short_usage($cmd);
# exit 2;
# }
# }
# elsif ($opt eq "l")
# {
# local *FH;
# $logfile = $opts{$opt};
# open(FH, '>', $logfile) or die $!;
# $log_fh = *FH;
# printf $log_fh "\n%d: Log File: %s\n", __LINE__, $logfile;
# }
# elsif ($opt eq "o")
# {
# my $option = $opts{$opt};
# $option =~ tr/[a-z]/[A-Z]/;
# if (exists($allowed_proc_options{$option}))
# {
# $proc_options |= $allowed_proc_options{$option};
# }
# else
# {
# printf $log_fh "\n%d: Invalid option type: $opts{$opt}\n", __LINE__;
# short_usage($cmd);
# exit 2;
# }
# }
# elsif ($opt eq "d")
# {
# $export_dir = $opts{$opt};
# mkpath($export_dir) unless ( -d $export_dir );
# printf $log_fh "\n%d: Export directory: %s\n", __LINE__, $export_dir;
# }
# }
# #
# if (scalar(@ARGV) == 0)
# {
# printf $log_fh "%d: No directories given.\n", __LINE__;
# short_usage($cmd);
# exit 2;
# }
# $mu->record("After getopt ...");
# #
# printf $log_fh "\n%d: Scan directories for U01, U03 and MPR files: \n\n", __LINE__;
# #
# remove_mount_fields() if ($remove_mount == TRUE);
# #
# my @u01_files = ();
# my @u03_files = ();
# my @mpr_files = ();
# #
# get_all_files($file_type,
# \@ARGV,
# \@u01_files,
# \@u03_files,
# \@mpr_files);
# $mu->record("After get_all_files() ...");
# #
# printf $log_fh "%d: Number of U01 files: %d\n", __LINE__, scalar(@u01_files);
# printf $log_fh "%d: Number of U03 files: %d\n", __LINE__, scalar(@u03_files);
# printf $log_fh "%d: Number of MPR files: %d\n\n", __LINE__, scalar(@mpr_files);
# #
# process_u01_files(\@u01_files);
# $mu->record("After process u01 files ...");
# process_u03_files(\@u03_files);
# $mu->record("After process u03 files ...");
# process_mpr_files(\@mpr_files);
# $mu->record("After process mpr files ...");
# #
# $mu->dump();
# #
# printf $log_fh "\n%d: All Done\n", __LINE__;
# #
#
# exit 0;
| ombt/analytics | sql/1507161440-prod2csv/prod2csv.pl | Perl | mit | 208,414 |
#!/usr/bin/perl
# CGI endpoint for the iPac WWAR account factory: creates Storm8 "World
# War" (wwar) accounts with random UDIDs, grinds missions/fights on them,
# buys starter equipment and records each account in MySQL (see the main
# loop below).  Responds 403 Forbidden unless a "name" parameter is given.
my $username;   # logged-in site user, filled in from cookie_login.pl
my $accname;    # NOTE(review): declared but never assigned or used here
my $udid;       # random device id generated for each created account
my $ip;         # caller-supplied ip (stored in the DB as-is)
my $game;       # game code, hard-coded to "wwar" below
my $pcode;      # caller-supplied promo code (stored in the DB as-is)
my $name;       # requested in-game name (required CGI parameter)
use v5.10;
use Digest::MD5;
use CGI;
use File::Temp qw(tempfile);
use HTTP::Cookies;
use LWP::UserAgent;
use MIME::Base64;
# Parse CGI parameters at compile time so the HTTP header is emitted
# before anything else in the file runs.  $cgi/$flag/$tom/$gameprefix are
# package globals (the file does not enable strict at this point).
BEGIN{
$cgi = CGI->new;
$flag = $cgi->param("flag");
$ip = $cgi->param("ip");
$pcode = $cgi->param("pcode");
$name = $cgi->param("name");
$tom = defined $cgi->param("tom") ? $cgi->param("tom") : "2";
$gameprefix = defined $cgi->param("game") ? $cgi->param("game") : "wwar";
# "name" is mandatory; refuse the request outright without it.
if(!defined $name){
print $cgi->header(-type=>'text/html', -status=>'403 Forbidden');
exit;
} else {
print $cgi->header(-type=>'text/html', -charset => 'UTF-8');
}
# Fold warnings/errors into the page body so they are visible in-browser.
open(STDERR, '>&', \*STDOUT);
}
require "../cookie_login.pl";
$username = $real_username;
########UPDATES########
#first creation
##added funky loops to make a account do mission lots #
### added salt capabilities;get salt from udid
####random udid generator
#####Udid logger to file
###### curl buy equipment
#######stupid comments everywhere
########curl attack... (Not working)
#########Gawd damn peter this is a really 'smart' rest. LOL I kinda just realized it...
#########so nasty.... looping nothing 600,000 times xD damn thats pretty cool idea lol
print "<title>iPac-WWAR 6</title>";
###no reason to require $name Param at first
###just need this page to default at begin block
###done beginner
$game = "wwar";
###start ugh the fugh
###Our very ugly nasty script to
###### create a new account
###### Automission that account up to level 6
###### writes udid to udid.txt for safe keeping
# Includes
# mission script
# attack gen skinner script
# buy equipment scrip
# LOTS of ugly ugly looping involved
# By Ilan K. & Peter M.
# 2013 All Rights kinda reserved lol.
###Begin open
###tom defines amount of times to run the entire script.
## eg. $Tom = 6
## then 6 accounts will be made and run through this script
####MASTA LOOPA OP OVERLORD
if ($name =~ /^(shortland|Shortland|short|shortl4nd|Short|ipac|IPAC|iPac|Ipac)$/) {
print "<br><br><br><br><br><br><h3>Sorry, Those Usernames are not allowed :P</h3>";
exit 0;
}
$tim = "2";
for my $i (1 .. $tim){
{
@set = ('0' ..'9', 'A' .. 'Z');
$udid = join '' => map $set[rand @set], 1 .. 16;
### $udid made from rand string gen
### new udid for each run through
$md5ww = Digest::MD5->new;
$md5ww->add($udid);
$md5ww->add(':' , 'pEarlhAbor156w' );
$pf = $md5ww->hexdigest;
$pf = uc($pf);
#
my $out;
my $CURL = "curl -s -b cookies/cookie_$udid -c cookies/cookie_$udid -L";
my $baseurl = "http://wwar.storm8.com";
my $url = "/missions.php?cat=1";
my $urlz = "/fight.php";
my $urll = "/choose_name.php?ref=%2Ffight.php&strings=story&hideTabBar=true";
#print "$CURL 'http://wwar.storm8.com/apoints.php?version=a1.56&udid=$udid&pf=$pf&model=Droid&sv=9A405f'";
{
`$CURL 'http://wwar.storm8.com/apoints.php?version=a1.56&udid=$udid&pf=$pf&model=Droid&sv=9A405f'`;
$out = `$CURL "${baseurl}${url}"`;
for my $j (0 .. 600000){}
}
{
for my $j (0 .. 600000){}
`$CURL 'http://wwar.storm8.com/choose_class.php?class=1&action=Choose+This+Country'`;
`$CURL 'http://wwar.storm8.com/choose_class.php?class=1&action=Choose+This+Country'`;
for my $j (0 .. 600000){}
###Why is this doubled? it seems that there is a chance the script will 'skip' this part
####Most likely because its speeding too fast from login to this page then missions....
#### 99% sure for my $j (0 .. 600000){} is a small sleep function I THINK
$out = `$CURL "${baseurl}${url}"`;
####For Loop...
}
{
for my $i (1 .. 21){
for my $i (1 .. 3){
for my $j (0 .. 600000){}
##DATA FOR MISSIONS##Need top curl-out##
my ($fsb, $arg) = ($out =~/\/missions\.php\?jid=1&cat=1&.*onclick=.*(fsb\d+)\('([^']+)'\);/);
## print "FSB: $fsb, Arg: $arg\n";
#print "Loading... Done message will appear when completed.";
my ($js) = ($out =~ /function $fsb.*?{(.*?)}/ms);
# print "<!----JS Function: $js ---->";
my @vals = split ',', $1 if $js =~ /b=new Array\(([\d,]+)\)/;
my @use = split ',', $1 if $js =~ /p=new Array\(([\d,]+)\)/;
die "This site needs to be updated to handle new countermeasures
($#vals, $#use)\n"
if #$vals < 1 || #$use < 1;
my $action = "";
for my $i (0 .. $#use)
{
if ($use[$i]){
$action = chr($vals[$i]) . $action;
} else {
$action .= chr($vals[$i]);
}
}
$action .= $arg;
# print "<!----Action: $action----->";
# print "<BR>\n";
# print "<!----Request: $CURL \"${baseurl}/${action}\"\n--->";
# print "<BR>\n";
$out = `$CURL "${baseurl}/${action}"`;
# print "<!--\n";
$out =~ s/<!--//mg;
$out =~ s/-->//mg;
# print $out;
# print "-->\n";
# print "this is begin $action this is end";
}
}
`$CURL --data "ref=/fight.php&name=$name&action=Sign Your Name" "${baseurl}${urll}"`;
for my $j (0 .. 600000){}
###why add this? Just incase user define $name does not meet s8 standards.
`$CURL --data "ref=/fight.php&name=Bob&action=Sign Your Name" "${baseurl}${urll}"`;
}
for my $i (1 .. 21){
for my $i (1 .. 3){
for my $j (0 .. 600000){}
##DATA FOR MISSIONS##Need top curl-out##
my ($fsb, $arg) = ($out =~/\/missions\.php\?jid=1&cat=1&.*onclick=.*(fsb\d+)\('([^']+)'\);/);
## print "FSB: $fsb, Arg: $arg\n";
# print "<BR>";
my ($js) = ($out =~ /function $fsb.*?{(.*?)}/ms);
# print "<!----JS Function: $js ---->";
my @vals = split ',', $1 if $js =~ /b=new Array\(([\d,]+)\)/;
my @use = split ',', $1 if $js =~ /p=new Array\(([\d,]+)\)/;
die "This site needs to be updated to handle new countermeasures
($#vals, $#use)\n"
if #$vals < 1 || #$use < 1;
my $action = "";
for my $i (0 .. $#use)
{
if ($use[$i]){
$action = chr($vals[$i]) . $action;
} else {
$action .= chr($vals[$i]);
}
}
$action .= $arg;
# print "<!----Action: $action----->";
# print "<BR>\n";
#print "<!----Request: $CURL \"${baseurl}/${action}\"\n--->";
# print "<BR>\n";
$out = `$CURL "${baseurl}/${action}"`;
#print "<!--\n";
$out =~ s/<!--//mg;
$out =~ s/-->//mg;
# print $out;
# print "-->\n";
}
##derp....
##print "--->";
}
##fun within here happens once per tom aka once per numb of acc
#print "--->";
`$CURL "http://wwar.storm8.com/fight.php"`;
#####Fight skinner
#############################################
#############################################
#############################################
#############################################
#############################################
#############################################
#############################################
#############################################
####|^^^^^^^^^^^\||____ ###############
####| The STFU Truck |||""'|""\__,_ ###############
####| _____________ l||__|__|__|) ################
####...|(@)@)"""""""**|(@)(@)**|(@) ################
#############################################
#############################################
#############################################
######## This fight.php took overall#########
########### More then 8 hours################
### I forgot a \ before the . in fight.php###
#############################################
########I SPECIAL LOL...gawd damn ###########
###########The joy after its done############
#############################################
{
$outz = `$CURL "${baseurl}${urlz}"`;
for my $i (1 .. 4){
for my $j (0 .. 600000){}
##DATA FOR MISSIONS##Need top curl-out##
my ($fsb, $arg) = ($outz =~/\/fight\.php\?action=fight&rivalId=0&pos=1&.*onclick=.*(fsb0)\('([^']+)'\);/);
## print "FSB: $fsb, Arg: $arg\n";
# print "<BR>";
my ($js) = ($outz =~ /function $fsb.*?{(.*?)}/ms);
# print "<!----JS Function: $js ---->";
my @vals = split ',', $1 if $js =~ /b=new Array\(([\d,]+)\)/;
my @use = split ',', $1 if $js =~ /p=new Array\(([\d,]+)\)/;
die "This site needs to be updated to handle new countermeasures
($#vals, $#use)\n"
if #$vals < 1 || #$use < 1;
my $action = "";
for my $i (0 .. $#use)
{
if ($use[$i]){
$action = chr($vals[$i]) . $action;
} else {
$action .= chr($vals[$i]);
}
}
$action .= $arg;
$outz = `$CURL "${baseurl}/${action}"`;
#print "<!--\n";
$outz =~ s/<!--//mg;
$outz =~ s/-->//mg;
# print $out;
# print "-->\n";
###print "$CURL";
# print "-==- ACTION: $action -===-";
}
}
for my $j (0 .. 600000){}
`$CURL "http://wwar.storm8.com/equipment.php"`;
# Infantry
`$CURL "http://wwar.storm8.com/equipment.php?action=buy&iid=1&cat=1"`;
`$CURL "http://wwar.storm8.com/equipment.php?action=buy&iid=1&cat=1"`;
`$CURL "http://wwar.storm8.com/equipment.php?action=buy&iid=1&cat=1"`;
`$CURL "http://wwar.storm8.com/equipment.php?action=buy&iid=1&cat=1"`;
`$CURL "http://wwar.storm8.com/equipment.php?action=buy&iid=1&cat=1"`;
`$CURL "http://wwar.storm8.com/equipment.php?action=buy&iid=1&cat=1"`;
`$CURL "http://wwar.storm8.com/equipment.php?action=buy&iid=1&cat=1"`;
`$CURL "http://wwar.storm8.com/equipment.php?action=buy&iid=1&cat=1"`;
#supply truck 3
`$CURL "http://wwar.storm8.com/equipment.php?action=buy&iid=51&cat=2"`;
`$CURL "http://wwar.storm8.com/equipment.php?action=buy&iid=51&cat=2"`;
`$CURL "http://wwar.storm8.com/equipment.php?action=buy&iid=51&cat=2"`;
`$CURL "http://wwar.storm8.com/equipment.php?action=buy&iid=102&cat=3"`;
`$CURL "http://wwar.storm8.com/equipment.php?action=buy&iid=102&cat=3"`;
`$CURL "http://wwar.storm8.com/equipment.php?action=buy&iid=152&cat=4"`;
for my $j (0 .. 600000){}
`$CURL "http://wwar.storm8.com/missions.php"`;
for my $i (1 .. 10){
for my $i (1 .. 2){
for my $j (0 .. 600000){}
my ($fsb, $arg) = ($out =~/\/missions\.php\?jid=3&cat=1&.*onclick=.*(fsb\d+)\('([^']+)'\);/);
my ($js) = ($out =~ /function $fsb.*?{(.*?)}/ms);
# print "<!----JS Function: $js ---->";
my @vals = split ',', $1 if $js =~ /b=new Array\(([\d,]+)\)/;
my @use = split ',', $1 if $js =~ /p=new Array\(([\d,]+)\)/;
die "This site needs to be updated to handle new countermeasures
($#vals, $#use)\n"
if #$vals < 1 || #$use < 1;
my $action = "";
for my $i (0 .. $#use)
{
if ($use[$i]){
$action = chr($vals[$i]) . $action;
} else {
$action .= chr($vals[$i]);
}
}
$action .= $arg;
$out = `$CURL "${baseurl}/${action}"`;
$out =~ s/<!--//mg;
$out =~ s/-->//mg;
}
}
}
use DBI;
use strict;
# Persist the captured session details.
# SECURITY FIX: values are passed as bind parameters instead of being
# interpolated into the SQL string (SQL injection via udid/name/etc.).
# Also dropped the redundant duplicate "use DBI();" line.
my $dbh = DBI->connect("DBI:mysql:database=shortlando;host=localhost",
                       "shortland", "password!",
                       {'RaiseError' => 1});
$dbh->do(
    "INSERT INTO storage (username, name, udid, ip, game, pcode) VALUES (?, ?, ?, ?, ?, ?)",
    undef,
    $username, $name, $udid, $ip, $game, $pcode
);
$dbh->disconnect();
# Render the UDID entry form.
# BUGFIX: the <input> tag was missing spaces between its attributes
# (value=\"...\"name=\"...\"placeholder=...) and the closing </form> tag was
# truncated ("</form").
print "<FORM>";
print "<BR><BR><BR><BR><BR><BR><BR><BR><BR><BR><H3>";
print "<style>
input[type=\'text\']{
color: #333;
width: 80%;
height: 20px;
left: 50%;
top: 50%;
padding-left: 1px;
padding-right: 1px;
transition: box-shadow 320ms;
box-shadow: 0px 0px 8px 10px rgba(0,0,0,0.1);
border-radius: 2px;
font-size: 15px;
border: 0px;
}
input[type=\'text\']:focus {
outline: 0px;
outline-offset: 0px;
box-shadow: 0px 0px 1px 5px rgba(0,0,0,0.12);
}
input:-moz-placeholder {
color: #000000;
}
</style>";
print "<link rel=\"stylesheet\" type=\"text/css\" media=\"screen\" href=\"images/css.css\" />";
print "<br>";
print "<h3>UDID:</h3> <input type=\'text\' id=\'resizer\' value=\"$udid\" name=\"name\" placeholder='udid'>";
print "<BR><BR><BR><BR><BR><BR><BR><BR><BR><BR></H3></form>";
}
# Emit mobile-Safari web-app meta tags, the shared stylesheet link and the
# viewport settings for the page.
print "<meta name=\"apple-mobile-web-app-capable\" content=\"yes\">
<meta name=\"apple-mobile-web-app-status-bar-style\" content=\"black\">
<link rel=\"stylesheet\" type=\"text/css\" media=\"screen\" href=\"css.css\" />
<meta name=\"viewport\" content=\"width=device-width\, initial-scale=\.9\, user-scalable=no\"/>";
| shortland/TheBigGlitch | iPacMG [released][server]/6/wwar.pl | Perl | mit | 12,149 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
package EnsEMBL::Web::Component::DataExport::Alignments;
use strict;
use warnings;
use EnsEMBL::Web::Constants;
use EnsEMBL::Web::Component::Compara_AlignSliceSelector;
use base qw(EnsEMBL::Web::Component::DataExport);
sub _init {
  ## Disable caching, AJAX loading and user configuration for this component.
  my $self = shift;
  $self->$_(0) for qw(cacheable ajaxable configurable);
}
sub content {
  ### Build and render the alignment-export settings form.
  ### N.B. There currently are no additional options for alignment export,
  ### so every format gets an empty extra-field list.
  my $self = shift;
  my $hub = $self->hub;

  ## Get user's current settings from the view config
  my $view_config = $self->view_config;
  my $settings = $view_config->form_fields({'no_snp_link' => 1});
  ## Carry the selected alignment through as a hidden field
  $settings->{'Hidden'} = ['align'];

  ## Options per format (none at present - see note above)
  my $fields_by_format = {};

  ## Add formats output by BioPerl
  foreach ($self->alignment_formats) {
    $fields_by_format->{$_} = [];
  }

  ## Create settings form (comes with some default fields - see parent)
  my $form = $self->create_form($settings, $fields_by_format, 1);
  return $form->render;
}
## default_file_name
## Build a sensible default file name for the alignment export, derived from
## the selected alignment ('align' parameter or saved user setting).
## Returns undef when the alignment cannot be resolved.
## CLEANUP: removed the unused "my $align_name;" declaration.
sub default_file_name {
  my $self         = shift;
  my $hub          = $self->hub;
  my $species_defs = $hub->species_defs;
  my $name;

  my $db_hash = $species_defs->multi_hash;
  ## Optional first argument selects the compara database (default 'compara')
  my $cdb = shift || $hub->param('cdb') || 'compara';
  my $alignments = $db_hash->{'DATABASE_COMPARA' . ($cdb =~ /pan_ensembl/ ? '_PAN_ENSEMBL' : '')}{'ALIGNMENTS'} || {}; # Get the compara database hash
  my $user_settings = $self->{'viewconfig'}{$hub->param('data_type')}->{_user_settings};
  my $align_param = $hub->param('align') || $user_settings->{'align'};

  if ($align_param =~ /--/) {
    ## Composite id of the form "<id>--...--...": turn it into "alignment_..."
    ($name = $align_param) =~ s/^(\d+)--/alignment_/;
    $name =~ s/--/_/g;
  }
  else {
    my $align = $alignments->{$align_param};
    if ($align) {
      if ($align->{'class'} =~ /pairwise/) {
        ## Pairwise alignment: <this species>_<other species>_<method>
        $name = $species_defs->SPECIES_DISPLAY_NAME;
        my ($other_species) = grep { $_ ne $self->hub->species } keys %{$align->{'species'}};
        $name .= '_'.$species_defs->get_config($other_species, 'SPECIES_DISPLAY_NAME');
        my $type = lc($align->{'type'});
        $type =~ s/_net//;
        $name .= '_'.$type;
      }
      else {
        ## Multiple alignment: use its configured name
        $name = $align->{'name'};
      }
      $name =~ s/ /_/g;
    }
  }
  return $name;
}
sub alignment_formats {
  ### Returns the list of alignment export formats offered to the user.
  ### Configure this list to match what's available in the installed
  ### version of BioPerl.
  my $self = shift;
  my @formats = ('CLUSTALW', 'FASTA', 'Mega', 'MSF', 'Nexus', 'Pfam', 'Phylip', 'PSI', 'Stockholm');
  return @formats;
}
1;
| Ensembl/ensembl-webcode | modules/EnsEMBL/Web/Component/DataExport/Alignments.pm | Perl | apache-2.0 | 3,155 |
#
# Copyright 2016 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package apps::hddtemp::local::plugin;
use strict;
use warnings;
use base qw(centreon::plugins::script_simple);
sub new {
    # Constructor: delegate to the base plugin class, then register the
    # single mode this plugin supports.
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '0.1';
    # Populate the mode table in place (the hash may already exist on $self).
    %{ $self->{modes} } = (
        temperature => 'apps::hddtemp::local::mode::temperature',
    );

    return $self;
}
1;
__END__
=head1 PLUGIN DESCRIPTION
Check hard drive temperatures with the 'hddtemp' tool through local commands (the plugin can use SSH).
=cut
| bcournaud/centreon-plugins | apps/hddtemp/local/plugin.pm | Perl | apache-2.0 | 1,318 |
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package storage::qnap::snmp::mode::components::raid;
use strict;
use warnings;
# In MIB 'NAS.mib'
my $oid_raidStatus = '.1.3.6.1.4.1.24681.1.4.1.1.1.2.1.2.1.5';
sub load {
    # Queue the raidStatus SNMP table so the caller fetches it in one request.
    my $self = shift;
    push @{ $self->{request} }, { oid => $oid_raidStatus };
}
# check
# Walk every instance of the raidStatus SNMP table, log each raid's status
# and raise the configured severity when a raid is not 'ok'.
sub check {
    my ($self) = @_;

    $self->{output}->output_add(long_msg => "Checking raids");
    $self->{components}->{raid} = {name => 'raids', total => 0, skip => 0};
    return if ($self->check_filter(section => 'raid'));

    foreach my $oid ($self->{snmp}->oid_lex_sort(keys %{$self->{results}->{$oid_raidStatus}})) {
        # ROBUSTNESS: skip any OID that does not end in a numeric instance
        # index instead of reusing a stale $1 from a previous match.
        next unless $oid =~ /\.(\d+)$/;
        my $instance = $1;

        next if ($self->check_filter(section => 'raid', instance => $instance));
        my $status = $self->{results}->{$oid_raidStatus}->{$oid};

        $self->{components}->{raid}->{total}++;
        $self->{output}->output_add(long_msg => sprintf("raid '%s' status is %s [instance: %s]",
                                    $instance, $status, $instance));
        # Map the raw status string to a Centreon severity.
        my $exit = $self->get_severity(section => 'raid', value => $status);
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity => $exit,
                                        short_msg => sprintf("Raid '%s' status is %s.", $instance, $status));
        }
    }
}
1;
| Sims24/centreon-plugins | storage/qnap/snmp/mode/components/raid.pm | Perl | apache-2.0 | 2,138 |
#!/usr/bin/perl -w
#
# Copyright 2017, Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This example illustrates how to retrieve all languages and carriers available
# for targeting.
use strict;
use lib "../../../lib";
use utf8;
use Google::Ads::AdWords::Client;
use Google::Ads::AdWords::Logging;
use Cwd qw(abs_path);
# Example main subroutine.
# Example main subroutine.
# Fetches every targetable language and carrier constant from the
# ConstantDataService and prints one line per entry. Returns 1 on success.
sub get_targetable_languages_and_carriers {
  my $client = shift;

  # Get all languages from ConstantDataService.
  my $languages = $client->ConstantDataService()->getLanguageCriterion();
  if ($languages) {
    foreach my $language (@{$languages}) {
      printf "Language name is '%s', ID is %d and code is '%s'.\n",
        $language->get_name(), $language->get_id(),
        $language->get_code();
    }
  }

  # Get all carriers from ConstantDataService.
  my $carriers = $client->ConstantDataService()->getCarrierCriterion();
  if ($carriers) {
    foreach my $carrier (@{$carriers}) {
      printf "Carrier name is '%s', ID is %d and country code is '%s'.\n",
        $carrier->get_name(), $carrier->get_id(),
        $carrier->get_countryCode();
    }
  }
  return 1;
}
# Don't run the example if the file is being included (standard guard so the
# example can be require'd by tests without executing).
if (abs_path($0) ne abs_path(__FILE__)) {
  return 1;
}

# Log SOAP XML request, response and API errors.
Google::Ads::AdWords::Logging::enable_all_logging();

# Get AdWords Client, credentials will be read from ~/adwords.properties.
my $client = Google::Ads::AdWords::Client->new({version => "v201809"});

# By default examples are set to die on any server returned fault.
$client->set_die_on_faults(1);

# Call the example
get_targetable_languages_and_carriers($client);
| googleads/googleads-perl-lib | examples/v201809/targeting/get_targetable_languages_and_carriers.pl | Perl | apache-2.0 | 2,194 |
=pod
=head1 NAME
BIO_s_core, BIO_new_from_core_bio - OSSL_CORE_BIO functions
=head1 SYNOPSIS
#include <openssl/bio.h>
const BIO_METHOD *BIO_s_core(void);
BIO *BIO_new_from_core_bio(OSSL_LIB_CTX *libctx, OSSL_CORE_BIO *corebio);
=head1 DESCRIPTION
BIO_s_core() returns the core BIO method function.
A core BIO is treated as source/sink BIO which communicates to some external
BIO. This is primarily useful to provider authors. A number of calls from
libcrypto into a provider supply an OSSL_CORE_BIO parameter. This represents
a BIO within libcrypto, but cannot be used directly by a provider. Instead it
should be wrapped using a BIO_s_core().
Once a BIO is constructed based on BIO_s_core(), the associated OSSL_CORE_BIO
object should be set on it using BIO_set_data(3). Note that the BIO will only
operate correctly if it is associated with a library context constructed using
OSSL_LIB_CTX_new_from_dispatch(3). To associate the BIO with a library context
construct it using BIO_new_ex(3).
BIO_new_from_core_bio() is a convenience function that constructs a new BIO
based on BIO_s_core() and that is associated with the given library context. It
then also sets the OSSL_CORE_BIO object on the BIO using BIO_set_data(3).
=head1 RETURN VALUES
BIO_s_core() returns a core BIO B<BIO_METHOD> structure.
BIO_new_from_core_bio() returns a BIO structure on success or NULL on failure.
A failure will most commonly be because the library context was not constructed
using OSSL_LIB_CTX_new_from_dispatch(3).
=head1 HISTORY
BIO_s_core() and BIO_new_from_core_bio() were added in OpenSSL 3.0.
=head1 EXAMPLES
Create a core BIO and write some data to it:
int some_function(OSSL_LIB_CTX *libctx, OSSL_CORE_BIO *corebio) {
BIO *cbio = BIO_new_from_core_bio(libctx, corebio);
if (cbio == NULL)
return 0;
BIO_puts(cbio, "Hello World\n");
BIO_free(cbio);
return 1;
}
=head1 COPYRIGHT
Copyright 2021 The OpenSSL Project Authors. All Rights Reserved.
Licensed under the Apache License 2.0 (the "License"). You may not use
this file except in compliance with the License. You can obtain a copy
in the file LICENSE in the source distribution or at
L<https://www.openssl.org/source/license.html>.
=cut
| openssl/openssl | doc/man3/BIO_s_core.pod | Perl | apache-2.0 | 2,247 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V10::Services::FeedMappingService;
use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseService);
sub mutate {
  # Send the feed-mapping mutate request and wrap the JSON response in the
  # service's response class.
  my ($self, $request_body) = @_;

  my $http_method   = 'POST';
  my $request_path  = 'v10/customers/{+customerId}/feedMappings:mutate';
  my $response_type =
    'Google::Ads::GoogleAds::V10::Services::FeedMappingService::MutateFeedMappingsResponse';

  return $self->SUPER::call($http_method, $request_path, $request_body,
    $response_type);
}
1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V10/Services/FeedMappingService.pm | Perl | apache-2.0 | 1,102 |
package Paws::Config::DescribeComplianceByConfigRule;
  use Moose;
  # Optional filter: compliance types (COMPLIANT, NON_COMPLIANT, INSUFFICIENT_DATA).
  has ComplianceTypes => (is => 'ro', isa => 'ArrayRef[Str|Undef]');
  # Optional filter: restrict results to the named AWS Config rules.
  has ConfigRuleNames => (is => 'ro', isa => 'ArrayRef[Str|Undef]');
  # Pagination token returned by a previous call.
  has NextToken => (is => 'ro', isa => 'Str');

  use MooseX::ClassAttribute;

  # Internal Paws plumbing: API call name and response class for dispatch.
  class_has _api_call => (isa => 'Str', is => 'ro', default => 'DescribeComplianceByConfigRule');
  class_has _returns => (isa => 'Str', is => 'ro', default => 'Paws::Config::DescribeComplianceByConfigRuleResponse');
  class_has _result_key => (isa => 'Str', is => 'ro');
1;
### main pod documentation begin ###
=head1 NAME
Paws::Config::DescribeComplianceByConfigRule - Arguments for method DescribeComplianceByConfigRule on Paws::Config
=head1 DESCRIPTION
This class represents the parameters used for calling the method DescribeComplianceByConfigRule on the
AWS Config service. Use the attributes of this class
as arguments to method DescribeComplianceByConfigRule.
You shouldn't make instances of this class. Each attribute should be used as a named argument in the call to DescribeComplianceByConfigRule.
As an example:
$service_obj->DescribeComplianceByConfigRule(Att1 => $value1, Att2 => $value2, ...);
Values for attributes that are native types (Int, String, Float, etc) can be passed as-is (scalar values). Values for complex Types (objects) can be passed as a HashRef. The keys and values of the hashref will be used to instance the underlying object.
=head1 ATTRIBUTES
=head2 ComplianceTypes => ArrayRef[Str|Undef]
Filters the results by compliance.
The allowed values are C<COMPLIANT>, C<NON_COMPLIANT>, and
C<INSUFFICIENT_DATA>.
=head2 ConfigRuleNames => ArrayRef[Str|Undef]
Specify one or more AWS Config rule names to filter the results by
rule.
=head2 NextToken => Str
The C<NextToken> string returned on a previous page that you use to get
the next page of results in a paginated response.
=head1 SEE ALSO
This class forms part of L<Paws>, documenting arguments for method DescribeComplianceByConfigRule in L<Paws::Config>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/Config/DescribeComplianceByConfigRule.pm | Perl | apache-2.0 | 2,230 |
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package hardware::server::sun::mseries::mode::domains;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
# Map of scfDomainErrorStatus values to [ sprintf message, Centreon severity ].
# Value 1 is 'normal'; everything else raises the mapped severity in run().
my %error_status = (
    1 => ["The domain '%s' status is normal", 'OK'],
    2 => ["The domain '%s' status is degraded", 'WARNING'],
    3 => ["The domain '%s' status is faulted", 'CRITICAL'],
    254 => ["The domain '%s' status has changed", 'WARNING'],
    255 => ["The domain '%s' status is unknown", 'UNKNOWN'],
);
sub new {
    # Constructor: delegate to the base mode class and declare the
    # command-line options this mode accepts ('--skip').
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $options{options}->add_options(arguments => {
        'skip' => { name => 'skip' },
    });

    return $self;
}
# check_options - validate the parsed command-line options.
# Delegates entirely to the base mode's init().
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::init(%options);
}
# run
# Fetch scfDomainErrorStatus for every domain via SNMP and map each value
# to a message/severity through %error_status. Domains reporting 255
# (unknown) can be ignored with --skip.
sub run {
    my ($self, %options) = @_;
    $self->{snmp} = $options{snmp};

    my $oid_scfDomainErrorStatus = '.1.3.6.1.4.1.211.1.15.3.1.1.5.2.1.15';
    my $oids_domain_status = $self->{snmp}->get_table(oid => $oid_scfDomainErrorStatus, nothing_quit => 1);

    $self->{output}->output_add(severity => 'OK',
                                short_msg => "All domains are ok.");
    foreach ($self->{snmp}->oid_lex_sort(keys %$oids_domain_status)) {
        # ROBUSTNESS: skip OIDs that do not match instead of reusing a
        # stale $1 from a previous match.
        next unless /^${oid_scfDomainErrorStatus}\.(.*)/;
        my $domain_id = $1;
        $self->{output}->output_add(long_msg => sprintf(${$error_status{$oids_domain_status->{$_}}}[0], $domain_id));
        # Optionally ignore domains reporting 'unknown' (255).
        if ($oids_domain_status->{$_} == 255 && defined($self->{option_results}->{skip})) {
            next;
        }
        # Any value other than 1 (normal) raises the mapped severity.
        if ($oids_domain_status->{$_} != 1) {
            $self->{output}->output_add(severity => ${$error_status{$oids_domain_status->{$_}}}[1],
                                        short_msg => sprintf(${$error_status{$oids_domain_status->{$_}}}[0], $domain_id));
        }
    }

    $self->{output}->display();
    $self->{output}->exit();
}
1;
__END__
=head1 MODE
Check Mseries domains status.
=over 8
=item B<--skip>
Skip 'unknown' domains.
=back
=cut
| Sims24/centreon-plugins | hardware/server/sun/mseries/mode/domains.pm | Perl | apache-2.0 | 2,885 |
# Generated VMOMI data-object class for OvfParseDescriptorResult.
package VMOMI::OvfParseDescriptorResult;
use parent 'VMOMI::DynamicData';

use strict;
use warnings;

our @class_ancestors = (
    'DynamicData',
);

# Each row appears to be [ property name, VMOMI type (undef = primitive),
# flag, flag ] - NOTE(review): confirm exact column semantics against the
# VMOMI serializer before relying on them.
our @class_members = (
    ['eula', undef, 1, 1],
    ['network', 'OvfNetworkInfo', 1, 1],
    ['ipAllocationScheme', undef, 1, 1],
    ['ipProtocols', undef, 1, 1],
    ['property', 'VAppPropertyInfo', 1, 1],
    ['productInfo', 'VAppProductInfo', 0, 1],
    ['annotation', undef, 0, ],
    ['approximateDownloadSize', undef, 0, 1],
    ['approximateFlatDeploymentSize', undef, 0, 1],
    ['approximateSparseDeploymentSize', undef, 0, 1],
    ['defaultEntityName', undef, 0, ],
    ['virtualApp', 'boolean', 0, ],
    ['deploymentOption', 'OvfDeploymentOption', 1, 1],
    ['defaultDeploymentOption', undef, 0, ],
    ['entityName', 'KeyValue', 1, 1],
    ['annotatedOst', 'OvfConsumerOstNode', 0, 1],
    ['error', 'LocalizedMethodFault', 1, 1],
    ['warning', 'LocalizedMethodFault', 1, 1],
);

# Returns the ancestor class names for this data object.
sub get_class_ancestors {
    return @class_ancestors;
}

# Returns the parent's members followed by this class's own members.
sub get_class_members {
    my $class = shift;
    my @super_members = $class->SUPER::get_class_members();
    return (@super_members, @class_members);
}

1;
| stumpr/p5-vmomi | lib/VMOMI/OvfParseDescriptorResult.pm | Perl | apache-2.0 | 1,167 |
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright (c) 2016 by Delphix. All rights reserved.
#
# Program Name : dx_top.pl
# Description : Get analytics information from Delphix Engine
# Author : Marcin Przepiorowski
# Created : 20 Apr 2016 (v2.2.0)
#
#
use strict;
use warnings;
use JSON;
use Getopt::Long qw(:config no_ignore_case no_auto_abbrev); #avoids conflicts with ex host and help
use File::Basename;
use Pod::Usage;
use FindBin;
use Data::Dumper;
use Date::Manip;
my $abspath = $FindBin::Bin;
use lib '../lib';
use Engine;
use Analytics;
use Formater;
use Toolkit_helpers;
use URI::Escape;
my $version = $Toolkit_helpers::version;

# default sampling interval is 3600 seconds (hourly); overridable with -i
my $resolution = '3600';
# default statistic is throughput ('t'); 'l' selects latency
my $stat = 't';

GetOptions(
  'help|?' => \(my $help),
  'd|engine=s' => \(my $dx_host),
  'debug:i' => \(my $debug),
  'st=s' => \(my $st),
  'et=s' => \(my $et),
  'loop=i' => \(my $loop),
  'stat=s' => \($stat),
  'dever=s' => \(my $dever),
  'interval|i=s' => \($resolution),
  'version' => \(my $print_version),
  'nohead' => \(my $nohead),
  'configfile|c=s' => \(my $config_file)
) or pod2usage(-verbose => 1, -input=>\*DATA);

pod2usage(-verbose => 2, -input=>\*DATA) && exit if $help;
die "$version\n" if $print_version;

my $engine_obj = new Engine ($dever, $debug);
$engine_obj->load_config($config_file);

# accepted -i values mapped to the Delphix resolution letter
# (H = hour, M = minute, S = second)
my %allowedres = (
  '1' => 'S',
  '60' => 'M',
  '3600' => 'H',
  'H' => 'H',
  'M' => 'M',
  'S' => 'S'
);

# this array will have all engines to go through (if -d is specified it will be only one engine)
my $engine_list = Toolkit_helpers::get_engine_list(undef, $dx_host, $engine_obj);

if (scalar(@{$engine_list}) > 1) {
  print "More than one engine is default. Use -d parameter\n";
  exit(3);
}

if (!defined( $allowedres{$resolution} )) {
  print "Wrong interval \n";
  pod2usage(-verbose => 1, -input=>\*DATA);
  exit (3);
}
# Either both -st and -et, or -loop, must be supplied.
if ( ! ( (defined($et) && defined($st) || defined($loop) ) ) ) {
  # BUGFIX: message used to read "Paramerers"
  print "Parameters st and et or loop are required \n";
  pod2usage(-verbose => 1, -input=>\*DATA);
  exit (3);
}

# End of script parameters checks
# Main work loop: connect to each engine (at most one is allowed - see the
# check above) and print CPU / disk / NFS statistics either for a fixed
# -st/-et window or in a near-real-time loop (-loop).
for my $engine ( sort (@{$engine_list}) ) {

  if ($engine_obj->dlpx_connect($engine)) {
    # BUGFIX: message used to read "Dephix"
    print "Can't connect to Delphix Engine $engine\n\n";
    exit(3);
  }

  my $st_timestamp;

  if (! defined($st)) {
    # take engine time minus 5 min
    $st = "-5min";
  }

  if (! defined($st_timestamp = Toolkit_helpers::timestamp($st,$engine_obj))) {
    print "Wrong start time (st) format $st\n";
    pod2usage(-verbose => 1, -input=>\*DATA);
    exit (3);
  }

  my $et_timestamp;
  my $endtime;

  if (defined($et) && (! defined($et_timestamp = Toolkit_helpers::timestamp($et,$engine_obj)))) {
    print "Wrong end time (et) format $et\n";
    pod2usage(-verbose => 1, -input=>\*DATA);
    exit (3);
  }

  my $detz = $engine_obj->getTimezone();
  my $tz = new Date::Manip::TZ;

  # Batch mode defaults: a single pass through the while loop below.
  my $count = 99;
  my $max = 100;

  if (defined($loop)) {
    # Real-time mode: seed a 5-second window ending at the engine's current
    # local time and run $loop iterations at 1-second resolution.
    my $time;
    my $operation = "resources/json/service/configure/currentSystemTime";
    my ($result,$result_fmt, $retcode) = $engine_obj->getJSONResult($operation);
    my $starttime;
    if ($result->{result} eq "ok") {
      $time = $result->{systemTime}->{localTime};
      # strip the trailing timezone abbreviation so Date::Manip can parse it
      $time =~ s/\s[A-Z]{1,3}$//;
      $endtime = ParseDate($time);
      $starttime = DateCalc(ParseDate($time), ParseDateDelta('- 5 second'));
    } else {
      $time = 'N/A';
    }
    $count = 0;
    $max = $loop;
    $resolution = 1;

    # Convert the window boundaries from the engine timezone to GMT and
    # URL-escape them for the analytics REST call.
    my ($err,$date,$offset,$isdst,$abbrev) = $tz->convert_to_gmt($starttime, $detz);
    my $tstz = sprintf("%04.4d-%02.2d-%02.2dT%02.2d:%02.2d:%02.2d.000Z",$date->[0],$date->[1],$date->[2],$date->[3],$date->[4],$date->[5]);
    $st_timestamp = uri_escape($tstz);

    ($err,$date,$offset,$isdst,$abbrev) = $tz->convert_to_gmt($endtime, $detz);
    $tstz = sprintf("%04.4d-%02.2d-%02.2dT%02.2d:%02.2d:%02.2d.000Z",$date->[0],$date->[1],$date->[2],$date->[3],$date->[4],$date->[5]);
    my $dt = new Date::Manip::Date;
    $dt->config("setdate","zone,GMT");
    $err = $dt->parse($tstz);
    $endtime = $dt->value();
    $et_timestamp = uri_escape($tstz);
  }

  my $analytic_list = new Analytics($engine_obj, $debug);
  my $cpu = $analytic_list->getAnalyticByName('cpu');
  my $disk = $analytic_list->getAnalyticByName('disk');
  my $nfs = $analytic_list->getAnalyticByName('nfs');

  # Header row depends on the selected statistic.
  if ($stat eq 't') {
    printf("%20s %12s %15s %20s %12s %12s \n", "", "CPU", "", "Disk throughput", "", "NFS throughput");
  } else {
    printf("%20s %12s %15s %20s %12s %12s \n", "", "CPU", "", "Disk latency", "", "NFS latency");
  }
  printf("%20s : %5s %5s %5s %5s : %6s %6s %6s %6s : %6s %6s %6s %6s\n", "Timestamp", "avg", "min", "max", "85pct",
         "avg", "min", "max", "85pct","avg", "min", "max", "85pct");

  while ($max > $count) {
    $count++;
    my $arguments = "&resolution=$resolution&numberofDatapoints=1000&startTime=$st_timestamp&endTime=$et_timestamp";

    if (defined($loop)) {
      # Label the row with the window end converted back to engine-local time.
      my ($err,$date,$offset,$isdst,$abbrev) = $tz->convert_from_gmt($endtime, $detz);
      my $tstz = sprintf("%04.4d-%02.2d-%02.2d %02.2d:%02.2d:%02.2d",$date->[0],$date->[1],$date->[2],$date->[3],$date->[4],$date->[5]);
      printData($arguments, $tstz, $cpu, $disk, $nfs, $stat);
      sleep 5;
    } else {
      printData($arguments, $et, $cpu, $disk, $nfs, $stat);
    }

    # Slide the 5-second window forward for the next iteration.
    $endtime = DateCalc($endtime, ParseDateDelta('+ 5 second'));
    $st_timestamp = $et_timestamp;
    $et_timestamp = uri_escape(UnixDate($endtime , "%Y-%m-%dT%H:%M:%S.000Z" ));
  }
}
# printData
# Fetch one sampling window of CPU / disk / NFS analytics and print a single
# formatted output row.
# Arguments:
#   $arguments - URL query string (resolution, datapoints, start/end time)
#   $st        - timestamp label printed in the first column
#   $cpu/$disk/$nfs - Analytics objects for the respective statistics
#   $stat      - 't' selects throughput, anything else latency
# Uses the file-level $resolution set by the main loop.
sub printData {
  my $arguments = shift;
  my $st = shift;
  my $cpu = shift;
  my $disk = shift;
  my $nfs = shift;
  my $stat = shift;

  $cpu->getData($arguments, $resolution);
  $cpu->processData(2);

  $disk->getData($arguments, $resolution);
  $disk->processData(2);

  $nfs->getData($arguments, $resolution);
  $nfs->processData(2);

  # CLEANUP: the original initialized each 4-variable list with only three
  # empty strings; every variable is assigned unconditionally below, so a
  # plain declaration is both correct and less misleading.
  my ($avgcpu, $mincpu, $maxcpu, $per85cpu);
  my ($avgdisk, $mindisk, $maxdisk, $per85disk);
  my ($avgnfs, $minnfs, $maxnfs, $per85nfs);

  if ($stat eq 't') {
    ($avgcpu, $mincpu, $maxcpu, $per85cpu) = $cpu->get_stats('utilization');
    ($avgdisk, $mindisk, $maxdisk, $per85disk) = $disk->get_stats('throughput_t');
    ($avgnfs, $minnfs, $maxnfs, $per85nfs) = $nfs->get_stats('throughput_t');
  } else {
    ($avgcpu, $mincpu, $maxcpu, $per85cpu) = $cpu->get_stats('utilization');
    ($avgdisk, $mindisk, $maxdisk, $per85disk) = $disk->get_stats('latency_t');
    ($avgnfs, $minnfs, $maxnfs, $per85nfs) = $nfs->get_stats('latency_t');
  }

  printf("%20s : %5.2f %5.2f %5.2f %5.2f : %6.2f %6.2f %6.2f %6.2f : %6.2f %6.2f %6.2f %6.2f\n",
         $st, $avgcpu, $mincpu, $maxcpu, $per85cpu,
         $avgdisk, $mindisk, $maxdisk, $per85disk, $avgnfs, $minnfs, $maxnfs, $per85nfs);
}
__DATA__
=head1 SYNOPSIS
dx_top [ -engine|d <delphix identifier> ] [ -configfile file ]
[ -st "YYYY-MM-DD [HH24:MI:SS]" -et "YYYY-MM-DD [HH24:MI:SS]" ]
[-loop no]
[-i 1,60,3600]
=head1 DESCRIPTION
Get analytics information about the engine in a line-oriented (top-like) format.
=head1 ARGUMENTS
=over 4
=item B<-engine|d>
Specify Delphix Engine name from dxtools.conf file
=item B<-configfile file>
Location of the configuration file.
The config file search order is as follows:
- configfile parameter
- DXTOOLKIT_CONF variable
- dxtools.conf from dxtoolkit location
=item B<-stat t|l>
Statistics: t - throughput (default), l - latency
=item B<-st>
Start time
=item B<-et>
End time
=item B<-loop no>
Number of loops for real time monitoring. There is a 5 seconds delay between checks
=item B<-i 1,60,3600>
Sampling resolution in seconds
=item B<-help>
Print this screen
=item B<-debug>
Turn on debugging
=back
=cut
| delphix/dxtoolkit | bin/dx_top.pl | Perl | apache-2.0 | 8,652 |
#!/usr/bin/perl -w
# Rename every FASTA header read from STDIN to a sequential ">s_<n>"
# identifier, leaving sequence lines untouched; output goes to STDOUT.
use strict;

my $seq_count = 0;
while (defined(my $line = <STDIN>)) {
  chomp $line;
  if ($line =~ /^>/) {
    $seq_count++;
    $line = '>s_' . $seq_count;
  }
  print "$line\n";
}
=head1 LICENSE
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
Bio::EnsEMBL::Analysis::RunnableDB::Accumulator -
=head1 SYNOPSIS
my $accumulator = Bio::EnsEMBL::Analysis::RunnableDB::Accumulator->
new(
-input_id => 'ACCUMULATOR',
-db => $db,
-analysis => $analysis,
);
$accumulator->fetch_input;
$accumulator->run;
$accumulator->write_output;
=head1 DESCRIPTION
This is a simple place holder module to allow the accumulator wait for all
stages in the pipeline to work. It does nothing just
=head1 METHODS
=cut
package Bio::EnsEMBL::Analysis::RunnableDB::Accumulator;
use warnings ;
use strict;
use Bio::EnsEMBL::Analysis::RunnableDB;
use Bio::EnsEMBL::Utils::Exception qw(verbose throw warning);
use vars qw(@ISA);
@ISA = qw(Bio::EnsEMBL::Analysis::RunnableDB);
=head2 fetch_input
Title : fetch_input
Usage : $self->fetch_input
Function: Dummy method to comply to the interface
Returns : none
Args : none
=cut
sub fetch_input {
  ## Accumulators have no real input to fetch; just sanity-check the id.
  my ($self) = @_;
  throw("No input id") unless defined $self->input_id;
  return 1;
}
sub run {
  ## Nothing to execute for an accumulator; emit a note and fall through.
  my $self = shift;
  print "Dummy RunnableDB - no runnable to run\n";
}
sub write_output {
  ## No output to write; succeed unconditionally.
  my $self = shift;
  print "Dummy RunnableDB - no output to write\n";
  return 1;
}
| mn1/ensembl-analysis | modules/Bio/EnsEMBL/Analysis/RunnableDB/Accumulator.pm | Perl | apache-2.0 | 2,192 |
package OpenXPKI::Server::API2::Plugin::Workflow::get_workflow_base_info;
use OpenXPKI::Server::API2::EasyPlugin;
=head1 NAME
OpenXPKI::Server::API2::Plugin::Workflow::get_workflow_base_info
=cut
# Project modules
use OpenXPKI::Server::Context qw( CTX );
use OpenXPKI::Server::API2::Types;
use OpenXPKI::Server::API2::Plugin::Workflow::Util;
=head1 COMMANDS
=head2 get_workflow_base_info
Querys workflow config for the given workflow type and returns a
I<HashRef> with informations:
{
workflow => {
type => ...,
id => ...,
state => ...,
label => ...,
description => ...,
},
activity => { ... },
state => {
button => { ... },
option => [ ... ],
output => [ ... ],
},
}
}
B<Parameters>
=over
=item * C<type> I<Str> - workflow type
=back
=cut
# API command: return static information about a workflow type without
# creating a workflow instance (id is always 0, state is INITIAL).
command "get_workflow_base_info" => {
    type => { isa => 'AlphaPunct', },
} => sub {
    my ($self, $params) = @_;

    my $util = OpenXPKI::Server::API2::Plugin::Workflow::Util->new;

    ##! 2: 'start'

    # TODO we might use the OpenXPKI::Workflow::Config object for this
    # Note: Using create_workflow shreds a workflow id and creates an orphaned entry in the history table
    # Reject callers that may not create workflows of this type.
    if (not $util->factory->authorize_workflow({ ACTION => 'create', TYPE => $params->type })) {
        OpenXPKI::Exception->throw(
            message => 'User is not authorized to fetch workflow info',
            params => { type => $params->type }
        );
    }

    my $state = 'INITIAL';
    my $head = CTX('config')->get_hash([ 'workflow', 'def', $params->type, 'head' ]);

    # fetch actions in state INITIAL from the config
    # (only the first action of the INITIAL state is reported)
    my $wf_config = $util->factory->_get_workflow_config($params->type);
    my @actions;
    for my $state (@{$wf_config->{state}}) {
        next unless $state->{name} eq 'INITIAL';
        @actions = ($state->{action}->[0]->{name});
        last;
    }

    return {
        workflow => {
            type        => $params->type,
            id          => 0,
            state       => $state,
            label       => $head->{label},
            description => $head->{description},
        },
        # activity =>
        # state =>
        %{ $util->get_activity_and_state_info($params->type, $head->{prefix}, $state, \@actions, undef) },
    };
};
__PACKAGE__->meta->make_immutable;
| oliwel/openxpki | core/server/OpenXPKI/Server/API2/Plugin/Workflow/get_workflow_base_info.pm | Perl | apache-2.0 | 2,507 |
package Google::Ads::AdWords::v201809::AdwordsUserListService::query;
use strict;
use warnings;

# NOTE: machine-generated SOAP::WSDL element class (see POD below); the
# nested bare blocks only scope the lexicals used by the class factory.
{ # BLOCK to scope variables

# XML namespace of the element this class (de)serializes.
sub get_xmlns { 'https://adwords.google.com/api/adwords/rm/v201809' }

__PACKAGE__->__set_name('query');
__PACKAGE__->__set_nillable();
__PACKAGE__->__set_minOccurs();
__PACKAGE__->__set_maxOccurs();
__PACKAGE__->__set_ref();

use base qw(
SOAP::WSDL::XSD::Typelib::Element
Google::Ads::SOAP::Typelib::ComplexType
);

our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;

# No attribute class is attached to this element.
sub __get_attr_class {
return $XML_ATTRIBUTE_CLASS;
}

use Class::Std::Fast::Storable constructor => 'none';
use base qw(Google::Ads::SOAP::Typelib::ComplexType);

{ # BLOCK to scope variables

# Inside-out storage for the single 'query' property (AWQL string).
my %query_of :ATTR(:get<query>);

__PACKAGE__->_factory(
[ qw( query
) ],
{
'query' => \%query_of,
},
{
'query' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
},
{
'query' => 'query',
}
);

} # end BLOCK

} # end of BLOCK

1;
=pod
=head1 NAME
Google::Ads::AdWords::v201809::AdwordsUserListService::query
=head1 DESCRIPTION
Perl data type class for the XML Schema defined element
query from the namespace https://adwords.google.com/api/adwords/rm/v201809.
Returns the list of user lists that match the query. @param query The SQL-like AWQL query string @return A list of UserList @throws ApiException when the query is invalid or there are errors processing the request.
=head1 PROPERTIES
The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:
=over
=item * query
$element->set_query($data);
$element->get_query();
=back
=head1 METHODS
=head2 new
my $element = Google::Ads::AdWords::v201809::AdwordsUserListService::query->new($data);
Constructor. The following data structure may be passed to new():
{
query => $some_value, # string
},
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| googleads/googleads-perl-lib | lib/Google/Ads/AdWords/v201809/AdwordsUserListService/query.pm | Perl | apache-2.0 | 1,929 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V9::Common::PolicySummary;

use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);

use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;

# Constructor: copy the recognized policy-summary fields out of $args and
# strip any field that was not supplied, so serialized JSON stays concise.
sub new {
  my ($class, $args) = @_;

  my $self = bless {
    approvalStatus     => $args->{approvalStatus},
    policyTopicEntries => $args->{policyTopicEntries},
    reviewStatus       => $args->{reviewStatus},
  }, $class;

  # Delete the unassigned fields in this object for a more concise JSON payload
  remove_unassigned_fields($self, $args);

  return $self;
}

1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V9/Common/PolicySummary.pm | Perl | apache-2.0 | 1,142 |
package Sisimai::Mail::Memory;
use feature ':5.10';
use strict;
use warnings;
use Class::Accessor::Lite (
'new' => 0,
'ro' => [
'path', # [String] Fixed string "<MEMORY>"
'size', # [Integer] data size
],
'rw' => [
'payload', # [Array] entire bounce mail message
'offset', # [Integer] Index of "data"
]
);
sub new {
    # Constructor of Sisimai::Mail::Memory
    # @param    [String] argv1  Reference to a scalar holding the email text
    # @return   [Sisimai::Mail::Memory] Object
    #           [Undef]                 when the text is missing or empty
    my $class = shift;
    my $argv1 = shift;
    return undef unless defined $argv1;

    my $param = {
        'payload' => [],
        'path'    => '<MEMORY>',
        'size'    => length($$argv1) || 0,
        'offset'  => 0,
    };
    return undef unless $param->{'size'};

    if (index($$argv1, 'From ') == 0) {
        # UNIX mbox: split on each message separator line and restore the
        # "From " prefix that split() consumed.
        my @mails = split(/^From /m, $$argv1);
        shift @mails;   # drop the empty leading field
        $param->{'payload'} = [map { 'From '.$_ } @mails];
    } else {
        # A single bounce message
        $param->{'payload'} = [$$argv1];
    }
    return bless($param, __PACKAGE__);
}
sub read {
    # Memory reader, works as an iterator.
    # @return   [String] Contents of the next bounce mail, undef when done
    my $self = shift;
    my $mails = $self->{'payload'};
    return undef unless @$mails;
    $self->{'offset'}++;
    return shift @$mails;
}
1;
__END__
=encoding utf-8
=head1 NAME
Sisimai::Mail::Memory - Mailbox reader
=head1 SYNOPSIS
use Sisimai::Mail::Memory;
my $mailtxt = 'From Mailer-Daemon ...';
my $mailobj = Sisimai::Mail::Memory->new(\$mailtxt);
while( my $r = $mailobj->read ) {
print $r; # print contents of each mail in the mailbox or Maildir/
}
=head1 DESCRIPTION
Sisimai::Mail::Memory is a class for reading a mailbox or JSON string held
in a scalar variable.
=head1 CLASS METHODS
=head2 C<B<new(I<\$scalar>)>>
C<new()> is a constructor of Sisimai::Mail::Memory
my $mailtxt = 'From Mailer-Daemon ...';
my $mailobj = Sisimai::Mail::Memory->new(\$mailtxt);
=head1 INSTANCE METHODS
=head2 C<B<path()>>
C<path()> returns "<MEMORY>"
print $mailbox->path; # "<MEMORY>"
=head2 C<B<size()>>
C<size()> returns a memory size of the mailbox or JSON string.
print $mailobj->size; # 94515
=head2 C<B<payload()>>
C<payload()> returns an array reference to each email message or JSON string
print scalar @{ $mailobj->payload }; # 17
=head2 C<B<offset()>>
C<offset()> returns an offset position for seeking "payload" list. The value of
"offset" is an index number which have already read.
print $mailobj->offset; # 0
=head2 C<B<read()>>
C<read()> works as an iterator for reading each email in the mailbox.
my $mailtxt = 'From Mailer-Daemon ...';
my $mailobj = Sisimai::Mail::Memory->new(\$mailtxt);
while( my $r = $mailobj->read ) {
print $r; # print each email in the first argument of new().
}
=head1 AUTHOR
azumakuniyuki
=head1 COPYRIGHT
Copyright (C) 2018-2020 azumakuniyuki, All rights reserved.
=head1 LICENSE
This software is distributed under The BSD 2-Clause License.
=cut
| sisimai/p5-Sisimai | lib/Sisimai/Mail/Memory.pm | Perl | bsd-2-clause | 3,195 |
package Tapper::MCP::State::Details;
use 5.010;
use strict;
use warnings;
use Moose;
use List::Util qw/max min/;
use Tapper::Model 'model';
use YAML qw/Dump Load/;
# In-memory copy of the serialized MCP state structure; starts out as
# {current_state => 'invalid'} until state_init() fills it.
has state_details => (is => 'rw',
                      default => sub { {current_state => 'invalid'} },
                     );
# Row object from the TestrunDB 'State' resultset used to persist
# state_details (see BUILD and db_update).
has persist => (is => 'rw',);
sub BUILD
{
    my ($self, $args) = @_;
    my $testrun_id = $args->{testrun_id};

    # Fetch (or lazily create) the persistent State row for this testrun
    # and mirror its serialized state into the in-memory attribute.
    my $result = model('TestrunDB')->resultset('State')->find_or_create({testrun_id => $testrun_id});
    $self->persist($result);
    $self->state_details($result->state);
}
=head2 db_update
Update database entry.
@return success - 0
@return error - error string
=cut
sub db_update
{
    # Serialize the in-memory state structure back into the State row.
    #
    # @return success - 0
    my ($self) = @_;
    my $row = $self->persist;
    $row->state($self->state_details);
    $row->update;
    return 0;
}
=head1 NAME
Tapper::MCP::State::Details - Encapsulate state_details attribute of MCP::State
=head1 SYNOPSIS
use Tapper::MCP::State::Details;
my $state_details = Tapper::MCP::State::Details->new();
$state_details->prc_results(0, {success => 0, mg => 'No success'});
=head1 FUNCTIONS
=head2 results
Getter and setter for results array for whole test. Setter adds given
parameter instead of substituting.
@param hash ref - containing success(bool) and msg(string)
=cut
sub results
{
    # Accessor for the global result list; a given result is appended
    # (not substituted) and persisted immediately.
    #
    # @param hash ref - result containing success(bool) and msg(string) (optional)
    # @return array ref - all results recorded so far
    my ($self, $new_result) = @_;
    return $self->state_details->{results} unless $new_result;
    push @{$self->state_details->{results}}, $new_result;
    $self->db_update();
    return $self->state_details->{results};
}
=head2 state_init
Initialize the state or read it back from database.
@return success - 0
@return error - error string
=cut
sub state_init
{
    my ($self, $data) = @_;

    # Install the provided state structure and reset the bookkeeping.
    $self->state_details($data);
    $self->state_details->{current_state} = 'started';
    $self->state_details->{results} = [];
    $self->state_details->{prcs} ||= [];
    # Arm the keep_alive timeout relative to "now" when a span is configured.
    $self->state_details->{keep_alive}{timeout_date} = $self->state_details->{keep_alive}{timeout_span} + time if defined $self->state_details->{keep_alive}{timeout_span};
    # Every PRC gets its own (initially empty) result list.
    foreach my $this_prc (@{$self->state_details->{prcs}}) {
        $this_prc->{results} ||= [];
    }
    $self->db_update();
    return 0;
}
=head2 takeoff
The reboot call was successfully executed, now update the state for
waiting for the first message.
@return int - new timeout
=cut
sub takeoff
{
    my ($self, $skip_install) = @_;
    my $timeout_current_date;
    if ($skip_install) {
        # No installation: go straight to the test reboot and arm the
        # boot timeout of the first PRC.
        $self->current_state('reboot_test');
        my $prc = $self->state_details->{prcs}->[0];
        $timeout_current_date = $prc->{timeout_current_date} = $prc->{timeout_boot_span} + time();
    } else {
        # Normal path: reboot into the installer and arm its boot timeout.
        $self->current_state('reboot_install');
        my $install = $self->state_details->{install};
        $timeout_current_date = $install->{timeout_current_date} = $install->{timeout_boot_span} + time();
    }
    $self->db_update();
    return ($timeout_current_date);
}
=head2 current_state
Getter and setter for current state name.
@param string - state name (optional)
@return string - state name
=cut
sub current_state
{
    # Read or update the current state name; updates are persisted.
    #
    # @param string - state name (optional)
    # @return string - state name
    my ($self, $new_state) = @_;
    return $self->state_details->{current_state} unless defined $new_state;
    $self->state_details->{current_state} = $new_state;
    $self->db_update;
    return $new_state;
}
=head2 set_all_prcs_current_state
Set current_state of all PRCs to given state.
@param string - state name
=cut
sub set_all_prcs_current_state
{
    # Force every PRC into the given state and persist the change.
    # A call without a defined state is a no-op.
    #
    # @param string - state name
    my ($self, $state) = @_;
    return unless defined $state;
    $_->{current_state} = $state for @{$self->state_details->{prcs}};
    return $self->db_update;
}
=head2 keep_alive_timeout_date
Getter and setter for keep_alive_timeout_date
@optparam int - new timeout_date for keep_alive
@return int - timeout date for keep_alive
=cut
sub keep_alive_timeout_date
{
    # Getter/setter for the keep_alive timeout date (not persisted here).
    #
    # @optparam int - new timeout_date for keep_alive
    # @return int - timeout date for keep_alive
    my ($self, $new_date) = @_;
    $self->state_details->{keep_alive}{timeout_date} = $new_date
        if defined $new_date;
    return $self->state_details->{keep_alive}{timeout_date};
}
=head2 set_keep_alive_timeout_span
Setter for the keep_alive timeout date.
@param int - new timeout date for keep_alive
@return int - new timeout date for keep_alive
=cut
sub set_keep_alive_timeout_span
{
    my ($self, $timeout_span) = @_;
    # NOTE(review): despite the name, the given value is stored directly in
    # {timeout_date} (not {timeout_span}), and time() is NOT added the way
    # state_init() does for spans - confirm whether callers pass an
    # absolute date here or whether this should be "$timeout_span + time".
    $self->state_details->{keep_alive}{timeout_date} = $timeout_span;
}
=head2 keep_alive_timeout_span
Getter and setter for keep_alive_timeout_span.
Note: This function can not set the timeout to undef.
@optparam int - new timeout_span
@return int - timeout date for keep_alive
=cut
sub keep_alive_timeout_span
{
    # Getter only: the configured keep_alive timeout span.
    #
    # @return int - keep_alive timeout span
    my $self = shift;
    return $self->state_details->{keep_alive}{timeout_span};
}
=head2 installer_timeout_current_date
Getter and setter for installer timeout date.
@param int - new installer timeout date
@return string - installer timeout date
=cut
sub installer_timeout_current_date
{
    # Getter/setter for the installer timeout date; updates are persisted.
    #
    # @param int - new installer timeout date (optional)
    # @return int - installer timeout date
    my ($self, $new_date) = @_;
    return $self->state_details->{install}{timeout_current_date}
        unless defined $new_date;
    $self->state_details->{install}{timeout_current_date} = $new_date;
    $self->db_update;
    return $new_date;
}
=head2 start_install
Update timeouts for "installation started".
@return int - new timeout span
=cut
sub start_install
{
    # Mark "installation started": the installer's current timeout becomes
    # now plus the configured install span; the change is persisted.
    #
    # @return int - install timeout span
    my ($self) = @_;
    my $span = $self->state_details->{install}->{timeout_install_span};
    $self->state_details->{install}->{timeout_current_date} = time + $span;
    $self->db_update;
    return $span;
}
=head2 prc_boot_start
Sets timeouts for given PRC to the ones associated with booting of this
PRC started.
@param int - PRC number
@return int - boot timeout span
=cut
sub prc_boot_start
{
    # Arm the boot timeout of the given PRC and persist it.
    #
    # @param int - PRC number
    # @return int - boot timeout span
    my ($self, $num) = @_;
    my $boot_span = $self->state_details->{prcs}->[$num]->{timeout_boot_span};
    $self->state_details->{prcs}->[$num]->{timeout_current_date} = time + $boot_span;
    $self->db_update;
    return $boot_span;
}
=head2 prc_timeout_current_date
Get the current timeout date for given PRC
@param int - PRC number
@return int - timeout date
=cut
sub prc_timeout_current_date
{
    # Current timeout date of the given PRC.
    #
    # @param int - PRC number
    # @return int - timeout date
    my ($self, $num) = @_;
    return $self->state_details->{prcs}[$num]{timeout_current_date};
}
=head2 prc_results
Getter and setter for results array for of one PRC. Setter adds given
parameter instead of substituting. If no argument is given, all PRC
results are returned.
@param int - PRC number (optional)
@param hash ref - containing success(bool) and msg(string) (optional)
=cut
sub prc_results
{
    # Result-list accessor for one PRC (a given message is appended and
    # persisted). Without a PRC number, returns the result lists of all
    # PRCs.
    #
    # @param int - PRC number (optional)
    # @param hash ref - containing success(bool) and msg(string) (optional)
    my ($self, $num, $msg) = @_;
    if (not defined $num) {
        my @all_results = map { $_->{results} } @{$self->state_details->{prcs}};
        return \@all_results;
    }
    if ($msg) {
        push @{$self->state_details->{prcs}->[$num]->{results}}, $msg;
        $self->db_update;
    }
    return $self->state_details->{prcs}->[$num]->{results};
}
=head2 prc_count
Return number of PRCs
@return int - number of PRCs
=cut
sub prc_count
{
    # @return int - number of PRCs
    my ($self) = @_;
    return scalar @{$self->state_details->{prcs}};
}
=head2 prc_state
Getter and setter for current state of given PRC.
@param int - PRC number
@param string - state name (optional)
@return string - state name
=cut
sub prc_state
{
    # Getter/setter for the current state of one PRC; updates are persisted.
    #
    # @param int - PRC number
    # @param string - state name (optional)
    # @return string - state name, or {} for an out-of-range PRC number
    my ($self, $num, $new_state) = @_;
    return {} if $num >= $self->prc_count;
    unless (defined $new_state) {
        return $self->state_details->{prcs}->[$num]{current_state};
    }
    $self->state_details->{prcs}->[$num]{current_state} = $new_state;
    $self->db_update;
    return $new_state;
}
=head2 is_all_prcs_finished
Check whether all PRCs have finished already.
@param all PRCs finished - 1
@param not all PRCs finished - 0
=cut
sub is_all_prcs_finished
{
    # Whether every PRC has reached the state 'finished'.
    #
    # @return 1 - all PRCs finished
    # @return 0 - at least one PRC not finished yet
    my ($self) = @_;
    for my $prc (@{$self->state_details->{prcs}}) {
        return 0 if $prc->{current_state} ne 'finished';
    }
    return 1;
}
=head2 prc_next_timeout
Set next PRC timeout as current and return it as timeout span.
@param int - PRC number
@return int - next timeout span
=cut
sub prc_next_timeout
{
    # Advance the timeout bookkeeping of one PRC based on its current
    # state, arm the new timeout date, persist it, and return the span.
    #
    # Rewritten from given/when: that construct is experimental and its
    # underlying smartmatch operator is deprecated/removed in modern Perls.
    #
    # @param int - PRC number
    # @return int - next timeout span in seconds (nothing for 'finished')
    my ($self, $num) = @_;
    my $prc = $self->state_details->{prcs}->[$num];
    my $default_timeout = 60 + 60; # (time between SIGTERM and SIGKILL in PRC) + (grace period for sending the message)
    my $next_timeout = $default_timeout;
    my $state = $prc->{current_state} // '';

    if ($state eq 'preload') {
        $next_timeout = $prc->{timeout_boot_span};
    }
    elsif ($state eq 'boot') {
        # First testprogram timeout if any are configured, else default.
        if (ref $prc->{timeout_testprograms_span} eq 'ARRAY' and
            @{$prc->{timeout_testprograms_span}}) {
            $next_timeout = $prc->{timeout_testprograms_span}->[0];
        } else {
            $next_timeout = $default_timeout;
        }
    }
    elsif ($state eq 'test') {
        # Move on to the next testprogram; when none is left the PRC
        # enters 'lasttest' with the default grace timeout.
        my $testprogram_number = $prc->{number_current_test};
        ++$testprogram_number;
        if (ref $prc->{timeout_testprograms_span} eq 'ARRAY' and
            exists $prc->{timeout_testprograms_span}[$testprogram_number]){
            $prc->{number_current_test} = $testprogram_number;
            $next_timeout = $prc->{timeout_testprograms_span}[$testprogram_number];
        } else {
            $prc->{current_state} = 'lasttest';
            $next_timeout = $default_timeout;
        }
    }
    elsif ($state eq 'lasttest') {
        my $result = { error => 1,
                       msg => "prc_next_timeout called in state testfin. This is a bug. Please report it!"};
        $self->prc_results($num, $result);
    }
    elsif ($state eq 'finished') {
        return;
    }

    $self->state_details->{prcs}->[$num]->{timeout_current_date} = time() + $next_timeout;
    $self->db_update;
    return $next_timeout;
}
=head2 prc_current_test_number
Get or set the number of the testprogram currently running in given PRC.
@param int - PRC number
@param int - test number (optional)
@return test running - test number starting from 0
@return no test running - undef
=cut
sub prc_current_test_number
{
    # Getter/setter for the index of the testprogram currently running in
    # the given PRC; updates are persisted.
    #
    # @param int - PRC number
    # @param int - test number (optional)
    # @return int - test number starting from 0, undef when no test runs
    my ($self, $num, $new_number) = @_;
    unless (defined $new_number) {
        return $self->state_details->{prcs}->[$num]{number_current_test};
    }
    $self->state_details->{prcs}->[$num]{number_current_test} = $new_number;
    $self->db_update;
    return $new_number;
}
=head2 get_min_prc_timeout
Check all PRCs and return the minimum of their upcoming timeouts in
seconds.
@return timeout span for the next state change during testing
=cut
sub get_min_prc_timeout
{
    my ($self) = @_;
    my $now = time();

    # Seed with PRC 0's timeout.
    # NOTE(review): unlike the loop below, element 0 is not checked for a
    # set timeout_current_date; if it is unset this evaluates "undef - $now".
    # Confirm whether PRC 0 is guaranteed to have an armed timeout here.
    my $timeout = $self->state_details->{prcs}->[0]->{timeout_current_date} - $now;
    for ( my $prc_num=1; $prc_num < @{$self->state_details->{prcs}}; $prc_num++) {
        # PRCs without an armed timeout are skipped.
        next unless $self->state_details->{prcs}->[$prc_num]->{timeout_current_date};
        $timeout = min($timeout, $self->state_details->{prcs}->[$prc_num]->{timeout_current_date} - $now);
    }
    return $timeout;
}
1;
| tapper/Tapper-MCP | lib/Tapper/MCP/State/Details.pm | Perl | bsd-2-clause | 12,305 |
package App::Netdisco::SSHCollector::Platform::Linux;
=head1 NAME
App::Netdisco::SSHCollector::Platform::Linux
=head1 DESCRIPTION
Collect ARP entries from Linux routers
This collector uses "C<arp>" as the command for the arp utility on your
system. If you wish to specify an absolute path, then add an C<arp_command>
item to your configuration:
device_auth:
- tag: sshlinux
driver: cli
platform: Linux
only: '192.0.2.1'
username: oliver
password: letmein
arp_command: '/usr/sbin/arp'
=cut
use strict;
use warnings;
use Dancer ':script';
use Expect;
use Moo;
=head1 PUBLIC METHODS
=over 4
=item B<arpnip($host, $ssh)>
Retrieve ARP entries from device. C<$host> is the hostname or IP address
of the device. C<$ssh> is a Net::OpenSSH connection to the device.
Returns a list of hashrefs in the format C<{ mac =E<gt> MACADDR, ip =E<gt> IPADDR }>.
=back
=cut
sub arpnip {
    my ($self, $hostlabel, $ssh, $args) = @_;

    debug "$hostlabel $$ arpnip()";

    # Attach a pseudo-terminal to the remote shell; bail out with an
    # empty list when the channel cannot be opened.
    my ($pty, $pid) = $ssh->open2pty;
    unless ($pty) {
        debug "unable to run remote command [$hostlabel] " . $ssh->error;
        return ();
    }
    my $expect = Expect->init($pty);

    my ($pos, $error, $match, $before, $after);
    my $prompt = qr/\$/;

    # Wait (up to 10s) for the shell prompt before sending anything.
    ($pos, $error, $match, $before, $after) = $expect->expect(10, -re, $prompt);

    # "tail -n +2" strips the header line of the arp table output.
    my $command = ($args->{arp_command} || 'arp');
    $expect->send("$command -n | tail -n +2\n");
    ($pos, $error, $match, $before, $after) = $expect->expect(5, -re, $prompt);

    my @arpentries = ();
    # $before holds everything printed up to the next prompt.
    my @lines = split(m/\n/, $before);

    # 192.168.1.1 ether 00:b6:aa:f5:bb:6e C eth1
    my $linereg = qr/([0-9\.]+)\s+ether\s+([a-fA-F0-9:]+)/;

    foreach my $line (@lines) {
        if ($line =~ $linereg) {
            my ($ip, $mac) = ($1, $2);
            push @arpentries, { mac => $mac, ip => $ip };
        }
    }

    # Leave the remote shell cleanly before returning.
    $expect->send("exit\n");
    $expect->soft_close();

    return @arpentries;
}
1;
| netdisco/netdisco | lib/App/Netdisco/SSHCollector/Platform/Linux.pm | Perl | bsd-3-clause | 1,976 |
package Net::Braintree::CreditCard::Commercial;
use strict;

# Tri-state flags describing whether a card is a commercial card.
use constant {
    Yes     => "Yes",
    No      => "No",
    Unknown => "Unknown",
};

1;
| braintree/braintree_perl | lib/Net/Braintree/CreditCard/Commercial.pm | Perl | mit | 152 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
Bio::EnsEMBL::MiscSet - This is a set representing a classification of
a group of miscellaneuos features.
=head1 SYNOPSIS
use Bio::EnsEMBL::MiscSet;
my $misc_set = Bio::EnsEMBL::MiscSet->new(
1234, $adaptor, 'tilepath',
'Assembly Tiling Path',
'The tiling path of clones', 1e6
);
my $misc_feature->add_set($misc_set);
=head1 DESCRIPTION
MiscSets represent classsifications or groupings of MiscFeatures.
Features are classified into sets essentially to define what they are
and how they may be used. Generally MiscFeatures are retrieved on
the basis of their associated sets. See Bio::EnsEMBL::MiscFeature,
Bio::EnsEMBL::DBSQL::MiscFeatureAdaptor.
Note that MiscSets and MiscFeatures were formerly known as MapSets and
MapFrags
=head1 METHODS
=cut
package Bio::EnsEMBL::MiscSet;
use strict;
use Bio::EnsEMBL::Storable;
use Bio::EnsEMBL::Utils::Argument qw(rearrange);
use vars qw(@ISA);
@ISA = qw(Bio::EnsEMBL::Storable);
=head2 new
Arg [1] : int $misc_set_id
The internal identifier for this misc set
Arg [2] : string $code
The unique code which identifies this set type
Arg [3] : string $name
The human readable name of this set
Arg [4] : string $desc
The description of this set
Arg [5] : int $max_len
The maximum length of features of this mapset
Example : $set = new Bio::EnsEMBL::MiscSet(1234, 'tilepath',
'Assembly Tiling Path',
'The tiling path of clones',
1e6);
Description: Instantiates a Bio::EnsEMBL::MiscSet
Returntype : Bio::EnsEMBL::MiscSet
Exceptions : none
Caller : MiscFeatureAdaptor
Status : Stable
=cut
sub new {
  # Instantiate a Bio::EnsEMBL::MiscSet. The superclass consumes the
  # generic storable arguments; the set-specific ones are unpacked here.
  my $caller = shift;
  my $class = ref($caller) || $caller;

  my $self = $class->SUPER::new(@_);

  my ($code, $name, $desc, $max_len) =
    rearrange([qw(CODE NAME DESCRIPTION LONGEST_FEATURE)], @_);

  # Store all set attributes in one hash-slice assignment.
  @{$self}{'code', 'name', 'description', 'longest_feature'} =
    ($code, $name, $desc, $max_len);

  return $self;
}
=head2 code
Arg [1] : string $newval (optional)
The new value to set the code attribute to
Example : $code = $obj->code()
Description: Getter/Setter for the code attribute
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
sub code {
  # Getter/setter for the unique code identifying this set type.
  my ($self, @args) = @_;
  $self->{'code'} = $args[0] if @args;
  return $self->{'code'};
}
=head2 name
Arg [1] : string $newval (optional)
The new value to set the code attribute to
Example : $name = $obj->name()
Description: Getter/Setter for the name attribute
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
sub name {
  # Getter/setter for the human readable name of this set.
  my ($self, @args) = @_;
  $self->{'name'} = $args[0] if @args;
  return $self->{'name'};
}
=head2 description
Arg [1] : string $newval (optional)
The new value to set the description attribute to
Example : $description = $obj->description()
Description: Getter/Setter for the description attribute
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
sub description {
  # Getter/setter for the free-text description of this set.
  my ($self, @args) = @_;
  $self->{'description'} = $args[0] if @args;
  return $self->{'description'};
}
=head2 longest_feature
Arg [1] : int $newval (optional)
The new value to set the longest_feature attribute to
Example : $longest_feature = $obj->longest_feature()
Description: Getter/Setter for the longest_feature attribute
Returntype : int
Exceptions : none
Caller : general
Status : Stable
=cut
sub longest_feature {
  # Getter/setter for the maximum length of features in this set.
  my ($self, @args) = @_;
  $self->{'longest_feature'} = $args[0] if @args;
  return $self->{'longest_feature'};
}
1;
| at7/ensembl | modules/Bio/EnsEMBL/MiscSet.pm | Perl | apache-2.0 | 4,803 |
#!/usr/bin/perl -w
#
# Copyright 2012, Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This example promotes an experiment, which permanently applies all the
# experiment changes made to its related ad groups, criteria and ads. To add an
# experiment, run campaign_management/add_experiment.pl.
#
# Tags: ExperimentService.mutate
# Author: David Torres <api.davidtorres@gmail.com>
use strict;
use lib "../../../lib";
use Google::Ads::AdWords::Client;
use Google::Ads::AdWords::Logging;
use Google::Ads::AdWords::v201409::Experiment;
use Google::Ads::AdWords::v201409::ExperimentOperation;
use Cwd qw(abs_path);
# Replace with valid values of your account.
# (id of an existing, not-yet-promoted experiment)
my $experiment_id = "INSERT_EXPERIMENT_ID_HERE";
# Example main subroutine.
# Promote the given experiment: a SET mutate with status PROMOTED, which
# permanently applies the experiment changes. Prints the outcome.
#
# @param $client        - authenticated Google::Ads::AdWords::Client
# @param $experiment_id - id of the experiment to promote
# @return 1
sub promote_experiment {
  my $client = shift;
  my $experiment_id = shift;

  # Set experiment's status to PROMOTED.
  my $experiment = Google::Ads::AdWords::v201409::Experiment->new({
    id => $experiment_id,
    status => "PROMOTED"
  });

  # Create operation.
  my $experiment_operation =
    Google::Ads::AdWords::v201409::ExperimentOperation->new({
      operator => "SET",
      operand => $experiment
    });

  # Update experiment.
  my $result = $client->ExperimentService()->mutate({
    operations => [$experiment_operation]
  });

  # Display experiment.
  # (this inner $experiment intentionally shadows the one built above)
  if ($result->get_value()) {
    my $experiment = $result->get_value()->[0];
    printf "Experiment with name \"%s\" and id \"%d\" was promoted.\n",
      $experiment->get_name(), $experiment->get_id();
  } else {
    print "No experiment was promoted.\n";
  }
  return 1;
}
# Don't run the example if the file is being included.
if (abs_path($0) ne abs_path(__FILE__)) {
  return 1;
}

# Log SOAP XML request, response and API errors.
Google::Ads::AdWords::Logging::enable_all_logging();

# Get AdWords Client, credentials will be read from ~/adwords.properties.
my $client = Google::Ads::AdWords::Client->new({version => "v201409"});

# By default examples are set to die on any server returned fault.
$client->set_die_on_faults(1);

# Call the example with the experiment id configured at the top of the file.
promote_experiment($client, $experiment_id);
| gitpan/Google-Ads-AdWords-Client | examples/v201409/campaign_management/promote_experiment.pl | Perl | apache-2.0 | 2,658 |
package #
Date::Manip::Offset::off290;
# Copyright (c) 2008-2014 Sullivan Beck. All rights reserved.
# This program is free software; you can redistribute it and/or modify it
# under the same terms as Perl itself.

# This file was automatically generated. Any changes to this file will
# be lost the next time 'tzdata' is run.
#     Generated on: Fri Nov 21 11:03:46 EST 2014
#     Data version: tzdata2014j
#     Code version: tzcode2014j

# This module contains data from the zoneinfo time zone database. The original
# data was obtained from the URL:
#    ftp://ftp.iana.org/tz

use strict;
use warnings;
require 5.010000;

our ($VERSION);
$VERSION='6.48';
END { undef $VERSION; }

our ($Offset,%Offset);
END {
undef $Offset;
undef %Offset;
}

$Offset = '-03:53:48';

# Zones that have used this UTC offset, keyed by usage index.
# NOTE(review): presumably 0 denotes standard-time usage - confirm
# against the Date::Manip::Offset documentation.
%Offset = (
0 => [
'america/argentina/buenos_aires',
],
);

1;
# !!!!!!! DO NOT EDIT THIS FILE !!!!!!!
# This file is machine-generated by lib/unicore/mktables from the Unicode
# database, Version 6.2.0. Any changes made here will be lost!

# !!!!!!! INTERNAL PERL USE ONLY !!!!!!!
# This file is for internal use by core Perl only. The format and even the
# name or existence of this file are subject to change without notice. Don't
# use it directly.

# Returns the matching code-point ranges (hex start/end per line; a single
# value means a one-code-point range) for this property.
return <<'END';
0A81 0A83
0A85 0A8D
0A8F 0A91
0A93 0AA8
0AAA 0AB0
0AB2 0AB3
0AB5 0AB9
0ABC 0AC5
0AC7 0AC9
0ACB 0ACD
0AD0
0AE0 0AE3
0AE6 0AF1
END
class Panda::Installer {
    use Panda::Common;
    use Panda::Project;
    use File::Find;
    use Shell::Command;

    # Repository (or path) installed into when the caller gives no target.
    has $.prefix = self.default-prefix();

    # Order lib/ files so that precompiled files (compsuffix) come last.
    method sort-lib-contents(@lib) {
        my @generated = @lib.grep({ $_ ~~ / \. <{compsuffix}> $/});
        my @rest = @lib.grep({ $_ !~~ / \. <{compsuffix}> $/});
        return flat @rest, @generated;
    }

    # default install location
    # Prefer the writable "site"/"home" repositories, then any installable
    # repository in the repo chain; fail when nothing is writable.
    method default-prefix {
        my @custom-lib = <site home>.map({CompUnit::RepositoryRegistry.repository-for-name($_)}).grep(*.defined);
        for @custom-lib.grep(*.can-install) -> $repo {
            return $repo;
        }
        my $ret = $*REPO.repo-chain.grep(CompUnit::Repository::Installable).first(*.can-install);
        return $ret if $ret;
        fail "Could not find a repository to install to";
    }

    # Copy helper: report, remove the stale target (except on Windows),
    # then copy.
    sub copy($src, $dest) {
        note "Copying $src to $dest";
        unless $*DISTRO.is-win {
            $dest.IO.unlink;
        }
        $src.copy($dest);
    }

    # Install the distribution found in $from into $to (default: $.prefix).
    # A CompUnit::Repository target gets a proper Distribution install;
    # anything else receives a plain file copy of lib/ and bin/.
    method install($from, $to? is copy, Panda::Project :$bone, Bool :$force) {
        unless $to {
            $to = $.prefix;
        }
        $to = $to.IO.absolute if $to ~~ IO::Path; # we're about to change cwd
        if $to !~~ CompUnit::Repository and CompUnit::RepositoryRegistry.repository-for-spec($to, :next-repo($*REPO)) -> $cur {
            $to = $cur;
        }
        indir $from, {
            # check if $.prefix is under control of a CompUnit::Repository
            if $to.can('install') {
                fail "'provides' key mandatory in META information" unless $bone.metainfo<provides>:exists;
                my %sources = $bone.metainfo<provides>.map({ $_.key => ~$_.value.IO });
                my %scripts;
                if 'bin'.IO ~~ :d {
                    # Collect bin/ scripts, skipping dotfiles and (on
                    # non-Windows) .bat wrappers.
                    for find(dir => 'bin', type => 'file').list -> $bin {
                        my $basename = $bin.basename;
                        next if $basename.substr(0, 1) eq '.';
                        next if !$*DISTRO.is-win and $basename ~~ /\.bat$/;
                        %scripts{$basename} = ~$bin.IO;
                    }
                }
                # Library resources get platform-specific names.
                my %resources = ($bone.metainfo<resources> // []).map({
                    $_ => $_ ~~ m/^libraries\/(.*)/
                        ?? ~"resources/libraries".IO.child($*VM.platform-library-name($0.Str.IO))
                        !! ~"resources/$_".IO
                });
                $to.install(
                    Distribution.new(|$bone.metainfo),
                    %sources,
                    %scripts,
                    %resources,
                    :$force,
                );
            }
            else {
                if 'lib'.IO ~~ :d {
                    my @lib = find(dir => 'lib', type => 'file').list;
                    for self.sort-lib-contents(@lib) -> $i {
                        next if $i.basename.substr(0, 1) eq '.';
                        mkpath "$to/{$i.dirname}";
                        copy($i, "$to/{$i}");
                    }
                }
                if 'bin'.IO ~~ :d {
                    for find(dir => 'bin', type => 'file').list -> $bin {
                        next if $bin.basename.substr(0, 1) eq '.';
                        next if !$*DISTRO.is-win and $bin.basename ~~ /\.bat$/;
                        mkpath "$to/{$bin.dirname}";
                        copy($bin, "$to/$bin");
                        "$to/$bin".IO.chmod(0o755) unless $*DISTRO.is-win;
                        # TODO remove this once CompUnit installation actually works
                        "$to/$bin.bat".IO.spurt(q:to[SCRIPT]
@rem = '--*-Perl-*--
@echo off
if "%OS%" == "Windows_NT" goto WinNT
perl6 "%~dpn0" %1 %2 %3 %4 %5 %6 %7 %8 %9
goto endofperl
:WinNT
perl6 "%~dpn0" %*
if NOT "%COMSPEC%" == "%SystemRoot%\system32\cmd.exe" goto endofperl
if %errorlevel% == 9009 echo You do not have Perl in your PATH.
if errorlevel 1 goto script_failed_so_exit_with_non_zero_val 2>nul
goto endofperl
@rem ';
__END__
:endofperl
SCRIPT
                        );
                    }
                }
            }
            1;
        }
    }
}
# vim: ft=perl6
| stmuk/panda | lib/Panda/Installer.pm | Perl | mit | 3,817 |
# IBM_PROLOG_BEGIN_TAG
# This is an automatically generated prolog.
#
# $Source: src/build/debug/Hostboot/MemStats.pm $
#
# OpenPOWER HostBoot Project
#
# COPYRIGHT International Business Machines Corp. 2011,2014
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License.
#
# IBM_PROLOG_END_TAG
use strict;
package Hostboot::MemStats;
use Exporter;
our @EXPORT_OK = ('main');
use constant HEAPMGR_INSTANCE_NAME =>
"Singleton<HeapManager>::instance()::instance";
use constant HEAPMGR_CHUNK_OFFSET => 0;
#use constant HEAPMGR_BIGCHUNK_OFFSET => 0;
use constant HEAPMGR_NUMBER_OF_BUCKETS => 12;
use constant MIN_BUCKET_SIZE => 16;
use constant FIB_START_INCR => 16;
# Entry point of the MemStats debug tool: read the PageManager and
# HeapManager statistics out of the target's memory and display them.
# Options: "count" tallies the free chunks per heap bucket, "show" dumps
# every chunk of every bucket.
sub main
{
    my ($packName, $args) = @_;

    my $countchunks = 0;
    if (defined $args->{"count"})
    {
        ::userDisplay "counting chunks";
        $countchunks = 1;
    }
    my $showchunks = 0;
    if (defined $args->{"show"})
    {
        ::userDisplay "showing chunks";
        $showchunks = 1;
    }

    # Locate the singletons and counters by symbol and read their values.
    my ($heap_manager_addr, $symSize) =
        ::findSymbolAddress(HEAPMGR_INSTANCE_NAME);
    my @page_manager_addr =
        ::findSymbolAddress("Singleton<PageManager>::instance()::instance");
    my $free_pages =
        ::read64 @page_manager_addr;
    my $total_pages =
        ::read64 ($page_manager_addr[0] + 8, 8);
    my $free_min =
        ::read64 ::findSymbolAddress("PageManager::cv_low_page_count");
    my $page_coal =
        ::read64 ::findSymbolAddress("PageManager::cv_coalesce_count");
    my $big_heap_pages_used =
        ::read32 ::findSymbolAddress("HeapManager::cv_largeheap_page_count");
    my $big_heap_max =
        ::read32 ::findSymbolAddress("HeapManager::cv_largeheap_page_max");
    my $small_heap_pages_used =
        ::read32 ::findSymbolAddress("HeapManager::cv_smallheap_page_count");
    my $heap_coal =
        ::read32 ::findSymbolAddress("HeapManager::cv_coalesce_count");
    my $heap_free =
        ::read32 ::findSymbolAddress("HeapManager::cv_free_bytes");
    my $heap_free_chunks =
        ::read32 ::findSymbolAddress("HeapManager::cv_free_chunks");
    my $heap_total = $big_heap_pages_used + $small_heap_pages_used;
    my $heap_max = $big_heap_max + $small_heap_pages_used;
    my $castout_ro =
        ::read32 ::findSymbolAddress("Block::cv_ro_evict_req");
    my $castout_rw =
        ::read32 ::findSymbolAddress("Block::cv_rw_evict_req");

    # Summary report.
    ::userDisplay "===================================================\n";
    ::userDisplay "MemStats:\n";
    ::userDisplay " Total pages available: $total_pages\n";
    ::userDisplay " Free pages: $free_pages\n";
    ::userDisplay " Free pages Low mark: $free_min\n";
    ::userDisplay " Page chunks coalesced: $page_coal\n";
    ::userDisplay "\nHeap:\n";
    ::userDisplay " Pages used by heap: $heap_total ".
    "(B:$big_heap_pages_used,S:$small_heap_pages_used)\n";
    ::userDisplay " Max. Pages used by heap: $heap_max\n";
    ::userDisplay " heap free bytes/chunks $heap_free/$heap_free_chunks (valid only after a coalescing)\n";
    ::userDisplay " Heap chunks coalesced: $heap_coal\n";
    ::userDisplay "\nVirtual Memory Manager page eviction requests:\n";
    ::userDisplay " RO page requests: $castout_ro\n";
    ::userDisplay " RW page requests: $castout_rw\n";
    ::userDisplay "===================================================\n";

    if( $showchunks )
    {
        ::userDisplay "Show Buckets - ";
        #Show the entire heap
        ::userDisplay(sprintf("HeapManager at 0x%X\n",$heap_manager_addr));
        # Bucket sizes grow Fibonacci-style: each is the sum of the
        # previous two, starting from MIN_BUCKET_SIZE.
        my $bucketsize = MIN_BUCKET_SIZE;
        my $oldbucketsize = MIN_BUCKET_SIZE;
        for (my $bucket = 0; $bucket < HEAPMGR_NUMBER_OF_BUCKETS; $bucket++)
        {
            my $stackAddr =
                ::read32($heap_manager_addr + HEAPMGR_CHUNK_OFFSET +
                         (8 * $bucket) + 4);
            ::userDisplay(sprintf("%d : stackaddr=0x%.8X\n",
                                  $bucket,$stackAddr) );
            showPagesInStack($stackAddr,$bucket,$bucketsize);
            my $tmpsize = $bucketsize;
            $bucketsize = $bucketsize + $oldbucketsize;
            $oldbucketsize = $tmpsize;
        }
    }
    if( $countchunks )
    {
        ::userDisplay "Show Buckets - ";
        #Show the entire heap
        ::userDisplay( sprintf("HeapManager at 0x%X\n",$heap_manager_addr) );
        # Same Fibonacci bucket-size progression as above.
        my $bucketsize = MIN_BUCKET_SIZE;
        my $oldbucketsize = MIN_BUCKET_SIZE;
        for (my $bucket = 0; $bucket < HEAPMGR_NUMBER_OF_BUCKETS; $bucket++)
        {
            my $stackAddr =
                ::read32($heap_manager_addr + HEAPMGR_CHUNK_OFFSET +
                         (8 * $bucket) + 4);
            my $numchunks = countChunks($stackAddr);
            ::userDisplay(
                sprintf("Bucket %d(=%d) has %d chunks = %d bytes\n",
                        $bucket,$bucketsize,$numchunks,
                        $bucketsize*$numchunks));
            my $tmpsize = $bucketsize;
            $bucketsize = $bucketsize + $oldbucketsize;
            $oldbucketsize = $tmpsize;
        }
    }
}
# Describe this debug tool to the Hostboot debug framework.
# Returns a key/value list with the tool's name and an intro paragraph list.
# (The original relied on the implicit return value of the `my %info = (...)`
# declaration; the return is now explicit.)
sub helpInfo
{
    my %info = (
        name  => "MemStats",
        intro => ["Displays Hostboot memory usage information."],
    );
    return %info;
}
# Walk a bucket's free-chunk stack, printing one line per entry (including
# the terminating NULL link) and returning the number of non-NULL entries.
# Each chunk's next-pointer is assumed to live at offset 8 — TODO confirm
# against the HeapManager chunk layout.
sub showPagesInStack
{
    my ($stack, $bucket, $bucketsize) = @_;

    my $entries = 0;
    while (1) {
        ::userDisplay(sprintf "..stack=0x%.8X-0x%.8X : %d=%d bytes\n",
                      $stack, $stack + $bucketsize, $bucket, $bucketsize);
        last if (0 == $stack);
        $entries++;
        # List-context assignment mirrors how the recursive original passed
        # ::read64's result straight into the next call's first argument.
        ($stack) = ::read64($stack + 8);
    }
    return $entries;
}
# Count the entries on a free-chunk stack by following the next-pointer at
# offset 8 until a NULL link is reached.
sub countChunks
{
    my ($stack) = @_;

    my $chunks = 0;
    while ($stack != 0) {
        $chunks++;
        # First value of ::read64 is the next-chunk address, as in the
        # recursive original.
        ($stack) = ::read64($stack + 8);
    }
    return $chunks;
}
| alvintpwang/hostboot | src/build/debug/Hostboot/MemStats.pm | Perl | apache-2.0 | 6,356 |
=pod
=head1 NAME
SSL_CTX_add_extra_chain_cert,
SSL_CTX_get_extra_chain_certs,
SSL_CTX_get_extra_chain_certs_only,
SSL_CTX_clear_extra_chain_certs
- add, get or clear extra chain certificates
=head1 SYNOPSIS
#include <openssl/ssl.h>
long SSL_CTX_add_extra_chain_cert(SSL_CTX *ctx, X509 *x509);
long SSL_CTX_get_extra_chain_certs(SSL_CTX *ctx, STACK_OF(X509) **sk);
long SSL_CTX_get_extra_chain_certs_only(SSL_CTX *ctx, STACK_OF(X509) **sk);
long SSL_CTX_clear_extra_chain_certs(SSL_CTX *ctx);
=head1 DESCRIPTION
SSL_CTX_add_extra_chain_cert() adds the certificate B<x509> to the extra chain
certificates associated with B<ctx>. Several certificates can be added one
after another.
SSL_CTX_get_extra_chain_certs() retrieves the extra chain certificates
associated with B<ctx>, or the chain associated with the current certificate
of B<ctx> if the extra chain is empty.
The returned stack should not be freed by the caller.
SSL_CTX_get_extra_chain_certs_only() retrieves the extra chain certificates
associated with B<ctx>.
The returned stack should not be freed by the caller.
SSL_CTX_clear_extra_chain_certs() clears all extra chain certificates
associated with B<ctx>.
These functions are implemented as macros.
=head1 NOTES
When sending a certificate chain, extra chain certificates are sent in order
following the end entity certificate.
If no chain is specified, the library will try to complete the chain from the
available CA certificates in the trusted CA storage, see
L<SSL_CTX_load_verify_locations(3)>.
The B<x509> certificate provided to SSL_CTX_add_extra_chain_cert() will be
freed by the library when the B<SSL_CTX> is destroyed. An application
B<should not> free the B<x509> object.
=head1 RESTRICTIONS
Only one set of extra chain certificates can be specified per SSL_CTX
structure. Different chains for different certificates (for example if both
RSA and DSA certificates are specified by the same server) or different SSL
structures with the same parent SSL_CTX cannot be specified using this
function. For more flexibility functions such as SSL_add1_chain_cert() should
be used instead.
=head1 RETURN VALUES
SSL_CTX_add_extra_chain_cert() and SSL_CTX_clear_extra_chain_certs() return
1 on success and 0 for failure. Check out the error stack to find out the
reason for failure.
=head1 SEE ALSO
L<ssl(7)>,
L<SSL_CTX_use_certificate(3)>,
L<SSL_CTX_set_client_cert_cb(3)>,
L<SSL_CTX_load_verify_locations(3)>
L<SSL_CTX_set0_chain(3)>
L<SSL_CTX_set1_chain(3)>
L<SSL_CTX_add0_chain_cert(3)>
L<SSL_CTX_add1_chain_cert(3)>
L<SSL_set0_chain(3)>
L<SSL_set1_chain(3)>
L<SSL_add0_chain_cert(3)>
L<SSL_add1_chain_cert(3)>
L<SSL_CTX_build_cert_chain(3)>
L<SSL_build_cert_chain(3)>
=head1 COPYRIGHT
Copyright 2000-2018 The OpenSSL Project Authors. All Rights Reserved.
Licensed under the Apache License 2.0 (the "License"). You may not use
this file except in compliance with the License. You can obtain a copy
in the file LICENSE in the source distribution or at
L<https://www.openssl.org/source/license.html>.
=cut
| jens-maus/amissl | openssl/doc/man3/SSL_CTX_add_extra_chain_cert.pod | Perl | bsd-3-clause | 3,052 |
=pod
=head1 NAME
verify - Utility to verify certificates
=head1 SYNOPSIS
B<openssl> B<verify>
[B<-help>]
[B<-CAfile file>]
[B<-CApath directory>]
[B<-no-CAfile>]
[B<-no-CApath>]
[B<-allow_proxy_certs>]
[B<-attime timestamp>]
[B<-check_ss_sig>]
[B<-CRLfile file>]
[B<-crl_download>]
[B<-crl_check>]
[B<-crl_check_all>]
[B<-engine id>]
[B<-explicit_policy>]
[B<-extended_crl>]
[B<-ignore_critical>]
[B<-inhibit_any>]
[B<-inhibit_map>]
[B<-no_check_time>]
[B<-partial_chain>]
[B<-policy arg>]
[B<-policy_check>]
[B<-policy_print>]
[B<-purpose purpose>]
[B<-suiteB_128>]
[B<-suiteB_128_only>]
[B<-suiteB_192>]
[B<-trusted_first>]
[B<-no_alt_chains>]
[B<-untrusted file>]
[B<-trusted file>]
[B<-use_deltas>]
[B<-verbose>]
[B<-auth_level level>]
[B<-verify_depth num>]
[B<-verify_email email>]
[B<-verify_hostname hostname>]
[B<-verify_ip ip>]
[B<-verify_name name>]
[B<-x509_strict>]
[B<-show_chain>]
[B<->]
[certificates]
=head1 DESCRIPTION
The B<verify> command verifies certificate chains.
=head1 COMMAND OPTIONS
=over 4
=item B<-help>
Print out a usage message.
=item B<-CAfile file>
A B<file> of trusted certificates.
The file should contain one or more certificates in PEM format.
=item B<-CApath directory>
A directory of trusted certificates. The certificates should have names
of the form: hash.0 or have symbolic links to them of this
form ("hash" is the hashed certificate subject name: see the B<-hash> option
of the B<x509> utility). Under Unix the B<c_rehash> script will automatically
create symbolic links to a directory of certificates.
=item B<-no-CAfile>
Do not load the trusted CA certificates from the default file location
=item B<-no-CApath>
Do not load the trusted CA certificates from the default directory location
=item B<-allow_proxy_certs>
Allow the verification of proxy certificates
=item B<-attime timestamp>
Perform validation checks using time specified by B<timestamp> and not
current system time. B<timestamp> is the number of seconds since
01.01.1970 (UNIX time).
=item B<-check_ss_sig>
Verify the signature on the self-signed root CA. This is disabled by default
because it doesn't add any security.
=item B<-CRLfile file>
The B<file> should contain one or more CRLs in PEM format.
This option can be specified more than once to include CRLs from multiple
B<files>.
=item B<-crl_download>
Attempt to download CRL information for this certificate.
=item B<-crl_check>
Checks end entity certificate validity by attempting to look up a valid CRL.
If a valid CRL cannot be found an error occurs.
=item B<-crl_check_all>
Checks the validity of B<all> certificates in the chain by attempting
to look up valid CRLs.
=item B<-engine id>
Specifying an engine B<id> will cause L<verify(1)> to attempt to load the
specified engine.
The engine will then be set as the default for all its supported algorithms.
If you want to load certificates or CRLs that require engine support via any of
the B<-trusted>, B<-untrusted> or B<-CRLfile> options, the B<-engine> option
must be specified before those options.
=item B<-explicit_policy>
Set policy variable require-explicit-policy (see RFC5280).
=item B<-extended_crl>
Enable extended CRL features such as indirect CRLs and alternate CRL
signing keys.
=item B<-ignore_critical>
Normally if an unhandled critical extension is present which is not
supported by OpenSSL the certificate is rejected (as required by RFC5280).
If this option is set critical extensions are ignored.
=item B<-inhibit_any>
Set policy variable inhibit-any-policy (see RFC5280).
=item B<-inhibit_map>
Set policy variable inhibit-policy-mapping (see RFC5280).
=item B<-no_check_time>
This option suppresses checking the validity period of certificates and CRLs
against the current time. If option B<-attime timestamp> is used to specify
a verification time, the check is not suppressed.
=item B<-partial_chain>
Allow verification to succeed even if a I<complete> chain cannot be built to a
self-signed trust-anchor, provided it is possible to construct a chain to a
trusted certificate that might not be self-signed.
=item B<-policy arg>
Enable policy processing and add B<arg> to the user-initial-policy-set (see
RFC5280). The policy B<arg> can be an object name an OID in numeric form.
This argument can appear more than once.
=item B<-policy_check>
Enables certificate policy processing.
=item B<-policy_print>
Print out diagnostics related to policy processing.
=item B<-purpose purpose>
The intended use for the certificate. If this option is not specified,
B<verify> will not consider certificate purpose during chain verification.
Currently accepted uses are B<sslclient>, B<sslserver>, B<nssslserver>,
B<smimesign>, B<smimeencrypt>. See the B<VERIFY OPERATION> section for more
information.
=item B<-suiteB_128_only>, B<-suiteB_128>, B<-suiteB_192>
enable the Suite B mode operation at 128 bit Level of Security, 128 bit or
192 bit, or only 192 bit Level of Security respectively.
See RFC6460 for details. In particular the supported signature algorithms are
reduced to support only ECDSA and SHA256 or SHA384 and only the elliptic curves
P-256 and P-384.
=item B<-trusted_first>
When constructing the certificate chain, use the trusted certificates specified
via B<-CAfile>, B<-CApath> or B<-trusted> before any certificates specified via
B<-untrusted>.
This can be useful in environments with Bridge or Cross-Certified CAs.
As of OpenSSL 1.1.0 this option is on by default and cannot be disabled.
=item B<-no_alt_chains>
By default, unless B<-trusted_first> is specified, when building a certificate
chain, if the first certificate chain found is not trusted, then OpenSSL will
attempt to replace untrusted issuer certificates with certificates from the
trust store to see if an alternative chain can be found that is trusted.
As of OpenSSL 1.1.0, with B<-trusted_first> always on, this option has no
effect.
=item B<-untrusted file>
A B<file> of additional untrusted certificates (intermediate issuer CAs) used
to construct a certificate chain from the subject certificate to a trust-anchor.
The B<file> should contain one or more certificates in PEM format.
This option can be specified more than once to include untrusted certificates
from multiple B<files>.
=item B<-trusted file>
A B<file> of trusted certificates, which must be self-signed, unless the
B<-partial_chain> option is specified.
The B<file> contains one or more certificates in PEM format.
With this option, no additional (e.g., default) certificate lists are
consulted.
That is, the only trust-anchors are those listed in B<file>.
This option can be specified more than once to include trusted certificates
from multiple B<files>.
This option implies the B<-no-CAfile> and B<-no-CApath> options.
This option cannot be used in combination with either of the B<-CAfile> or
B<-CApath> options.
=item B<-use_deltas>
Enable support for delta CRLs.
=item B<-verbose>
Print extra information about the operations being performed.
=item B<-auth_level level>
Set the certificate chain authentication security level to B<level>.
The authentication security level determines the acceptable signature and
public key strength when verifying certificate chains.
For a certificate chain to validate, the public keys of all the certificates
must meet the specified security B<level>.
The signature algorithm security level is enforced for all the certificates in
the chain except for the chain's I<trust anchor>, which is either directly
trusted or validated by means other than its signature.
See L<SSL_CTX_set_security_level(3)> for the definitions of the available
levels.
The default security level is -1, or "not set".
At security level 0 or lower all algorithms are acceptable.
Security level 1 requires at least 80-bit-equivalent security and is broadly
interoperable, though it will, for example, reject MD5 signatures or RSA keys
shorter than 1024 bits.
=item B<-verify_depth num>
Limit the certificate chain to B<num> intermediate CA certificates.
A maximal depth chain can have up to B<num+2> certificates, since neither the
end-entity certificate nor the trust-anchor certificate count against the
B<-verify_depth> limit.
=item B<-verify_email email>
Verify if the B<email> matches the email address in Subject Alternative Name or
the email in the subject Distinguished Name.
=item B<-verify_hostname hostname>
Verify if the B<hostname> matches DNS name in Subject Alternative Name or
Common Name in the subject certificate.
=item B<-verify_ip ip>
Verify if the B<ip> matches the IP address in Subject Alternative Name of
the subject certificate.
=item B<-verify_name name>
Use default verification policies like trust model and required certificate
policies identified by B<name>.
The trust model determines which auxiliary trust or reject OIDs are applicable
to verifying the given certificate chain.
See the B<-addtrust> and B<-addreject> options of the L<x509(1)> command-line
utility.
Supported policy names include: B<default>, B<pkcs7>, B<smime_sign>,
B<ssl_client>, B<ssl_server>.
These mimic the combinations of purpose and trust settings used in SSL, CMS
and S/MIME.
As of OpenSSL 1.1.0, the trust model is inferred from the purpose when not
specified, so the B<-verify_name> options are functionally equivalent to the
corresponding B<-purpose> settings.
=item B<-x509_strict>
For strict X.509 compliance, disable non-compliant workarounds for broken
certificates.
=item B<-show_chain>
Display information about the certificate chain that has been built (if
successful). Certificates in the chain that came from the untrusted list will be
flagged as "untrusted".
=item B<->
Indicates the last option. All arguments following this are assumed to be
certificate files. This is useful if the first certificate filename begins
with a B<->.
=item B<certificates>
One or more certificates to verify. If no certificates are given, B<verify>
will attempt to read a certificate from standard input. Certificates must be
in PEM format.
=back
=head1 VERIFY OPERATION
The B<verify> program uses the same functions as the internal SSL and S/MIME
verification, therefore this description applies to these verify operations
too.
There is one crucial difference between the verify operations performed
by the B<verify> program: wherever possible an attempt is made to continue
after an error whereas normally the verify operation would halt on the
first error. This allows all the problems with a certificate chain to be
determined.
The verify operation consists of a number of separate steps.
Firstly a certificate chain is built up starting from the supplied certificate
and ending in the root CA.
It is an error if the whole chain cannot be built up.
The chain is built up by looking up the issuers certificate of the current
certificate.
If a certificate is found which is its own issuer it is assumed to be the root
CA.
The process of 'looking up the issuers certificate' itself involves a number of
steps.
All certificates whose subject name matches the issuer name of the current
certificate are subject to further tests.
The relevant authority key identifier components of the current certificate (if
present) must match the subject key identifier (if present) and issuer and
serial number of the candidate issuer, in addition the keyUsage extension of
the candidate issuer (if present) must permit certificate signing.
The lookup first looks in the list of untrusted certificates and if no match
is found the remaining lookups are from the trusted certificates. The root CA
is always looked up in the trusted certificate list: if the certificate to
verify is a root certificate then an exact match must be found in the trusted
list.
The second operation is to check every untrusted certificate's extensions for
consistency with the supplied purpose. If the B<-purpose> option is not included
then no checks are done. The supplied or "leaf" certificate must have extensions
compatible with the supplied purpose and all other certificates must also be valid
CA certificates. The precise extensions required are described in more detail in
the B<CERTIFICATE EXTENSIONS> section of the B<x509> utility.
The third operation is to check the trust settings on the root CA. The root CA
should be trusted for the supplied purpose.
For compatibility with previous versions of OpenSSL, a certificate with no
trust settings is considered to be valid for all purposes.
The final operation is to check the validity of the certificate chain. The validity
period is checked against the current system time and the notBefore and notAfter
dates in the certificate. The certificate signatures are also checked at this
point.
If all operations complete successfully then certificate is considered valid. If
any operation fails then the certificate is not valid.
=head1 DIAGNOSTICS
When a verify operation fails the output messages can be somewhat cryptic. The
general form of the error message is:
server.pem: /C=AU/ST=Queensland/O=CryptSoft Pty Ltd/CN=Test CA (1024 bit)
error 24 at 1 depth lookup:invalid CA certificate
The first line contains the name of the certificate being verified followed by
the subject name of the certificate. The second line contains the error number
and the depth. The depth is number of the certificate being verified when a
problem was detected starting with zero for the certificate being verified itself
then 1 for the CA that signed the certificate and so on. Finally a text version
of the error number is presented.
A partial list of the error codes and messages is shown below, this also
includes the name of the error code as defined in the header file x509_vfy.h
Some of the error codes are defined but never returned: these are described
as "unused".
=over 4
=item B<X509_V_OK>
The operation was successful.
=item B<X509_V_ERR_UNSPECIFIED>
Unspecified error; should not happen.
=item B<X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT>
The issuer certificate of a looked up certificate could not be found. This
normally means the list of trusted certificates is not complete.
=item B<X509_V_ERR_UNABLE_TO_GET_CRL>
The CRL of a certificate could not be found.
=item B<X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE>
The certificate signature could not be decrypted. This means that the actual signature value
could not be determined rather than it not matching the expected value, this is only
meaningful for RSA keys.
=item B<X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE>
The CRL signature could not be decrypted: this means that the actual signature value
could not be determined rather than it not matching the expected value. Unused.
=item B<X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY>
The public key in the certificate SubjectPublicKeyInfo could not be read.
=item B<X509_V_ERR_CERT_SIGNATURE_FAILURE>
The signature of the certificate is invalid.
=item B<X509_V_ERR_CRL_SIGNATURE_FAILURE>
The signature of the CRL is invalid.
=item B<X509_V_ERR_CERT_NOT_YET_VALID>
The certificate is not yet valid: the notBefore date is after the current time.
=item B<X509_V_ERR_CERT_HAS_EXPIRED>
The certificate has expired: that is the notAfter date is before the current time.
=item B<X509_V_ERR_CRL_NOT_YET_VALID>
The CRL is not yet valid.
=item B<X509_V_ERR_CRL_HAS_EXPIRED>
The CRL has expired.
=item B<X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD>
The certificate notBefore field contains an invalid time.
=item B<X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD>
The certificate notAfter field contains an invalid time.
=item B<X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD>
The CRL lastUpdate field contains an invalid time.
=item B<X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD>
The CRL nextUpdate field contains an invalid time.
=item B<X509_V_ERR_OUT_OF_MEM>
An error occurred trying to allocate memory. This should never happen.
=item B<X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT>
The passed certificate is self-signed and the same certificate cannot be found in the list of
trusted certificates.
=item B<X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN>
The certificate chain could be built up using the untrusted certificates but the root could not
be found locally.
=item B<X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY>
The issuer certificate could not be found: this occurs if the issuer
certificate of an untrusted certificate cannot be found.
=item B<X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE>
No signatures could be verified because the chain contains only one certificate and it is not
self signed.
=item B<X509_V_ERR_CERT_CHAIN_TOO_LONG>
The certificate chain length is greater than the supplied maximum depth. Unused.
=item B<X509_V_ERR_CERT_REVOKED>
The certificate has been revoked.
=item B<X509_V_ERR_INVALID_CA>
A CA certificate is invalid. Either it is not a CA or its extensions are not consistent
with the supplied purpose.
=item B<X509_V_ERR_PATH_LENGTH_EXCEEDED>
The basicConstraints pathlength parameter has been exceeded.
=item B<X509_V_ERR_INVALID_PURPOSE>
The supplied certificate cannot be used for the specified purpose.
=item B<X509_V_ERR_CERT_UNTRUSTED>
The root CA is not marked as trusted for the specified purpose.
=item B<X509_V_ERR_CERT_REJECTED>
The root CA is marked to reject the specified purpose.
=item B<X509_V_ERR_SUBJECT_ISSUER_MISMATCH>
not used as of OpenSSL 1.1.0 as a result of the deprecation of the
B<-issuer_checks> option.
=item B<X509_V_ERR_AKID_SKID_MISMATCH>
Not used as of OpenSSL 1.1.0 as a result of the deprecation of the
B<-issuer_checks> option.
=item B<X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH>
Not used as of OpenSSL 1.1.0 as a result of the deprecation of the
B<-issuer_checks> option.
=item B<X509_V_ERR_KEYUSAGE_NO_CERTSIGN>
Not used as of OpenSSL 1.1.0 as a result of the deprecation of the
B<-issuer_checks> option.
=item B<X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER>
Unable to get CRL issuer certificate.
=item B<X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION>
Unhandled critical extension.
=item B<X509_V_ERR_KEYUSAGE_NO_CRL_SIGN>
Key usage does not include CRL signing.
=item B<X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION>
Unhandled critical CRL extension.
=item B<X509_V_ERR_INVALID_NON_CA>
Invalid non-CA certificate has CA markings.
=item B<X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED>
Proxy path length constraint exceeded.
=item B<X509_V_ERR_PROXY_SUBJECT_INVALID>
Proxy certificate subject is invalid. It MUST be the same as the issuer
with a single CN component added.
=item B<X509_V_ERR_KEYUSAGE_NO_DIGITAL_SIGNATURE>
Key usage does not include digital signature.
=item B<X509_V_ERR_PROXY_CERTIFICATES_NOT_ALLOWED>
Proxy certificates not allowed, please use B<-allow_proxy_certs>.
=item B<X509_V_ERR_INVALID_EXTENSION>
Invalid or inconsistent certificate extension.
=item B<X509_V_ERR_INVALID_POLICY_EXTENSION>
Invalid or inconsistent certificate policy extension.
=item B<X509_V_ERR_NO_EXPLICIT_POLICY>
No explicit policy.
=item B<X509_V_ERR_DIFFERENT_CRL_SCOPE>
Different CRL scope.
=item B<X509_V_ERR_UNSUPPORTED_EXTENSION_FEATURE>
Unsupported extension feature.
=item B<X509_V_ERR_UNNESTED_RESOURCE>
RFC 3779 resource not subset of parent's resources.
=item B<X509_V_ERR_PERMITTED_VIOLATION>
Permitted subtree violation.
=item B<X509_V_ERR_EXCLUDED_VIOLATION>
Excluded subtree violation.
=item B<X509_V_ERR_SUBTREE_MINMAX>
Name constraints minimum and maximum not supported.
=item B<X509_V_ERR_APPLICATION_VERIFICATION>
Application verification failure. Unused.
=item B<X509_V_ERR_UNSUPPORTED_CONSTRAINT_TYPE>
Unsupported name constraint type.
=item B<X509_V_ERR_UNSUPPORTED_CONSTRAINT_SYNTAX>
Unsupported or invalid name constraint syntax.
=item B<X509_V_ERR_UNSUPPORTED_NAME_SYNTAX>
Unsupported or invalid name syntax.
=item B<X509_V_ERR_CRL_PATH_VALIDATION_ERROR>
CRL path validation error.
=item B<X509_V_ERR_PATH_LOOP>
Path loop.
=item B<X509_V_ERR_SUITE_B_INVALID_VERSION>
Suite B: certificate version invalid.
=item B<X509_V_ERR_SUITE_B_INVALID_ALGORITHM>
Suite B: invalid public key algorithm.
=item B<X509_V_ERR_SUITE_B_INVALID_CURVE>
Suite B: invalid ECC curve.
=item B<X509_V_ERR_SUITE_B_INVALID_SIGNATURE_ALGORITHM>
Suite B: invalid signature algorithm.
=item B<X509_V_ERR_SUITE_B_LOS_NOT_ALLOWED>
Suite B: curve not allowed for this LOS.
=item B<X509_V_ERR_SUITE_B_CANNOT_SIGN_P_384_WITH_P_256>
Suite B: cannot sign P-384 with P-256.
=item B<X509_V_ERR_HOSTNAME_MISMATCH>
Hostname mismatch.
=item B<X509_V_ERR_EMAIL_MISMATCH>
Email address mismatch.
=item B<X509_V_ERR_IP_ADDRESS_MISMATCH>
IP address mismatch.
=item B<X509_V_ERR_DANE_NO_MATCH>
DANE TLSA authentication is enabled, but no TLSA records matched the
certificate chain.
This error is only possible in L<s_client(1)>.
=back
=head1 BUGS
Although the issuer checks are a considerable improvement over the old technique they still
suffer from limitations in the underlying X509_LOOKUP API. One consequence of this is that
trusted certificates with matching subject name must either appear in a file (as specified by the
B<-CAfile> option) or a directory (as specified by B<-CApath>). If they occur in both then only
the certificates in the file will be recognised.
Previous versions of OpenSSL assume certificates with matching subject name are identical and
mishandled them.
Previous versions of this documentation swapped the meaning of the
B<X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT> and
B<X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY> error codes.
=head1 SEE ALSO
L<x509(1)>
=head1 HISTORY
The B<-show_chain> option was first added to OpenSSL 1.1.0.
The B<-issuer_checks> option is deprecated as of OpenSSL 1.1.0 and
is silently ignored.
=head1 COPYRIGHT
Copyright 2000-2016 The OpenSSL Project Authors. All Rights Reserved.
Licensed under the OpenSSL license (the "License"). You may not use
this file except in compliance with the License. You can obtain a copy
in the file LICENSE in the source distribution or at
L<https://www.openssl.org/source/license.html>.
=cut
| GaloisInc/hacrypto | src/C/openssl/openssl-1.1.0b/doc/apps/verify.pod | Perl | bsd-3-clause | 22,066 |
package Moose::Cookbook::Extending::Mooseish_MooseSugar;
# ABSTRACT: Acting like Moose.pm and providing sugar Moose-style
=pod
=head1 NAME
Moose::Cookbook::Extending::Mooseish_MooseSugar - Acting like Moose.pm and providing sugar Moose-style
=head1 VERSION
version 2.0604
=head1 SYNOPSIS
package MyApp::Mooseish;
use Moose::Exporter;
Moose::Exporter->setup_import_methods(
with_meta => ['has_table'],
class_metaroles => {
class => ['MyApp::Meta::Class::Trait::HasTable'],
},
);
sub has_table {
my $meta = shift;
$meta->table(shift);
}
package MyApp::Meta::Class::Trait::HasTable;
use Moose::Role;
has table => (
is => 'rw',
isa => 'Str',
);
=head1 DESCRIPTION
This recipe expands on the use of L<Moose::Exporter> we saw in
L<Moose::Cookbook::Extending::ExtensionOverview> and the class metaclass trait
we saw in L<Moose::Cookbook::Meta::Table_MetaclassTrait>. In this example we
provide our own metaclass trait, and we also export a C<has_table> sugar
function.
The C<with_meta> parameter specifies a list of functions that should
be wrapped before exporting. The wrapper simply ensures that the
importing package's appropriate metaclass object is the first argument
to the function, so we can do C<S<my $meta = shift;>>.
See the L<Moose::Exporter> docs for more details on its API.
=head1 USING MyApp::Mooseish
The purpose of all this code is to provide a Moose-like
interface. Here's what it would look like in actual use:
package MyApp::User;
use namespace::autoclean;
use Moose;
use MyApp::Mooseish;
has_table 'User';
has 'username' => ( is => 'ro' );
has 'password' => ( is => 'ro' );
sub login { ... }
=head1 CONCLUSION
Providing sugar functions can make your extension look much more
Moose-ish. See L<Fey::ORM> for a more extensive example.
=begin testing
{
package MyApp::User;
use Moose;
MyApp::Mooseish->import;
has_table( 'User' );
has( 'username' => ( is => 'ro' ) );
has( 'password' => ( is => 'ro' ) );
sub login { }
}
can_ok( MyApp::User->meta, 'table' );
is( MyApp::User->meta->table, 'User',
'MyApp::User->meta->table returns User' );
ok( MyApp::User->can('username'),
'MyApp::User has username method' );
=end testing
=head1 AUTHOR
Moose is maintained by the Moose Cabal, along with the help of many contributors. See L<Moose/CABAL> and L<Moose/CONTRIBUTORS> for details.
=head1 COPYRIGHT AND LICENSE
This software is copyright (c) 2012 by Infinity Interactive, Inc..
This is free software; you can redistribute it and/or modify it under
the same terms as the Perl 5 programming language system itself.
=cut
__END__
| Dokaponteam/ITF_Project | xampp/perl/vendor/lib/Moose/Cookbook/Extending/Mooseish_MooseSugar.pod | Perl | mit | 2,712 |
package File::Spec::Cygwin;
use strict;
use vars qw(@ISA $VERSION);
require File::Spec::Unix;
$VERSION = '3.39_02';
$VERSION =~ tr/_//;
@ISA = qw(File::Spec::Unix);
=head1 NAME
File::Spec::Cygwin - methods for Cygwin file specs
=head1 SYNOPSIS
require File::Spec::Cygwin; # Done internally by File::Spec if needed
=head1 DESCRIPTION
See L<File::Spec> and L<File::Spec::Unix>. This package overrides the
implementation of these methods, not the semantics.
This module is still in beta. Cygwin-knowledgeable folks are invited
to offer patches and suggestions.
=cut
=pod
=over 4
=item canonpath
Any C<\> (backslashes) are converted to C</> (forward slashes),
and then File::Spec::Unix canonpath() is called on the result.
=cut
# Canonicalize a Cygwin path: backslashes become forward slashes, a leading
# //node network prefix is preserved verbatim, and the remainder is cleaned
# up by the Unix canonpath.
sub canonpath {
    my ($self, $path) = @_;
    return unless defined $path;

    # Single-character translation: every '\' becomes '/'.
    $path =~ tr{\\}{/};

    # Peel off a leading //network node (if any) so Unix canonpath does not
    # collapse the double slash; it is re-attached unchanged afterwards.
    my $node = '';
    if ($path =~ s{^(//[^/]+)(?:/|\z)}{/}s) {
        $node = $1;
    }
    return $node . $self->SUPER::canonpath($path);
}
# Join directory components.  A lone leading '/' or '\' component is replaced
# by an empty component so the result cannot look like a //network/path.
sub catdir {
    my $self = shift;
    return unless @_;

    my $first = $_[0];
    if ($first and ($first eq '/' or $first eq '\\')) {
        # Drop the bare slash and let Unix catdir supply the leading '/'.
        shift @_;
        return $self->SUPER::catdir('', @_);
    }
    return $self->SUPER::catdir(@_);
}
=pod
=item file_name_is_absolute
True is returned if the file name begins with C<drive_letter:>,
and if not, File::Spec::Unix file_name_is_absolute() is called.
=cut
# A name is absolute when it begins with an optional drive letter followed by
# either kind of slash (e.g. "C:/test", "/foo", "\bar"); otherwise defer to
# the Unix rules.
sub file_name_is_absolute {
    my ($self, $file) = @_;
    return 1 if $file =~ m{ \A (?: [a-z] : )? [\\/] }xis;    # C:/test
    return $self->SUPER::file_name_is_absolute($file);
}
=item tmpdir (override)
Returns a string representation of the first existing directory
from the following list:
$ENV{TMPDIR}
/tmp
$ENV{'TMP'}
$ENV{'TEMP'}
C:/temp
Since Perl 5.8.0, if running under taint mode, and if the environment
variables are tainted, they are not used.
=cut
# Cache for tmpdir(): the first existing candidate directory, computed once.
my $cached_tmpdir;
sub tmpdir {
    unless (defined $cached_tmpdir) {
        $cached_tmpdir = $_[0]->_tmpdir(
            $ENV{TMPDIR}, "/tmp", $ENV{'TMP'}, $ENV{'TEMP'}, 'C:/temp',
        );
    }
    return $cached_tmpdir;
}
=item case_tolerant
Override Unix. Cygwin case-tolerance depends on managed mount settings and
as with MsWin32 on GetVolumeInformation() $ouFsFlags == FS_CASE_SENSITIVE,
indicating the case significance when comparing file specifications.
Default: 1
=cut
# Decide whether file-name comparison should ignore case on this volume.
# Returns 1 (case-tolerant, the documented default) or 0 (case-sensitive).
# Takes an optional drive/mount point; when omitted, a spec for drive C: is
# derived from the /cygwin mount prefix.
sub case_tolerant {
    # Outside Cygwin, or when mount-flag introspection is unavailable,
    # fall back to the documented default of case tolerance.
    return 1 unless $^O eq 'cygwin'
        and defined &Cygwin::mount_flags;

    my $drive = shift;
    if (! $drive) {
        # No drive supplied: the mount prefix is the last comma-separated
        # field of the /cygwin mount flags.  Build the path for drive C:
        # under that prefix ("cygdrive", bare "/", or a custom prefix).
        my @flags = split(/,/, Cygwin::mount_flags('/cygwin'));
        my $prefix = pop(@flags);
        if (! $prefix || $prefix eq 'cygdrive') {
            $drive = '/cygdrive/c';
        } elsif ($prefix eq '/') {
            $drive = '/c';
        } else {
            $drive = "$prefix/c";
        }
    }

    # "Managed" Cygwin mounts store names case-sensitively.
    my $mntopts = Cygwin::mount_flags($drive);
    if ($mntopts and ($mntopts =~ /,managed/)) {
        return 0;
    }

    # Otherwise ask Windows directly via GetVolumeInformation(); if
    # Win32API::File cannot be loaded, assume the tolerant default.
    eval { require Win32API::File; } or return 1;
    my $osFsType = "\0"x256;
    my $osVolName = "\0"x256;
    my $ouFsFlags = 0;
    Win32API::File::GetVolumeInformation($drive, $osVolName, 256, [], [], $ouFsFlags, $osFsType, 256 );
    # FS_CASE_SENSITIVE set in the volume flags means comparisons honour case.
    if ($ouFsFlags & Win32API::File::FS_CASE_SENSITIVE()) { return 0; }
    else { return 1; }
}
=back
=head1 COPYRIGHT
Copyright (c) 2004,2007 by the Perl 5 Porters. All rights reserved.
This program is free software; you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
1;
| leighpauls/k2cro4 | third_party/perl/perl/lib/File/Spec/Cygwin.pm | Perl | bsd-3-clause | 3,456 |
package i3test;
# vim:ts=4:sw=4:expandtab
use strict; use warnings;
use File::Temp qw(tmpnam tempfile tempdir);
use Test::Builder;
use X11::XCB::Rect;
use X11::XCB::Window;
use X11::XCB qw(:all);
use AnyEvent::I3;
use List::Util qw(first);
use Time::HiRes qw(sleep);
use Cwd qw(abs_path);
use Scalar::Util qw(blessed);
use SocketActivation;
use v5.10;
# preload
use Test::More ();
use Data::Dumper ();
use Exporter ();
# Helpers exported into every testcase's namespace by default (plus the
# shared X11 connection $x).
our @EXPORT = qw(
    get_workspace_names
    get_unused_workspace
    fresh_workspace
    get_ws_content
    get_ws
    get_focused
    open_empty_con
    open_window
    open_floating_window
    get_dock_clients
    cmd
    sync_with_i3
    exit_gracefully
    workspace_exists
    focused_ws
    get_socket_path
    launch_with_config
    wait_for_event
    wait_for_map
    wait_for_unmap
    $x
);
=head1 NAME
i3test - Testcase setup module
=encoding utf-8
=head1 SYNOPSIS
use i3test;
my $ws = fresh_workspace;
is_num_children($ws, 0, 'no containers on this workspace yet');
cmd 'open';
is_num_children($ws, 1, 'one container after "open"');
done_testing;
=head1 DESCRIPTION
This module is used in every i3 testcase and takes care of automatically
starting i3 before any test instructions run. It also saves you typing of lots
of boilerplate in every test file.
i3test automatically "use"s C<Test::More>, C<Data::Dumper>, C<AnyEvent::I3>,
C<Time::HiRes>’s C<sleep> and C<i3test::Test> so that all of them are available
to you in your testcase.
See also C<i3test::Test> (L<http://build.i3wm.org/docs/lib-i3test-test.html>)
which provides additional test instructions (like C<ok> or C<is>).
=cut
# File-scoped module state:
my $tester = Test::Builder->new();   # for reporting failures from this module itself
my $_cached_socket_path = undef;     # memoized result of get_socket_path()
my $_sync_window = undef;            # window re-used across sync_with_i3() calls
my $tmp_socket_path = undef;         # ipc socket path of the i3 under test
our $x;                              # shared X11 connection, exported to testcases
# Monotonically increasing counter used to give each test window a unique
# default name ("Window <n>"). The count lives in a closure so it cannot
# be reset from outside.
BEGIN {
    my $next_window_number = 0;
    sub counter_window {
        my $current = $next_window_number;
        $next_window_number = $next_window_number + 1;
        return $current;
    }
}
my $i3_pid;        # pid of the i3 started by launch_with_config(), undef once reaped
my $i3_autostart;  # whether import() starts i3 automatically (default: yes)
# Process-exit cleanup: verify that manually started i3 instances were
# shut down via exit_gracefully(), then stop the auto-started i3 without
# disturbing the testcase's own exit status.
END {
    # testcases which start i3 manually should always call exit_gracefully
    # on their own. Let’s see, whether they really did.
    if (! $i3_autostart) {
        return unless $i3_pid;
        $tester->ok(undef, 'testcase called exit_gracefully()');
    }

    # don't trigger SIGCHLD handler
    local $SIG{CHLD};

    # From perldoc -v '$?':
    # Inside an "END" subroutine $? contains the value
    # that is going to be given to "exit()".
    #
    # Since waitpid sets $?, we need to localize it,
    # otherwise TAP would misinterpret our return status
    local $?;

    # When measuring code coverage, try to exit i3 cleanly (otherwise, .gcda
    # files are not written)
    if ($ENV{COVERAGE} || $ENV{VALGRIND}) {
        exit_gracefully($i3_pid, "/tmp/nested-$ENV{DISPLAY}");
    } else {
        kill(9, $i3_pid)
            or $tester->BAIL_OUT("could not kill i3");
        waitpid $i3_pid, 0;
    }
}
# Custom import(): launches i3 (unless i3_autostart => 0), loads
# Test::More and friends into the *caller's* namespace via a string eval,
# then tail-calls Exporter::import for the symbol exports.
sub import {
    my ($class, %args) = @_;
    my $pkg = caller;

    $i3_autostart = delete($args{i3_autostart}) // 1;

    # Start i3 asynchronously (dont_block) so the setup below can run in
    # parallel; we wait on $cv at the very end.
    my $cv = launch_with_config('-default', dont_block => 1)
        if $i3_autostart;

    # Remaining import args are forwarded to Test::More (e.g. plans).
    my $test_more_args = '';
    $test_more_args = join(' ', 'qw(', %args, ')') if keys %args;

    local $@;
    # String eval is required so the "use" statements take effect in the
    # caller's package, not in i3test.
    eval << "__";
package $pkg;
use Test::More $test_more_args;
use Data::Dumper;
use AnyEvent::I3;
use Time::HiRes qw(sleep);
use i3test::Test;
__
    $tester->BAIL_OUT("$@") if $@;
    feature->import(":5.10");
    strict->import;
    warnings->import;

    $x ||= i3test::X11->new;
    # set the pointer to a predictable position in case a previous test has
    # disturbed it
    $x->root->warp_pointer(0, 0);
    $cv->recv if $i3_autostart;

    @_ = ($class);
    # Tail-call Exporter so exports are attributed to the right caller.
    goto \&Exporter::import;
}
=head1 EXPORT
=head2 wait_for_event($timeout, $callback)
Waits for the next event and calls the given callback for every event to
determine if this is the event we are waiting for.
Can be used to wait until a window is mapped, until a ClientMessage is
received, etc.
wait_for_event 0.25, sub { $_[0]->{response_type} == MAP_NOTIFY };
=cut
# Runs the AnyEvent loop until $cb returns true for an incoming X11 event
# or $timeout (fractional seconds) expires. Returns 1 on match, 0 on
# timeout.
sub wait_for_event {
    my ($timeout, $cb) = @_;

    my $cv = AE::cv;

    $x->flush;

    # unfortunately, there is no constant for this
    my $ae_read = 0;

    # I/O watcher on the X connection: drain all pending events, handing
    # each one to $cb until it accepts one.
    my $guard = AE::io $x->get_file_descriptor, $ae_read, sub {
        while (defined(my $event = $x->poll_for_event)) {
            if ($cb->($event)) {
                $cv->send(1);
                last;
            }
        }
    };

    # Trigger timeout after $timeout seconds (can be fractional)
    my $t = AE::timer $timeout, 0, sub { warn "timeout ($timeout secs)"; $cv->send(0) };

    my $result = $cv->recv;
    # Drop the timer and the I/O watcher before returning.
    undef $t;
    undef $guard;
    return $result;
}
=head2 wait_for_map($window)
Thin wrapper around wait_for_event which waits for MAP_NOTIFY.
Make sure to include 'structure_notify' in the window’s event_mask attribute.
This function is called by C<open_window>, so in most cases, you don’t need to
call it on your own. If you need special setup of the window before mapping,
you might have to map it on your own and use this function:
my $window = open_window(dont_map => 1);
# Do something special with the window first
# …
# Now map it and wait until it’s been mapped
$window->map;
wait_for_map($window);
=cut
sub wait_for_map {
    my ($win) = @_;
    # Accept either an X11::XCB::Window object or a raw X11 window id.
    my $id;
    if (blessed($win) && $win->isa('X11::XCB::Window')) {
        $id = $win->id;
    } else {
        $id = $win;
    }
    # Block (up to 4 seconds) until a MapNotify for exactly this window
    # arrives.
    return wait_for_event(4, sub {
        my ($event) = @_;
        return ($event->{response_type} == MAP_NOTIFY
                && $event->{window} == $id);
    });
}
=head2 wait_for_unmap($window)
Wrapper around C<wait_for_event> which waits for UNMAP_NOTIFY. Also calls
C<sync_with_i3> to make sure i3 also picked up and processed the UnmapNotify
event.
my $ws = fresh_workspace;
my $window = open_window;
is_num_children($ws, 1, 'one window on workspace');
$window->unmap;
wait_for_unmap;
is_num_children($ws, 0, 'no more windows on this workspace');
=cut
sub wait_for_unmap {
    my ($win) = @_;
    # Note: deliberately not filtered on the window id; the id check is
    # disabled in the original implementation as well.
    wait_for_event(4, sub {
        my ($event) = @_;
        return $event->{response_type} == UNMAP_NOTIFY;
    });
    # Ensure i3 itself has picked up and processed the UnmapNotify.
    sync_with_i3();
}
=head2 open_window([ $args ])
Opens a new window (see C<X11::XCB::Window>), maps it, waits until it got mapped
and synchronizes with i3.
The following arguments can be passed:
=over 4
=item class
The X11 window class (e.g. WINDOW_CLASS_INPUT_OUTPUT), not to be confused with
the WM_CLASS!
=item rect
An arrayref with 4 members specifying the initial geometry (position and size)
of the window, e.g. C<< [ 0, 100, 70, 50 ] >> for a window appearing at x=0, y=100
with width=70 and height=50.
Note that this is entirely irrelevant for tiling windows.
=item background_color
The background pixel color of the window, formatted as "#rrggbb", like HTML
color codes (e.g. #c0c0c0). This is useful to tell windows apart when actually
watching the testcases.
=item event_mask
An arrayref containing strings which describe the X11 event mask we use for that
window. The default is C<< [ 'structure_notify' ] >>.
=item name
The window’s C<_NET_WM_NAME> (UTF-8 window title). By default, this is "Window
n" with n being replaced by a counter to keep windows apart.
=item dont_map
Set to a true value to avoid mapping the window (making it visible).
=item before_map
A coderef which is called before the window is mapped (unless C<dont_map> is
true). The freshly created C<$window> is passed as C<$_> and as the first
argument.
=back
The default values are equivalent to this call:
open_window(
class => WINDOW_CLASS_INPUT_OUTPUT
rect => [ 0, 0, 30, 30 ]
background_color => '#c0c0c0'
event_mask => [ 'structure_notify' ]
name => 'Window <n>'
);
Usually, though, calls are simpler:
my $top_window = open_window;
To identify the resulting window object in i3 commands, use the id property:
my $top_window = open_window;
cmd '[id="' . $top_window->id . '"] kill';
=cut
sub open_window {
    # Accept either a single hashref or a flat key => value list.
    my %args = (@_ == 1) ? %{ $_[0] } : @_;

    my $skip_map = delete $args{dont_map};
    my $map_hook = delete $args{before_map};

    # Fill in defaults; note that counter_window() is only invoked when
    # the caller did not supply a name (//= short-circuits).
    $args{class}            //= WINDOW_CLASS_INPUT_OUTPUT;
    $args{rect}             //= [ 0, 0, 30, 30 ];
    $args{background_color} //= '#c0c0c0';
    $args{event_mask}       //= [ 'structure_notify' ];
    $args{name}             //= 'Window ' . counter_window();

    my $win = $x->root->create_child(%args);
    $win->add_hint('input');

    if ($map_hook) {
        # TODO: investigate why _create is not needed
        $win->_create;
        # The hook sees the window both as $_ and as its first argument.
        $map_hook->($win) for ($win);
    }

    return $win if $skip_map;

    $win->map;
    wait_for_map($win);
    return $win;
}
=head2 open_floating_window([ $args ])
Thin wrapper around open_window which sets window_type to
C<_NET_WM_WINDOW_TYPE_UTILITY> to make the window floating.
The arguments are the same as those of C<open_window>.
=cut
sub open_floating_window {
    my %args = (@_ == 1) ? %{ $_[0] } : @_;
    # i3 automatically floats windows of type _NET_WM_WINDOW_TYPE_UTILITY.
    my $utility = $x->atom(name => '_NET_WM_WINDOW_TYPE_UTILITY');
    $args{window_type} = $utility;
    return open_window(\%args);
}
sub open_empty_con {
    my ($i3) = @_;
    # The 'open' command creates an empty container; hand back its id.
    my $result = $i3->command('open')->recv;
    return $result->[0]{id};
}
=head2 get_workspace_names()
Returns an arrayref containing the name of every workspace (regardless of its
output) which currently exists.
my $workspace_names = get_workspace_names;
is(scalar @$workspace_names, 3, 'three workspaces exist currently');
=cut
sub get_workspace_names {
    my $i3   = i3(get_socket_path());
    my $tree = $i3->get_tree->recv;

    my @workspaces;
    for my $output (@{ $tree->{nodes} }) {
        # __i3 is the internal, invisible output; skip it.
        next if $output->{name} eq '__i3';
        # The first CT_CON child of an output holds its workspaces.
        my $content = first { $_->{type} eq 'con' } @{ $output->{nodes} };
        push @workspaces, @{ $content->{nodes} };
    }

    return [ map { $_->{name} } @workspaces ];
}
=head2 get_unused_workspace
Returns a workspace name which has not yet been used. See also
C<fresh_workspace> which directly switches to an unused workspace.
my $ws = get_unused_workspace;
cmd "workspace $ws";
=cut
# Returns a workspace name which does not exist yet, by drawing tmpnam()
# candidates until one is free.
sub get_unused_workspace {
    # get_workspace_names() returns an *arrayref* (see its POD and
    # workspace_exists()), so it must be dereferenced here. The original
    # code stored the reference itself in @names, which made the
    # collision check below compare against the stringified reference --
    # i.e. it never actually checked for existing names.
    my @names = @{ get_workspace_names() };
    my $tmp;
    do { $tmp = tmpnam() } while ((scalar grep { $_ eq $tmp } @names) > 0);
    return $tmp;
}
=head2 fresh_workspace([ $args ])
Switches to an unused workspace and returns the name of that workspace.
Optionally switches to the specified output first.
my $ws = fresh_workspace;
# Get a fresh workspace on the second output.
my $ws = fresh_workspace(output => 1);
=cut
# Switches to a not-yet-used workspace and returns its name. With
# output => N, first focuses the workspace currently shown on output
# "fake-N" so the fresh workspace is created on that output.
sub fresh_workspace {
    my %args = @_;
    if (exists($args{output})) {
        my $i3 = i3(get_socket_path());
        my $tree = $i3->get_tree->recv;
        # Outputs created by the test harness are named "fake-<n>".
        my $output = first { $_->{name} eq "fake-$args{output}" }
            @{$tree->{nodes}};
        die "BUG: Could not find output $args{output}" unless defined($output);
        # Get the focused workspace on that output and switch to it.
        my $content = first { $_->{type} eq 'con' } @{$output->{nodes}};
        my $focused = $content->{focus}->[0];
        my $workspace = first { $_->{id} == $focused } @{$content->{nodes}};
        $workspace = $workspace->{name};
        cmd("workspace $workspace");
    }

    my $unused = get_unused_workspace;
    cmd("workspace $unused");
    # Implicit return of the new workspace's name.
    $unused
}
=head2 get_ws($workspace)
Returns the container (from the i3 layout tree) which represents C<$workspace>.
my $ws = fresh_workspace;
my $ws_con = get_ws($ws);
ok(!$ws_con->{urgent}, 'fresh workspace not marked urgent');
Here is an example which counts the number of urgent containers recursively,
starting from the workspace container:
sub count_urgent {
my ($con) = @_;
my @children = (@{$con->{nodes}}, @{$con->{floating_nodes}});
my $urgent = grep { $_->{urgent} } @children;
$urgent += count_urgent($_) for @children;
return $urgent;
}
my $urgent = count_urgent(get_ws($ws));
is($urgent, 3, "three urgent windows on workspace $ws");
=cut
sub get_ws {
    my ($name) = @_;
    my $i3   = i3(get_socket_path());
    my $tree = $i3->get_tree->recv;

    # Gather the workspace containers of every output; they live inside
    # the first CT_CON child of each output node.
    my @workspaces;
    for my $output (@{ $tree->{nodes} }) {
        my $content = first { $_->{type} eq 'con' } @{ $output->{nodes} };
        push @workspaces, @{ $content->{nodes} };
    }

    # Workspace names are unique, so the first match is the only match.
    return first { $_->{name} eq $name } @workspaces;
}
=head2 get_ws_content($workspace)
Returns the content (== tree, starting from the node of a workspace)
of a workspace. If called in array context, also includes the focus
stack of the workspace.
my $nodes = get_ws_content($ws);
is(scalar @$nodes, 4, 'there are four containers at workspace-level');
Or, in array context:
my $window = open_window;
my ($nodes, $focus) = get_ws_content($ws);
is($focus->[0], $window->id, 'newly opened window focused');
Note that this function does not do recursion for you! It only returns the
containers B<on workspace level>. If you want to work with all containers (even
nested ones) on a workspace, you have to use recursion:
# NB: This function does not count floating windows
sub count_urgent {
my ($nodes) = @_;
my $urgent = 0;
for my $con (@$nodes) {
$urgent++ if $con->{urgent};
$urgent += count_urgent($con->{nodes});
}
return $urgent;
}
my $nodes = get_ws_content($ws);
my $urgent = count_urgent($nodes);
is($urgent, 3, "three urgent windows on workspace $ws");
If you also want to deal with floating windows, you have to use C<get_ws>
instead and access C<< ->{nodes} >> and C<< ->{floating_nodes} >> on your own.
=cut
sub get_ws_content {
    my ($name) = @_;
    my $ws = get_ws($name);
    # In list context, additionally hand back the workspace's focus stack.
    return wantarray ? ($ws->{nodes}, $ws->{focus}) : $ws->{nodes};
}
=head2 get_focused($workspace)
Returns the container ID of the currently focused container on C<$workspace>.
Note that the container ID is B<not> the X11 window ID, so comparing the result
of C<get_focused> with a window's C<< ->{id} >> property does B<not> work.
my $ws = fresh_workspace;
my $first_window = open_window;
my $first_id = get_focused();
my $second_window = open_window;
my $second_id = get_focused();
cmd 'focus left';
is(get_focused($ws), $first_id, 'first window focused');
=cut
# Descends the focus chain starting at the workspace container and
# returns the container ID (not the X11 window id!) of the most deeply
# focused container.
sub get_focused {
    my ($ws) = @_;
    my $con = get_ws($ws);

    my @focused = @{$con->{focus}};
    my $lf;
    while (@focused > 0) {
        # Head of the focus stack is the focused child at this level.
        $lf = $focused[0];
        last unless defined($con->{focus});
        @focused = @{$con->{focus}};
        # Descend into the matching child (tiling or floating).
        # NOTE(review): if no child matches the focused id, $con becomes
        # undef and the next iteration would dereference it -- presumably
        # the tree always contains the focused id; confirm.
        my @cons = grep { $_->{id} == $lf } (@{$con->{nodes}}, @{$con->{'floating_nodes'}});
        $con = $cons[0];
    }

    return $lf;
}
=head2 get_dock_clients([ $dockarea ])
Returns an array of all dock containers in C<$dockarea> (one of "top" or
"bottom"). If C<$dockarea> is not specified, returns an array of all dock
containers in any dockarea.
my @docked = get_dock_clients;
is(scalar @docked, 0, 'no dock clients yet');
=cut
# Returns all dock client containers, optionally restricted to the 'top'
# or 'bottom' dockarea. Without an argument, docks of all dockareas on
# all outputs are returned.
sub get_dock_clients {
    my $which = shift;
    my $tree = i3(get_socket_path())->get_tree->recv;
    my @outputs = @{$tree->{nodes}};
    # Children of all dockareas
    my @docked;
    for my $output (@outputs) {
        if (!defined($which)) {
            @docked = (@docked, map { @{$_->{nodes}} }
                       grep { $_->{type} eq 'dockarea' }
                       @{$output->{nodes}});
        } elsif ($which eq 'top') {
            # The top dockarea is the first dockarea child of the output…
            my $first = first { $_->{type} eq 'dockarea' } @{$output->{nodes}};
            @docked = (@docked, @{$first->{nodes}}) if defined($first);
        } elsif ($which eq 'bottom') {
            # …and the bottom dockarea is the last one.
            my @matching = grep { $_->{type} eq 'dockarea' } @{$output->{nodes}};
            my $last = $matching[-1];
            @docked = (@docked, @{$last->{nodes}}) if defined($last);
        }
    }
    return @docked;
}
=head2 cmd($command)
Sends the specified command to i3 and returns the output.
my $ws = unused_workspace;
cmd "workspace $ws";
cmd 'focus right';
=cut
sub cmd {
    # Forward all arguments as one i3 command and wait for the reply.
    my $conn = i3(get_socket_path());
    return $conn->command(@_)->recv;
}
=head2 workspace_exists($workspace)
Returns true if C<$workspace> is the name of an existing workspace.
my $old_ws = focused_ws;
# switch away from where we currently are
fresh_workspace;
ok(workspace_exists($old_ws), 'old workspace still exists');
=cut
sub workspace_exists {
    my ($name) = @_;
    # Count exact name matches among all existing workspaces.
    my $matches = grep { $_ eq $name } @{ get_workspace_names() };
    return $matches > 0;
}
=head2 focused_ws
Returns the name of the currently focused workspace.
my $ws = focused_ws;
is($ws, '1', 'i3 starts on workspace 1');
=cut
# Returns the name of the workspace which currently has input focus.
sub focused_ws {
    my $i3 = i3(get_socket_path());
    my $tree = $i3->get_tree->recv;
    # Head of the root focus stack is the focused output.
    my $focused = $tree->{focus}->[0];
    my $output = first { $_->{id} == $focused } @{$tree->{nodes}};
    # Descend into the output's content container (first CT_CON child).
    my $content = first { $_->{type} eq 'con' } @{$output->{nodes}};
    # NOTE(review): fullscreen_mode == 1 appears to mark the workspace
    # currently shown on the output (workspaces fill their output) --
    # confirm against the i3 tree documentation.
    my $first = first { $_->{fullscreen_mode} == 1 } @{$content->{nodes}};
    return $first->{name}
}
=head2 sync_with_i3([ $args ])
Sends an I3_SYNC ClientMessage with a random value to the root window.
i3 will reply with the same value, but, due to the order of events it
processes, only after all other events are done.
This can be used to ensure the results of a cmd 'focus left' are pushed to
X11 and that C<< $x->input_focus >> returns the correct value afterwards.
See also L<http://build.i3wm.org/docs/testsuite.html> for a longer explanation.
my $window = open_window;
$window->add_hint('urgency');
# Ensure i3 picked up the change
sync_with_i3;
The only time when you need to use the C<no_cache> argument is when you just
killed your own X11 connection:
cmd 'kill client';
# We need to re-establish the X11 connection which we just killed :).
$x = i3test::X11->new;
sync_with_i3(no_cache => 1);
=cut
# Blocks until i3 has processed all pending events, by sending an I3_SYNC
# ClientMessage to the root window and waiting for i3 to echo it back.
# Returns the random token instead when dont_wait_for_event is set.
sub sync_with_i3 {
    my %args = @_ == 1 ? %{$_[0]} : @_;

    # Since we need a (mapped) window for receiving a ClientMessage, we create
    # one on the first call of sync_with_i3. It will be re-used in all
    # subsequent calls.
    if (!exists($args{window_id}) &&
        (!defined($_sync_window) || exists($args{no_cache}))) {
        # Off-screen, override-redirect so it does not disturb the layout.
        $_sync_window = open_window(
            rect => [ -15, -15, 10, 10 ],
            override_redirect => 1,
        );
    }

    my $window_id = delete $args{window_id};
    $window_id //= $_sync_window->id;

    my $root = $x->get_root_window();
    # Generate a random number to identify this particular ClientMessage.
    my $myrnd = int(rand(255)) + 1;

    # Generate a ClientMessage, see xcb_client_message_t
    my $msg = pack "CCSLLLLLLL",
        CLIENT_MESSAGE, # response_type
        32,     # format
        0,      # sequence
        $root,  # destination window
        $x->atom(name => 'I3_SYNC')->id,

        $window_id,    # data[0]: our own window id
        $myrnd, # data[1]: a random value to identify the request
        0,
        0,
        0;

    # Send it to the root window -- since i3 uses the SubstructureRedirect
    # event mask, it will get the ClientMessage.
    $x->send_event(0, $root, EVENT_MASK_SUBSTRUCTURE_REDIRECT, $msg);

    return $myrnd if $args{dont_wait_for_event};

    # now wait until the reply is here
    return wait_for_event 4, sub {
        my ($event) = @_;
        # TODO: const
        # NOTE(review): 161 looks like CLIENT_MESSAGE (33) with the
        # "sent event" bit (0x80) set -- confirm before replacing with a
        # named constant.
        return 0 unless $event->{response_type} == 161;

        # Reply carries our window id and the token; match on the token.
        my ($win, $rnd) = unpack "LL", $event->{data};
        return ($rnd == $myrnd);
    };
}
=head2 exit_gracefully($pid, [ $socketpath ])
Tries to exit i3 gracefully (with the 'exit' cmd) or kills the PID if that fails.
If C<$socketpath> is not specified, C<get_socket_path()> will be called.
You only need to use this function if you have launched i3 on your own with
C<launch_with_config>. Otherwise, it will be automatically called when the
testcase ends.
use i3test i3_autostart => 0;
my $pid = launch_with_config($config);
# …
exit_gracefully($pid);
=cut
# Politely asks i3 to terminate via the IPC 'exit' command; falls back to
# SIGKILL when that fails. Cleans up test sockets and reaps the child.
sub exit_gracefully {
    my ($pid, $socketpath) = @_;
    $socketpath ||= get_socket_path();

    my $exited = 0;
    # The IPC connection may already be gone; eval shields us from that.
    eval {
        say "Exiting i3 cleanly...";
        i3($socketpath)->command('exit')->recv;
        $exited = 1;
    };

    if (!$exited) {
        kill(9, $pid)
            or $tester->BAIL_OUT("could not kill i3");
    }

    # Only remove sockets that this test suite created itself.
    if ($socketpath =~ m,^/tmp/i3-test-socket-,) {
        unlink($socketpath);
    }

    # Reap the child and clear the pid so the END block does not try to
    # kill it again.
    waitpid $pid, 0;
    undef $i3_pid;
}
=head2 get_socket_path([ $cache ])
Gets the socket path from the C<I3_SOCKET_PATH> atom stored on the X11 root
window. After the first call, this function will return a cached version of the
socket path unless you specify a false value for C<$cache>.
my $i3 = i3(get_socket_path());
$i3->command('nop test example')->recv;
To avoid caching:
my $i3 = i3(get_socket_path(0));
=cut
# Reads the i3 IPC socket path from the I3_SOCKET_PATH atom on the X11
# root window. Memoizes the result unless $cache is passed as false.
sub get_socket_path {
    my ($cache) = @_;
    $cache //= 1;

    if ($cache && defined($_cached_socket_path)) {
        return $_cached_socket_path;
    }

    # Fetch the property value (up to 256 * 4 bytes) from the root window.
    my $atom = $x->atom(name => 'I3_SOCKET_PATH');
    my $cookie = $x->get_property(0, $x->get_root_window(), $atom->id, GET_PROPERTY_TYPE_ANY, 0, 256);
    my $reply = $x->get_property_reply($cookie->{sequence});
    my $socketpath = $reply->{value};
    # i3 started via socket activation (see launch_with_config) listens on
    # the "-activation" socket instead.
    if ($socketpath eq "/tmp/nested-$ENV{DISPLAY}") {
        $socketpath .= '-activation';
    }
    $_cached_socket_path = $socketpath;
    return $socketpath;
}
=head2 launch_with_config($config, [ $args ])
Launches a new i3 process with C<$config> as configuration file. Useful for
tests which test specific config file directives.
use i3test i3_autostart => 0;
my $config = <<EOT;
# i3 config file (v4)
for_window [class="borderless"] border none
for_window [title="special borderless title"] border none
EOT
my $pid = launch_with_config($config);
# …
exit_gracefully($pid);
=cut
# Starts a fresh i3 process using $config as configuration ('-default'
# loads ./i3-test.config). Returns the new PID, or — with
# dont_block => 1 — a condvar that fires once i3 is ready.
sub launch_with_config {
    my ($config, %args) = @_;

    $tmp_socket_path = "/tmp/nested-$ENV{DISPLAY}";

    $args{dont_create_temp_dir} //= 0;

    # Write the configuration to a temp file (auto-removed at exit).
    my ($fh, $tmpfile) = tempfile("i3-cfg-for-$ENV{TESTNAME}-XXXXX", UNLINK => 1);

    if ($config ne '-default') {
        say $fh $config;
    } else {
        open(my $conf_fh, '<', './i3-test.config')
            or $tester->BAIL_OUT("could not open default config: $!");
        # Slurp the whole default config at once.
        local $/;
        say $fh scalar <$conf_fh>;
    }

    # Make i3 listen on our well-known test socket path.
    say $fh "ipc-socket $tmp_socket_path"
        unless $args{dont_add_socket_path};

    close($fh);

    my $cv = AnyEvent->condvar;
    $i3_pid = activate_i3(
        unix_socket_path => "$tmp_socket_path-activation",
        display => $ENV{DISPLAY},
        configfile => $tmpfile,
        outdir => $ENV{OUTDIR},
        testname => $ENV{TESTNAME},
        valgrind => $ENV{VALGRIND},
        strace => $ENV{STRACE},
        xtrace => $ENV{XTRACE},
        restart => $ENV{RESTART},
        cv => $cv,
        dont_create_temp_dir => $args{dont_create_temp_dir},
    );

    # force update of the cached socket path in lib/i3test
    # as soon as i3 has started
    $cv->cb(sub { get_socket_path(0) });

    return $cv if $args{dont_block};

    # blockingly wait until i3 is ready
    $cv->recv;

    return $i3_pid;
}
=head1 AUTHOR
Michael Stapelberg <michael@i3wm.org>
=cut
# Subclass of X11::XCB::Connection whose input_focus() first synchronizes
# with i3, so focus queries never observe stale state.
package i3test::X11;
use parent 'X11::XCB::Connection';

sub input_focus {
    my $self = shift;
    # Let i3 finish processing pending events before asking X11.
    i3test::sync_with_i3();
    return $self->SUPER::input_focus(@_);
}
1
| FauxFaux/i3 | testcases/lib/i3test.pm | Perl | bsd-3-clause | 23,551 |
#!/usr/bin/perl
# Sends a uwsgi subscription packet (modifier1 = 224) over UDP.
#
# Usage: subscribe.pl <server:port> <key> <address>
use strict;
use warnings;
use IO::Socket::INET;

# Bug fix: the original initialised the hash with an anonymous hash ref
# ("my %items = {};"), which stores the stringified reference as a junk
# key (and undef value) that then leaked into the packet.
my %items = (
    key     => $ARGV[1],
    address => $ARGV[2],
);

# Serialise each pair as <len><key><len><value> with little-endian 16-bit
# lengths (the uwsgi packet body format).
my $uwsgi_pkt = '';
foreach my $k (keys %items) {
    $uwsgi_pkt .= pack('v', length($k)) . $k
               .  pack('v', length($items{$k})) . $items{$k};
}

my $udp = IO::Socket::INET->new(PeerAddr => $ARGV[0], Proto => 'udp')
    or die "cannot create UDP socket to $ARGV[0]: $!";
# 4-byte uwsgi header: modifier1 = 224, 16-bit body size, modifier2 = 0.
$udp->send(pack('CvC', 224, length($uwsgi_pkt), 0).$uwsgi_pkt);
print ''.(length($uwsgi_pkt)+4).' bytes sent to '.$ARGV[0]."\n";
| jyotikamboj/container | uw-contrib/subscribe.pl | Perl | mit | 445 |
#!/usr/bin/perl
# perl -MCPAN -e shell
# install Encode::Escape;
#generate file
#grep -o -E '\[("p")[^\]]*\](.*?)\]' testIN.txt > out.txt
#grep -o -E '(\["p"[^]]*](.*?)])' inl2.txt > outl2.txt
#test 5 records
#grep -o -E '(\["p"[^]]*](.*?)])' inl2.txt | head -n5 | grep . > out.txt
use lib '/opt/local/lib/perl5/site_perl/5.16.1';
use utf8;
use Math::Trig;
# use strict;
# use warnings;
use JSON;
# use Unicode::Escape;
# use Encode::Escape;
# use Encode::Escape::Unicode;
# use URI::Escape;
use Encode qw( decode );
# use URI::Escape;
no warnings 'experimental::smartmatch';
use Data::Dumper;
# binmode(STDOUT,':utf8');
binmode STDIN, ":encoding(UTF-8)";
binmode STDOUT, ":encoding(utf8)";
# Forward declaration (with prototype) for the GPX writer defined below.
sub createGPX(@);
# system("clear");
############################
#50.453426, 30.478263 - off
# DEFAULTS
# Default centre point; can be overridden with -c <lat,long>.
$myLocationLAT = 50.453426;
$myLocationLONG = 30.478263;
# $myLocationLAT = 50.454069;
# $myLocationLONG = 30.474492;
# $myLocationLAT = 50.452441;
# $myLocationLONG = 30.479698;
# Search radius around the centre point; 0 disables the radius filter.
$allowRadius = 50000; #meters
############################
# NOTE(review): despite the name, this value is the Earth radius in
# *metres* (~6.37e6); it is used consistently with $distanceMetres in the
# bearing computation below.
$radiusEarthKilometres=6372797.560856;
# Offset of the generated satellite waypoints from each point of interest.
$distanceMetres = 40;
# $angle = 45;
# $initialBearingRadians = deg2rad($angle);
# $startLatRad = deg2rad($myLocationLAT);
# $startLonRad = deg2rad($myLocationLONG);
# $distRatio = $distanceMetres / $radiusEarthKilometres;
# $distRatioSine = sin($distRatio);
# $distRatioCosine = cos($distRatio);
# $startLatCos = cos($startLatRad);
# $startLatSin = sin($startLatRad);
# $endLatRads = asin(($startLatSin * $distRatioCosine) + ($startLatCos * $distRatioSine * cos($initialBearingRadians)));
# $endLonRads = $startLonRad + atan2(
# sin($initialBearingRadians) * $distRatioSine * $startLatCos,
# $distRatioCosine - $startLatSin * sin($endLatRads));
# printf "%f,%f\n",rad2deg($endLatRads),rad2deg($endLonRads);
# die "terminate";
# die "terminate";
print "############################################\n";
use I18N::Langinfo qw(langinfo CODESET);
my $codeset = langinfo(CODESET);
use Encode qw(decode);
@ARGV = map { decode $codeset, $_ } @ARGV;
# --- Command line parsing -------------------------------------------------
# Supported switches (each takes the *next* argument as its value):
#   -c <lat,long>   centre point
#   -r <radius>     search radius in metres (0 disables the radius filter)
#   -f <filter>     case-insensitive substring filter on the title
#
# The experimental/deprecated smartmatch operator (~~) used originally has
# been replaced with a plain grep for forward compatibility.
my $key = undef;
my $filter = undef;
my @known_switches = ('-c', '-r', '-f');
foreach my $arg (@ARGV) {
    my $is_switch = grep { $arg eq $_ } @known_switches;
    if ( $key && $is_switch ) {
        # A switch directly after another switch: its value is missing.
        die "Invalid arguments\n -c <lat,long>\n -r <radius in meters>\n -f <filter>\n";
    }
    elsif ( !$key && !$is_switch ) {
        die "Undefined argument:\n $arg";
    }
    if ( $key && $key eq '-c') {
        my @coords = split(',', $arg);
        if (@coords < 2) {
            die "Invalid coordinates:\n -c $arg";
        }
        # Extra list elements beyond lat/long are ignored, as before.
        ($myLocationLAT, $myLocationLONG) = @coords;
        $key = undef;
    }
    elsif ($key && $key eq '-r') {
        $allowRadius = int($arg);
        $key = undef;
    }
    elsif ($key && $key eq '-f') {
        $filter = lc($arg);
        $key = undef;
    }
    # Remember which switch we just saw; its value is the next argument.
    if ($arg eq '-c') {
        $key = '-c';
    }
    elsif ($arg eq '-r') {
        $key = '-r';
    }
    elsif ($arg eq '-f') {
        $key = '-f';
    }
}
# print $filter;
# die;
# Remove waypoint files generated by previous runs.
unlink glob "./gpx/*.*";

$json = JSON->new->allow_nonref;
my $file = 'outl2.txt';
# NOTE(review): 2-arg open; the filename is a constant here, but 3-arg
# open would be the safer idiom.
open my $info, $file or die "Could not open $file: $!";

while( my $line = <$info>) {
    # Each input line is a bare JSON array; wrap it in an object so the
    # decoder yields a hash with the array under key "a".
    my $str = "{\"a\":$line}";
    $test= $json->decode($str);
    # print $test->{"a"}[2] ;
    # @elements = $test->{"a"};
    # Coordinates are stored as integer micro-degrees; convert to degrees.
    $lat = $test->{"a"}[2] / 1000000;
    $long = $test->{"a"}[3] / 1000000;
    # NOTE(review): this "my $str" masks the one declared above in the
    # same scope (would warn under "use warnings").
    my $str = $test->{"a"}[8];
    # $str =~s/[,]//ig;
    $title = $str;
    # $title = $json->decode($str);
    # $title = $json->decode($elements[8]);
    # $json_item = $json->decode($line);
    # Dumper($json_item[0]);
    # print $json_item[3];
    # $lat = $json_item[2] / 1000000;
    # $long = $json_item[3] / 1000000;
    # $title = $json_item[8];
    # Sanitise the title for use as a file name (keep word chars, digits,
    # Cyrillic a-я and whitespace; everything else becomes "_").
    $title =~s/[^\w\dа-я\s]+/_/ig;
    # print $title, $lat , $long;
    # die;

    # Haversine great-circle distance (in metres) between this point and
    # the configured centre.
    my $a = sin( deg2rad($lat - $myLocationLAT)*0.5) ** 2;
    my $b = sin( deg2rad($long - $myLocationLONG)*0.5) ** 2;
    my $h = $a + cos(deg2rad($lat)) * cos(deg2rad($myLocationLAT)) * $b;
    my $theta = 2 * asin(sqrt($h)) * 6372797.560856;

    # Skip entries not matching the -f substring filter.
    if ($filter && (index(lc($title), $filter) < 0)) {
        next;
    }
    # Skip entries outside the -r radius (0 disables this check).
    if ($allowRadius>0 && $theta > $allowRadius) {
        next;
    }
    print "$title === $theta \n";

    # Waypoint at the point of interest itself...
    createGPX($title,$lat,$long);
    # ...plus eight satellite waypoints, $distanceMetres away, at bearings
    # 0°, 45°, ..., 315° (destination-point formula on a sphere).
    for (my $i = 0; $i<8 ;$i++) {
        #$alfa = 45*$i;
        $angle=45*$i;# $alfa+int(rand(45));
        # $nfname= $title."-".$rand."_".$i;
        $nfname= $title."-".$angle;
        $initialBearingRadians = deg2rad($angle);
        $startLatRad = deg2rad($lat);
        $startLonRad = deg2rad($long);
        $distRatio = $distanceMetres / $radiusEarthKilometres;
        $distRatioSine = sin($distRatio);
        $distRatioCosine = cos($distRatio);
        $startLatCos = cos($startLatRad);
        $startLatSin = sin($startLatRad);
        $endLatRads = asin(($startLatSin * $distRatioCosine) + ($startLatCos * $distRatioSine * cos($initialBearingRadians)));
        $endLonRads = $startLonRad + atan2(
        sin($initialBearingRadians) * $distRatioSine * $startLatCos,
        $distRatioCosine - $startLatSin * sin($endLatRads));
        # printf "%f,%f\n %d %f,%f\n",$lat,$long ,$angle, rad2deg($endLatRads),rad2deg($endLonRads);
        createGPX($nfname,rad2deg($endLatRads),rad2deg($endLonRads));
    }
    # print $lat.",".$long." ".$bx.",".$by."\n";
    # print Dumper($bx, $by);
    # print $title;
}
close $info;
print "############################################\n";
# createGPX($filename, $lat, $lon)
#
# Writes a single-waypoint GPX 1.1 file named "<filename>.gpx" into the
# ./gpx directory; $filename doubles as the waypoint name, so it should
# already be sanitised. Dies (now including $!) when the file cannot be
# created — e.g. when ./gpx does not exist — or when the final write
# fails at close time.
sub createGPX(@)
{
    my ($filename, $lat, $lon) = @_;

    my $heredoc =<<"END_HERE_1";
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<gpx version="1.1" creator="Xcode">
<wpt lat="$lat" lon="$lon">
<time>2014-11-07T09:12:05Z</time>
<name>$filename</name>
</wpt>
</gpx>
END_HERE_1

    # Include $! so failures (missing directory, permissions) are
    # diagnosable; the original message omitted the OS error.
    open(my $fh, '>:encoding(UTF-8)', "./gpx/$filename.gpx")
        or die "Couldn't create file '$filename': $!";
    print $fh $heredoc;
    # Buffered write errors only surface at close; check it.
    close($fh) or die "Couldn't close file '$filename': $!";
}
| andriitishchenko/ingressgeo | gpxScript/start-v2.pl | Perl | mit | 5,947 |
package NetSNMP::TrapReceiver;
use 5.00006;
use strict;
use Carp;
require Exporter;
require DynaLoader;
use AutoLoader;
use vars qw($VERSION @ISA @EXPORT @EXPORT_OK %EXPORT_TAGS $AUTOLOAD);
@ISA = qw(Exporter
DynaLoader);
require NetSNMP::OID;
# Items to export into callers namespace by default. Note: do not export
# names by default without a very good reason. Use EXPORT_OK instead.
# Do not simply export all your public functions/methods/constants.
# This allows declaration use NetSNMP::TrapReceiver ':all';
# If you do not need this, moving things directly into @EXPORT or @EXPORT_OK
# will save memory.
# All public symbols are reachable via the ':all' tag: the trap handler
# registration helpers plus the NETSNMPTRAPD_* constants.
%EXPORT_TAGS = ( 'all' => [ qw(
    NETSNMPTRAPD_AUTH_HANDLER
    NETSNMPTRAPD_HANDLER_BREAK
    NETSNMPTRAPD_HANDLER_FAIL
    NETSNMPTRAPD_HANDLER_FINISH
    NETSNMPTRAPD_HANDLER_OK
    NETSNMPTRAPD_POST_HANDLER
    NETSNMPTRAPD_PRE_HANDLER
    netsnmp_add_default_traphandler
    netsnmp_add_global_traphandler
    netsnmp_add_traphandler
) ] );

@EXPORT_OK = ( @{ $EXPORT_TAGS{'all'} } );

# Only the constants are exported by default; the registration functions
# must be requested explicitly (or via ':all').
@EXPORT = qw(
    NETSNMPTRAPD_AUTH_HANDLER
    NETSNMPTRAPD_HANDLER_BREAK
    NETSNMPTRAPD_HANDLER_FAIL
    NETSNMPTRAPD_HANDLER_FINISH
    NETSNMPTRAPD_HANDLER_OK
    NETSNMPTRAPD_POST_HANDLER
    NETSNMPTRAPD_PRE_HANDLER
);

$VERSION = '5.0405';
# sub new {
# my $type = shift;
# my ($self);
# %$self = @_;
# bless($self, $type);
# return $self;
# }
# sub register($$$$) {
# my ($self, $oid, $sub) = @_;
# my $reg = NetSNMP::TrapReceiver::registration::new($oid, $sub);
# if ($reg) {
# $reg->register();
# $self->{'regobjs'}{$name} = $reg;
# }
# return $reg;
# }
# Resolves NETSNMPTRAPD_* constants on first use: looks the bare name up
# via the XS constant() function, installs a sub returning the value into
# the symbol table, then re-dispatches the original call.
sub AUTOLOAD {
    # This AUTOLOAD is used to 'autoload' constants from the constant()
    # XS function.
    my $constname;
    ($constname = $AUTOLOAD) =~ s/.*:://;
    croak "&NetSNMP::TrapReceiver::constant not defined" if $constname eq 'constant';
    my ($error, $val) = constant($constname);
    if ($error) { croak $error; }
    {
        # Symbolic glob assignment needs refs relaxed.
        no strict 'refs';
        # Fixed between 5.005_53 and 5.005_61
        #XXX	if ($] >= 5.00561) {
        #XXX	    *$AUTOLOAD = sub () { $val };
        #XXX	}
        #XXX	else {
        *$AUTOLOAD = sub { $val };
        #XXX	}
    }
    # Re-enter the freshly installed sub with the original arguments.
    goto &$AUTOLOAD;
}
bootstrap NetSNMP::TrapReceiver $VERSION;
# Preloaded methods go here.
# Autoload methods go after =cut, and are processed by the autosplit program.
1;
__END__
=head1 NAME
NetSNMP::TrapReceiver - Embedded perl trap handling for Net-SNMP's snmptrapd
=head1 SYNOPSIS
Put the following lines in your snmptrapd.conf file:
perl NetSNMP::TrapReceiver::register("trapOID", \&myfunc);
=head1 ABSTRACT
The NetSNMP::TrapReceiver module is used to register perl
subroutines into the Net-SNMP snmptrapd process. Net-SNMP MUST have
been configured using --enable-embedded-perl. Registration of
functions is then done through the snmptrapd.conf configuration
file. This module can NOT be used in a normal perl script to
receive traps. It is intended solely for embedded use within the
snmptrapd demon.
=head1 DESCRIPTION
Within the snmptrapd.conf file, the keyword "perl" may be used to call
any perl expression and using this ability, you can use the
NetSNMP::TrapReceiver module to register functions which will be
called every time a given notification (a trap or an inform) is
received. Registered functions are called with 2 arguments. The
first is a reference to a hash containing information about how the
trap was received (what version of the SNMP protocol was used, where
it came from, what SNMP user name or community name it was sent under,
etc). The second argument is a reference to an array containing the
variable bindings (OID and value information) that define the
notification itself. Each variable is itself a reference to an array
containing three values: a NetSNMP::OID object, the value that came
associated with it, and the value's numeric type (see NetSNMP::ASN for
further details on SNMP typing information).
Registered functions should return one of the following values:
=over 2
=item NETSNMPTRAPD_HANDLER_OK
Handling the trap succeeded, but lets the snmptrapd demon check for
further appropriate handlers.
=item NETSNMPTRAPD_HANDLER_FAIL
Handling the trap failed, but lets the snmptrapd demon check for
further appropriate handlers.
=item NETSNMPTRAPD_HANDLER_BREAK
Stops evaluating the list of handlers for this specific trap, but lets
the snmptrapd demon apply global handlers.
=item NETSNMPTRAPD_HANDLER_FINISH
Stops searching for further appropriate handlers.
=back
If a handler function does not return anything appropriate or even
nothing at all, a return value of NETSNMPTRAPD_HANDLER_OK is assumed.
Subroutines are registered using the NetSNMP::TrapReceiver::register
function, which takes two arguments. The first is a string describing
the notification you want to register for (such as "linkUp" or
"MyMIB::MyTrap" or ".1.3.6.1.4.1.2021...."). Two special keywords can
be used in place of an OID: "default" and "all". The "default"
keyword indicates you want your handler to be called in the case where
no other handlers are called. The "all" keyword indicates that the
handler should ALWAYS be called for every notification.
=head1 EXAMPLE
As an example, put the following code into a file (say
"/usr/local/share/snmp/mytrapd.pl"):
#!/usr/bin/perl
sub my_receiver {
print "********** PERL RECEIVED A NOTIFICATION:\n";
# print the PDU info (a hash reference)
print "PDU INFO:\n";
foreach my $k(keys(%{$_[0]})) {
if ($k eq "securityEngineID" || $k eq "contextEngineID") {
printf " %-30s 0x%s\n", $k, unpack('h*', $_[0]{$k});
}
else {
printf " %-30s %s\n", $k, $_[0]{$k};
}
}
# print the variable bindings:
print "VARBINDS:\n";
foreach my $x (@{$_[1]}) {
printf " %-30s type=%-2d value=%s\n", $x->[0], $x->[2], $x->[1];
}
}
NetSNMP::TrapReceiver::register("all", \&my_receiver) ||
warn "failed to register our perl trap handler\n";
print STDERR "Loaded the example perl snmptrapd handler\n";
Then, put the following line in your snmptrapd.conf file:
perl do "/usr/local/share/snmp/mytrapd.pl";
Start snmptrapd (as root, and the following other options make it stay
in the foreground and log to stderr):
snmptrapd -f -Le
You should see it start up and display the final message from the end
of the above perl script:
Loaded the example perl snmptrapd handler
2004-02-11 10:08:45 NET-SNMP version 5.2 Started.
Then, if you send yourself a fake trap using the following example command:
snmptrap -v 2c -c mycommunity localhost 0 linkUp ifIndex.1 i 1 \
ifAdminStatus.1 i up ifOperStatus.1 i up ifDescr s eth0
You should see the following output appear from snmptrapd as your perl
code gets executed:
********** PERL RECEIVED A NOTIFICATION:
PDU INFO:
notificationtype TRAP
receivedfrom 127.0.0.1
version 1
errorstatus 0
messageid 0
community mycommunity
transactionid 2
errorindex 0
requestid 765160220
VARBINDS:
sysUpTimeInstance type=67 value=0:0:00:00.00
snmpTrapOID.0 type=6 value=linkUp
ifIndex.1 type=2 value=1
ifAdminStatus.1 type=2 value=1
ifOperStatus.1 type=2 value=1
ifDescr type=4 value="eth0"
=head1 EXPORT
None by default.
# =head2 Exportable constants
# NETSNMPTRAPD_AUTH_HANDLER
# NETSNMPTRAPD_HANDLER_BREAK
# NETSNMPTRAPD_HANDLER_FAIL
# NETSNMPTRAPD_HANDLER_FINISH
# NETSNMPTRAPD_HANDLER_OK
# NETSNMPTRAPD_POST_HANDLER
# NETSNMPTRAPD_PRE_HANDLER
=head1 SEE ALSO
NetSNMP::OID, NetSNMP::ASN
snmptrapd.conf(5) for configuring the Net-SNMP trap receiver.
snmpd.conf(5) for configuring the Net-SNMP snmp agent for sending traps.
http://www.Net-SNMP.org/
=head1 AUTHOR
W. Hardaker, E<lt>hardaker@users.sourceforge.netE<gt>
=head1 COPYRIGHT AND LICENSE
Copyright 2004 by W. Hardaker
This library is free software; you can redistribute it and/or modify
it under the same terms as Perl itself.
=cut
| Ronald-C/SNMP-Mangement | lib/net-snmp/perl/TrapReceiver/TrapReceiver.pm | Perl | mit | 8,222 |
#!/usr/bin/perl -w
#
# ***** BEGIN LICENSE BLOCK *****
# Zimbra Collaboration Suite Server
# Copyright (C) 2006, 2007, 2009, 2010 Zimbra, Inc.
#
# The contents of this file are subject to the Zimbra Public License
# Version 1.3 ("License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.zimbra.com/license.
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
# ***** END LICENSE BLOCK *****
#
use strict;
use lib '.';
use LWP::UserAgent;
use Getopt::Long;
use XmlDoc;
use Soap;
use ZimbraSoapTest;
# specific to this app
my ($root);
#standard options
my ($user, $pw, $host, $help); #standard
GetOptions("u|user=s" => \$user,
"pw=s" => \$pw,
"h|host=s" => \$host,
"help|?" => \$help,
# add specific params below:
"b|base=s" => \$root
);
if (!defined($user) || defined($help)) {
my $usage = <<END_OF_USAGE;
USAGE: $0 -u USER [-b BASE_FOLDER]
END_OF_USAGE
die $usage;
}
my $z = ZimbraSoapTest->new($user, $host, $pw);
$z->doStdAuth();
# Build the SOAP GetFolderRequest document.
# Indirect object syntax ("new XmlDoc") is fragile in Perl (it can be
# parsed as a call to a sub named "new"); use the explicit class-method
# call instead.
my $d = XmlDoc->new();
if (defined($root)) {
    # A base folder was given: restrict the request to that subtree.
    $d->start('GetFolderRequest', $Soap::ZIMBRA_MAIL_NS);
    $d->add('folder', undef, { 'l' => $root });
    $d->end();
} else {
    # No base folder: request the entire folder hierarchy.
    $d->add('GetFolderRequest', $Soap::ZIMBRA_MAIL_NS);
}
my $response = $z->invokeMail($d->root());
print "REQUEST:\n-------------\n".$z->to_string_simple($d);
print "RESPONSE:\n--------------\n".$z->to_string_simple($response);
| nico01f/z-pec | ZimbraServer/src/perl/soap/getFolders.pl | Perl | mit | 1,673 |
#!/usr/bin/perl
use strict;
use Time::HiRes qw ( time );
# Run an external shell command, capturing its output and wall-clock time.
#
# Arguments: a single command string (interpolated by the shell via
# backticks -- callers must only pass trusted, internally built commands).
# Returns a flattened hash with:
#   result  - the command's standard output
#   elapsed - wall-clock seconds taken, as a float (Time::HiRes::time)
sub extwtime {
    my ($cmd) = @_;    # was "@_[0]", a one-element slice (warns under -w)
    my $started  = time();
    my $output   = `$cmd`;
    my $finished = time();
    return ('result' => $output, 'elapsed' => $finished - $started);
}
# Print a one-line summary for a language's test run.
#
# Arguments:
#   $lang   - language label (e.g. "c")
#   $result - raw harness output containing an "N of M passed" line
#   $timing - elapsed seconds (float)
sub parse {
    my ($lang, $result, $timing) = @_;    # was @_[0]/@_[1]/@_[2]: one-element slices
    # "%05f" keeps the default 6-digit precision; the width of 5 is
    # effectively a no-op for typical values.
    my $pt = sprintf("%05f", $timing);
    # TODO: Print out failed vectors.
    # Pull the "N of M passed" summary out of the harness output.
    my ($stats) = ($result =~ m/(!?\s{3,4}\d+ of \d+ passed)/);
    # Splice the language label and timing into the leading whitespace.
    $stats =~ s/[! ](?<=\s{2})\s/ $lang - $pt sec -/;
    print "$stats\n";
    return;
}
# Build and run the C test suite via make, print its timing/pass summary,
# then clean the build tree.
sub c {
my $dir = "./c";
my $cmd = "make test -C $dir";
my $post = "make clean -C $dir";
# Time the test run and report the "N of M passed" summary line.
my %ret = extwtime($cmd);
parse("c", $ret{'result'}, $ret{'elapsed'});
# "make clean" is run for its side effect only; the output is discarded.
$_ = `$post`;
return;
}
# Dispatch test runs: with no command-line argument run every registered
# language suite; otherwise run only the suite named on the command line
# (silently doing nothing for an unknown name).
sub main {
    my %suite_for = ( 'c' => \&c );

    print "\nTest Results\n\n";

    my $requested = $ARGV[0];
    if ( defined $requested ) {
        my $runner = $suite_for{$requested};
        return $runner->() if defined $runner;
        return;
    }

    # No argument: run them all.
    $_->() for values %suite_for;
    return;
}
main();
print "\n";
| sjudson/polytrivium | runner.pl | Perl | mit | 1,069 |
#====================================================================
#
# Chart::Mountain
#
# Inspired by Chart::Lines
# by davidb bonner
# dbonner@cs.bu.edu
#
# Updated for
# compatibility with
# changes to Chart::Base
# by peter clark
# ninjaz@webexpress.com
#
# Copyright 1998, 1999 by James F. Miner.
# All rights reserved.
# This program is free software; you can redistribute it
# and/or modify it under the same terms as Perl itself.
#
# maintained by the Chart Group
# Chart@wettzell.ifag.de
#
#---------------------------------------------------------------------
# History:
#----------
# $RCSfile: Mountain.pm,v $ $Revision: 1.4 $ $Date: 2003/02/14 14:16:23 $
# $Author: dassing $
# $Log: Mountain.pm,v $
# Revision 1.4 2003/02/14 14:16:23 dassing
# First setup to cvs
#
#
#====================================================================
package Chart::Mountain;
use Chart::Base '2.4.1';
use GD;
use Carp;
use strict;
@Chart::Mountain::ISA = qw ( Chart::Base );
$Chart::Mountain::VERSION = '2.4.1';
## Some Mountain chart details:
#
# The effective y data value for a given x point and dataset
# is the sum of the actual y data values of that dataset and
# all datasets "below" it (i.e., with higher dataset indexes).
#
# If the y data value in any dataset is undef or negative for
# a given x, then all datasets are treated as missing for that x.
#
# The y minimum is always forced to zero.
#
# To avoid a dataset area "cutting into" the area of the dataset below
# it, the y pixel for each dataset point will never be below the y pixel for
# the same point in the dataset below the dataset.
# This probably should have a custom legend method, because each
# dataset is identified by the fill color (and optional pattern)
# of its area, not just a line color. So the legend shou a square
# of the color and pattern for each dataset.
#===================#
# private methods #
#===================#
sub _find_y_range {
    my $self = shift;

    # Walk every x position and compute the stacked total: the sum of all
    # dataset values at that x.  If any dataset is undef or negative at a
    # given x, the whole point is treated as missing and contributes
    # nothing to the maximum.  The y minimum is always forced to zero.
    my $data = $self->{'dataref'};
    my $max;

    POINT:
    for my $idx ( 0 .. $#{ $data->[0] } ) {
        # Seed with the first dataset; a missing or negative value there
        # skips the point entirely.
        my $total = $data->[1][$idx];
        next POINT unless defined $total && $total >= 0;

        for my $set ( @{$data}[ 2 .. $#{$data} ] ) {    # order not important
            my $value = $set->[$idx];
            # Any missing or negative value invalidates the whole point.
            next POINT unless defined $value && $value >= 0;
            $total += $value;
        }

        $max = $total if !defined $max || $total > $max;
    }

    return ( 0, $max );
}
# Render the stacked ("mountain") areas onto the GD canvas: compute pixel
# coordinates for each dataset boundary, fill one polygon per dataset from
# top to bottom, frame the plot, and repair the area above the chart if any
# value exceeded max_val.
sub _draw_data {
my $self = shift;
my $data = $self->{'dataref'};
my @patterns = @{ $self->{'patterns'} || [] };
# Calculate array of x pixel positions (@x).
# The "> 0 ? ... : 1" guard avoids division by zero when there are no
# datapoints.
my $x_step = ($self->{'curr_x_max'} - $self->{'curr_x_min'}) / ($self->{'num_datapoints'} > 0 ? $self->{'num_datapoints'} : 1);
my $x_min = $self->{'curr_x_min'} + $x_step / 2;
my $x_max = $self->{'curr_x_max'} - $x_step / 2;
my @x = map { $_ * $x_step + $x_min } 0..$self->{'num_datapoints'}-1;
my ($t_x_min, $t_x_max, $t_y_min, $t_y_max, $abs_x_max, $abs_y_max);
my $repair_top_flag = 0;
# Calculate array of y pixel positions for upper boundary each dataset (@y).
my $map = ($self->{'max_val'})
? ($self->{'curr_y_max'} - $self->{'curr_y_min'}) / $self->{'max_val'}
: ($self->{'curr_y_max'} - $self->{'curr_y_min'}) / 10;
my $y_max = $self->{'curr_y_max'}; # max pixel point (lower y values)
my @y;
# Stack the datasets bottom-to-top: each dataset's upper boundary is the
# running sum of it and all datasets below it, mapped into pixel space.
for my $j (0..$#{$data->[0]}) {
my $sum = 0;
for my $i (reverse 1..$#{$data}) { # bottom to top of chart
my $datum = $data->[$i][$j];
#set the repair flag, if the datum is out of the borders of the chart
if ( defined $datum && $datum > $self->{'max_val'}) { $repair_top_flag = 1;}
if ( defined $datum && $datum >= 0 ) {
$sum += $datum;
$y[$i-1][$j] = $y_max - $map * $sum;
}
else { # missing value, force all to undefined
foreach my $k (1..$#{$data}) { $y[$k-1][$j] = undef }
last;
}
}
}
# Find first and last x where y is defined in the bottom dataset.
my $x_begin = 0;
my $x_end = $self->{'num_datapoints'}-1;
while ( $x_begin <= $x_end && ! defined $y[-1]->[$x_begin] ) { $x_begin++ }
while ( $x_begin <= $x_end && ! defined $y[-1]->[$x_end] ) { $x_end-- }
if ( $x_begin > $x_end ) { croak "Internal error: x_begin > x_end ($x_begin > $x_end)"; }
# For each dataset, generate a polygon for the dataset's area of the chart,
# and fill the polygon with the dataset's color/pattern.
my $poly = GD::Polygon->new;
$poly->addPt($x[$x_end], $y_max); # right end of x axis
$poly->addPt($x[$x_begin], $y_max); # left end of x axis (right-to-left)
for my $dataset (reverse 0..@y-1) {
my $y_ref = $y[$dataset];
# Append points for this dataset to polygon, direction depends on $dataset % 2.
# Alternating direction keeps the polygon outline continuous while the
# previous dataset's points still form the other boundary.
my $last_vertex_count = $poly->length;
if ( (@y - 1 - $dataset) % 2 ) { # right-to-left
for (reverse $x_begin..$x_end) {
$poly->addPt($x[$_], $y_ref->[$_]) if defined $y_ref->[$_]
}
}
else { # left-to-right
for ($x_begin..$x_end) {
$poly->addPt($x[$_], $y_ref->[$_]) if defined $y_ref->[$_]
}
}
# draw the polygon
my $color = $self->_color_role_to_index('dataset'.$dataset);
if ( $patterns[$dataset] ) {
$self->{'gd_obj'}->filledPolygon($poly, $color) if $patterns[$dataset]->transparent >= 0;
$self->{'gd_obj'}->setTile($patterns[$dataset]);
$self->{'gd_obj'}->filledPolygon($poly, gdTiled);
}
else {
$self->{'gd_obj'}->filledPolygon($poly, $color);
}
# delete previous dataset's points from the polygon, update $last_vertex_count.
unless ( $dataset == 0 ) { # don't bother do delete points after last area
while ( $last_vertex_count ) { $poly->deletePt(0); $last_vertex_count-- }
}
}
# Enclose the plots
$self->{'gd_obj'}->rectangle(
$self->{'curr_x_min'}, $self->{'curr_y_min'},
$self->{'curr_x_max'}, $self->{'curr_y_max'},
$self->_color_role_to_index('misc')
);
#get the width and the heigth of the complete picture
($abs_x_max, $abs_y_max) = $self->{'gd_obj'}->getBounds();
#repair the chart, if the lines are out of the borders of the chart
if ($repair_top_flag) {
# Data exceeded max_val and was drawn above the plot area: blank the
# strip above the chart and redraw the title/sub-title/top legend.
#overwrite the ugly mistakes
$self->{'gd_obj'}->filledRectangle ($self->{'curr_x_min'}, 0,
$self->{'curr_x_max'}, $self->{'curr_y_min'}-1,
$self->_color_role_to_index('background'));
#save the actual x and y values
$t_x_min = $self->{'curr_x_min'};
$t_x_max = $self->{'curr_x_max'};
$t_y_min = $self->{'curr_y_min'};
$t_y_max = $self->{'curr_y_max'};
#get back to the point, where everything began
$self->{'curr_x_min'} = 0;
$self->{'curr_y_min'} = 0;
$self->{'curr_x_max'} = $abs_x_max;
$self->{'curr_y_max'} = $abs_y_max;
#draw the title again
if ($self->{'title'}) {
$self->_draw_title
}
#draw the sub title again
if ($self->{'sub_title'}) {
$self->_draw_sub_title
}
#draw the top legend again
if ($self->{'legend'} =~ /^top$/i) {
$self->_draw_top_legend;
}
#reset the actual values
$self->{'curr_x_min'} = $t_x_min;
$self->{'curr_x_max'} = $t_x_max;
$self->{'curr_y_min'} = $t_y_min;
$self->{'curr_y_max'} = $t_y_max;
}
}
###############################################################
### Fix a bug in GD::Polygon.
### A patch has been submitted to Lincoln Stein.
require GD;
# Install deletePt only when the running GD does not already provide it,
# so a fixed upstream GD always takes precedence.
unless ( defined &GD::Polygon::deletePt ) {
*GD::Polygon::deletePt = sub {
my($self,$index) = @_;
unless (($index >= 0) && ($index < @{$self->{'points'}})) {
# NOTE(review): the message says "set" but this is a delete operation;
# kept verbatim since it is runtime output.
warn "Attempt to set an undefined polygon vertex";
return undef;
}
# Remove the vertex and hand back its (x, y) pair.
my($vertex) = splice(@{$self->{'points'}},$index,1);
$self->{'length'}--;
return @$vertex;
}
}
###############################################################
1;
| carlgao/lenga | images/lenny64-peon/usr/share/perl5/Chart/Mountain.pm | Perl | mit | 8,611 |
#!/usr/local/bin/perl
use warnings;
use strict;
use Data::Dumper qw(Dumper);
use Statistics::Basic qw(:all);
# Columns of the most recently parsed input row.
my @inputData;
# Running max/min of the value in column index 34 across the data file
# (header calls this the channel-error/fairness column -- TODO confirm).
my $highError = 0;
my $lowError = 10000;
my $outputFile = 'Results/processed_varError.txt';
####Finding the nodes limits based on the file.
my $filename = $ARGV[0];
open(my $fh, "<", $filename)
or die "Could not open file '$filename' $!";
while (my $row = <$fh>)
{
# Lines starting with '#' are headers/comments; skip them.
if($row !~ /^#/)
{
chomp($row);
@inputData = split(/\s+/, $row);
$highError = $inputData[34]
if ($highError < $inputData[34]);
$lowError = $inputData[34]
if ($lowError > $inputData[34]);
}
}
####Normalizing the errors
# Scale to integers so the later foreach can iterate over whole values.
$highError *= 100;
$lowError *= 100;
####Finding the metrics and outputting the results to file.
open(my $fw, ">", $outputFile)
or die "Could not open write file $outputFile $!";
##Odd indexes are standard deviations if not defined
print $fw
("#1 Nodes, #2 AvgThroughput, #4 totalBKThroughput,
#6 totalBEThroughput, #8 totalVIThroughput, #10 totalVOThroughput,
#12 totalCollisions, #14 totalBKCollisions, #16 totalBECollisions,
#18 totalVICollisions, #20 TotalVOCollisions, #22 totalInternalCollisions,
#24 totalBKIntCol, #26 totalBEIntCol, #28 totalVIIntCol,
#30 totalVOIntCol, #32 overallFairness, #34 BKFairness,
#36 BEFairness, #38 VIFairness, #40 VOFairness,
#42 avgTimeBtSxTxBK, #44 avgTimeBtSxTxBE, #46 avgTimeBtSxTxVI,
#48 avgTimeBtSxTxVO, #50 qEmptyBK, #52 qEmptyBE,
#54 qEmptyVI, #56 qEmptyVO #58 totalDropped,
#60 droppedBK, #62 droppedBE #64 droppedVI,
#66 droppedVO #68 channelErrors #70 stickiness
#72 totalThroughputUnsat #74 totalThroughputSat #76 totalThroughputEDCA
#78 totalBKthroughputEDCA #80 totalBEthroughputEDCA #82 totalVIthroughputEDCA
#84 totalVOthroughputEDCA #86 totalThroughputECA #88 totalBKthroughputECA
#90 totalBEthroughputECA #92 totalVIthroughputECA #94 totalVOthroughputECA
#96 avgTimeBtSxTxBkEDCA #98 avgTimeBtSxTxBeEDCA #100 avgTimeBtSxTxViEDCA
#102 avgTimeBtSxTxVoEDCA #104 avgTimeBtSxTxBkECA #106 avgTimeBtSxTxBeECA
#108 avgTimeBtSxTxViECA #110 avgTimeBtSxTxVoECA #112 BKCollisionsEDCA
#114 BECollisionsEDCA #116 VICollisionsEDCA #118 VOCollisionsEDCA
#120 BKCollisionsECA #122 BECollisionsECA #124 VICollisionsECA
#126 VOCollisionsEDCA #128 lastCollision #130 avgQueuingDelayBK
#132 avgQueuingDelayBE #134 avgQueuingDelayVI #136 avgQueuingDelayVO
#138 avgBackoffStageECABK #140 avgBackoffStageDCFBK #142 PercentageEDCA
#144 sxSlots #146 colSlots #148 errorSlots
#150 emptySlots\n");
OUTTER: foreach($lowError .. $highError)
{
open(my $fh, "<", $filename)
or die "Could not open file $filename $!";
my @metrics;
my $thereIsData = 0;
while (my $row = <$fh>)
{
if($row !~ /^#/)
{
chomp($row);
@inputData = split(/\s+/, $row);
if ($inputData[34] == ($_/100))
{
$thereIsData = 1;
foreach my $i (1 .. $#inputData)
{
push @{$metrics[$i]}, $inputData[$i];
}
# print ("@inputData\n\n");
}
}
}
next OUTTER
if($thereIsData == 0);
my $average;
my $std;
print $fw "$inputData[0] ";
foreach my $i (1 .. $#metrics)
{
$average = avg(@{$metrics[$i]}) + 0; #forcing the result to a scalar instead of an Object.
$std = stddev(@{$metrics[$i]}) + 0;
print $fw "$average $std ";
# print ("$average $std\n");
}
print $fw "\n";
} | SanabriaRusso/CSMA-ECA-HEW | processVarError.pl | Perl | mit | 4,007 |
#!/usr/bin/perl
# Advent of Code 2015, day 14 part 2: reindeer race scored by points.
# Each second, every reindeer currently in the lead earns one point; the
# answer is the highest point total after DURATION seconds.
use strict;
use warnings;
use integer;
use constant DURATION => 2503;
use List::Util qw(max sum);
# $reindeer{name}[$t] first holds cumulative distance at second $t, is then
# rewritten in place to per-second lead flags, and finally collapsed to a
# point total per reindeer.
my %reindeer;
open(INFILE, 'input.txt') or die "Cannot open file input.txt for reading: $!";
while (<INFILE>) {
/^(\w+) can fly (\d+) km\/s for (\d+) seconds, but then must rest for (\d+) seconds\.\n$/ or die $_;
$reindeer{$1} = [0];
# Position within the fly+rest cycle decides whether distance is gained.
for (my $t = 1; $t <= DURATION; ++$t) {
my $r = $t % ($3 + $4);
$reindeer{$1}[$t] = $reindeer{$1}[$t-1] + (($r > 0 && $r <= $3) ? $2 : 0);
}
}
close INFILE;
# Replace each distance with 1 if that reindeer was (co-)leading at second $t.
for (my $t = 1; $t <= DURATION; ++$t) {
my %set = map {($_ => $reindeer{$_}[$t])} keys %reindeer;
my $max = max(values %set);
$reindeer{$_}[$t] = ($set{$_} == $max) ? 1 : 0 for keys %reindeer;
}
# Sum the lead flags into a point total per reindeer and print the best.
$reindeer{$_} = sum(@{$reindeer{$_}}) for keys %reindeer;
print max(values %reindeer) . "\n";
| ezarko/adventofcode | 2015/day14/part2.pl | Perl | cc0-1.0 | 844 |
% Auto-generated constrained Horn clauses (SV-COMP benchmark
% loops/sum01_safe): each newN/k predicate encodes a program location and
% "false" is derivable only if the safety property can be violated.
% Do not hand-edit; regenerate from the source benchmark instead.
new13(A,B,C,D,E,F):-A=0.
new8(A,B,C,D,E):-F=1, D=0, new7(A,B,C,D,F).
new8(A,B,C,D,E):-F=0, D=< -1, new7(A,B,C,D,F).
new8(A,B,C,D,E):-F=0, D>=1, new7(A,B,C,D,F).
new7(A,B,C,D,E):-F=:=E, new13(F,A,B,C,D,E).
new6(A,B,C,D,E):-F=1, 2*B=:=D, new7(A,B,C,D,F).
new6(A,B,C,D,E):-2*B-D>=1, new8(A,B,C,D,E).
new6(A,B,C,D,E):-2*B-1*D=< -1, new8(A,B,C,D,E).
new4(A,B,C,D,E):-F=1+A, G=2+D, A-B=<0, new4(F,B,C,G,E).
new4(A,B,C,D,E):-A-B>=1, new6(A,B,C,D,E).
new3(A,B,C,D,E):-F=1, G=:=H, I=0, new4(F,H,G,I,E).
new2:-new3(A,B,C,D,E).
new1:-new2.
false:-new1.
| bishoksan/RAHFT | benchmarks_scp/misc/programs-clp/SVCOMP13-loops-sum01_safe.map.c.map.pl | Perl | apache-2.0 | 542 |
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::infoblox::snmp::plugin;
use strict;
use warnings;
use base qw(centreon::plugins::script_snmp);
# Constructor: builds on the generic SNMP script plugin (see the
# centreon::plugins::script_snmp base class) and registers this plugin's
# available modes as a map of mode name => implementing package.
sub new {
my ($class, %options) = @_;
my $self = $class->SUPER::new(package => __PACKAGE__, %options);
bless $self, $class;
$self->{version} = '1.0';
%{$self->{modes}} = (
'cpu' => 'network::infoblox::snmp::mode::cpu',
'dhcp-usage' => 'network::infoblox::snmp::mode::dhcpusage',
'dns-usage' => 'network::infoblox::snmp::mode::dnsusage',
'interfaces' => 'snmp_standard::mode::interfaces',
'list-interfaces' => 'snmp_standard::mode::listinterfaces',
'memory' => 'network::infoblox::snmp::mode::memory',
'services' => 'network::infoblox::snmp::mode::services',
);
return $self;
}
1;
__END__
=head1 PLUGIN DESCRIPTION
Check Infoblox equipments in SNMP.
=cut
| Sims24/centreon-plugins | network/infoblox/snmp/plugin.pm | Perl | apache-2.0 | 1,810 |
#
# Ensembl module for Bio::EnsEMBL::Funcgen::ResultFeature
#
=head1 LICENSE
Copyright (c) 1999-2011 The European Bioinformatics Institute and
Genome Research Limited. All rights reserved.
This software is distributed under a modified Apache license.
For license details, please see
http://www.ensembl.org/info/about/code_licence.html
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <ensembl-dev@ebi.ac.uk>.
Questions may also be sent to the Ensembl help desk at
<helpdesk@ensembl.org>.
=head1 NAME
Bio::EnsEMBL::Funcgen::ResultFeature - A module to represent a lightweight ResultFeature object
=head1 SYNOPSIS
use Bio::EnsEMBL::Funcgen::ResultFeature;
my $rfeature = Bio::EnsEMBL::Funcgen::ResultFeature->new_fast([$start, $end, $score ]);
my @rfeatures = @{$rset->get_displayable_ResultFeature_by_Slice($slice)};
foreach my $rfeature (@rfeatures){
my $score = $rfeature->score();
my $rf_start = $rfeature->start();
my $rf_end = $rfeature->end();
}
=head1 DESCRIPTION
This is a very sparse class designed to be as lightweight as possible to enable fast rendering in the web browser.
As such only the information absolutely required is contained. Any a piori information is omitted e.g. seq_region_id,
this will already be known as ResultFeatures are retrieved via a Slice method in ResultSet via the ResultSetAdaptor,
likewise with analysis and experimental_chip information. ResultFeatures are transient objects, in that they are not
stored in the DB, but are a very small subset of information from the result and oligo_feature tables. ResultFeatures
should only be generated by the ResultSetAdaptor as there is no parameter checking in place.
=cut
use strict;
use warnings;
#Could set global named vars here for element names. Would take more memory
package Bio::EnsEMBL::Funcgen::ResultFeature;
use base ('Bio::EnsEMBL::Feature');
=head2 new_fast
Args : Array with attributes start, end, strand, score, probe, result_set_id, winow_size IN THAT ORDER.
WARNING: None of these are validated, hence can omit some where not needed
Example : none
Description: Fast and list version of new. Only works if the code is very disciplined.
Returntype : Bio::EnsEMBL::Funcgen::ResultFeature
Exceptions : None
Caller : ResultSetAdaptor
Status : At Risk
=cut
sub new_fast {
  my ( $class, @attrs ) = @_;

  # Deliberately performs no validation: this constructor exists to be as
  # cheap as possible, so the ResultSetAdaptor must supply the attributes
  # in the documented order (start, end, strand, score, probe,
  # result_set_id, window_size, slice).
  return bless [@attrs], $class;
}
=head2 start
Example : my $start = $rf->start();
Description: Getter of the start attribute for ResultFeature
objects.
Returntype : int
Exceptions : None
Caller : General
Status : At Risk - Now also sets to enable projection
=cut
sub start {
  # Setter/getter for the start coordinate (array slot 0).
  # Uses "defined" rather than plain truth so that a legitimate position
  # of 0 can be assigned; the previous truthiness test silently ignored
  # an attempt to set start to 0.
  $_[0]->[0] = $_[1] if defined $_[1];
  $_[0]->[0];
}
=head2 end
Example : my $start = $rf->end();
Description: Getter of the end attribute for ResultFeature
objects.
Returntype : int
Exceptions : None
Caller : General
Status : At Risk - Now also sets to enable projection
=cut
sub end {
  # Setter/getter for the end coordinate (array slot 1).
  # Uses "defined" rather than plain truth so that a legitimate position
  # of 0 can be assigned; the previous truthiness test silently ignored
  # an attempt to set end to 0 (mirrors the fix in start()).
  $_[0]->[1] = $_[1] if defined $_[1];
  $_[0]->[1];
}
#Do we need to chacnge this to strand and have slice strand context, as with start and end
sub strand { $_[0]->[2];}
=head2 score
Example : my $score = $rf->score();
Description: Getter of the score attribute for ResultFeature
objects
Returntype : string/float/double?
Exceptions : None
Caller : General
Status : At Risk
=cut
sub score { $_[0]->[3];}
=head2 probe
Example : my $probe = $rf->probe();
Description: Getter of the probe attribute for ResultFeature
objects
Returntype : Bio::EnsEMBL::Funcgen::Probe
Exceptions : None
Caller : General
Status : At Risk - This can only be used for Features with window 0.
=cut
#probe_id is currently not available in the result_feature table, so this would be a result/probe_feature query.
sub probe { $_[0]->[4];}
#The following are only used for storage and retrieval, hence why they are not included in new_fast which is streamlined
#for performance
#These have no validation so all thi smust be done in the caller/storer i.e. the adaptor
sub result_set_id { $_[0]->[5];}
sub window_size { $_[0]->[6];}
#May not ever need this
#We pass the slice to store
#Don't normally want to remap, so don't need furing fetch
#Now also sets for to enable projection
sub slice {
$_[0]->[7] = $_[1] if $_[1];
$_[0]->[7];
}
#Had to reimplement these as they used direct hash calls rather than acessor
#redefined to use accessors to array
sub length {
my $self = shift;
return $self->end - $self->start + 1;
}
=head2 move
Arg [1] : int start
Arg [2] : int end
Arg [3] : (optional) int strand
Example : None
Description: Sets the start, end and strand in one call rather than in
3 seperate calls to the start(), end() and strand() methods.
This is for convenience and for speed when this needs to be
done within a tight loop.
Returntype : none
Exceptions : Thrown is invalid arguments are provided
Caller : general
Status : Stable
=cut
sub move {
  my $self = shift;

  # Need at least start and end after $self has been shifted off.
  throw('start and end arguments are required') if(@_ < 2);

  my ( $new_start, $new_end, $new_strand ) = @_;

  if ( defined $new_start && defined $new_end && $new_end < $new_start ) {
    throw('start must be less than or equal to end');
  }
  if ( defined $new_strand
       && $new_strand != 0 && $new_strand != -1 && $new_strand != 1 ) {
    throw('strand must be 0, -1 or 1');
  }

  # Slots: [0] start, [1] end, [2] strand.  Strand is only overwritten
  # when explicitly supplied, matching the documented behaviour.
  $self->[0] = $new_start;
  $self->[1] = $new_end;
  $self->[2] = $new_strand if defined $new_strand;
}
=head2 feature_Slice
Args : none
Example : $slice = $feature->feature_Slice()
Description: Reimplementation of Bio::EnsEMBL::Feature method to enable
assembly mapping
Returntype : Bio::EnsEMBL::Slice or undef if this feature has no attached
Slice.
Exceptions : warning if Feature does not have attached slice.
Caller : web drawing code
Status : Stable
=cut
sub feature_Slice {
my ($self) = @_;
my $slice = $self->[7];
if(!$slice) {
warning('Cannot obtain Feature_Slice for feature without attached slice');
return undef;
}
return $slice->sub_Slice($self->[0], $self->[1]);
}
1;
| adamsardar/perl-libs-custom | EnsemblAPI/ensembl-functgenomics/modules/Bio/EnsEMBL/Funcgen/ResultFeature.pm | Perl | apache-2.0 | 6,516 |
use strict;
use warnings;
=begin
# All the wonderful things we can do with our new sample schema
- An individual can have multiple samples
- One sample is associated with one individual
- The name for individual and a sample for this individual can be different
- create a new sample:
- pass the individual object to sample constructor
- if creating a new sample object always require an individual object or id,
if no indiviual information is available creat a new individual object before
creating the sample object
- Store: display, has_coverage, variation_set_id on sample level
- Store: gender, mother_individual_id, father_individual_id, individual_type_id on individual level
- store synonyms for a sample
- associate a study with genotypes for a sample
# Rewrite:
# VCFCollection
# StructuralVariationSample.pm
=end
=cut
my $individual_adaptor;
my $sample_adaptor;
# additional attributes: source, url, external_reference, study_type
my $study_1000G = Bio::EnsEMBL::Variation::Study->new(
-name => '1000G phase 3',
-description => 'Whole-genome sequencing',
);
my $study_ExAC = Bio::EnsEMBL::Variation::Study->new(
-name => 'Exome Aggregation Consortium',
-description => 'exome sequencing',
);
# BUG FIX: the class name was being called as a plain function
# ("Bio::EnsEMBL::Variation::Individual(...)"), which would die at runtime;
# the constructor must be invoked as a class method, as is already done for
# Study and Sample elsewhere in this file.
my $individual = Bio::EnsEMBL::Variation::Individual->new(
    -name => 'NA18967',
);
$individual = $individual_adaptor->store($individual);
my $sample_1000G = Bio::EnsEMBL::Variation::Sample->new(
-individual => $individual,
-study => $study,
);
$sample_adaptor->store($sample);
# OR:
$individual->add_Sample->($sample);
$individual_adaptor->store($individual);
| at7/work | documentation/sample.pl | Perl | apache-2.0 | 1,606 |
#!/usr/bin/env perl
use strict;
use warnings;
use v5.10;
use Digest::SHA qw(sha1);
use File::Temp();
use File::Basename qw(basename);
use File::Find();
# Parse command line: <mode> is --check or --update, <dir> is the project
# directory (e.g. 'core' or 'plugins/analysis-icu').
my $mode = shift(@ARGV) || die usage();
my $dir = shift(@ARGV) || die usage();
# Normalise: strip one trailing slash so path concatenation below is clean.
$dir =~ s{/$}{};
# File-scoped globals shared by the subs below.
our $RELEASES_DIR = "$dir/target/releases/";
our $LICENSE_DIR = "$dir/licenses/";
# Dispatch on mode; any other value prints usage and exits.
$mode eq '--check' ? check_shas_and_licenses($dir)
: $mode eq '--update' ? write_shas($dir)
: die usage();
#===================================
sub check_shas_and_licenses {
#===================================
    # Cross-check the jars bundled in the release zip against the checked-in
    # .sha1, LICENSE and NOTICE files. Prints every discrepancy to STDERR and
    # exits with the number of problems found (0 == fully consistent).
    my %new = get_shas_from_zip();
    check_tar_has_same_shas(%new);
    my %old      = get_sha_files();
    my %licenses = get_files_with('LICENSE');
    my %notices  = get_files_with('NOTICE');

    my $error     = 0;
    # $sha_error counts only SHA problems: it decides whether the
    # "run --update" hint is printed at the end.
    my $sha_error = 0;

    for my $jar ( sort keys %new ) {
        my $old_sha = delete $old{$jar};
        unless ($old_sha) {
            say STDERR "$jar: SHA is missing";
            $error++;
            $sha_error++;
            next;
        }
        unless ( $old_sha eq $new{$jar} ) {
            say STDERR "$jar: SHA has changed";
            $error++;
            $sha_error++;
            next;
        }

        my $license_found;
        my $notice_found;
        my $prefix = $jar;
        $prefix =~ s/\.sha1//;
        # Strip trailing "-<component>" pieces until a LICENSE prefix matches,
        # e.g. foo-bar-1.2.3.jar.sha1 -> foo-bar-1.2.3 -> foo-bar -> foo.
        while ( $prefix =~ s/-[^\-]+$// ) {
            if ( exists $licenses{$prefix} ) {
                $license_found = 1;
                # mark all licenses with the same prefix as used
                for ( keys %licenses ) {
                    $licenses{$_}++ if index( $prefix, $_ ) == 0;
                }
                if ( exists $notices{$prefix} ) {
                    $notices{$prefix}++;
                    $notice_found = 1;
                }
                last;
            }
        }
        unless ($license_found) {
            say STDERR "$jar: LICENSE is missing";
            $error++;
            # Fixed: this is a licensing problem, not a SHA problem, so it no
            # longer sets $sha_error (which would print the misleading
            # "--update the SHA files" hint below).
        }
        unless ($notice_found) {
            say STDERR "$jar: NOTICE is missing";
            $error++;
        }
    }

    if ( keys %old ) {
        say STDERR "Extra SHA files present for: " . join ", ", sort keys %old;
        $error++;
    }

    # Stale LICENSE/NOTICE files are reported but deliberately not fatal.
    my @unused_licenses = grep { !$licenses{$_} } keys %licenses;
    if (@unused_licenses) {
        say STDERR "Extra LICENSE file present: " . join ", ",
            sort @unused_licenses;
    }
    my @unused_notices = grep { !$notices{$_} } keys %notices;
    if (@unused_notices) {
        say STDERR "Extra NOTICE file present: " . join ", ",
            sort @unused_notices;
    }

    if ($sha_error) {
        say STDERR <<"SHAS"
You can update the SHA files by running:
$0 --update core
SHAS
    }
    exit $error;
}
#===================================
sub write_shas {
#===================================
    # Regenerate the .sha1 files under $LICENSE_DIR so they mirror the jars
    # bundled in the release zip: add missing entries, rewrite changed ones,
    # and delete entries whose jar is no longer shipped.
    my %current  = get_shas_from_zip();
    my %recorded = get_sha_files();

    for my $jar ( sort keys %current ) {
        # Pull the old entry out as we go; leftovers are stale files.
        my $previous = delete $recorded{$jar};
        if ($previous) {
            next if $previous eq $current{$jar};
            say "Updating $jar";
        }
        else {
            say "Adding $jar";
        }
        open my $out, '>', $LICENSE_DIR . $jar or die $!;
        say {$out} $current{$jar} or die $!;
        close $out or die $!;
    }

    for my $jar ( sort keys %recorded ) {
        say "Deleting $jar";
        unlink $LICENSE_DIR . $jar or die $!;
    }
}
#===================================
sub get_files_with {
#===================================
    # Find every file in $LICENSE_DIR whose name contains $pattern (LICENSE
    # or NOTICE) and return a hash keyed by the dependency prefix (the part
    # before "-<pattern>"), each value initialised to 0 so the caller can use
    # it as a "times used" counter.
    my ($pattern) = @_;
    my %found;
    for my $path ( grep { -f } glob("$LICENSE_DIR/*$pattern*") ) {
        my ($prefix) = ( $path =~ m{([^/]+)-${pattern}.*$} );
        $found{$prefix} = 0;
    }
    return %found;
}
#===================================
sub get_sha_files {
#===================================
    # Read every *.sha1 file in $LICENSE_DIR and return a hash of
    # "<jar>.sha1" filename => recorded hex digest (empty string for an
    # empty file).
    die "Missing directory: $LICENSE_DIR\n"
        unless -d $LICENSE_DIR;

    my %recorded;
    for my $path ( grep { -f } glob("$LICENSE_DIR/*.sha1") ) {
        my ($name) = ( $path =~ m{([^/]+)$} );
        open my $in, '<', $path or die $!;
        my $digest = <$in>;
        $digest ||= '';
        chomp $digest;
        $recorded{$name} = $digest;
    }
    return %recorded;
}
#===================================
sub get_shas_from_zip {
#===================================
    # Locate the release zip, extract all bundled jars (flattened) into a
    # temp dir, and return the jar=>sha1 map, excluding the elasticsearch
    # artefacts themselves.
    my ($zip) = glob("$RELEASES_DIR/elasticsearch*.zip")
        or die "No .zip file found in $RELEASES_DIR\n";

    my $temp_dir = File::Temp->newdir;
    my $dir_name = $temp_dir->dirname;

    # system() returns 0 on success, hence "&& die".
    # NOTE(review): $! is only meaningful if fork/exec itself failed, not
    # when unzip exits non-zero — confirm before relying on the message.
    system( 'unzip', "-j", "-q", $zip, "*.jar", "-d" => $dir_name )
        && die "Error unzipping <$zip> to <" . $dir_name . ">: $!\n";

    my @jars = grep { !/\/elasticsearch[^\/]+.jar$/ } glob "$dir_name/*.jar";
    return calculate_shas(@jars);
}
#===================================
sub check_tar_has_same_shas {
#===================================
    # Sanity check: the tar.gz release (if built) must bundle exactly the
    # same jars, byte for byte, as the zip the SHA map was computed from.
    # Takes the jar=>sha map from the zip; dies listing every difference.
    my %zip_shas = @_;
    my ($tar) = glob("$RELEASES_DIR/elasticsearch*.tar.gz")
        or return;    # no tar.gz present: nothing to compare

    my $temp_dir = File::Temp->newdir;
    my $dir_name = $temp_dir->dirname;

    # tar returns 0 on success, hence "&& die".
    system( 'tar', "-xz", "-C" => $dir_name, "-f" => $tar )
        && die "Error unpacking <$tar> to <" . $dir_name . ">: $!\n";

    # Collect all bundled jars, excluding the elasticsearch artefacts.
    my @jars;
    File::Find::find(
        { wanted =>
                sub { push @jars, $_ if /\.jar$/ && !/elasticsearch[^\/]*$/ },
            no_chdir => 1
        },
        $dir_name
    );

    my %tar_shas = calculate_shas(@jars);
    my @errors;
    # Anything left in %tar_shas after this loop exists only in the tar.gz.
    for ( sort keys %zip_shas ) {
        my $sha = delete $tar_shas{$_};
        if ( !$sha ) {
            push @errors, "$_: JAR present in zip but not in tar.gz";
        }
        elsif ( $sha ne $zip_shas{$_} ) {
            push @errors, "$_: JAR in zip and tar.gz are different";
        }
    }
    for ( sort keys %tar_shas ) {
        push @errors, "$_: JAR present in tar.gz but not in zip";
    }
    if (@errors) {
        die join "\n", @errors;
    }
}
#===================================
sub calculate_shas {
#===================================
    # Compute the SHA-1 digest of each file passed in and return a hash of
    # "<basename>.sha1" => hex digest, matching the naming convention of the
    # checked-in SHA files.
    my %shas;
    while ( my $file = shift() ) {
        my $digest = eval { Digest::SHA->new(1)->addfile($file) }
            # Fixed: report the eval error ($@); $! is an unrelated errno here.
            or die "Error calculating SHA1 for <$file>: " . ( $@ || 'unknown error' ) . "\n";
        $shas{ basename($file) . ".sha1" } = $digest->hexdigest;
    }
    return %shas;
}
#===================================
sub usage {
#===================================
    # Returns the help text (a runtime string, interpolating the script name
    # via $0); used both for --help-style exits and as a die() message.
    return <<"USAGE";
USAGE:
    $0 --check  dir   # check the sha1 and LICENSE files for each jar
    $0 --update dir   # update the sha1 files for each jar
The <dir> can be set to e.g. 'core' or 'plugins/analysis-icu/'
USAGE
}
| hechunwen/elasticsearch | dev-tools/src/main/resources/license-check/check_license_and_sha.pl | Perl | apache-2.0 | 6,599 |
#!/usr/bin/env perl
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2020] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
use strict;
use warnings;

use Carp;    # Fixed: croak() is called below but Carp was never imported
use Bio::EnsEMBL::Registry;
use Bio::SeqIO;

my $reg = "Bio::EnsEMBL::Registry";

my $species = shift;

# Mouse lives on ens-staging2; every other species on ens-staging1.
if($species eq "mouse"){
  $reg->load_registry_from_db(
                              -host => 'ens-staging2',
                              -user => 'ensro');
}
else{
  $reg->load_registry_from_db(
                              -host => 'ens-staging1',
                              -user => "ensro");
}

my $core_sa = $reg->get_adaptor($species,"core","slice");
my $vega_sa = $reg->get_adaptor($species,"vega","slice");

if(!defined($core_sa)){
  die "Could not get core slice adaptor for $species???";
}
if(!defined($vega_sa)){
  die "Could not get vega slice adaptor for $species???";
}

# For each transcript, fetch (stable_id, display_label) pairs attached via
# xrefs belonging to a given external_db name (bound as the placeholder).
my $sql = 'select t.stable_id, x.display_label from xref x, object_xref ox , transcript t, external_db e where e.external_db_id = x.external_db_id and x.xref_id = ox.xref_id and t.transcript_id = ox.ensembl_id and e.db_name like ?';

my $sth = $vega_sa->dbc->prepare($sql) || die "Could not prepare $sql for vega";

my %vega_ott_to_enst;
my %core_ott_to_enst;

$sth->execute("ENST_CDS") or croak( $vega_sa->dbc->errstr() );
while ( my @row = $sth->fetchrow_array() ) {
  $vega_ott_to_enst{$row[0]} = $row[1];
}

$sth->execute("ENST_ident") or croak( $vega_sa->dbc->errstr() );
while ( my @row = $sth->fetchrow_array() ) {
  $vega_ott_to_enst{$row[0]} = $row[1];
}

# Fixed: scalar(keys %hash) gives the entry count; a raw hash in scalar
# context did not reliably do so before Perl 5.26.
print "We have from the vega database ".scalar(keys %vega_ott_to_enst)." ott to enst entries\n ";

$sth = $core_sa->dbc->prepare($sql) || die "Could not prepare $sql for core ";
$sth->execute("OTTT") or croak( $core_sa->dbc->errstr() );

my $core_extra_count = 0;
while ( my @row = $sth->fetchrow_array() ) {
  if(!defined($vega_ott_to_enst{$row[1]})){
    $core_extra_count++;
    if($core_extra_count < 10){
      print "core extra ".$row[1]." ". $row[0]."\n";
    }
  }
  $core_ott_to_enst{$row[1]} = $row[0];
}
print "Core extra tags (OTTT) -> $core_extra_count \n";
$core_extra_count = 0;

$sth = $core_sa->dbc->prepare($sql) || die "Could not prepare $sql for core ";
$sth->execute("shares_CDS_and_UTR_with_OTTT") or croak( $core_sa->dbc->errstr() );
# Fixed: was "my $core_extra_count = 0;", which masked the earlier
# declaration and triggered a warning.
$core_extra_count = 0;
while ( my @row = $sth->fetchrow_array() ) {
  if(!defined($vega_ott_to_enst{$row[1]})){
    $core_extra_count++;
    if($core_extra_count < 10){
      print "core extra ".$row[1]." ". $row[0]."\n";
    }
  }
  $core_ott_to_enst{$row[1]} = $row[0];
}
print "Core extra tags (shares_CDS_and_UTR_with_OTTT) -> $core_extra_count \n";
$core_extra_count = 0;

#$sth = $core_sa->dbc->prepare($sql) || die "Could not prepare $sql for core ";
#$sth->execute("shares_CDS_with_ENST") or croak( $core_sa->dbc->errstr() );
#my $core_extra_count = 0;
#while ( my @row = $sth->fetchrow_array() ) {
#  if(!defined($vega_ott_to_enst{$row[1]})){
#    $core_extra_count++;
#    if($core_extra_count < 10){
#      print "core extra ".$row[1]." ". $row[0]."\n";
#    }
#  }
#  $core_ott_to_enst{$row[1]} = $row[0];
#}
#print "Core extra tags (shares_CDS_with_ENST) -> $core_extra_count \n";
#$core_extra_count = 0;

#$sth->execute("Vega_transcript") or croak( $core_sa->dbc->errstr() );
#while ( my @row = $sth->fetchrow_array() ) {
#  if(!($row[1] =~ /^OTT/)){
#    next;
#  }
#  if(!defined($vega_ott_to_enst{$row[1]})){
#    $core_extra_count++;
#    if($core_extra_count < 10){
#      print "core extra ".$row[1]." ". $row[0]."\n";
#    }
#  }
#  $core_ott_to_enst{$row[1]} = $row[0];
#}
#print "Core extra tags (Vega_transcript) -> $core_extra_count \n";

$core_extra_count = 0;
$sth->execute("shares_CDS_with_OTTT") or croak( $core_sa->dbc->errstr() );
while ( my @row = $sth->fetchrow_array() ) {
  if(!($row[1] =~ /^OTT/)){
    next;
  }
  if(!defined($vega_ott_to_enst{$row[1]})){
    $core_extra_count++;
    if($core_extra_count < 10){
      print "core extra ".$row[1]." ". $row[0]."\n";
    }
  }
  $core_ott_to_enst{$row[1]} = $row[0];
}
print "Core extra tags (shares_CDS_with_OTTT) -> $core_extra_count \n";
$core_extra_count = 0;

print "We have from the core database ".scalar(keys %core_ott_to_enst)." ott to enst entries\n ";

# Finally, report vega OTT ids with no counterpart in core.
my $vega_extra_count = 0;
foreach my $key (keys %vega_ott_to_enst){
  if(!defined($core_ott_to_enst{$key})){
    $vega_extra_count++;
    if($vega_extra_count < 10){
      print "vega extra ".$key." ". $vega_ott_to_enst{$key}."\n";
    }
  }
}
print "Vega extra tags -> $vega_extra_count \n";
| james-monkeyshines/ensembl | misc-scripts/xref_mapping/core_vega_link_check.pl | Perl | apache-2.0 | 5,048 |
# Thin Perl-side stub for KinoSearch::Index::LexiconWriter; the actual
# implementation is provided by the KinoSearch XS layer, with the Perl
# binding registered from the Clownfish declaration after __END__.
package KinoSearch::Index::LexiconWriter;
use KinoSearch;

1;
__END__
__BINDING__
Clownfish::Binding::Perl::Class->register(
parcel => "KinoSearch",
class_name => "KinoSearch::Index::LexiconWriter",
bind_constructors => ["new"],
);
__COPYRIGHT__
Copyright 2005-2011 Marvin Humphrey
This program is free software; you can redistribute it and/or modify
under the same terms as Perl itself.
| gitpan/KinoSearch | lib/KinoSearch/Index/LexiconWriter.pm | Perl | apache-2.0 | 425 |
#
# Copyright 2018 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package os::solaris::local::mode::hwraidctl;

# Centreon plugin mode: checks Solaris hardware RAID health by parsing the
# output of 'raidctl -S' (run locally or over ssh, optionally via sudo).

use base qw(centreon::plugins::mode);

use strict;
use warnings;
use centreon::plugins::misc;

sub new {
    # Constructor: registers the command-line options this mode accepts.
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '1.0';
    $options{options}->add_options(arguments =>
                                {
                                  "hostname:s"        => { name => 'hostname' },
                                  "remote"            => { name => 'remote' },
                                  "ssh-option:s@"     => { name => 'ssh_option' },
                                  "ssh-path:s"        => { name => 'ssh_path' },
                                  "ssh-command:s"     => { name => 'ssh_command', default => 'ssh' },
                                  "timeout:s"         => { name => 'timeout', default => 30 },
                                  "sudo"              => { name => 'sudo' },
                                  "command:s"         => { name => 'command', default => 'raidctl' },
                                  "command-path:s"    => { name => 'command_path', default => '/usr/sbin' },
                                  "command-options:s" => { name => 'command_options', default => '-S 2>&1' },
                                  "warning:s"         => { name => 'warning', },
                                  "critical:s"        => { name => 'critical', },
                                });
    return $self;
}

sub check_options {
    # Validates the warning/critical thresholds; exits with a usage error if
    # either does not parse as a threshold expression.
    my ($self, %options) = @_;
    $self->SUPER::init(%options);

    if (($self->{perfdata}->threshold_validate(label => 'warning', value => $self->{option_results}->{warning})) == 0) {
       $self->{output}->add_option_msg(short_msg => "Wrong warning threshold '" . $self->{option_results}->{warning} . "'.");
       $self->{output}->option_exit();
    }
    if (($self->{perfdata}->threshold_validate(label => 'critical', value => $self->{option_results}->{critical})) == 0) {
       $self->{output}->add_option_msg(short_msg => "Wrong critical threshold '" . $self->{option_results}->{critical} . "'.");
       $self->{output}->option_exit();
    }
}

sub run {
    # Executes raidctl, parses volume and disk states, and raises the
    # configured thresholds against the number of failing volumes/disks.
    my ($self, %options) = @_;

    my $stdout = centreon::plugins::misc::execute(output => $self->{output},
                                                  options => $self->{option_results},
                                                  sudo => $self->{option_results}->{sudo},
                                                  command => $self->{option_results}->{command},
                                                  command_path => $self->{option_results}->{command_path},
                                                  command_options => $self->{option_results}->{command_options});
    my $long_msg = $stdout;
    # '|' would be interpreted as the plugin-output/perfdata separator.
    $long_msg =~ s/\|/~/mg;
    $self->{output}->output_add(long_msg => $long_msg);

    my $volumes_errors = 0;
    my $disks_errors = 0;
    my $volumes = '';
    my $disks = '';

    foreach (split(/\n/, $stdout)) {
        #1 "LSI_1030"
        #c1t2d0 2 0.2.0 0.3.0 1 OPTIMAL
        #0.0.0 GOOD
        #0.1.0 GOOD
        #0.2.0 GOOD
        #0.3.0 GOOD
        #4 "LSI_1030"
        # For Disk
        # A bare "<id> FAILED" line is a disk; anything else ending in
        # DEGRADED/FAILED is a volume line.
        if (/^\s*(\S+)\s+(FAILED)$/i ) {
            my $disk = $1;
            $disks_errors++;
            $disks .= ' [' . $disk . '/FAILED' . ']';
        } elsif (/^\s*(\S+).*?(DEGRADED|FAILED)$/i) {
            $volumes_errors++;
            $volumes .= ' [' . $1 . '/' . $2 . ']';
        }
    }

    # The same warning/critical thresholds apply to both error counters.
    my ($exit_code) = $self->{perfdata}->threshold_check(value => $volumes_errors,
                                                         threshold => [ { label => 'critical', 'exit_litteral' => 'critical' }, { label => 'warning', exit_litteral => 'warning' } ]);
    if ($volumes_errors > 0) {
        $self->{output}->output_add(severity => $exit_code,
                                    short_msg => sprintf("Some volumes problems:" . $volumes));
    } else {
        $self->{output}->output_add(severity => 'OK',
                                    short_msg => "No problems on volumes");
    }

    ($exit_code) = $self->{perfdata}->threshold_check(value => $disks_errors,
                                                      threshold => [ { label => 'critical', 'exit_litteral' => 'critical' }, { label => 'warning', exit_litteral => 'warning' } ]);
    if ($disks_errors > 0) {
        $self->{output}->output_add(severity => $exit_code,
                                    short_msg => sprintf("Some disks problems:" . $disks));
    } else {
        $self->{output}->output_add(severity => 'OK',
                                    short_msg => "No problems on disks");
    }

    $self->{output}->display();
    $self->{output}->exit();
}

1;
__END__
=head1 MODE
Check Hardware Raid Status (use 'raidctl' command).
=over 8
=item B<--warning>
Threshold warning.
=item B<--critical>
Threshold critical.
=item B<--remote>
Execute command remotely in 'ssh'.
=item B<--hostname>
Hostname to query (need --remote).
=item B<--ssh-option>
Specify multiple options like the user (example: --ssh-option='-l=centreon-engine" --ssh-option='-p=52").
=item B<--ssh-path>
Specify ssh command path (default: none)
=item B<--ssh-command>
Specify ssh command (default: 'ssh'). Useful to use 'plink'.
=item B<--timeout>
Timeout in seconds for the command (Default: 30).
=item B<--sudo>
Use 'sudo' to execute the command.
=item B<--command>
Command to get information (Default: 'raidctl').
Can be changed if you have output in a file.
=item B<--command-path>
Command path (Default: '/usr/sbin').
=item B<--command-options>
Command options (Default: '-S 2>&1').
=back
=cut
| wilfriedcomte/centreon-plugins | os/solaris/local/mode/hwraidctl.pm | Perl | apache-2.0 | 6,547 |
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2017] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 NAME - Bio::EnsEMBL::Analysis::Tools::Otter::DBSQL::DBAdaptor
Inherits from the standard Ensembl Bio::EnsEMBL::DBSQL::DBAdaptor
However - the get_available_adaptors method is overridden here
so that we can read in the Otter DnaAlignFeature and
DnaAlignFeatureHistory adaptors.
=cut
package Bio::EnsEMBL::Analysis::Tools::Otter::DBSQL::DBAdaptor;

use warnings ;
use Bio::EnsEMBL::Utils::Exception;
use Bio::EnsEMBL::DBSQL::DBAdaptor;
use vars qw(@ISA);
use strict;

@ISA = qw( Bio::EnsEMBL::DBSQL::DBAdaptor);

=head2 get_available_adaptors

  Example    : my %pairs = %{$dba->get_available_adaptors()};
  Description: gets a hash of the available adaptors
  ReturnType : reference to a hash
  Exceptions : none
  Caller     : Bio::EnsEMBL::Utils::ConfigRegistry
  Status     : Stable

=cut

sub get_available_adaptors {
  # Same object-name => adaptor-class map as the core DBAdaptor, with the
  # Otter-specific DnaAlignFeature and DnaAlignFeatureHistory adaptors
  # overriding/extending the defaults.
  my %pairs = (
    # Firstly those that just have an adaptor named after there object
    # in the main DBSQL directory.
    map( { $_ => "Bio::EnsEMBL::DBSQL::${_}Adaptor" } qw(
        AffyFeature AffyArray AffyProbe
        Analysis ArchiveStableId Attribute
        AssemblyExceptionFeature AssemblyMapper CoordSystem
        CompressedSequence DBEntry
        DensityFeature DensityType Exon
        Gene KaryotypeBand MiscSet
        MiscFeature OligoArray OligoFeature
        OligoProbe PredictionTranscript PredictionExon
        ProteinFeature ProteinAlignFeature RepeatConsensus
        RepeatFeature Sequence SimpleFeature
        Slice SupportingFeature Transcript
        TranscriptSupportingFeature Translation UnmappedObject
        UnconventionalTranscriptAssociation
        AssemblySlice
        ) ),

    # Those whose adaptors are in Map::DBSQL
    map( { $_ => "Bio::EnsEMBL::Map::DBSQL::${_}Adaptor" } qw(
        Marker MarkerFeature QtlFeature Qtl Ditag DitagFeature
        ) ),

    # otter ones
    map( { $_ => "Bio::EnsEMBL::Analysis::Tools::Otter::DBSQL::${_}Adaptor" } qw(
        DnaAlignFeature DnaAlignFeatureHistory
        ) ),

    # Finally the exceptions... those that have non-standard mapping
    # between object / adaptor ....
    # 'Blast' => 'Bio::EnsEMBL::External::BlastAdaptor',
    'MetaCoordContainer' => 'Bio::EnsEMBL::DBSQL::MetaCoordContainer',
    'MetaContainer'      => 'Bio::EnsEMBL::DBSQL::MetaContainer',
    'SNP'                => 'Bio::EnsEMBL::DBSQL::ProxySNPAdaptor',

    # Feature Collections:
    'GeneCollection'       => 'Bio::EnsEMBL::Collection::Gene',
    'TranscriptCollection' => 'Bio::EnsEMBL::Collection::Transcript',
    'ExonCollection'       => 'Bio::EnsEMBL::Collection::Exon',
    'RepeatFeatureCollection' =>
      'Bio::EnsEMBL::Collection::RepeatFeature' );

  return ( \%pairs );
} ## end sub get_available_adaptors

1;
| james-monkeyshines/ensembl-analysis | modules/Bio/EnsEMBL/Analysis/Tools/Otter/DBSQL/DBAdaptor.pm | Perl | apache-2.0 | 3,696 |
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package apps::centreon::map4::jmx::mode::sessions;

# Centreon plugin mode: reads the current session count from the Centreon
# Map JMX MBean and checks it against warning/critical thresholds.

use base qw(centreon::plugins::mode);

use strict;
use warnings;

sub new {
    # Constructor: registers the warning/critical threshold options.
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $options{options}->add_options(arguments =>
                                {
                                "warning:s"  => { name => 'warning', },
                                "critical:s" => { name => 'critical', },
                                });
    return $self;
}

sub check_options {
    # Validates both thresholds; exits with a usage error on a bad value.
    my ($self, %options) = @_;
    $self->SUPER::init(%options);

    if (($self->{perfdata}->threshold_validate(label => 'warning', value => $self->{option_results}->{warning})) == 0) {
        $self->{output}->add_option_msg(short_msg => "Wrong warning threshold '" . $self->{option_results}->{warning} . "'.");
        $self->{output}->option_exit();
    }
    if (($self->{perfdata}->threshold_validate(label => 'critical', value => $self->{option_results}->{critical})) == 0) {
        $self->{output}->add_option_msg(short_msg => "Wrong critical threshold '" . $self->{option_results}->{critical} . "'.");
        $self->{output}->option_exit();
    }
}

sub run {
    # Fetches SessionCount from the session-statistics MBean via the JMX
    # connector (custom mode, e.g. jolokia) and emits status + perfdata.
    my ($self, %options) = @_;
    $self->{connector} = $options{custom};

    $self->{request} = [
         { mbean => "com.centreon.studio:name=statistics,type=session" }
    ];

    my $result = $self->{connector}->get_attributes(request => $self->{request}, nothing_quit => 0);

    my $exit = $self->{perfdata}->threshold_check(value => $result->{"com.centreon.studio:name=statistics,type=session"}->{SessionCount},
                                                  threshold => [ { label => 'critical', exit_litteral => 'critical' }, { label => 'warning', exit_litteral => 'warning'} ]);
    $self->{output}->output_add(severity => $exit,
                                short_msg => sprintf("Current sessions : %d",
                                                     $result->{"com.centreon.studio:name=statistics,type=session"}->{SessionCount}));
    $self->{output}->perfdata_add(label => 'sessions',
                                  value => $result->{"com.centreon.studio:name=statistics,type=session"}->{SessionCount},
                                  warning => $self->{perfdata}->get_perfdata_for_output(label => 'warning'),
                                  critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical'),
                                  min => 0);

    $self->{output}->display();
    $self->{output}->exit();
}

1;
1;
__END__
=head1 MODE
Check Centreon Map Number of sessions
Example:
perl centreon_plugins.pl --plugin=apps::centreon::map::jmx::plugin --custommode=jolokia --url=http://10.30.2.22:8080/jolokia-war --mode=sessions
=over 8
=item B<--warning>
Set this threshold if you want a warning alert when the current session count matches the condition.

=item B<--critical>

Set this threshold if you want a critical alert when the current session count matches the condition.
=back
=cut
| centreon/centreon-plugins | apps/centreon/map4/jmx/mode/sessions.pm | Perl | apache-2.0 | 3,829 |
# AWS WAF shape class: a single insert-or-delete instruction for a
# ByteMatchSet (see the POD below for call/result usage).
package Paws::WAF::ByteMatchSetUpdate;
  use Moose;
  # Whether to insert or delete the tuple (per the AWS WAF API).
  has Action => (is => 'ro', isa => 'Str', required => 1);
  # The byte-match tuple being inserted into / deleted from the set.
  has ByteMatchTuple => (is => 'ro', isa => 'Paws::WAF::ByteMatchTuple', required => 1);
1;
### main pod documentation begin ###
=head1 NAME
Paws::WAF::ByteMatchSetUpdate
=head1 USAGE
This class represents one of two things:
=head3 Arguments in a call to a service
Use the attributes of this class as arguments to methods. You shouldn't make instances of this class.
Each attribute should be used as a named argument in the calls that expect this type of object.
As an example, if Att1 is expected to be a Paws::WAF::ByteMatchSetUpdate object:
$service_obj->Method(Att1 => { Action => $value, ..., ByteMatchTuple => $value });
=head3 Results returned from an API call
Use accessors for each attribute. If Att1 is expected to be an Paws::WAF::ByteMatchSetUpdate object:
$result = $service_obj->Method(...);
$result->Att1->Action
=head1 DESCRIPTION
In an UpdateByteMatchSet request, C<ByteMatchSetUpdate> specifies
whether to insert or delete a ByteMatchTuple and includes the settings
for the C<ByteMatchTuple>.
=head1 ATTRIBUTES
=head2 B<REQUIRED> Action => Str
Specifies whether to insert or delete a ByteMatchTuple.
=head2 B<REQUIRED> ByteMatchTuple => L<Paws::WAF::ByteMatchTuple>
Information about the part of a web request that you want AWS WAF to
inspect and the value that you want AWS WAF to search for. If you
specify C<DELETE> for the value of C<Action>, the C<ByteMatchTuple>
values must exactly match the values in the C<ByteMatchTuple> that you
want to delete from the C<ByteMatchSet>.
=head1 SEE ALSO
This class forms part of L<Paws>, describing an object used in L<Paws::WAF>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/WAF/ByteMatchSetUpdate.pm | Perl | apache-2.0 | 1,916 |
% Horn-clause verification conditions (CLP translation of an SV-COMP C task:
% a recursive identity function checked at input 5). Deriving `false`
% corresponds to reaching the program's error location.
false :-
    main_verifier_error.

% verifier_error/3: the three enumerated non-error/error flag combinations.
verifier_error(P1, P2, P3) :-
    P1 = 0, P2 = 0, P3 = 0.
verifier_error(P1, P2, P3) :-
    P1 = 0, P2 = 1, P3 = 1.
verifier_error(P1, P2, P3) :-
    P1 = 1, P2 = 0, P3 = 1.
verifier_error(P1, P2, P3) :-
    P1 = 1, P2 = 1, P3 = 1.

% id/5: base clauses for the translated id() function; the first three
% arguments are control flags, the last two its input/output.
id(P1, P2, P3, In, Out) :-
    P1 = 1, P2 = 1, P3 = 1.
id(P1, P2, P3, In, Out) :-
    P1 = 0, P2 = 1, P3 = 1.
id(P1, P2, P3, In, Out) :-
    P1 = 0, P2 = 0, P3 = 0.

% Entry predicate for the split body of id().
id__1(X) :-
    true.

% Base case: input 0 maps to 0.
id___0(Out, X) :-
    id__1(X),
    X = 0,
    Out = 0.

% Guard for the recursive case: X is non-zero.
id__3(X) :-
    id__1(X),
    X < 0.
id__3(X) :-
    id__1(X),
    X > 0.

% Recursive case: id(X) = id(X-1) + 1.
id___0(Out, X) :-
    id__3(X),
    id(1, 0, 0, X + -1, Rec),
    Out = Rec + 1.

id__split(Out, X) :-
    id___0(Out, X).

% Recursive entry of id/5 delegates to the split body.
id(P1, P2, P3, In, Out) :-
    P1 = 1, P2 = 0, P3 = 0,
    id__split(Out, In).

main_entry :-
    true.

% main: calls id(5) and asserts the result is 5; the verification query
% checks whether the assertion's error branch is reachable.
main__un :-
    main_entry,
    id(1, 0, 0, 5, R),
    R = 5.

main_verifier_error :-
    main__un.
# Copyright 2018 - present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
use strict;
use warnings;
package MongoDB::_Dispatcher;
# Encapsulate op dispatching; breaking this out from client
# allows avoiding circular references with the session pool class.
use version;
our $VERSION = 'v2.2.2';
use Moo;
use MongoDB::_Constants;
use MongoDB::_Types qw(
Boolish
);
use Carp;
use Types::Standard qw(
InstanceOf
);
use Safe::Isa;
use namespace::clean;
# The topology object used for server selection, link acquisition and
# server-state bookkeeping (mark_server_unknown / mark_stale).
has topology => (
    is => 'ro',
    required => 1,
    isa => InstanceOf ['MongoDB::_Topology'],
);

# Client setting: whether retryable writes are enabled.
has retry_writes => (
    is => 'ro',
    required => 1,
    isa => Boolish,
);

# Client setting: whether retryable reads are enabled.
has retry_reads => (
    is => 'ro',
    required => 1,
    isa => Boolish,
);
# Keep the op's session bookkeeping current: outside an active transaction
# the transaction state is reset to TXN_NONE; inside one, the session is
# flagged as having performed at least one operation.
sub _maybe_update_session_state {
    my ( $self, $op ) = @_;
    my $session = $op->session;
    return unless defined $session;

    if ( $session->_active_transaction ) {
        $session->_set__has_transaction_operations( 1 );
    }
    else {
        $session->_set__transaction_state( TXN_NONE );
    }
}
# op dispatcher written in highly optimized style
# Sends $op to one specific server ($address), bypassing server selection.
# On connection/timeout errors the server is marked unknown; on
# not-master errors the topology is additionally marked stale. The error
# is always rethrown after cleanup.
sub send_direct_op {
    my ( $self, $op, $address ) = @_;
    my ( $link, $result );
    $self->_maybe_update_session_state( $op );
    # Comma-chained expressions are deliberate (avoids extra statements).
    ( $link = $self->{topology}->get_specific_link( $address, $op ) ), (
        eval { ($result) = $op->execute($link); 1 } or do {
            my $err = length($@) ? $@ : "caught error, but it was lost in eval unwind";
            if ( $err->$_isa("MongoDB::ConnectionError") || $err->$_isa("MongoDB::NetworkTimeout") ) {
                $self->{topology}->mark_server_unknown( $link->server, $err );
            }
            elsif ( $err->$_isa("MongoDB::NotMasterError") ) {
                $self->{topology}->mark_server_unknown( $link->server, $err );
                $self->{topology}->mark_stale;
            }
            # regardless of cleanup, rethrow the error
            WITH_ASSERTS ? ( confess $err ) : ( die $err );
        }
    ),
    return $result;
}
# Select the connection for an operation. During an active transaction on a
# deployment that supports mongos pinning, reuse the session's pinned
# server; otherwise pick a writable ('w') or readable ('r') link per $rw.
sub _retrieve_link_for {
    my ( $self, $op, $rw ) = @_;
    my $topo    = $self->{'topology'};
    my $session = $op->session;

    if (   $session
        && $session->_address              # no pin recorded -> nothing to reuse
        && $session->_active_transaction   # true during txn and on every commit
        && $topo->_supports_mongos_pinning_transactions )
    {
        return $topo->get_specific_link( $session->_address, $op );
    }

    return $rw eq 'w'
        ? $topo->get_writable_link( $op )
        : $topo->get_readable_link( $op );
}
# op dispatcher written in highly optimized style
# Sends a (non-retryable) write op to a writable server; errors from
# _try_op_for_link (which handles topology cleanup) are rethrown as-is.
sub send_write_op {
    my ( $self, $op ) = @_;
    my ( $link, $result );
    $self->_maybe_update_session_state( $op );
    ( $link = $self->_retrieve_link_for( $op, 'w' ) ), (
        eval { ($result) = $self->_try_op_for_link( $link, $op ); 1 } or do {
            my $err = length($@) ? $@ : "caught error, but it was lost in eval unwind";
            WITH_ASSERTS ? ( confess $err ) : ( die $err );
        }
    ),
    return $result;
}

# Sometimes, seeing an op dispatched as "send_write_op" is confusing when
# really, we're just insisting that it be sent only to a primary or
# directly connected server.
BEGIN {
    no warnings 'once';
    *send_primary_op = \&send_write_op;
}
# Dispatch a write with at most one retry, per the retryable-writes
# protocol. Falls back to a single plain attempt when the server or the
# configuration does not support retries, or while a transaction is
# starting/in progress. Passing $force eq 'force' overrides the
# retry_writes client setting (used for transaction commit/abort).
sub send_retryable_write_op {
    my ( $self, $op, $force ) = @_;
    my ( $link, $result ) = ( $self->_retrieve_link_for( $op, 'w' ) );
    $self->_maybe_update_session_state( $op );
    # Need to force to do a retryable write on a Transaction Commit or Abort.
    # $force is an override for retry_writes, but theres no point trying that
    # if the link doesnt support it anyway.
    # This triggers on the following:
    # * $force is not set to 'force'
    #   (specifically for retrying writes in ending transaction operations)
    # * retry writes is not enabled or the link doesnt support retryWrites
    # * if an active transaction is starting or in progress
    unless ( $link->supports_retryWrites
        && ( $self->retry_writes || ( defined $force && $force eq 'force' ) )
        && ( defined $op->session
          && ! $op->session->_in_transaction_state( TXN_STARTING, TXN_IN_PROGRESS )
        )
    ) {
        # Non-retryable path: single attempt, errors rethrown.
        eval { ($result) = $self->_try_op_for_link( $link, $op ); 1 } or do {
            my $err = length($@) ? $@ : "caught error, but it was lost in eval unwind";
            WITH_ASSERTS ? ( confess $err ) : ( die $err );
        };
        return $result;
    }

    # If we get this far and there is no session, then somethings gone really
    # wrong, so probably not worth worrying about.

    # increment transaction id before write, but otherwise is the same for both
    # attempts. If not in a transaction, is a no-op
    $op->session->_increment_transaction_id;
    $op->retryable_write( 1 );

    # attempt the op the first time
    eval { ($result) = $self->_try_op_for_link( $link, $op ); 1 } or do {
        my $err = length($@) ? $@ : "caught error, but it was lost in eval unwind";

        if ( $err->$_call_if_can('_is_storage_engine_not_retryable') ) {
            # Break encapsulation to rewrite the message, then rethrow.
            $err->{message} = "This MongoDB deployment does not support retryable writes. Please add retryWrites=false to your connection string.";
            die $err;
        }

        # If the error is not retryable, then drop out
        unless ( $err->$_call_if_can('_is_retryable') ) {
            WITH_ASSERTS ? ( confess $err ) : ( die $err );
        }

        # Must check if error is retryable before getting the link, in case we
        # get a 'no writable servers' error. In the case of a mongos retry,
        # this will end up as the same server by design.
        my $retry_link = $self->_retrieve_link_for( $op, 'w' );

        # Rare chance that the new link is not retryable
        unless ( $retry_link->supports_retryWrites ) {
            WITH_ASSERTS ? ( confess $err ) : ( die $err );
        }

        # Second attempt
        eval { ($result) = $self->_try_op_for_link( $retry_link, $op ); 1 } or do {
            my $retry_err = length($@) ? $@ : "caught error, but it was lost in eval unwind";
            WITH_ASSERTS ? ( confess $retry_err ) : ( die $retry_err );
        };
    };

    # just in case this gets reused for some reason
    $op->retryable_write( 0 );

    return $result;
}
# Returns true when $err indicates a primary stepdown/shutdown that should
# mark the server unknown AND the topology stale: ShutdownInProgress,
# InterruptedAtShutdown, or (pre-wire-version-8 only) a NotMaster error.
# NOTE(review): assumes $err is a hash-based exception object; a plain
# string error reaching here would die on the hash dereference below —
# confirm all callers pass exception objects.
sub _is_primary_stepdown {
    my ($self, $err, $link) = @_;
    my $err_info = $err->{result}->{output};
    my $err_code_name = '';
    $err_code_name = $err_info->{'codeName'} if defined $err_info->{'codeName'};
    my @other_errors = qw(ShutdownInProgress InterruptedAtShutdown);
    # Wire version >= 8 servers handle NotMaster differently, so only treat
    # it as a stepdown on older servers.
    my $not_master = (
        $err->$_isa('MongoDB::NotMasterError')
        || ( $err_info && $err_code_name eq 'NotMaster' )
    ) && $link->max_wire_version < 8;
    return (
        $err_info && grep { $err_code_name eq $_ } @other_errors
    ) || $not_master;
}
# op dispatcher written in highly optimized style
# Executes $op on $link once. On connection/timeout errors the server is
# marked unknown; on primary-stepdown errors the topology is also marked
# stale. The original error is always rethrown (plain die here; callers
# decide between die and confess).
sub _try_op_for_link {
    my ( $self, $link, $op ) = @_;
    my $result;
    (
        eval { ($result) = $op->execute($link, $self->{topology}->type); 1 } or do {
            my $err = length($@) ? $@ : "caught error, but it was lost in eval unwind";
            if ( $err->$_isa("MongoDB::ConnectionError") || $err->$_isa("MongoDB::NetworkTimeout") ) {
                $self->{topology}->mark_server_unknown( $link->server, $err );
            }
            elsif ( $self->_is_primary_stepdown($err, $link) ) {
                $self->{topology}->mark_server_unknown( $link->server, $err );
                $self->{topology}->mark_stale;
            }
            # normal die here instead of assert, which is used later
            die $err;
        }
    ),
    return $result;
}
# Dispatch a read op with at-most-one-retry semantics (retryable reads).
# Falls back to a single attempt when the link does not support retryable
# reads, when retry_reads is disabled, or while a transaction is starting
# or in progress (transactions manage their own retry behavior).
sub send_retryable_read_op {
    my ( $self, $op ) = @_;
    my $result;
    # Get transaction read preference if in a transaction.
    if ( defined $op->session && $op->session->_active_transaction ) {
        # Transactions may only read from primary in MongoDB 4.0, so get and
        # check the read preference from the transaction settings as per
        # transaction spec - see MongoDB::_TransactionOptions
        $op->read_preference( $op->session->_get_transaction_read_preference );
    }
    my $link = $self->_retrieve_link_for( $op, 'r' );
    $self->_maybe_update_session_state( $op );
    # Non-retryable path: a single attempt, assert/rethrow on failure.
    if ( ! $link->supports_retryReads
        || ! $self->retry_reads
        || ( defined $op->session && $op->session->_in_transaction_state( TXN_STARTING, TXN_IN_PROGRESS ))
    ) {
        eval { ($result) = $self->_try_op_for_link( $link, $op ); 1 } or do {
            my $err = length($@) ? $@ : "caught error, but it was lost in eval unwind";
            WITH_ASSERTS ? ( confess $err ) : ( die $err );
        };
        return $result;
    }
    # Retryable path: bump the transaction id once so that both attempts
    # are sent under the same id.
    $op->session->_increment_transaction_id if $op->session;
    $op->retryable_read( 1 );
    # attempt the op the first time
    eval { ($result) = $self->_try_op_for_link( $link, $op ); 1 } or do {
        my $err = length($@) ? $@ : "caught error, but it was lost in eval unwind";
        # If the error is not retryable, then drop out
        unless ( $err->$_call_if_can('_is_retryable') ) {
            WITH_ASSERTS ? ( confess $err ) : ( die $err );
        }
        # Re-select a server; the topology may have changed after the failure.
        my $retry_link = $self->_retrieve_link_for( $op, 'r' );
        # Rare chance that the new link is not retryable
        unless ( $retry_link->supports_retryReads ) {
            WITH_ASSERTS ? ( confess $err ) : ( die $err );
        }
        # Second attempt
        eval { ($result) = $self->_try_op_for_link( $retry_link, $op ); 1 } or do {
            my $retry_err = length($@) ? $@ : "caught error, but it was lost in eval unwind";
            WITH_ASSERTS ? ( confess $retry_err ) : ( die $retry_err );
        };
    };
    # just in case this gets reused for some reason
    $op->retryable_read( 0 );
    return $result;
}
# op dispatcher written in highly optimized style: the link/type
# assignments, the eval group and the return are chained with comma
# operators instead of separate statements; behavior is identical to the
# straightforward sequential form. Dispatches a read op with NO retry.
sub send_read_op {
    my ( $self, $op ) = @_;
    my ( $link, $type, $result );
    # Get transaction read preference if in a transaction.
    if ( defined $op->session && $op->session->_active_transaction ) {
        # Transactions may only read from primary in MongoDB 4.0, so get and
        # check the read preference from the transaction settings as per
        # transaction spec - see MongoDB::_TransactionOptions
        $op->read_preference( $op->session->_get_transaction_read_preference );
    }
    $self->_maybe_update_session_state( $op );
    ( $link = $self->_retrieve_link_for( $op, 'r' ) ),
    ( $type = $self->{topology}->type ), (
        eval { ($result) = $op->execute( $link, $type ); 1 } or do {
            # $@ may be clobbered during unwind; substitute a stand-in message.
            my $err = length($@) ? $@ : "caught error, but it was lost in eval unwind";
            # Network failure: this server's state can no longer be trusted.
            if ( $err->$_isa("MongoDB::ConnectionError") || $err->$_isa("MongoDB::NetworkTimeout") ) {
                $self->{topology}->mark_server_unknown( $link->server, $err );
            }
            # "Not master": mark the server unknown and force a topology rescan.
            elsif ( $err->$_isa("MongoDB::NotMasterError") ) {
                $self->{topology}->mark_server_unknown( $link->server, $err );
                $self->{topology}->mark_stale;
            }
            # regardless of cleanup, rethrow the error
            WITH_ASSERTS ? ( confess $err ) : ( die $err );
        }
    ),
    return $result;
}
1;
| mongodb/mongo-perl-driver | lib/MongoDB/_Dispatcher.pm | Perl | apache-2.0 | 12,154 |
#!/usr/bin/env perl
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2020] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
use warnings ;
use strict;
use Getopt::Long qw(:config no_ignore_case);
use Bio::EnsEMBL::Utils::Exception qw(verbose throw warning info);
use Bio::EnsEMBL::Pipeline::Utils::PipelineSanityChecks;
use Bio::EnsEMBL::Pipeline::RuleManager;
use Bio::EnsEMBL::Pipeline::DBSQL::DBAdaptor;
use Bio::EnsEMBL::Pipeline::Utils::InputIDFactory;
# Autoflush STDOUT so progress messages appear immediately.
$| = 1;

# Graceful-shutdown flag: set by the TERM/INT handler below and polled by
# the main submission loop so we stop at an iteration boundary.
my $term_sig = 0;
$SIG{TERM} = \&termhandler;
$SIG{INT} = \&termhandler;

#database arguments (defaults taken from the standard ENS_* environment)
my $dbhost = $ENV{'ENS_DBHOST'};
my $dbname = $ENV{'ENS_DBNAME'};
my $dbuser = $ENV{'ENS_DBUSER'};
my $dbpass = $ENV{'ENS_DBPASS'};
my $dbport = $ENV{'ENS_DBPORT'} || 3306;

# command line args
my $help; # get docs about script
my $verbose; # print statements about the running script
my $queue_manager; # Which Bio::EnsEMBL::BatchSubmission module to use
my $runner; # Runner script to use when running jobs (this will be overridden by anything in config)
my $output_dir; # Output_dir to use when running jobs (this won't be overridden when running jobs)
my $mark_awol = 1; # Flag as whether to mark jobs which have gone missing from the system
my $rename_on_retry = 1; # Whether to rename jobs stdout/err which are being retried
my $config_sanity = 1; # Flag as to whether to check configuration sanity
my $utils_verbosity = 'WARNING'; # how verbose do you want the
# Bio::EnsEMBL::Utils::Exceptions module to be by default it is set to
# WARNING as this gives warning and throws but not deprecates or infos
my $logic_name; # the logic_name of the analysis to be run
my $force; # force the analysis to run regardless of the rules
my $ids_to_run; # filepath to file of input_ids to run
my $perldoc;
my @command_args = @ARGV; # kept verbatim so useage() can echo the invocation
# The options below feed the InputIDFactory when -make_input_ids is given.
my $make_input_ids;
my $slice_size;
my $slice_overlap;
my $coord_system;
my $coord_system_version;
my $slice;
my $input_id_type;
my $file;
my $dir;
my $regex;
my $single;
my $translation_id;
my $name = 'genome'; # default dummy id used in -single mode
my $seq_level;
my $top_level;
# Parse command-line options. Options declared with '!' also accept a -no
# prefix (e.g. -nomark_awol); ':s' marks an optional string value.
GetOptions(
    'host|dbhost|h:s' => \$dbhost,
    'dbname|db|D:s' => \$dbname,
    'user|dbuser|u:s' => \$dbuser,
    'pass|dbpass|p:s' => \$dbpass,
    'port|dbport|P:s' => \$dbport,
    'help!' => \$help,
    'verbose!' => \$verbose,
    'queue_manager=s' => \$queue_manager,
    'runner=s' => \$runner,
    'output_dir=s' => \$output_dir,
    'mark_awol!' => \$mark_awol,
    'rename_on_retry' => \$rename_on_retry,
    'config_sanity!' => \$config_sanity,
    'utils_verbosity=s' => \$utils_verbosity,
    'perldoc!' => \$perldoc,
    # Single specification for the analysis name. A second, conflicting
    # 'logic_name:s' entry used to appear further down in this list;
    # Getopt::Long warns about duplicate specifications and the later spec
    # (optional value, no -analysis alias) silently replaced this one, so
    # the duplicate has been removed.
    'logic_name|analysis=s' => \$logic_name,
    'force!' => \$force,
    'input_id_file=s' => \$ids_to_run,
    'make_input_ids!' => \$make_input_ids,
    'coord_system|cs_name:s' => \$coord_system,
    'coord_system_version:s' => \$coord_system_version,
    'slice' => \$slice,
    'slice_size:s' => \$slice_size,
    'slice_overlap:s' => \$slice_overlap,
    'file' => \$file,
    'dir:s' => \$dir,
    'file_regex:s' => \$regex,
    'single' => \$single,
    'single_name:s' => \$name,
    'translation_ids' => \$translation_id,
    'seq_level!' => \$seq_level,
    'top_level!' => \$top_level,
) or useage(\@command_args);
# Show the embedded POD and exit immediately if requested.
perldoc() if $perldoc;
# Set the verbosity of Bio::EnsEMBL::Utils::Exception output.
verbose($utils_verbosity);

# A usable database connection is mandatory.
unless ($dbhost && $dbname && $dbuser) {
    print STDERR "Must specify database with -dbhost, -dbname, -dbuser and -dbpass\n";
    print STDERR "Currently have -dbhost $dbhost -dbname $dbname ".
        "-dbuser $dbuser -dbpass $dbpass -dbport $dbport\n";
    $help = 1;
}
# An analysis logic_name is mandatory too.
if (!$logic_name) {
    print "Can't run without an analysis to run on \n";
    print "specific analysis with -analysis logic_name";
    $help = 1;
}
if ($help) {
    useage(\@command_args);
}
# Defensive only: $mark_awol defaults to 1 and GetOptions can only set it
# to 0/1, so it should never actually be undef here.
if (!defined($mark_awol)) {
    $mark_awol = 0;
}
if ($make_input_ids && !$force) {
    # Freshly manufactured input_ids won't pass rule checks, so imply -force.
    # (Fixed: the message previously ended in a literal "/n" typo instead of
    # a newline escape.)
    print STDERR "Setting force to on as if you are making the input ids ".
        "the odds are any rule won't work\n";
    $force = 1;
}
# Connect to the pipeline database holding the analyses, rules and jobs.
my $db = Bio::EnsEMBL::Pipeline::DBSQL::DBAdaptor->new(
    -host => $dbhost,
    -dbname => $dbname,
    -user => $dbuser,
    -pass => $dbpass,
    -port => $dbport,
);
# Helper used to validate the pipeline configuration against the database.
my $sanity = Bio::EnsEMBL::Pipeline::Utils::PipelineSanityChecks->new
    (
     -DB => $db,
    );
# The RuleManager does the real work: job creation, submission and tracking.
my $rulemanager = Bio::EnsEMBL::Pipeline::RuleManager->new
    (
     -DB => $db,
     -QUEUE_MANAGER => $queue_manager,
     -MARK_AWOL => $mark_awol,
     -RENAME_ON_RETRY => $rename_on_retry,
     -VERBOSE => $verbose,
     -RUNNER => $runner,
     -OUTPUT_DIR => $output_dir,
    );
# Check General.pm/BatchQueue.pm settings unless -noconfig_sanity was given.
if ($config_sanity) {
    $sanity->config_sanity_check;
}
# Fetch the analysis to run; it must be fully specified (input_id_type and
# a runnable module) before jobs can be created from it.
my $analysis = $rulemanager->analysis_adaptor->fetch_by_logic_name($logic_name);
if (!$analysis || !$analysis->input_id_type || !$analysis->module) {
    throw("Must have an analysis object $logic_name $analysis and analysis ".
          "must have an input_id_type and a module to run\n");
}
# Accumulators need the full rulemanager machinery, not this script.
if ($analysis->input_id_type eq 'ACCUMULATOR') {
    throw("Can't use this script to run accumulators");
}
if ($ids_to_run && ! -e $ids_to_run) {
    throw("Must be able to read $ids_to_run");
}
# Collect the input_ids to submit (from a file, the factory, or the DB).
my $input_ids = setup_input_ids($analysis, $rulemanager, $ids_to_run,
                                $make_input_ids, $slice, $file,
                                $translation_id, $single, $slice_size,
                                $slice_overlap, $dir, $regex, $name,
                                $seq_level, $top_level, $verbose,
                                $logic_name);
# The rule whose goal is this analysis; consulted unless -force is set.
my $rule = $rulemanager->rule_adaptor->fetch_by_goal($analysis);
my %completed_accumulator_analyses = %{$rulemanager->fetch_complete_accumulators};
if (@$input_ids == 0) {
    throw("Can't do anything I have no input ids");
}
if ($force){
    warning("You are forcing this job to be run without checking if ".
            "the rules allow it or if it has already run/is running - ".
            "are you sure you want to do this?\n");
}
print STDERR "Trying to submit jobs for ".$analysis->logic_name."\n"
    if($verbose);
# Main submission loop: one job per input_id. A TERM/INT signal stops the
# loop at the next iteration boundary (see termhandler).
INPUT_ID:
foreach my $input_id (@$input_ids){
    print $input_id."\n" if($verbose);
    if ($term_sig) {
        print "Got term signal\n" if($verbose);
        last INPUT_ID;
    }
    if ($force) {
        # Skip all rule checks: create the job and batch it straight away.
        my $job = $rulemanager->create_and_store_job($input_id, $analysis);
        $job->batch_runRemote;
    } else {
        # Submit only if the rule's conditions are satisfied for this id.
        my @anals = @{$rulemanager->stateinfocontainer->
                        fetch_analysis_by_input_id($input_id)};
        my $anal = $rule->check_for_analysis
            (\@anals, $analysis->input_id_type,
             \%completed_accumulator_analyses, $verbose);
        if(UNIVERSAL::isa($anal,'Bio::EnsEMBL::Pipeline::Analysis')){
            $rulemanager->can_job_run($input_id, $analysis);
        }
    }
}
# Flush any jobs still waiting in the batch queue and release the DB lock.
$rulemanager->cleanup_waiting_jobs();
$rulemanager->db->pipeline_unlock;
# Decide where the input_ids for this run come from and return them as an
# array ref. Three mutually exclusive sources, in priority order:
#   1. an explicit file of ids (-input_id_file),
#   2. freshly manufactured ids (-make_input_ids),
#   3. all stored ids matching the analysis' input_id_type.
# Throws if the id file mixes types or its type disagrees with the analysis.
sub setup_input_ids {
    my ($analysis, $rulemanager, $ids_to_run, $make_input_ids,
        $slice, $file, $translation_id, $single, $slice_size,
        $slice_overlap, $dir, $regex, $name, $seq_level, $top_level,
        $verbose, $logic_name) = @_;

    if ($ids_to_run) {
        my $id_hash = $rulemanager->read_id_file($ids_to_run);
        my @types = keys(%$id_hash);
        # The file must contain exactly one input_id type.
        if (scalar(@types) != 1) {
            throw("You have passed in a file with ".@types." input ".
                  "id types something funny is going on");
        }
        # That type must match the analysis we are submitting for.
        # (Fixed garbled message: "the same tupe are" -> "the same type as".)
        if ($types[0] ne $analysis->input_id_type) {
            throw("If your input_ids aren't the same type as your analysis ".
                  $types[0]." compared to ".$analysis->input_id_type." this ".
                  "won't work");
        }
        my @ids = keys(%{$id_hash->{$analysis->input_id_type}});
        return \@ids;
    } elsif ($make_input_ids) {
        print STDERR "Making input ids\n" if($verbose);
        my $ids = make_input_ids($slice, $file, $translation_id, $single,
                                 $slice_size, $slice_overlap, $dir,
                                 $regex, $name, $seq_level, $top_level,
                                 $verbose, $logic_name, $rulemanager->db);
        return $ids;
    } else {
        my @ids = @{$rulemanager->stateinfocontainer
                      ->list_input_ids_by_type($analysis->input_id_type)};
        return \@ids;
    }
}
# Signal handler for TERM/INT: request a graceful stop. The main input_id
# loop polls $term_sig and exits cleanly rather than dying mid-submission.
sub termhandler {
    $term_sig = 1;
}
# Build the list of input_ids with the InputIDFactory, driven by the
# command-line mode flags (-slice/-file/-single/-translation_ids/...).
# NOTE(review): $coord_system and $coord_system_version are read from the
# file-level option variables, not from this sub's parameter list; also
# the constructor passes -slice_overlaps (plural) for the overlap value --
# confirm both against InputIDFactory before changing.
sub make_input_ids {
    my ($slice, $file, $translation_id, $single, $slice_size,
        $slice_overlap, $dir, $regex, $name, $seq_level, $top_level,
        $verbose, $logic_name, $db) = @_;
    # Indirect object syntax ("new Class") kept as-is from the original.
    my $inputIDFactory = new Bio::EnsEMBL::Pipeline::Utils::InputIDFactory
        (
         -db => $db,
         -slice => $slice,
         -single => $single,
         -file => $file,
         -translation_id => $translation_id,
         -seq_level => $seq_level,
         -top_level => $top_level,
         -dir => $dir,
         -regex => $regex,
         -single_name => $name,
         -verbose => $verbose,
         -logic_name => $logic_name,
         -coord_system => $coord_system,
         -coord_system_version => $coord_system_version,
         -slice_size => $slice_size,
         -slice_overlaps => $slice_overlap,
        );
    my $ids = $inputIDFactory->generate_input_ids;
    return $ids;
}
# Print the command line that was used plus a short usage summary, then
# exit. (The misspelled name is historical; callers rely on it.)
sub useage {
    my ($command_args) = @_;

    # Echo back exactly how the script was invoked.
    print 'Your commandline was :' . "\n"
        . 'job_submission.pl ' . join( "\t", @$command_args ) . "\n\n";

    print 'job_submission.pl is a script for submitting a single analysis '
        . 'jobs to the farm using the job system as standard it derives its '
        . "input ids based on the input_id type of the analysis \n\n";

    print 'Everytime you run job_submission.pl you must pass in the '
        . "database options\n\n";

    print "-dbhost The host where the pipeline database is.\n"
        . "-dbport The port.\n"
        . "-dbuser The user to connect as.\n"
        . "-dbpass The password to use.\n"
        . "-dbname The database name.\n\n";

    print 'This script also requires an analysis object to already exist '
        . "in the database and its logic_name is passed in with\n"
        . "-logic_name\n\n";

    print "Other options you may find useful are:\n\n"
        . '-force which forces the script to ignore currently running jobs and '
        . " the rules and just submits jobs with the input_ids specified\n"
        . '-input_id_file a file in the format input_id input_id_type which '
        . "specified which input_ids to run the analysis with\n";

    print '-make_input_ids which tells the script to create a set of '
        . "input_ids using the Bio::EnsEMBL::Pipeline::Utils::InputIDFactory\n"
        . 'this also requires a series of other options which can been seen '
        . "if you run the script with -perldoc\n\n";

    print ' -perldocs will print out the perl documentation of this module '
        . "and -help will print out the help again \n";

    exit(0);
}
# Display this script's embedded POD via perldoc(1) and stop. The exit(0)
# is a safety net for the (unlikely) case that exec fails to replace the
# process.
sub perldoc{
    exec('perldoc', $0);
    exit(0);
}
=pod
=head1 NAME
job_submission.pl
=head1 SYNOPSIS
job_submission.pl a script for submitting a single analysis' jobs
to the pipeline.
=head1 DESCRIPTION
this script will run a single analyses jobs through the pipeline
it will check rules and retry failed jobs but it doesn't have too.
It can either read the input_ids from a file or create the
input_ids using the InputIDFactory
=head1 OPTIONS
DB Connection Details
-dbhost The host where the pipeline database is.
-dbport The port.
-dbuser The user to connect as.
-dbpass The password to use.
-dbname The database name.
Analysis Details
-logic_name the logic_name of the analysis you want to run. You
must have this analysis already in the analysis table and it
must has an input_id_type and module specified. This logic
name is also used to fetch the rule. This analysis should be
the goal of the rule you want executed
-force this forces the script to ignore the rule and just create
and submit the jobs with no regard to whether they should be run
or are already running. If this option is specified jobs already
in the pipeline running this analysis are ignored and failed
jobs of this type are not retried
RuleManager details
-utils_verbosity, this affects the amount of chatter you receive from
the core module Bio::EnsEMBL::Utils::Exception. By default this is set
to WARNING which means you see the prints from warnings and throws but
not from deprecate and info calls. See the modules itself for more
information
-verbose, toggles whether some print statements are printed in this
script and in the RuleManager
-config_sanity this is a test to check certain values are defined in
your General.pm and BatchQueue.pm config files
Some of the following options can initially be set in either the General.pm
of BatchQueue.pm config files see docs of rulemanager.pl for more
details
-queue_manager this specifies which
Bio::EnsEMBL::Pipeline::BatchSubmission module is used by
Bio::EnsEMBL::Pipeline::RuleManager
-output_dir the path to an output directory when the jobs stderr and
stdout will be redirected. This always overrides the values specified in
BatchQueue
-mark_awol toggle to specify whether to mark jobs as awol if lost from
the submission system this can apply strain to the job table. It is on
by default and can be switched off using the -nomark_awol flag
-runner path to a default runner script. This will override what is
set in General.pm but will be overidden by any analyses specific
settings found in BatchQueue.pm
-rename_on_retry a toggle to specify whether to rename stderr/stdout
file when a job is retried as otherwise the submission system just
cats them all together
Input id details
If you specify no options to do with input_ids it will just take
all the input_ids from the input_id_analysis table with an appropriate
input_id_type as specified by the analysis object
-input_id_file this is a text file in the format input_id input_id_type
if used these are the only input_ids which are considered
-make_input_ids this indicates you want to use the InputIDFactory
to make the input_ids for you. If you specify this option it
makes you use force too so the rules are ignored as if the
analysis needs its input ids created it won't pass a rule check'
These are options needed for the manufacture of input ids
-slice signals to insert slice type input ids using
the format
coord_system:coord_system_version:seq_region_name:start:end:strand
-coord_system the coordinate system you want slices in
-coord_system_version the version of the coord system you want
-slice_size the size to make the slice ids
-slice_overlap the slice overlap (non-overlapping by default)
-file if the input_ids are to be a list of filenames from a directory
-dir the directory to read the filenames from
-file_regex a regex to impose on the filenames before using them
-single if you just want a single dummy input_id ie for genome wide
analyses
-single_name , by default this is genome but you can specify something
different here, for example for protein annotation jobs which use the
whole proteome this must be proteome
-translation_ids if you want your input ids to be translation ids
-verbose if you want more information about what the script is doing
-input_id_type if you want to specific an input_id_type not already
used by the analysis object
-insert_analysis if you want to insert an analysis object if it doesn't
already exist in the database'
-seq_level if you want the ids for the seq_level seq_regions, can
work with slice_size but the -slice options isn't required'
-top_level this will fetch all the non_redundant pieces in
the database this may produce ids which are a mixture of different
coordinate systems, if -coord_system_version is specified it will
be ignored
Misc options
-help will print out the standard help
-perldoc will print out these perl docs
=head1 CONTACT
Post general queries to <http://lists.ensembl.org/mailman/listinfo/dev>
=head1 EXAMPLES
./job_submission.pl -dbhost myhost -dbuser user -dbpass password -dbport 3306
-dbname my_pipeline_database -logic_name blast
this would run the analysis specified with logic_name blast using
input_ids fetched using blasts input_id type and checking the rules and running
jobs to ensure the analysis can be run
./job_submission.pl -dbhost myhost -dbuser user -dbpass password -dbport 3306
-dbname my_pipeline_database -logic_name blast -force
as above this would still the same input_ids and run the same analysis but this
time it would ignore running jobs and rules
./job_submission.pl -dbhost myhost -dbuser user -dbpass password -dbport 3306
-dbname my_pipeline_database -logic_name blast -input_id_file id_list
this time would consider only the input_ids specified in the file
./job_submission.pl -dbhost myhost -dbuser user -dbpass password -dbport 3306
-dbname my_pipeline_database -logic_name blast -make_input_ids -slice
-coord_system contig
this time it would make the input_ids based on the contig input_id system
note by specifying make_input_ids this automatically switched -force on
as if you need to manufacture your input_ids it is unlikely that the analysis
will pass any rule checks
=head1 SEE ALSO
rulemanager.pl
lsf_submission.pl and
pipeline_sanity.pl all here in ensembl-pipeline/scripts
awol_check.pl
and also using_the_ensembl_pipeline.txt in the ensembl-docs cvs module
=cut
| Ensembl/ensembl-pipeline | scripts/job_submission.pl | Perl | apache-2.0 | 18,882 |
# Auto-generated VMOMI binding for the vSphere HostFileSystemVolume data
# object. Declares the member/ancestor tables the serialization framework
# walks when (de)serializing this type.
package VMOMI::HostFileSystemVolume;
use parent 'VMOMI::DynamicData';

use strict;
use warnings;

# Ancestor class-name chain, nearest parent first.
our @class_ancestors = (
    'DynamicData',
);

# Member descriptors: [name, type-override, flag].
# NOTE(review): the exact meaning of the undef/0 columns is defined by the
# VMOMI serialization framework, not visible here -- confirm before use.
our @class_members = (
    ['type', undef, 0, ],
    ['name', undef, 0, ],
    ['capacity', undef, 0, ],
);

sub get_class_ancestors {
    return @class_ancestors;
}

# Combine inherited member descriptors with this class's own.
sub get_class_members {
    my $class = shift;
    my @super_members = $class->SUPER::get_class_members();
    return (@super_members, @class_members);
}

1;
| stumpr/p5-vmomi | lib/VMOMI/HostFileSystemVolume.pm | Perl | apache-2.0 | 473 |
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% This file is part of VivoMind Prolog Unicode Resources
% SPDX-License-Identifier: CC0-1.0
%
% VivoMind Prolog Unicode Resources is free software distributed using the
% Creative Commons CC0 1.0 Universal (CC0 1.0) - Public Domain Dedication
% license
%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% Last modified: March 19, 2012
%
% Original Unicode file header comments follow
/*
# HangulSyllableType-6.1.0.txt
# Date: 2011-08-25, 00:02:18 GMT [MD]
#
# Unicode Character Database
# Copyright (c) 1991-2011 Unicode, Inc.
# For terms of use, see http://www.unicode.org/terms_of_use.html
# For documentation, see http://www.unicode.org/reports/tr44/
*/
% ================================================
% Property: Hangul_Syllable_Type
% All code points not explicitly listed for Hangul_Syllable_Type
% have the value Not_Applicable (NA).
% @missing: 0000..10FFFF; Not_Applicable
% ================================================
% unicode_hangul_syllable_type(?CodePoint, ?Type)
%
% Bidirectional access to the Hangul_Syllable_Type property, backed by
% the unicode_hangul_syllable_type/3 fact table of code point ranges:
%
% - unbound CodePoint: enumerates (CodePoint, Type) pairs by expanding
%   every stored range with between/3 (code points with the default 'NA'
%   type are NOT enumerated in this mode);
% - bound CodePoint: first tries a direct first-argument-indexed lookup
%   (CodePoint equal to a range start), then a linear scan of the ranges
%   for one containing CodePoint, and finally falls back to 'NA' for any
%   valid code point not covered by the table (see @missing above).
unicode_hangul_syllable_type(CodePoint, Type) :-
	(	var(CodePoint) ->
		% generate code point pairs
		unicode_hangul_syllable_type(CodePointStart, CodePointEnd, Type),
		between(CodePointStart, CodePointEnd, CodePoint)
	;	% try first-argument indexing first
		unicode_hangul_syllable_type(CodePoint, _, CodePointType) ->
		Type = CodePointType
	;	% look for a code point range that includes the given code point
		unicode_hangul_syllable_type(CodePointStart, CodePointEnd, CodePointType),
		between(CodePointStart, CodePointEnd, CodePoint) ->
		Type = CodePointType
	;	% missing code point; see original comment above
		between(0x0000, 0x10FFFF, CodePoint),
		Type = 'NA'
	).
% Hangul_Syllable_Type=Leading_Jamo
unicode_hangul_syllable_type(0x1100, 0x115F, 'L'). % Lo [96] HANGUL CHOSEONG KIYEOK..HANGUL CHOSEONG FILLER
unicode_hangul_syllable_type(0xA960, 0xA97C, 'L'). % Lo [29] HANGUL CHOSEONG TIKEUT-MIEUM..HANGUL CHOSEONG SSANGYEORINHIEUH
% Total code points: 125
% ================================================
% Hangul_Syllable_Type=Vowel_Jamo
unicode_hangul_syllable_type(0x1160, 0x11A7, 'V'). % Lo [72] HANGUL JUNGSEONG FILLER..HANGUL JUNGSEONG O-YAE
unicode_hangul_syllable_type(0xD7B0, 0xD7C6, 'V'). % Lo [23] HANGUL JUNGSEONG O-YEO..HANGUL JUNGSEONG ARAEA-E
% Total code points: 95
% ================================================
% Hangul_Syllable_Type=Trailing_Jamo
unicode_hangul_syllable_type(0x11A8, 0x11FF, 'T'). % Lo [88] HANGUL JONGSEONG KIYEOK..HANGUL JONGSEONG SSANGNIEUN
unicode_hangul_syllable_type(0xD7CB, 0xD7FB, 'T'). % Lo [49] HANGUL JONGSEONG NIEUN-RIEUL..HANGUL JONGSEONG PHIEUPH-THIEUTH
% Total code points: 137
% ================================================
% Hangul_Syllable_Type=LV_Syllable
unicode_hangul_syllable_type(0xAC00, 0xAC00, 'LV'). % Lo HANGUL SYLLABLE GA
unicode_hangul_syllable_type(0xAC1C, 0xAC1C, 'LV'). % Lo HANGUL SYLLABLE GAE
unicode_hangul_syllable_type(0xAC38, 0xAC38, 'LV'). % Lo HANGUL SYLLABLE GYA
unicode_hangul_syllable_type(0xAC54, 0xAC54, 'LV'). % Lo HANGUL SYLLABLE GYAE
unicode_hangul_syllable_type(0xAC70, 0xAC70, 'LV'). % Lo HANGUL SYLLABLE GEO
unicode_hangul_syllable_type(0xAC8C, 0xAC8C, 'LV'). % Lo HANGUL SYLLABLE GE
unicode_hangul_syllable_type(0xACA8, 0xACA8, 'LV'). % Lo HANGUL SYLLABLE GYEO
unicode_hangul_syllable_type(0xACC4, 0xACC4, 'LV'). % Lo HANGUL SYLLABLE GYE
unicode_hangul_syllable_type(0xACE0, 0xACE0, 'LV'). % Lo HANGUL SYLLABLE GO
unicode_hangul_syllable_type(0xACFC, 0xACFC, 'LV'). % Lo HANGUL SYLLABLE GWA
unicode_hangul_syllable_type(0xAD18, 0xAD18, 'LV'). % Lo HANGUL SYLLABLE GWAE
unicode_hangul_syllable_type(0xAD34, 0xAD34, 'LV'). % Lo HANGUL SYLLABLE GOE
unicode_hangul_syllable_type(0xAD50, 0xAD50, 'LV'). % Lo HANGUL SYLLABLE GYO
unicode_hangul_syllable_type(0xAD6C, 0xAD6C, 'LV'). % Lo HANGUL SYLLABLE GU
unicode_hangul_syllable_type(0xAD88, 0xAD88, 'LV'). % Lo HANGUL SYLLABLE GWEO
unicode_hangul_syllable_type(0xADA4, 0xADA4, 'LV'). % Lo HANGUL SYLLABLE GWE
unicode_hangul_syllable_type(0xADC0, 0xADC0, 'LV'). % Lo HANGUL SYLLABLE GWI
unicode_hangul_syllable_type(0xADDC, 0xADDC, 'LV'). % Lo HANGUL SYLLABLE GYU
unicode_hangul_syllable_type(0xADF8, 0xADF8, 'LV'). % Lo HANGUL SYLLABLE GEU
unicode_hangul_syllable_type(0xAE14, 0xAE14, 'LV'). % Lo HANGUL SYLLABLE GYI
unicode_hangul_syllable_type(0xAE30, 0xAE30, 'LV'). % Lo HANGUL SYLLABLE GI
unicode_hangul_syllable_type(0xAE4C, 0xAE4C, 'LV'). % Lo HANGUL SYLLABLE GGA
unicode_hangul_syllable_type(0xAE68, 0xAE68, 'LV'). % Lo HANGUL SYLLABLE GGAE
unicode_hangul_syllable_type(0xAE84, 0xAE84, 'LV'). % Lo HANGUL SYLLABLE GGYA
unicode_hangul_syllable_type(0xAEA0, 0xAEA0, 'LV'). % Lo HANGUL SYLLABLE GGYAE
unicode_hangul_syllable_type(0xAEBC, 0xAEBC, 'LV'). % Lo HANGUL SYLLABLE GGEO
unicode_hangul_syllable_type(0xAED8, 0xAED8, 'LV'). % Lo HANGUL SYLLABLE GGE
unicode_hangul_syllable_type(0xAEF4, 0xAEF4, 'LV'). % Lo HANGUL SYLLABLE GGYEO
unicode_hangul_syllable_type(0xAF10, 0xAF10, 'LV'). % Lo HANGUL SYLLABLE GGYE
unicode_hangul_syllable_type(0xAF2C, 0xAF2C, 'LV'). % Lo HANGUL SYLLABLE GGO
unicode_hangul_syllable_type(0xAF48, 0xAF48, 'LV'). % Lo HANGUL SYLLABLE GGWA
unicode_hangul_syllable_type(0xAF64, 0xAF64, 'LV'). % Lo HANGUL SYLLABLE GGWAE
unicode_hangul_syllable_type(0xAF80, 0xAF80, 'LV'). % Lo HANGUL SYLLABLE GGOE
unicode_hangul_syllable_type(0xAF9C, 0xAF9C, 'LV'). % Lo HANGUL SYLLABLE GGYO
unicode_hangul_syllable_type(0xAFB8, 0xAFB8, 'LV'). % Lo HANGUL SYLLABLE GGU
unicode_hangul_syllable_type(0xAFD4, 0xAFD4, 'LV'). % Lo HANGUL SYLLABLE GGWEO
unicode_hangul_syllable_type(0xAFF0, 0xAFF0, 'LV'). % Lo HANGUL SYLLABLE GGWE
unicode_hangul_syllable_type(0xB00C, 0xB00C, 'LV'). % Lo HANGUL SYLLABLE GGWI
unicode_hangul_syllable_type(0xB028, 0xB028, 'LV'). % Lo HANGUL SYLLABLE GGYU
unicode_hangul_syllable_type(0xB044, 0xB044, 'LV'). % Lo HANGUL SYLLABLE GGEU
unicode_hangul_syllable_type(0xB060, 0xB060, 'LV'). % Lo HANGUL SYLLABLE GGYI
unicode_hangul_syllable_type(0xB07C, 0xB07C, 'LV'). % Lo HANGUL SYLLABLE GGI
unicode_hangul_syllable_type(0xB098, 0xB098, 'LV'). % Lo HANGUL SYLLABLE NA
unicode_hangul_syllable_type(0xB0B4, 0xB0B4, 'LV'). % Lo HANGUL SYLLABLE NAE
unicode_hangul_syllable_type(0xB0D0, 0xB0D0, 'LV'). % Lo HANGUL SYLLABLE NYA
unicode_hangul_syllable_type(0xB0EC, 0xB0EC, 'LV'). % Lo HANGUL SYLLABLE NYAE
unicode_hangul_syllable_type(0xB108, 0xB108, 'LV'). % Lo HANGUL SYLLABLE NEO
unicode_hangul_syllable_type(0xB124, 0xB124, 'LV'). % Lo HANGUL SYLLABLE NE
unicode_hangul_syllable_type(0xB140, 0xB140, 'LV'). % Lo HANGUL SYLLABLE NYEO
unicode_hangul_syllable_type(0xB15C, 0xB15C, 'LV'). % Lo HANGUL SYLLABLE NYE
unicode_hangul_syllable_type(0xB178, 0xB178, 'LV'). % Lo HANGUL SYLLABLE NO
unicode_hangul_syllable_type(0xB194, 0xB194, 'LV'). % Lo HANGUL SYLLABLE NWA
unicode_hangul_syllable_type(0xB1B0, 0xB1B0, 'LV'). % Lo HANGUL SYLLABLE NWAE
unicode_hangul_syllable_type(0xB1CC, 0xB1CC, 'LV'). % Lo HANGUL SYLLABLE NOE
unicode_hangul_syllable_type(0xB1E8, 0xB1E8, 'LV'). % Lo HANGUL SYLLABLE NYO
unicode_hangul_syllable_type(0xB204, 0xB204, 'LV'). % Lo HANGUL SYLLABLE NU
unicode_hangul_syllable_type(0xB220, 0xB220, 'LV'). % Lo HANGUL SYLLABLE NWEO
unicode_hangul_syllable_type(0xB23C, 0xB23C, 'LV'). % Lo HANGUL SYLLABLE NWE
unicode_hangul_syllable_type(0xB258, 0xB258, 'LV'). % Lo HANGUL SYLLABLE NWI
unicode_hangul_syllable_type(0xB274, 0xB274, 'LV'). % Lo HANGUL SYLLABLE NYU
unicode_hangul_syllable_type(0xB290, 0xB290, 'LV'). % Lo HANGUL SYLLABLE NEU
unicode_hangul_syllable_type(0xB2AC, 0xB2AC, 'LV'). % Lo HANGUL SYLLABLE NYI
unicode_hangul_syllable_type(0xB2C8, 0xB2C8, 'LV'). % Lo HANGUL SYLLABLE NI
unicode_hangul_syllable_type(0xB2E4, 0xB2E4, 'LV'). % Lo HANGUL SYLLABLE DA
unicode_hangul_syllable_type(0xB300, 0xB300, 'LV'). % Lo HANGUL SYLLABLE DAE
unicode_hangul_syllable_type(0xB31C, 0xB31C, 'LV'). % Lo HANGUL SYLLABLE DYA
unicode_hangul_syllable_type(0xB338, 0xB338, 'LV'). % Lo HANGUL SYLLABLE DYAE
unicode_hangul_syllable_type(0xB354, 0xB354, 'LV'). % Lo HANGUL SYLLABLE DEO
unicode_hangul_syllable_type(0xB370, 0xB370, 'LV'). % Lo HANGUL SYLLABLE DE
unicode_hangul_syllable_type(0xB38C, 0xB38C, 'LV'). % Lo HANGUL SYLLABLE DYEO
unicode_hangul_syllable_type(0xB3A8, 0xB3A8, 'LV'). % Lo HANGUL SYLLABLE DYE
unicode_hangul_syllable_type(0xB3C4, 0xB3C4, 'LV'). % Lo HANGUL SYLLABLE DO
unicode_hangul_syllable_type(0xB3E0, 0xB3E0, 'LV'). % Lo HANGUL SYLLABLE DWA
unicode_hangul_syllable_type(0xB3FC, 0xB3FC, 'LV'). % Lo HANGUL SYLLABLE DWAE
unicode_hangul_syllable_type(0xB418, 0xB418, 'LV'). % Lo HANGUL SYLLABLE DOE
unicode_hangul_syllable_type(0xB434, 0xB434, 'LV'). % Lo HANGUL SYLLABLE DYO
unicode_hangul_syllable_type(0xB450, 0xB450, 'LV'). % Lo HANGUL SYLLABLE DU
unicode_hangul_syllable_type(0xB46C, 0xB46C, 'LV'). % Lo HANGUL SYLLABLE DWEO
unicode_hangul_syllable_type(0xB488, 0xB488, 'LV'). % Lo HANGUL SYLLABLE DWE
unicode_hangul_syllable_type(0xB4A4, 0xB4A4, 'LV'). % Lo HANGUL SYLLABLE DWI
unicode_hangul_syllable_type(0xB4C0, 0xB4C0, 'LV'). % Lo HANGUL SYLLABLE DYU
unicode_hangul_syllable_type(0xB4DC, 0xB4DC, 'LV'). % Lo HANGUL SYLLABLE DEU
unicode_hangul_syllable_type(0xB4F8, 0xB4F8, 'LV'). % Lo HANGUL SYLLABLE DYI
unicode_hangul_syllable_type(0xB514, 0xB514, 'LV'). % Lo HANGUL SYLLABLE DI
unicode_hangul_syllable_type(0xB530, 0xB530, 'LV'). % Lo HANGUL SYLLABLE DDA
unicode_hangul_syllable_type(0xB54C, 0xB54C, 'LV'). % Lo HANGUL SYLLABLE DDAE
unicode_hangul_syllable_type(0xB568, 0xB568, 'LV'). % Lo HANGUL SYLLABLE DDYA
unicode_hangul_syllable_type(0xB584, 0xB584, 'LV'). % Lo HANGUL SYLLABLE DDYAE
unicode_hangul_syllable_type(0xB5A0, 0xB5A0, 'LV'). % Lo HANGUL SYLLABLE DDEO
unicode_hangul_syllable_type(0xB5BC, 0xB5BC, 'LV'). % Lo HANGUL SYLLABLE DDE
unicode_hangul_syllable_type(0xB5D8, 0xB5D8, 'LV'). % Lo HANGUL SYLLABLE DDYEO
unicode_hangul_syllable_type(0xB5F4, 0xB5F4, 'LV'). % Lo HANGUL SYLLABLE DDYE
unicode_hangul_syllable_type(0xB610, 0xB610, 'LV'). % Lo HANGUL SYLLABLE DDO
unicode_hangul_syllable_type(0xB62C, 0xB62C, 'LV'). % Lo HANGUL SYLLABLE DDWA
unicode_hangul_syllable_type(0xB648, 0xB648, 'LV'). % Lo HANGUL SYLLABLE DDWAE
unicode_hangul_syllable_type(0xB664, 0xB664, 'LV'). % Lo HANGUL SYLLABLE DDOE
unicode_hangul_syllable_type(0xB680, 0xB680, 'LV'). % Lo HANGUL SYLLABLE DDYO
unicode_hangul_syllable_type(0xB69C, 0xB69C, 'LV'). % Lo HANGUL SYLLABLE DDU
unicode_hangul_syllable_type(0xB6B8, 0xB6B8, 'LV'). % Lo HANGUL SYLLABLE DDWEO
unicode_hangul_syllable_type(0xB6D4, 0xB6D4, 'LV'). % Lo HANGUL SYLLABLE DDWE
unicode_hangul_syllable_type(0xB6F0, 0xB6F0, 'LV'). % Lo HANGUL SYLLABLE DDWI
unicode_hangul_syllable_type(0xB70C, 0xB70C, 'LV'). % Lo HANGUL SYLLABLE DDYU
unicode_hangul_syllable_type(0xB728, 0xB728, 'LV'). % Lo HANGUL SYLLABLE DDEU
unicode_hangul_syllable_type(0xB744, 0xB744, 'LV'). % Lo HANGUL SYLLABLE DDYI
unicode_hangul_syllable_type(0xB760, 0xB760, 'LV'). % Lo HANGUL SYLLABLE DDI
unicode_hangul_syllable_type(0xB77C, 0xB77C, 'LV'). % Lo HANGUL SYLLABLE RA
unicode_hangul_syllable_type(0xB798, 0xB798, 'LV'). % Lo HANGUL SYLLABLE RAE
unicode_hangul_syllable_type(0xB7B4, 0xB7B4, 'LV'). % Lo HANGUL SYLLABLE RYA
unicode_hangul_syllable_type(0xB7D0, 0xB7D0, 'LV'). % Lo HANGUL SYLLABLE RYAE
unicode_hangul_syllable_type(0xB7EC, 0xB7EC, 'LV'). % Lo HANGUL SYLLABLE REO
unicode_hangul_syllable_type(0xB808, 0xB808, 'LV'). % Lo HANGUL SYLLABLE RE
unicode_hangul_syllable_type(0xB824, 0xB824, 'LV'). % Lo HANGUL SYLLABLE RYEO
unicode_hangul_syllable_type(0xB840, 0xB840, 'LV'). % Lo HANGUL SYLLABLE RYE
unicode_hangul_syllable_type(0xB85C, 0xB85C, 'LV'). % Lo HANGUL SYLLABLE RO
unicode_hangul_syllable_type(0xB878, 0xB878, 'LV'). % Lo HANGUL SYLLABLE RWA
unicode_hangul_syllable_type(0xB894, 0xB894, 'LV'). % Lo HANGUL SYLLABLE RWAE
unicode_hangul_syllable_type(0xB8B0, 0xB8B0, 'LV'). % Lo HANGUL SYLLABLE ROE
unicode_hangul_syllable_type(0xB8CC, 0xB8CC, 'LV'). % Lo HANGUL SYLLABLE RYO
unicode_hangul_syllable_type(0xB8E8, 0xB8E8, 'LV'). % Lo HANGUL SYLLABLE RU
unicode_hangul_syllable_type(0xB904, 0xB904, 'LV'). % Lo HANGUL SYLLABLE RWEO
unicode_hangul_syllable_type(0xB920, 0xB920, 'LV'). % Lo HANGUL SYLLABLE RWE
unicode_hangul_syllable_type(0xB93C, 0xB93C, 'LV'). % Lo HANGUL SYLLABLE RWI
unicode_hangul_syllable_type(0xB958, 0xB958, 'LV'). % Lo HANGUL SYLLABLE RYU
unicode_hangul_syllable_type(0xB974, 0xB974, 'LV'). % Lo HANGUL SYLLABLE REU
unicode_hangul_syllable_type(0xB990, 0xB990, 'LV'). % Lo HANGUL SYLLABLE RYI
unicode_hangul_syllable_type(0xB9AC, 0xB9AC, 'LV'). % Lo HANGUL SYLLABLE RI
unicode_hangul_syllable_type(0xB9C8, 0xB9C8, 'LV'). % Lo HANGUL SYLLABLE MA
unicode_hangul_syllable_type(0xB9E4, 0xB9E4, 'LV'). % Lo HANGUL SYLLABLE MAE
unicode_hangul_syllable_type(0xBA00, 0xBA00, 'LV'). % Lo HANGUL SYLLABLE MYA
unicode_hangul_syllable_type(0xBA1C, 0xBA1C, 'LV'). % Lo HANGUL SYLLABLE MYAE
unicode_hangul_syllable_type(0xBA38, 0xBA38, 'LV'). % Lo HANGUL SYLLABLE MEO
unicode_hangul_syllable_type(0xBA54, 0xBA54, 'LV'). % Lo HANGUL SYLLABLE ME
unicode_hangul_syllable_type(0xBA70, 0xBA70, 'LV'). % Lo HANGUL SYLLABLE MYEO
unicode_hangul_syllable_type(0xBA8C, 0xBA8C, 'LV'). % Lo HANGUL SYLLABLE MYE
unicode_hangul_syllable_type(0xBAA8, 0xBAA8, 'LV'). % Lo HANGUL SYLLABLE MO
unicode_hangul_syllable_type(0xBAC4, 0xBAC4, 'LV'). % Lo HANGUL SYLLABLE MWA
unicode_hangul_syllable_type(0xBAE0, 0xBAE0, 'LV'). % Lo HANGUL SYLLABLE MWAE
unicode_hangul_syllable_type(0xBAFC, 0xBAFC, 'LV'). % Lo HANGUL SYLLABLE MOE
unicode_hangul_syllable_type(0xBB18, 0xBB18, 'LV'). % Lo HANGUL SYLLABLE MYO
unicode_hangul_syllable_type(0xBB34, 0xBB34, 'LV'). % Lo HANGUL SYLLABLE MU
unicode_hangul_syllable_type(0xBB50, 0xBB50, 'LV'). % Lo HANGUL SYLLABLE MWEO
unicode_hangul_syllable_type(0xBB6C, 0xBB6C, 'LV'). % Lo HANGUL SYLLABLE MWE
unicode_hangul_syllable_type(0xBB88, 0xBB88, 'LV'). % Lo HANGUL SYLLABLE MWI
unicode_hangul_syllable_type(0xBBA4, 0xBBA4, 'LV'). % Lo HANGUL SYLLABLE MYU
unicode_hangul_syllable_type(0xBBC0, 0xBBC0, 'LV'). % Lo HANGUL SYLLABLE MEU
unicode_hangul_syllable_type(0xBBDC, 0xBBDC, 'LV'). % Lo HANGUL SYLLABLE MYI
unicode_hangul_syllable_type(0xBBF8, 0xBBF8, 'LV'). % Lo HANGUL SYLLABLE MI
unicode_hangul_syllable_type(0xBC14, 0xBC14, 'LV'). % Lo HANGUL SYLLABLE BA
unicode_hangul_syllable_type(0xBC30, 0xBC30, 'LV'). % Lo HANGUL SYLLABLE BAE
unicode_hangul_syllable_type(0xBC4C, 0xBC4C, 'LV'). % Lo HANGUL SYLLABLE BYA
unicode_hangul_syllable_type(0xBC68, 0xBC68, 'LV'). % Lo HANGUL SYLLABLE BYAE
unicode_hangul_syllable_type(0xBC84, 0xBC84, 'LV'). % Lo HANGUL SYLLABLE BEO
unicode_hangul_syllable_type(0xBCA0, 0xBCA0, 'LV'). % Lo HANGUL SYLLABLE BE
unicode_hangul_syllable_type(0xBCBC, 0xBCBC, 'LV'). % Lo HANGUL SYLLABLE BYEO
unicode_hangul_syllable_type(0xBCD8, 0xBCD8, 'LV'). % Lo HANGUL SYLLABLE BYE
unicode_hangul_syllable_type(0xBCF4, 0xBCF4, 'LV'). % Lo HANGUL SYLLABLE BO
unicode_hangul_syllable_type(0xBD10, 0xBD10, 'LV'). % Lo HANGUL SYLLABLE BWA
unicode_hangul_syllable_type(0xBD2C, 0xBD2C, 'LV'). % Lo HANGUL SYLLABLE BWAE
unicode_hangul_syllable_type(0xBD48, 0xBD48, 'LV'). % Lo HANGUL SYLLABLE BOE
unicode_hangul_syllable_type(0xBD64, 0xBD64, 'LV'). % Lo HANGUL SYLLABLE BYO
unicode_hangul_syllable_type(0xBD80, 0xBD80, 'LV'). % Lo HANGUL SYLLABLE BU
unicode_hangul_syllable_type(0xBD9C, 0xBD9C, 'LV'). % Lo HANGUL SYLLABLE BWEO
unicode_hangul_syllable_type(0xBDB8, 0xBDB8, 'LV'). % Lo HANGUL SYLLABLE BWE
unicode_hangul_syllable_type(0xBDD4, 0xBDD4, 'LV'). % Lo HANGUL SYLLABLE BWI
unicode_hangul_syllable_type(0xBDF0, 0xBDF0, 'LV'). % Lo HANGUL SYLLABLE BYU
unicode_hangul_syllable_type(0xBE0C, 0xBE0C, 'LV'). % Lo HANGUL SYLLABLE BEU
unicode_hangul_syllable_type(0xBE28, 0xBE28, 'LV'). % Lo HANGUL SYLLABLE BYI
unicode_hangul_syllable_type(0xBE44, 0xBE44, 'LV'). % Lo HANGUL SYLLABLE BI
unicode_hangul_syllable_type(0xBE60, 0xBE60, 'LV'). % Lo HANGUL SYLLABLE BBA
unicode_hangul_syllable_type(0xBE7C, 0xBE7C, 'LV'). % Lo HANGUL SYLLABLE BBAE
unicode_hangul_syllable_type(0xBE98, 0xBE98, 'LV'). % Lo HANGUL SYLLABLE BBYA
unicode_hangul_syllable_type(0xBEB4, 0xBEB4, 'LV'). % Lo HANGUL SYLLABLE BBYAE
unicode_hangul_syllable_type(0xBED0, 0xBED0, 'LV'). % Lo HANGUL SYLLABLE BBEO
unicode_hangul_syllable_type(0xBEEC, 0xBEEC, 'LV'). % Lo HANGUL SYLLABLE BBE
unicode_hangul_syllable_type(0xBF08, 0xBF08, 'LV'). % Lo HANGUL SYLLABLE BBYEO
unicode_hangul_syllable_type(0xBF24, 0xBF24, 'LV'). % Lo HANGUL SYLLABLE BBYE
unicode_hangul_syllable_type(0xBF40, 0xBF40, 'LV'). % Lo HANGUL SYLLABLE BBO
unicode_hangul_syllable_type(0xBF5C, 0xBF5C, 'LV'). % Lo HANGUL SYLLABLE BBWA
unicode_hangul_syllable_type(0xBF78, 0xBF78, 'LV'). % Lo HANGUL SYLLABLE BBWAE
unicode_hangul_syllable_type(0xBF94, 0xBF94, 'LV'). % Lo HANGUL SYLLABLE BBOE
unicode_hangul_syllable_type(0xBFB0, 0xBFB0, 'LV'). % Lo HANGUL SYLLABLE BBYO
unicode_hangul_syllable_type(0xBFCC, 0xBFCC, 'LV'). % Lo HANGUL SYLLABLE BBU
unicode_hangul_syllable_type(0xBFE8, 0xBFE8, 'LV'). % Lo HANGUL SYLLABLE BBWEO
unicode_hangul_syllable_type(0xC004, 0xC004, 'LV'). % Lo HANGUL SYLLABLE BBWE
unicode_hangul_syllable_type(0xC020, 0xC020, 'LV'). % Lo HANGUL SYLLABLE BBWI
unicode_hangul_syllable_type(0xC03C, 0xC03C, 'LV'). % Lo HANGUL SYLLABLE BBYU
unicode_hangul_syllable_type(0xC058, 0xC058, 'LV'). % Lo HANGUL SYLLABLE BBEU
unicode_hangul_syllable_type(0xC074, 0xC074, 'LV'). % Lo HANGUL SYLLABLE BBYI
unicode_hangul_syllable_type(0xC090, 0xC090, 'LV'). % Lo HANGUL SYLLABLE BBI
unicode_hangul_syllable_type(0xC0AC, 0xC0AC, 'LV'). % Lo HANGUL SYLLABLE SA
unicode_hangul_syllable_type(0xC0C8, 0xC0C8, 'LV'). % Lo HANGUL SYLLABLE SAE
unicode_hangul_syllable_type(0xC0E4, 0xC0E4, 'LV'). % Lo HANGUL SYLLABLE SYA
unicode_hangul_syllable_type(0xC100, 0xC100, 'LV'). % Lo HANGUL SYLLABLE SYAE
unicode_hangul_syllable_type(0xC11C, 0xC11C, 'LV'). % Lo HANGUL SYLLABLE SEO
unicode_hangul_syllable_type(0xC138, 0xC138, 'LV'). % Lo HANGUL SYLLABLE SE
unicode_hangul_syllable_type(0xC154, 0xC154, 'LV'). % Lo HANGUL SYLLABLE SYEO
unicode_hangul_syllable_type(0xC170, 0xC170, 'LV'). % Lo HANGUL SYLLABLE SYE
unicode_hangul_syllable_type(0xC18C, 0xC18C, 'LV'). % Lo HANGUL SYLLABLE SO
unicode_hangul_syllable_type(0xC1A8, 0xC1A8, 'LV'). % Lo HANGUL SYLLABLE SWA
unicode_hangul_syllable_type(0xC1C4, 0xC1C4, 'LV'). % Lo HANGUL SYLLABLE SWAE
unicode_hangul_syllable_type(0xC1E0, 0xC1E0, 'LV'). % Lo HANGUL SYLLABLE SOE
unicode_hangul_syllable_type(0xC1FC, 0xC1FC, 'LV'). % Lo HANGUL SYLLABLE SYO
unicode_hangul_syllable_type(0xC218, 0xC218, 'LV'). % Lo HANGUL SYLLABLE SU
unicode_hangul_syllable_type(0xC234, 0xC234, 'LV'). % Lo HANGUL SYLLABLE SWEO
unicode_hangul_syllable_type(0xC250, 0xC250, 'LV'). % Lo HANGUL SYLLABLE SWE
unicode_hangul_syllable_type(0xC26C, 0xC26C, 'LV'). % Lo HANGUL SYLLABLE SWI
unicode_hangul_syllable_type(0xC288, 0xC288, 'LV'). % Lo HANGUL SYLLABLE SYU
unicode_hangul_syllable_type(0xC2A4, 0xC2A4, 'LV'). % Lo HANGUL SYLLABLE SEU
unicode_hangul_syllable_type(0xC2C0, 0xC2C0, 'LV'). % Lo HANGUL SYLLABLE SYI
unicode_hangul_syllable_type(0xC2DC, 0xC2DC, 'LV'). % Lo HANGUL SYLLABLE SI
unicode_hangul_syllable_type(0xC2F8, 0xC2F8, 'LV'). % Lo HANGUL SYLLABLE SSA
unicode_hangul_syllable_type(0xC314, 0xC314, 'LV'). % Lo HANGUL SYLLABLE SSAE
unicode_hangul_syllable_type(0xC330, 0xC330, 'LV'). % Lo HANGUL SYLLABLE SSYA
unicode_hangul_syllable_type(0xC34C, 0xC34C, 'LV'). % Lo HANGUL SYLLABLE SSYAE
unicode_hangul_syllable_type(0xC368, 0xC368, 'LV'). % Lo HANGUL SYLLABLE SSEO
unicode_hangul_syllable_type(0xC384, 0xC384, 'LV'). % Lo HANGUL SYLLABLE SSE
unicode_hangul_syllable_type(0xC3A0, 0xC3A0, 'LV'). % Lo HANGUL SYLLABLE SSYEO
unicode_hangul_syllable_type(0xC3BC, 0xC3BC, 'LV'). % Lo HANGUL SYLLABLE SSYE
unicode_hangul_syllable_type(0xC3D8, 0xC3D8, 'LV'). % Lo HANGUL SYLLABLE SSO
unicode_hangul_syllable_type(0xC3F4, 0xC3F4, 'LV'). % Lo HANGUL SYLLABLE SSWA
unicode_hangul_syllable_type(0xC410, 0xC410, 'LV'). % Lo HANGUL SYLLABLE SSWAE
unicode_hangul_syllable_type(0xC42C, 0xC42C, 'LV'). % Lo HANGUL SYLLABLE SSOE
unicode_hangul_syllable_type(0xC448, 0xC448, 'LV'). % Lo HANGUL SYLLABLE SSYO
unicode_hangul_syllable_type(0xC464, 0xC464, 'LV'). % Lo HANGUL SYLLABLE SSU
unicode_hangul_syllable_type(0xC480, 0xC480, 'LV'). % Lo HANGUL SYLLABLE SSWEO
unicode_hangul_syllable_type(0xC49C, 0xC49C, 'LV'). % Lo HANGUL SYLLABLE SSWE
unicode_hangul_syllable_type(0xC4B8, 0xC4B8, 'LV'). % Lo HANGUL SYLLABLE SSWI
unicode_hangul_syllable_type(0xC4D4, 0xC4D4, 'LV'). % Lo HANGUL SYLLABLE SSYU
unicode_hangul_syllable_type(0xC4F0, 0xC4F0, 'LV'). % Lo HANGUL SYLLABLE SSEU
unicode_hangul_syllable_type(0xC50C, 0xC50C, 'LV'). % Lo HANGUL SYLLABLE SSYI
unicode_hangul_syllable_type(0xC528, 0xC528, 'LV'). % Lo HANGUL SYLLABLE SSI
unicode_hangul_syllable_type(0xC544, 0xC544, 'LV'). % Lo HANGUL SYLLABLE A
unicode_hangul_syllable_type(0xC560, 0xC560, 'LV'). % Lo HANGUL SYLLABLE AE
unicode_hangul_syllable_type(0xC57C, 0xC57C, 'LV'). % Lo HANGUL SYLLABLE YA
unicode_hangul_syllable_type(0xC598, 0xC598, 'LV'). % Lo HANGUL SYLLABLE YAE
unicode_hangul_syllable_type(0xC5B4, 0xC5B4, 'LV'). % Lo HANGUL SYLLABLE EO
unicode_hangul_syllable_type(0xC5D0, 0xC5D0, 'LV'). % Lo HANGUL SYLLABLE E
unicode_hangul_syllable_type(0xC5EC, 0xC5EC, 'LV'). % Lo HANGUL SYLLABLE YEO
unicode_hangul_syllable_type(0xC608, 0xC608, 'LV'). % Lo HANGUL SYLLABLE YE
unicode_hangul_syllable_type(0xC624, 0xC624, 'LV'). % Lo HANGUL SYLLABLE O
unicode_hangul_syllable_type(0xC640, 0xC640, 'LV'). % Lo HANGUL SYLLABLE WA
unicode_hangul_syllable_type(0xC65C, 0xC65C, 'LV'). % Lo HANGUL SYLLABLE WAE
unicode_hangul_syllable_type(0xC678, 0xC678, 'LV'). % Lo HANGUL SYLLABLE OE
unicode_hangul_syllable_type(0xC694, 0xC694, 'LV'). % Lo HANGUL SYLLABLE YO
unicode_hangul_syllable_type(0xC6B0, 0xC6B0, 'LV'). % Lo HANGUL SYLLABLE U
unicode_hangul_syllable_type(0xC6CC, 0xC6CC, 'LV'). % Lo HANGUL SYLLABLE WEO
unicode_hangul_syllable_type(0xC6E8, 0xC6E8, 'LV'). % Lo HANGUL SYLLABLE WE
unicode_hangul_syllable_type(0xC704, 0xC704, 'LV'). % Lo HANGUL SYLLABLE WI
unicode_hangul_syllable_type(0xC720, 0xC720, 'LV'). % Lo HANGUL SYLLABLE YU
unicode_hangul_syllable_type(0xC73C, 0xC73C, 'LV'). % Lo HANGUL SYLLABLE EU
unicode_hangul_syllable_type(0xC758, 0xC758, 'LV'). % Lo HANGUL SYLLABLE YI
unicode_hangul_syllable_type(0xC774, 0xC774, 'LV'). % Lo HANGUL SYLLABLE I
unicode_hangul_syllable_type(0xC790, 0xC790, 'LV'). % Lo HANGUL SYLLABLE JA
unicode_hangul_syllable_type(0xC7AC, 0xC7AC, 'LV'). % Lo HANGUL SYLLABLE JAE
unicode_hangul_syllable_type(0xC7C8, 0xC7C8, 'LV'). % Lo HANGUL SYLLABLE JYA
unicode_hangul_syllable_type(0xC7E4, 0xC7E4, 'LV'). % Lo HANGUL SYLLABLE JYAE
unicode_hangul_syllable_type(0xC800, 0xC800, 'LV'). % Lo HANGUL SYLLABLE JEO
unicode_hangul_syllable_type(0xC81C, 0xC81C, 'LV'). % Lo HANGUL SYLLABLE JE
unicode_hangul_syllable_type(0xC838, 0xC838, 'LV'). % Lo HANGUL SYLLABLE JYEO
unicode_hangul_syllable_type(0xC854, 0xC854, 'LV'). % Lo HANGUL SYLLABLE JYE
unicode_hangul_syllable_type(0xC870, 0xC870, 'LV'). % Lo HANGUL SYLLABLE JO
unicode_hangul_syllable_type(0xC88C, 0xC88C, 'LV'). % Lo HANGUL SYLLABLE JWA
unicode_hangul_syllable_type(0xC8A8, 0xC8A8, 'LV'). % Lo HANGUL SYLLABLE JWAE
unicode_hangul_syllable_type(0xC8C4, 0xC8C4, 'LV'). % Lo HANGUL SYLLABLE JOE
unicode_hangul_syllable_type(0xC8E0, 0xC8E0, 'LV'). % Lo HANGUL SYLLABLE JYO
unicode_hangul_syllable_type(0xC8FC, 0xC8FC, 'LV'). % Lo HANGUL SYLLABLE JU
unicode_hangul_syllable_type(0xC918, 0xC918, 'LV'). % Lo HANGUL SYLLABLE JWEO
unicode_hangul_syllable_type(0xC934, 0xC934, 'LV'). % Lo HANGUL SYLLABLE JWE
unicode_hangul_syllable_type(0xC950, 0xC950, 'LV'). % Lo HANGUL SYLLABLE JWI
unicode_hangul_syllable_type(0xC96C, 0xC96C, 'LV'). % Lo HANGUL SYLLABLE JYU
unicode_hangul_syllable_type(0xC988, 0xC988, 'LV'). % Lo HANGUL SYLLABLE JEU
unicode_hangul_syllable_type(0xC9A4, 0xC9A4, 'LV'). % Lo HANGUL SYLLABLE JYI
unicode_hangul_syllable_type(0xC9C0, 0xC9C0, 'LV'). % Lo HANGUL SYLLABLE JI
unicode_hangul_syllable_type(0xC9DC, 0xC9DC, 'LV'). % Lo HANGUL SYLLABLE JJA
unicode_hangul_syllable_type(0xC9F8, 0xC9F8, 'LV'). % Lo HANGUL SYLLABLE JJAE
unicode_hangul_syllable_type(0xCA14, 0xCA14, 'LV'). % Lo HANGUL SYLLABLE JJYA
unicode_hangul_syllable_type(0xCA30, 0xCA30, 'LV'). % Lo HANGUL SYLLABLE JJYAE
unicode_hangul_syllable_type(0xCA4C, 0xCA4C, 'LV'). % Lo HANGUL SYLLABLE JJEO
unicode_hangul_syllable_type(0xCA68, 0xCA68, 'LV'). % Lo HANGUL SYLLABLE JJE
unicode_hangul_syllable_type(0xCA84, 0xCA84, 'LV'). % Lo HANGUL SYLLABLE JJYEO
unicode_hangul_syllable_type(0xCAA0, 0xCAA0, 'LV'). % Lo HANGUL SYLLABLE JJYE
unicode_hangul_syllable_type(0xCABC, 0xCABC, 'LV'). % Lo HANGUL SYLLABLE JJO
unicode_hangul_syllable_type(0xCAD8, 0xCAD8, 'LV'). % Lo HANGUL SYLLABLE JJWA
unicode_hangul_syllable_type(0xCAF4, 0xCAF4, 'LV'). % Lo HANGUL SYLLABLE JJWAE
unicode_hangul_syllable_type(0xCB10, 0xCB10, 'LV'). % Lo HANGUL SYLLABLE JJOE
unicode_hangul_syllable_type(0xCB2C, 0xCB2C, 'LV'). % Lo HANGUL SYLLABLE JJYO
unicode_hangul_syllable_type(0xCB48, 0xCB48, 'LV'). % Lo HANGUL SYLLABLE JJU
unicode_hangul_syllable_type(0xCB64, 0xCB64, 'LV'). % Lo HANGUL SYLLABLE JJWEO
unicode_hangul_syllable_type(0xCB80, 0xCB80, 'LV'). % Lo HANGUL SYLLABLE JJWE
unicode_hangul_syllable_type(0xCB9C, 0xCB9C, 'LV'). % Lo HANGUL SYLLABLE JJWI
unicode_hangul_syllable_type(0xCBB8, 0xCBB8, 'LV'). % Lo HANGUL SYLLABLE JJYU
unicode_hangul_syllable_type(0xCBD4, 0xCBD4, 'LV'). % Lo HANGUL SYLLABLE JJEU
unicode_hangul_syllable_type(0xCBF0, 0xCBF0, 'LV'). % Lo HANGUL SYLLABLE JJYI
unicode_hangul_syllable_type(0xCC0C, 0xCC0C, 'LV'). % Lo HANGUL SYLLABLE JJI
unicode_hangul_syllable_type(0xCC28, 0xCC28, 'LV'). % Lo HANGUL SYLLABLE CA
unicode_hangul_syllable_type(0xCC44, 0xCC44, 'LV'). % Lo HANGUL SYLLABLE CAE
unicode_hangul_syllable_type(0xCC60, 0xCC60, 'LV'). % Lo HANGUL SYLLABLE CYA
unicode_hangul_syllable_type(0xCC7C, 0xCC7C, 'LV'). % Lo HANGUL SYLLABLE CYAE
unicode_hangul_syllable_type(0xCC98, 0xCC98, 'LV'). % Lo HANGUL SYLLABLE CEO
unicode_hangul_syllable_type(0xCCB4, 0xCCB4, 'LV'). % Lo HANGUL SYLLABLE CE
unicode_hangul_syllable_type(0xCCD0, 0xCCD0, 'LV'). % Lo HANGUL SYLLABLE CYEO
unicode_hangul_syllable_type(0xCCEC, 0xCCEC, 'LV'). % Lo HANGUL SYLLABLE CYE
unicode_hangul_syllable_type(0xCD08, 0xCD08, 'LV'). % Lo HANGUL SYLLABLE CO
unicode_hangul_syllable_type(0xCD24, 0xCD24, 'LV'). % Lo HANGUL SYLLABLE CWA
unicode_hangul_syllable_type(0xCD40, 0xCD40, 'LV'). % Lo HANGUL SYLLABLE CWAE
unicode_hangul_syllable_type(0xCD5C, 0xCD5C, 'LV'). % Lo HANGUL SYLLABLE COE
unicode_hangul_syllable_type(0xCD78, 0xCD78, 'LV'). % Lo HANGUL SYLLABLE CYO
unicode_hangul_syllable_type(0xCD94, 0xCD94, 'LV'). % Lo HANGUL SYLLABLE CU
unicode_hangul_syllable_type(0xCDB0, 0xCDB0, 'LV'). % Lo HANGUL SYLLABLE CWEO
unicode_hangul_syllable_type(0xCDCC, 0xCDCC, 'LV'). % Lo HANGUL SYLLABLE CWE
unicode_hangul_syllable_type(0xCDE8, 0xCDE8, 'LV'). % Lo HANGUL SYLLABLE CWI
unicode_hangul_syllable_type(0xCE04, 0xCE04, 'LV'). % Lo HANGUL SYLLABLE CYU
unicode_hangul_syllable_type(0xCE20, 0xCE20, 'LV'). % Lo HANGUL SYLLABLE CEU
unicode_hangul_syllable_type(0xCE3C, 0xCE3C, 'LV'). % Lo HANGUL SYLLABLE CYI
unicode_hangul_syllable_type(0xCE58, 0xCE58, 'LV'). % Lo HANGUL SYLLABLE CI
unicode_hangul_syllable_type(0xCE74, 0xCE74, 'LV'). % Lo HANGUL SYLLABLE KA
unicode_hangul_syllable_type(0xCE90, 0xCE90, 'LV'). % Lo HANGUL SYLLABLE KAE
unicode_hangul_syllable_type(0xCEAC, 0xCEAC, 'LV'). % Lo HANGUL SYLLABLE KYA
unicode_hangul_syllable_type(0xCEC8, 0xCEC8, 'LV'). % Lo HANGUL SYLLABLE KYAE
unicode_hangul_syllable_type(0xCEE4, 0xCEE4, 'LV'). % Lo HANGUL SYLLABLE KEO
unicode_hangul_syllable_type(0xCF00, 0xCF00, 'LV'). % Lo HANGUL SYLLABLE KE
unicode_hangul_syllable_type(0xCF1C, 0xCF1C, 'LV'). % Lo HANGUL SYLLABLE KYEO
unicode_hangul_syllable_type(0xCF38, 0xCF38, 'LV'). % Lo HANGUL SYLLABLE KYE
unicode_hangul_syllable_type(0xCF54, 0xCF54, 'LV'). % Lo HANGUL SYLLABLE KO
unicode_hangul_syllable_type(0xCF70, 0xCF70, 'LV'). % Lo HANGUL SYLLABLE KWA
unicode_hangul_syllable_type(0xCF8C, 0xCF8C, 'LV'). % Lo HANGUL SYLLABLE KWAE
unicode_hangul_syllable_type(0xCFA8, 0xCFA8, 'LV'). % Lo HANGUL SYLLABLE KOE
unicode_hangul_syllable_type(0xCFC4, 0xCFC4, 'LV'). % Lo HANGUL SYLLABLE KYO
unicode_hangul_syllable_type(0xCFE0, 0xCFE0, 'LV'). % Lo HANGUL SYLLABLE KU
unicode_hangul_syllable_type(0xCFFC, 0xCFFC, 'LV'). % Lo HANGUL SYLLABLE KWEO
unicode_hangul_syllable_type(0xD018, 0xD018, 'LV'). % Lo HANGUL SYLLABLE KWE
unicode_hangul_syllable_type(0xD034, 0xD034, 'LV'). % Lo HANGUL SYLLABLE KWI
unicode_hangul_syllable_type(0xD050, 0xD050, 'LV'). % Lo HANGUL SYLLABLE KYU
unicode_hangul_syllable_type(0xD06C, 0xD06C, 'LV'). % Lo HANGUL SYLLABLE KEU
unicode_hangul_syllable_type(0xD088, 0xD088, 'LV'). % Lo HANGUL SYLLABLE KYI
unicode_hangul_syllable_type(0xD0A4, 0xD0A4, 'LV'). % Lo HANGUL SYLLABLE KI
unicode_hangul_syllable_type(0xD0C0, 0xD0C0, 'LV'). % Lo HANGUL SYLLABLE TA
unicode_hangul_syllable_type(0xD0DC, 0xD0DC, 'LV'). % Lo HANGUL SYLLABLE TAE
unicode_hangul_syllable_type(0xD0F8, 0xD0F8, 'LV'). % Lo HANGUL SYLLABLE TYA
unicode_hangul_syllable_type(0xD114, 0xD114, 'LV'). % Lo HANGUL SYLLABLE TYAE
unicode_hangul_syllable_type(0xD130, 0xD130, 'LV'). % Lo HANGUL SYLLABLE TEO
unicode_hangul_syllable_type(0xD14C, 0xD14C, 'LV'). % Lo HANGUL SYLLABLE TE
unicode_hangul_syllable_type(0xD168, 0xD168, 'LV'). % Lo HANGUL SYLLABLE TYEO
unicode_hangul_syllable_type(0xD184, 0xD184, 'LV'). % Lo HANGUL SYLLABLE TYE
unicode_hangul_syllable_type(0xD1A0, 0xD1A0, 'LV'). % Lo HANGUL SYLLABLE TO
unicode_hangul_syllable_type(0xD1BC, 0xD1BC, 'LV'). % Lo HANGUL SYLLABLE TWA
unicode_hangul_syllable_type(0xD1D8, 0xD1D8, 'LV'). % Lo HANGUL SYLLABLE TWAE
unicode_hangul_syllable_type(0xD1F4, 0xD1F4, 'LV'). % Lo HANGUL SYLLABLE TOE
% NOTE(review): machine-generated unicode_hangul_syllable_type(Low, High, Type)
% facts — appears derived from the Unicode Character Database file
% HangulSyllableType.txt; presumably regenerated rather than hand-edited
% (verify against the generator before modifying any row).
% Structure visible in the data itself:
%   - 'LV' rows are single code points (Low =:= High), spaced exactly 28
%     apart (one LV syllable per 28-code-point block starting at U+AC00).
%   - 'LVT' rows each cover the 27 code points Low..Low+26 — the trailing-
%     consonant (jongseong) variants that follow each LV syllable.
unicode_hangul_syllable_type(0xD210, 0xD210, 'LV'). % Lo HANGUL SYLLABLE TYO
unicode_hangul_syllable_type(0xD22C, 0xD22C, 'LV'). % Lo HANGUL SYLLABLE TU
unicode_hangul_syllable_type(0xD248, 0xD248, 'LV'). % Lo HANGUL SYLLABLE TWEO
unicode_hangul_syllable_type(0xD264, 0xD264, 'LV'). % Lo HANGUL SYLLABLE TWE
unicode_hangul_syllable_type(0xD280, 0xD280, 'LV'). % Lo HANGUL SYLLABLE TWI
unicode_hangul_syllable_type(0xD29C, 0xD29C, 'LV'). % Lo HANGUL SYLLABLE TYU
unicode_hangul_syllable_type(0xD2B8, 0xD2B8, 'LV'). % Lo HANGUL SYLLABLE TEU
unicode_hangul_syllable_type(0xD2D4, 0xD2D4, 'LV'). % Lo HANGUL SYLLABLE TYI
unicode_hangul_syllable_type(0xD2F0, 0xD2F0, 'LV'). % Lo HANGUL SYLLABLE TI
unicode_hangul_syllable_type(0xD30C, 0xD30C, 'LV'). % Lo HANGUL SYLLABLE PA
unicode_hangul_syllable_type(0xD328, 0xD328, 'LV'). % Lo HANGUL SYLLABLE PAE
unicode_hangul_syllable_type(0xD344, 0xD344, 'LV'). % Lo HANGUL SYLLABLE PYA
unicode_hangul_syllable_type(0xD360, 0xD360, 'LV'). % Lo HANGUL SYLLABLE PYAE
unicode_hangul_syllable_type(0xD37C, 0xD37C, 'LV'). % Lo HANGUL SYLLABLE PEO
unicode_hangul_syllable_type(0xD398, 0xD398, 'LV'). % Lo HANGUL SYLLABLE PE
unicode_hangul_syllable_type(0xD3B4, 0xD3B4, 'LV'). % Lo HANGUL SYLLABLE PYEO
unicode_hangul_syllable_type(0xD3D0, 0xD3D0, 'LV'). % Lo HANGUL SYLLABLE PYE
unicode_hangul_syllable_type(0xD3EC, 0xD3EC, 'LV'). % Lo HANGUL SYLLABLE PO
unicode_hangul_syllable_type(0xD408, 0xD408, 'LV'). % Lo HANGUL SYLLABLE PWA
unicode_hangul_syllable_type(0xD424, 0xD424, 'LV'). % Lo HANGUL SYLLABLE PWAE
unicode_hangul_syllable_type(0xD440, 0xD440, 'LV'). % Lo HANGUL SYLLABLE POE
unicode_hangul_syllable_type(0xD45C, 0xD45C, 'LV'). % Lo HANGUL SYLLABLE PYO
unicode_hangul_syllable_type(0xD478, 0xD478, 'LV'). % Lo HANGUL SYLLABLE PU
unicode_hangul_syllable_type(0xD494, 0xD494, 'LV'). % Lo HANGUL SYLLABLE PWEO
unicode_hangul_syllable_type(0xD4B0, 0xD4B0, 'LV'). % Lo HANGUL SYLLABLE PWE
unicode_hangul_syllable_type(0xD4CC, 0xD4CC, 'LV'). % Lo HANGUL SYLLABLE PWI
unicode_hangul_syllable_type(0xD4E8, 0xD4E8, 'LV'). % Lo HANGUL SYLLABLE PYU
unicode_hangul_syllable_type(0xD504, 0xD504, 'LV'). % Lo HANGUL SYLLABLE PEU
unicode_hangul_syllable_type(0xD520, 0xD520, 'LV'). % Lo HANGUL SYLLABLE PYI
unicode_hangul_syllable_type(0xD53C, 0xD53C, 'LV'). % Lo HANGUL SYLLABLE PI
unicode_hangul_syllable_type(0xD558, 0xD558, 'LV'). % Lo HANGUL SYLLABLE HA
unicode_hangul_syllable_type(0xD574, 0xD574, 'LV'). % Lo HANGUL SYLLABLE HAE
unicode_hangul_syllable_type(0xD590, 0xD590, 'LV'). % Lo HANGUL SYLLABLE HYA
unicode_hangul_syllable_type(0xD5AC, 0xD5AC, 'LV'). % Lo HANGUL SYLLABLE HYAE
unicode_hangul_syllable_type(0xD5C8, 0xD5C8, 'LV'). % Lo HANGUL SYLLABLE HEO
unicode_hangul_syllable_type(0xD5E4, 0xD5E4, 'LV'). % Lo HANGUL SYLLABLE HE
unicode_hangul_syllable_type(0xD600, 0xD600, 'LV'). % Lo HANGUL SYLLABLE HYEO
unicode_hangul_syllable_type(0xD61C, 0xD61C, 'LV'). % Lo HANGUL SYLLABLE HYE
unicode_hangul_syllable_type(0xD638, 0xD638, 'LV'). % Lo HANGUL SYLLABLE HO
unicode_hangul_syllable_type(0xD654, 0xD654, 'LV'). % Lo HANGUL SYLLABLE HWA
unicode_hangul_syllable_type(0xD670, 0xD670, 'LV'). % Lo HANGUL SYLLABLE HWAE
unicode_hangul_syllable_type(0xD68C, 0xD68C, 'LV'). % Lo HANGUL SYLLABLE HOE
unicode_hangul_syllable_type(0xD6A8, 0xD6A8, 'LV'). % Lo HANGUL SYLLABLE HYO
unicode_hangul_syllable_type(0xD6C4, 0xD6C4, 'LV'). % Lo HANGUL SYLLABLE HU
unicode_hangul_syllable_type(0xD6E0, 0xD6E0, 'LV'). % Lo HANGUL SYLLABLE HWEO
unicode_hangul_syllable_type(0xD6FC, 0xD6FC, 'LV'). % Lo HANGUL SYLLABLE HWE
unicode_hangul_syllable_type(0xD718, 0xD718, 'LV'). % Lo HANGUL SYLLABLE HWI
unicode_hangul_syllable_type(0xD734, 0xD734, 'LV'). % Lo HANGUL SYLLABLE HYU
unicode_hangul_syllable_type(0xD750, 0xD750, 'LV'). % Lo HANGUL SYLLABLE HEU
unicode_hangul_syllable_type(0xD76C, 0xD76C, 'LV'). % Lo HANGUL SYLLABLE HYI
unicode_hangul_syllable_type(0xD788, 0xD788, 'LV'). % Lo HANGUL SYLLABLE HI
% Total code points: 399
% ================================================
% Hangul_Syllable_Type=LVT_Syllable
unicode_hangul_syllable_type(0xAC01, 0xAC1B, 'LVT'). % Lo [27] HANGUL SYLLABLE GAG..HANGUL SYLLABLE GAH
unicode_hangul_syllable_type(0xAC1D, 0xAC37, 'LVT'). % Lo [27] HANGUL SYLLABLE GAEG..HANGUL SYLLABLE GAEH
unicode_hangul_syllable_type(0xAC39, 0xAC53, 'LVT'). % Lo [27] HANGUL SYLLABLE GYAG..HANGUL SYLLABLE GYAH
unicode_hangul_syllable_type(0xAC55, 0xAC6F, 'LVT'). % Lo [27] HANGUL SYLLABLE GYAEG..HANGUL SYLLABLE GYAEH
unicode_hangul_syllable_type(0xAC71, 0xAC8B, 'LVT'). % Lo [27] HANGUL SYLLABLE GEOG..HANGUL SYLLABLE GEOH
unicode_hangul_syllable_type(0xAC8D, 0xACA7, 'LVT'). % Lo [27] HANGUL SYLLABLE GEG..HANGUL SYLLABLE GEH
unicode_hangul_syllable_type(0xACA9, 0xACC3, 'LVT'). % Lo [27] HANGUL SYLLABLE GYEOG..HANGUL SYLLABLE GYEOH
unicode_hangul_syllable_type(0xACC5, 0xACDF, 'LVT'). % Lo [27] HANGUL SYLLABLE GYEG..HANGUL SYLLABLE GYEH
unicode_hangul_syllable_type(0xACE1, 0xACFB, 'LVT'). % Lo [27] HANGUL SYLLABLE GOG..HANGUL SYLLABLE GOH
unicode_hangul_syllable_type(0xACFD, 0xAD17, 'LVT'). % Lo [27] HANGUL SYLLABLE GWAG..HANGUL SYLLABLE GWAH
unicode_hangul_syllable_type(0xAD19, 0xAD33, 'LVT'). % Lo [27] HANGUL SYLLABLE GWAEG..HANGUL SYLLABLE GWAEH
unicode_hangul_syllable_type(0xAD35, 0xAD4F, 'LVT'). % Lo [27] HANGUL SYLLABLE GOEG..HANGUL SYLLABLE GOEH
unicode_hangul_syllable_type(0xAD51, 0xAD6B, 'LVT'). % Lo [27] HANGUL SYLLABLE GYOG..HANGUL SYLLABLE GYOH
unicode_hangul_syllable_type(0xAD6D, 0xAD87, 'LVT'). % Lo [27] HANGUL SYLLABLE GUG..HANGUL SYLLABLE GUH
unicode_hangul_syllable_type(0xAD89, 0xADA3, 'LVT'). % Lo [27] HANGUL SYLLABLE GWEOG..HANGUL SYLLABLE GWEOH
unicode_hangul_syllable_type(0xADA5, 0xADBF, 'LVT'). % Lo [27] HANGUL SYLLABLE GWEG..HANGUL SYLLABLE GWEH
unicode_hangul_syllable_type(0xADC1, 0xADDB, 'LVT'). % Lo [27] HANGUL SYLLABLE GWIG..HANGUL SYLLABLE GWIH
unicode_hangul_syllable_type(0xADDD, 0xADF7, 'LVT'). % Lo [27] HANGUL SYLLABLE GYUG..HANGUL SYLLABLE GYUH
unicode_hangul_syllable_type(0xADF9, 0xAE13, 'LVT'). % Lo [27] HANGUL SYLLABLE GEUG..HANGUL SYLLABLE GEUH
unicode_hangul_syllable_type(0xAE15, 0xAE2F, 'LVT'). % Lo [27] HANGUL SYLLABLE GYIG..HANGUL SYLLABLE GYIH
unicode_hangul_syllable_type(0xAE31, 0xAE4B, 'LVT'). % Lo [27] HANGUL SYLLABLE GIG..HANGUL SYLLABLE GIH
unicode_hangul_syllable_type(0xAE4D, 0xAE67, 'LVT'). % Lo [27] HANGUL SYLLABLE GGAG..HANGUL SYLLABLE GGAH
unicode_hangul_syllable_type(0xAE69, 0xAE83, 'LVT'). % Lo [27] HANGUL SYLLABLE GGAEG..HANGUL SYLLABLE GGAEH
unicode_hangul_syllable_type(0xAE85, 0xAE9F, 'LVT'). % Lo [27] HANGUL SYLLABLE GGYAG..HANGUL SYLLABLE GGYAH
unicode_hangul_syllable_type(0xAEA1, 0xAEBB, 'LVT'). % Lo [27] HANGUL SYLLABLE GGYAEG..HANGUL SYLLABLE GGYAEH
unicode_hangul_syllable_type(0xAEBD, 0xAED7, 'LVT'). % Lo [27] HANGUL SYLLABLE GGEOG..HANGUL SYLLABLE GGEOH
unicode_hangul_syllable_type(0xAED9, 0xAEF3, 'LVT'). % Lo [27] HANGUL SYLLABLE GGEG..HANGUL SYLLABLE GGEH
unicode_hangul_syllable_type(0xAEF5, 0xAF0F, 'LVT'). % Lo [27] HANGUL SYLLABLE GGYEOG..HANGUL SYLLABLE GGYEOH
unicode_hangul_syllable_type(0xAF11, 0xAF2B, 'LVT'). % Lo [27] HANGUL SYLLABLE GGYEG..HANGUL SYLLABLE GGYEH
unicode_hangul_syllable_type(0xAF2D, 0xAF47, 'LVT'). % Lo [27] HANGUL SYLLABLE GGOG..HANGUL SYLLABLE GGOH
unicode_hangul_syllable_type(0xAF49, 0xAF63, 'LVT'). % Lo [27] HANGUL SYLLABLE GGWAG..HANGUL SYLLABLE GGWAH
unicode_hangul_syllable_type(0xAF65, 0xAF7F, 'LVT'). % Lo [27] HANGUL SYLLABLE GGWAEG..HANGUL SYLLABLE GGWAEH
unicode_hangul_syllable_type(0xAF81, 0xAF9B, 'LVT'). % Lo [27] HANGUL SYLLABLE GGOEG..HANGUL SYLLABLE GGOEH
unicode_hangul_syllable_type(0xAF9D, 0xAFB7, 'LVT'). % Lo [27] HANGUL SYLLABLE GGYOG..HANGUL SYLLABLE GGYOH
unicode_hangul_syllable_type(0xAFB9, 0xAFD3, 'LVT'). % Lo [27] HANGUL SYLLABLE GGUG..HANGUL SYLLABLE GGUH
unicode_hangul_syllable_type(0xAFD5, 0xAFEF, 'LVT'). % Lo [27] HANGUL SYLLABLE GGWEOG..HANGUL SYLLABLE GGWEOH
unicode_hangul_syllable_type(0xAFF1, 0xB00B, 'LVT'). % Lo [27] HANGUL SYLLABLE GGWEG..HANGUL SYLLABLE GGWEH
unicode_hangul_syllable_type(0xB00D, 0xB027, 'LVT'). % Lo [27] HANGUL SYLLABLE GGWIG..HANGUL SYLLABLE GGWIH
unicode_hangul_syllable_type(0xB029, 0xB043, 'LVT'). % Lo [27] HANGUL SYLLABLE GGYUG..HANGUL SYLLABLE GGYUH
unicode_hangul_syllable_type(0xB045, 0xB05F, 'LVT'). % Lo [27] HANGUL SYLLABLE GGEUG..HANGUL SYLLABLE GGEUH
unicode_hangul_syllable_type(0xB061, 0xB07B, 'LVT'). % Lo [27] HANGUL SYLLABLE GGYIG..HANGUL SYLLABLE GGYIH
unicode_hangul_syllable_type(0xB07D, 0xB097, 'LVT'). % Lo [27] HANGUL SYLLABLE GGIG..HANGUL SYLLABLE GGIH
unicode_hangul_syllable_type(0xB099, 0xB0B3, 'LVT'). % Lo [27] HANGUL SYLLABLE NAG..HANGUL SYLLABLE NAH
unicode_hangul_syllable_type(0xB0B5, 0xB0CF, 'LVT'). % Lo [27] HANGUL SYLLABLE NAEG..HANGUL SYLLABLE NAEH
unicode_hangul_syllable_type(0xB0D1, 0xB0EB, 'LVT'). % Lo [27] HANGUL SYLLABLE NYAG..HANGUL SYLLABLE NYAH
unicode_hangul_syllable_type(0xB0ED, 0xB107, 'LVT'). % Lo [27] HANGUL SYLLABLE NYAEG..HANGUL SYLLABLE NYAEH
unicode_hangul_syllable_type(0xB109, 0xB123, 'LVT'). % Lo [27] HANGUL SYLLABLE NEOG..HANGUL SYLLABLE NEOH
unicode_hangul_syllable_type(0xB125, 0xB13F, 'LVT'). % Lo [27] HANGUL SYLLABLE NEG..HANGUL SYLLABLE NEH
unicode_hangul_syllable_type(0xB141, 0xB15B, 'LVT'). % Lo [27] HANGUL SYLLABLE NYEOG..HANGUL SYLLABLE NYEOH
unicode_hangul_syllable_type(0xB15D, 0xB177, 'LVT'). % Lo [27] HANGUL SYLLABLE NYEG..HANGUL SYLLABLE NYEH
unicode_hangul_syllable_type(0xB179, 0xB193, 'LVT'). % Lo [27] HANGUL SYLLABLE NOG..HANGUL SYLLABLE NOH
unicode_hangul_syllable_type(0xB195, 0xB1AF, 'LVT'). % Lo [27] HANGUL SYLLABLE NWAG..HANGUL SYLLABLE NWAH
unicode_hangul_syllable_type(0xB1B1, 0xB1CB, 'LVT'). % Lo [27] HANGUL SYLLABLE NWAEG..HANGUL SYLLABLE NWAEH
unicode_hangul_syllable_type(0xB1CD, 0xB1E7, 'LVT'). % Lo [27] HANGUL SYLLABLE NOEG..HANGUL SYLLABLE NOEH
unicode_hangul_syllable_type(0xB1E9, 0xB203, 'LVT'). % Lo [27] HANGUL SYLLABLE NYOG..HANGUL SYLLABLE NYOH
unicode_hangul_syllable_type(0xB205, 0xB21F, 'LVT'). % Lo [27] HANGUL SYLLABLE NUG..HANGUL SYLLABLE NUH
unicode_hangul_syllable_type(0xB221, 0xB23B, 'LVT'). % Lo [27] HANGUL SYLLABLE NWEOG..HANGUL SYLLABLE NWEOH
unicode_hangul_syllable_type(0xB23D, 0xB257, 'LVT'). % Lo [27] HANGUL SYLLABLE NWEG..HANGUL SYLLABLE NWEH
unicode_hangul_syllable_type(0xB259, 0xB273, 'LVT'). % Lo [27] HANGUL SYLLABLE NWIG..HANGUL SYLLABLE NWIH
unicode_hangul_syllable_type(0xB275, 0xB28F, 'LVT'). % Lo [27] HANGUL SYLLABLE NYUG..HANGUL SYLLABLE NYUH
unicode_hangul_syllable_type(0xB291, 0xB2AB, 'LVT'). % Lo [27] HANGUL SYLLABLE NEUG..HANGUL SYLLABLE NEUH
unicode_hangul_syllable_type(0xB2AD, 0xB2C7, 'LVT'). % Lo [27] HANGUL SYLLABLE NYIG..HANGUL SYLLABLE NYIH
unicode_hangul_syllable_type(0xB2C9, 0xB2E3, 'LVT'). % Lo [27] HANGUL SYLLABLE NIG..HANGUL SYLLABLE NIH
unicode_hangul_syllable_type(0xB2E5, 0xB2FF, 'LVT'). % Lo [27] HANGUL SYLLABLE DAG..HANGUL SYLLABLE DAH
unicode_hangul_syllable_type(0xB301, 0xB31B, 'LVT'). % Lo [27] HANGUL SYLLABLE DAEG..HANGUL SYLLABLE DAEH
unicode_hangul_syllable_type(0xB31D, 0xB337, 'LVT'). % Lo [27] HANGUL SYLLABLE DYAG..HANGUL SYLLABLE DYAH
unicode_hangul_syllable_type(0xB339, 0xB353, 'LVT'). % Lo [27] HANGUL SYLLABLE DYAEG..HANGUL SYLLABLE DYAEH
unicode_hangul_syllable_type(0xB355, 0xB36F, 'LVT'). % Lo [27] HANGUL SYLLABLE DEOG..HANGUL SYLLABLE DEOH
unicode_hangul_syllable_type(0xB371, 0xB38B, 'LVT'). % Lo [27] HANGUL SYLLABLE DEG..HANGUL SYLLABLE DEH
unicode_hangul_syllable_type(0xB38D, 0xB3A7, 'LVT'). % Lo [27] HANGUL SYLLABLE DYEOG..HANGUL SYLLABLE DYEOH
unicode_hangul_syllable_type(0xB3A9, 0xB3C3, 'LVT'). % Lo [27] HANGUL SYLLABLE DYEG..HANGUL SYLLABLE DYEH
unicode_hangul_syllable_type(0xB3C5, 0xB3DF, 'LVT'). % Lo [27] HANGUL SYLLABLE DOG..HANGUL SYLLABLE DOH
unicode_hangul_syllable_type(0xB3E1, 0xB3FB, 'LVT'). % Lo [27] HANGUL SYLLABLE DWAG..HANGUL SYLLABLE DWAH
unicode_hangul_syllable_type(0xB3FD, 0xB417, 'LVT'). % Lo [27] HANGUL SYLLABLE DWAEG..HANGUL SYLLABLE DWAEH
unicode_hangul_syllable_type(0xB419, 0xB433, 'LVT'). % Lo [27] HANGUL SYLLABLE DOEG..HANGUL SYLLABLE DOEH
unicode_hangul_syllable_type(0xB435, 0xB44F, 'LVT'). % Lo [27] HANGUL SYLLABLE DYOG..HANGUL SYLLABLE DYOH
unicode_hangul_syllable_type(0xB451, 0xB46B, 'LVT'). % Lo [27] HANGUL SYLLABLE DUG..HANGUL SYLLABLE DUH
unicode_hangul_syllable_type(0xB46D, 0xB487, 'LVT'). % Lo [27] HANGUL SYLLABLE DWEOG..HANGUL SYLLABLE DWEOH
unicode_hangul_syllable_type(0xB489, 0xB4A3, 'LVT'). % Lo [27] HANGUL SYLLABLE DWEG..HANGUL SYLLABLE DWEH
unicode_hangul_syllable_type(0xB4A5, 0xB4BF, 'LVT'). % Lo [27] HANGUL SYLLABLE DWIG..HANGUL SYLLABLE DWIH
unicode_hangul_syllable_type(0xB4C1, 0xB4DB, 'LVT'). % Lo [27] HANGUL SYLLABLE DYUG..HANGUL SYLLABLE DYUH
unicode_hangul_syllable_type(0xB4DD, 0xB4F7, 'LVT'). % Lo [27] HANGUL SYLLABLE DEUG..HANGUL SYLLABLE DEUH
unicode_hangul_syllable_type(0xB4F9, 0xB513, 'LVT'). % Lo [27] HANGUL SYLLABLE DYIG..HANGUL SYLLABLE DYIH
unicode_hangul_syllable_type(0xB515, 0xB52F, 'LVT'). % Lo [27] HANGUL SYLLABLE DIG..HANGUL SYLLABLE DIH
unicode_hangul_syllable_type(0xB531, 0xB54B, 'LVT'). % Lo [27] HANGUL SYLLABLE DDAG..HANGUL SYLLABLE DDAH
unicode_hangul_syllable_type(0xB54D, 0xB567, 'LVT'). % Lo [27] HANGUL SYLLABLE DDAEG..HANGUL SYLLABLE DDAEH
unicode_hangul_syllable_type(0xB569, 0xB583, 'LVT'). % Lo [27] HANGUL SYLLABLE DDYAG..HANGUL SYLLABLE DDYAH
unicode_hangul_syllable_type(0xB585, 0xB59F, 'LVT'). % Lo [27] HANGUL SYLLABLE DDYAEG..HANGUL SYLLABLE DDYAEH
unicode_hangul_syllable_type(0xB5A1, 0xB5BB, 'LVT'). % Lo [27] HANGUL SYLLABLE DDEOG..HANGUL SYLLABLE DDEOH
unicode_hangul_syllable_type(0xB5BD, 0xB5D7, 'LVT'). % Lo [27] HANGUL SYLLABLE DDEG..HANGUL SYLLABLE DDEH
unicode_hangul_syllable_type(0xB5D9, 0xB5F3, 'LVT'). % Lo [27] HANGUL SYLLABLE DDYEOG..HANGUL SYLLABLE DDYEOH
unicode_hangul_syllable_type(0xB5F5, 0xB60F, 'LVT'). % Lo [27] HANGUL SYLLABLE DDYEG..HANGUL SYLLABLE DDYEH
unicode_hangul_syllable_type(0xB611, 0xB62B, 'LVT'). % Lo [27] HANGUL SYLLABLE DDOG..HANGUL SYLLABLE DDOH
unicode_hangul_syllable_type(0xB62D, 0xB647, 'LVT'). % Lo [27] HANGUL SYLLABLE DDWAG..HANGUL SYLLABLE DDWAH
unicode_hangul_syllable_type(0xB649, 0xB663, 'LVT'). % Lo [27] HANGUL SYLLABLE DDWAEG..HANGUL SYLLABLE DDWAEH
unicode_hangul_syllable_type(0xB665, 0xB67F, 'LVT'). % Lo [27] HANGUL SYLLABLE DDOEG..HANGUL SYLLABLE DDOEH
unicode_hangul_syllable_type(0xB681, 0xB69B, 'LVT'). % Lo [27] HANGUL SYLLABLE DDYOG..HANGUL SYLLABLE DDYOH
unicode_hangul_syllable_type(0xB69D, 0xB6B7, 'LVT'). % Lo [27] HANGUL SYLLABLE DDUG..HANGUL SYLLABLE DDUH
unicode_hangul_syllable_type(0xB6B9, 0xB6D3, 'LVT'). % Lo [27] HANGUL SYLLABLE DDWEOG..HANGUL SYLLABLE DDWEOH
unicode_hangul_syllable_type(0xB6D5, 0xB6EF, 'LVT'). % Lo [27] HANGUL SYLLABLE DDWEG..HANGUL SYLLABLE DDWEH
unicode_hangul_syllable_type(0xB6F1, 0xB70B, 'LVT'). % Lo [27] HANGUL SYLLABLE DDWIG..HANGUL SYLLABLE DDWIH
unicode_hangul_syllable_type(0xB70D, 0xB727, 'LVT'). % Lo [27] HANGUL SYLLABLE DDYUG..HANGUL SYLLABLE DDYUH
unicode_hangul_syllable_type(0xB729, 0xB743, 'LVT'). % Lo [27] HANGUL SYLLABLE DDEUG..HANGUL SYLLABLE DDEUH
unicode_hangul_syllable_type(0xB745, 0xB75F, 'LVT'). % Lo [27] HANGUL SYLLABLE DDYIG..HANGUL SYLLABLE DDYIH
unicode_hangul_syllable_type(0xB761, 0xB77B, 'LVT'). % Lo [27] HANGUL SYLLABLE DDIG..HANGUL SYLLABLE DDIH
unicode_hangul_syllable_type(0xB77D, 0xB797, 'LVT'). % Lo [27] HANGUL SYLLABLE RAG..HANGUL SYLLABLE RAH
unicode_hangul_syllable_type(0xB799, 0xB7B3, 'LVT'). % Lo [27] HANGUL SYLLABLE RAEG..HANGUL SYLLABLE RAEH
unicode_hangul_syllable_type(0xB7B5, 0xB7CF, 'LVT'). % Lo [27] HANGUL SYLLABLE RYAG..HANGUL SYLLABLE RYAH
unicode_hangul_syllable_type(0xB7D1, 0xB7EB, 'LVT'). % Lo [27] HANGUL SYLLABLE RYAEG..HANGUL SYLLABLE RYAEH
unicode_hangul_syllable_type(0xB7ED, 0xB807, 'LVT'). % Lo [27] HANGUL SYLLABLE REOG..HANGUL SYLLABLE REOH
unicode_hangul_syllable_type(0xB809, 0xB823, 'LVT'). % Lo [27] HANGUL SYLLABLE REG..HANGUL SYLLABLE REH
unicode_hangul_syllable_type(0xB825, 0xB83F, 'LVT'). % Lo [27] HANGUL SYLLABLE RYEOG..HANGUL SYLLABLE RYEOH
unicode_hangul_syllable_type(0xB841, 0xB85B, 'LVT'). % Lo [27] HANGUL SYLLABLE RYEG..HANGUL SYLLABLE RYEH
unicode_hangul_syllable_type(0xB85D, 0xB877, 'LVT'). % Lo [27] HANGUL SYLLABLE ROG..HANGUL SYLLABLE ROH
unicode_hangul_syllable_type(0xB879, 0xB893, 'LVT'). % Lo [27] HANGUL SYLLABLE RWAG..HANGUL SYLLABLE RWAH
unicode_hangul_syllable_type(0xB895, 0xB8AF, 'LVT'). % Lo [27] HANGUL SYLLABLE RWAEG..HANGUL SYLLABLE RWAEH
unicode_hangul_syllable_type(0xB8B1, 0xB8CB, 'LVT'). % Lo [27] HANGUL SYLLABLE ROEG..HANGUL SYLLABLE ROEH
unicode_hangul_syllable_type(0xB8CD, 0xB8E7, 'LVT'). % Lo [27] HANGUL SYLLABLE RYOG..HANGUL SYLLABLE RYOH
unicode_hangul_syllable_type(0xB8E9, 0xB903, 'LVT'). % Lo [27] HANGUL SYLLABLE RUG..HANGUL SYLLABLE RUH
unicode_hangul_syllable_type(0xB905, 0xB91F, 'LVT'). % Lo [27] HANGUL SYLLABLE RWEOG..HANGUL SYLLABLE RWEOH
unicode_hangul_syllable_type(0xB921, 0xB93B, 'LVT'). % Lo [27] HANGUL SYLLABLE RWEG..HANGUL SYLLABLE RWEH
unicode_hangul_syllable_type(0xB93D, 0xB957, 'LVT'). % Lo [27] HANGUL SYLLABLE RWIG..HANGUL SYLLABLE RWIH
unicode_hangul_syllable_type(0xB959, 0xB973, 'LVT'). % Lo [27] HANGUL SYLLABLE RYUG..HANGUL SYLLABLE RYUH
unicode_hangul_syllable_type(0xB975, 0xB98F, 'LVT'). % Lo [27] HANGUL SYLLABLE REUG..HANGUL SYLLABLE REUH
unicode_hangul_syllable_type(0xB991, 0xB9AB, 'LVT'). % Lo [27] HANGUL SYLLABLE RYIG..HANGUL SYLLABLE RYIH
unicode_hangul_syllable_type(0xB9AD, 0xB9C7, 'LVT'). % Lo [27] HANGUL SYLLABLE RIG..HANGUL SYLLABLE RIH
unicode_hangul_syllable_type(0xB9C9, 0xB9E3, 'LVT'). % Lo [27] HANGUL SYLLABLE MAG..HANGUL SYLLABLE MAH
unicode_hangul_syllable_type(0xB9E5, 0xB9FF, 'LVT'). % Lo [27] HANGUL SYLLABLE MAEG..HANGUL SYLLABLE MAEH
unicode_hangul_syllable_type(0xBA01, 0xBA1B, 'LVT'). % Lo [27] HANGUL SYLLABLE MYAG..HANGUL SYLLABLE MYAH
unicode_hangul_syllable_type(0xBA1D, 0xBA37, 'LVT'). % Lo [27] HANGUL SYLLABLE MYAEG..HANGUL SYLLABLE MYAEH
unicode_hangul_syllable_type(0xBA39, 0xBA53, 'LVT'). % Lo [27] HANGUL SYLLABLE MEOG..HANGUL SYLLABLE MEOH
unicode_hangul_syllable_type(0xBA55, 0xBA6F, 'LVT'). % Lo [27] HANGUL SYLLABLE MEG..HANGUL SYLLABLE MEH
unicode_hangul_syllable_type(0xBA71, 0xBA8B, 'LVT'). % Lo [27] HANGUL SYLLABLE MYEOG..HANGUL SYLLABLE MYEOH
unicode_hangul_syllable_type(0xBA8D, 0xBAA7, 'LVT'). % Lo [27] HANGUL SYLLABLE MYEG..HANGUL SYLLABLE MYEH
unicode_hangul_syllable_type(0xBAA9, 0xBAC3, 'LVT'). % Lo [27] HANGUL SYLLABLE MOG..HANGUL SYLLABLE MOH
unicode_hangul_syllable_type(0xBAC5, 0xBADF, 'LVT'). % Lo [27] HANGUL SYLLABLE MWAG..HANGUL SYLLABLE MWAH
unicode_hangul_syllable_type(0xBAE1, 0xBAFB, 'LVT'). % Lo [27] HANGUL SYLLABLE MWAEG..HANGUL SYLLABLE MWAEH
unicode_hangul_syllable_type(0xBAFD, 0xBB17, 'LVT'). % Lo [27] HANGUL SYLLABLE MOEG..HANGUL SYLLABLE MOEH
unicode_hangul_syllable_type(0xBB19, 0xBB33, 'LVT'). % Lo [27] HANGUL SYLLABLE MYOG..HANGUL SYLLABLE MYOH
unicode_hangul_syllable_type(0xBB35, 0xBB4F, 'LVT'). % Lo [27] HANGUL SYLLABLE MUG..HANGUL SYLLABLE MUH
unicode_hangul_syllable_type(0xBB51, 0xBB6B, 'LVT'). % Lo [27] HANGUL SYLLABLE MWEOG..HANGUL SYLLABLE MWEOH
unicode_hangul_syllable_type(0xBB6D, 0xBB87, 'LVT'). % Lo [27] HANGUL SYLLABLE MWEG..HANGUL SYLLABLE MWEH
unicode_hangul_syllable_type(0xBB89, 0xBBA3, 'LVT'). % Lo [27] HANGUL SYLLABLE MWIG..HANGUL SYLLABLE MWIH
unicode_hangul_syllable_type(0xBBA5, 0xBBBF, 'LVT'). % Lo [27] HANGUL SYLLABLE MYUG..HANGUL SYLLABLE MYUH
unicode_hangul_syllable_type(0xBBC1, 0xBBDB, 'LVT'). % Lo [27] HANGUL SYLLABLE MEUG..HANGUL SYLLABLE MEUH
unicode_hangul_syllable_type(0xBBDD, 0xBBF7, 'LVT'). % Lo [27] HANGUL SYLLABLE MYIG..HANGUL SYLLABLE MYIH
unicode_hangul_syllable_type(0xBBF9, 0xBC13, 'LVT'). % Lo [27] HANGUL SYLLABLE MIG..HANGUL SYLLABLE MIH
unicode_hangul_syllable_type(0xBC15, 0xBC2F, 'LVT'). % Lo [27] HANGUL SYLLABLE BAG..HANGUL SYLLABLE BAH
unicode_hangul_syllable_type(0xBC31, 0xBC4B, 'LVT'). % Lo [27] HANGUL SYLLABLE BAEG..HANGUL SYLLABLE BAEH
unicode_hangul_syllable_type(0xBC4D, 0xBC67, 'LVT'). % Lo [27] HANGUL SYLLABLE BYAG..HANGUL SYLLABLE BYAH
unicode_hangul_syllable_type(0xBC69, 0xBC83, 'LVT'). % Lo [27] HANGUL SYLLABLE BYAEG..HANGUL SYLLABLE BYAEH
unicode_hangul_syllable_type(0xBC85, 0xBC9F, 'LVT'). % Lo [27] HANGUL SYLLABLE BEOG..HANGUL SYLLABLE BEOH
unicode_hangul_syllable_type(0xBCA1, 0xBCBB, 'LVT'). % Lo [27] HANGUL SYLLABLE BEG..HANGUL SYLLABLE BEH
unicode_hangul_syllable_type(0xBCBD, 0xBCD7, 'LVT'). % Lo [27] HANGUL SYLLABLE BYEOG..HANGUL SYLLABLE BYEOH
unicode_hangul_syllable_type(0xBCD9, 0xBCF3, 'LVT'). % Lo [27] HANGUL SYLLABLE BYEG..HANGUL SYLLABLE BYEH
unicode_hangul_syllable_type(0xBCF5, 0xBD0F, 'LVT'). % Lo [27] HANGUL SYLLABLE BOG..HANGUL SYLLABLE BOH
unicode_hangul_syllable_type(0xBD11, 0xBD2B, 'LVT'). % Lo [27] HANGUL SYLLABLE BWAG..HANGUL SYLLABLE BWAH
unicode_hangul_syllable_type(0xBD2D, 0xBD47, 'LVT'). % Lo [27] HANGUL SYLLABLE BWAEG..HANGUL SYLLABLE BWAEH
unicode_hangul_syllable_type(0xBD49, 0xBD63, 'LVT'). % Lo [27] HANGUL SYLLABLE BOEG..HANGUL SYLLABLE BOEH
unicode_hangul_syllable_type(0xBD65, 0xBD7F, 'LVT'). % Lo [27] HANGUL SYLLABLE BYOG..HANGUL SYLLABLE BYOH
unicode_hangul_syllable_type(0xBD81, 0xBD9B, 'LVT'). % Lo [27] HANGUL SYLLABLE BUG..HANGUL SYLLABLE BUH
unicode_hangul_syllable_type(0xBD9D, 0xBDB7, 'LVT'). % Lo [27] HANGUL SYLLABLE BWEOG..HANGUL SYLLABLE BWEOH
unicode_hangul_syllable_type(0xBDB9, 0xBDD3, 'LVT'). % Lo [27] HANGUL SYLLABLE BWEG..HANGUL SYLLABLE BWEH
unicode_hangul_syllable_type(0xBDD5, 0xBDEF, 'LVT'). % Lo [27] HANGUL SYLLABLE BWIG..HANGUL SYLLABLE BWIH
unicode_hangul_syllable_type(0xBDF1, 0xBE0B, 'LVT'). % Lo [27] HANGUL SYLLABLE BYUG..HANGUL SYLLABLE BYUH
unicode_hangul_syllable_type(0xBE0D, 0xBE27, 'LVT'). % Lo [27] HANGUL SYLLABLE BEUG..HANGUL SYLLABLE BEUH
unicode_hangul_syllable_type(0xBE29, 0xBE43, 'LVT'). % Lo [27] HANGUL SYLLABLE BYIG..HANGUL SYLLABLE BYIH
unicode_hangul_syllable_type(0xBE45, 0xBE5F, 'LVT'). % Lo [27] HANGUL SYLLABLE BIG..HANGUL SYLLABLE BIH
unicode_hangul_syllable_type(0xBE61, 0xBE7B, 'LVT'). % Lo [27] HANGUL SYLLABLE BBAG..HANGUL SYLLABLE BBAH
unicode_hangul_syllable_type(0xBE7D, 0xBE97, 'LVT'). % Lo [27] HANGUL SYLLABLE BBAEG..HANGUL SYLLABLE BBAEH
unicode_hangul_syllable_type(0xBE99, 0xBEB3, 'LVT'). % Lo [27] HANGUL SYLLABLE BBYAG..HANGUL SYLLABLE BBYAH
unicode_hangul_syllable_type(0xBEB5, 0xBECF, 'LVT'). % Lo [27] HANGUL SYLLABLE BBYAEG..HANGUL SYLLABLE BBYAEH
unicode_hangul_syllable_type(0xBED1, 0xBEEB, 'LVT'). % Lo [27] HANGUL SYLLABLE BBEOG..HANGUL SYLLABLE BBEOH
unicode_hangul_syllable_type(0xBEED, 0xBF07, 'LVT'). % Lo [27] HANGUL SYLLABLE BBEG..HANGUL SYLLABLE BBEH
unicode_hangul_syllable_type(0xBF09, 0xBF23, 'LVT'). % Lo [27] HANGUL SYLLABLE BBYEOG..HANGUL SYLLABLE BBYEOH
unicode_hangul_syllable_type(0xBF25, 0xBF3F, 'LVT'). % Lo [27] HANGUL SYLLABLE BBYEG..HANGUL SYLLABLE BBYEH
unicode_hangul_syllable_type(0xBF41, 0xBF5B, 'LVT'). % Lo [27] HANGUL SYLLABLE BBOG..HANGUL SYLLABLE BBOH
unicode_hangul_syllable_type(0xBF5D, 0xBF77, 'LVT'). % Lo [27] HANGUL SYLLABLE BBWAG..HANGUL SYLLABLE BBWAH
unicode_hangul_syllable_type(0xBF79, 0xBF93, 'LVT'). % Lo [27] HANGUL SYLLABLE BBWAEG..HANGUL SYLLABLE BBWAEH
unicode_hangul_syllable_type(0xBF95, 0xBFAF, 'LVT'). % Lo [27] HANGUL SYLLABLE BBOEG..HANGUL SYLLABLE BBOEH
unicode_hangul_syllable_type(0xBFB1, 0xBFCB, 'LVT'). % Lo [27] HANGUL SYLLABLE BBYOG..HANGUL SYLLABLE BBYOH
unicode_hangul_syllable_type(0xBFCD, 0xBFE7, 'LVT'). % Lo [27] HANGUL SYLLABLE BBUG..HANGUL SYLLABLE BBUH
unicode_hangul_syllable_type(0xBFE9, 0xC003, 'LVT'). % Lo [27] HANGUL SYLLABLE BBWEOG..HANGUL SYLLABLE BBWEOH
unicode_hangul_syllable_type(0xC005, 0xC01F, 'LVT'). % Lo [27] HANGUL SYLLABLE BBWEG..HANGUL SYLLABLE BBWEH
unicode_hangul_syllable_type(0xC021, 0xC03B, 'LVT'). % Lo [27] HANGUL SYLLABLE BBWIG..HANGUL SYLLABLE BBWIH
unicode_hangul_syllable_type(0xC03D, 0xC057, 'LVT'). % Lo [27] HANGUL SYLLABLE BBYUG..HANGUL SYLLABLE BBYUH
unicode_hangul_syllable_type(0xC059, 0xC073, 'LVT'). % Lo [27] HANGUL SYLLABLE BBEUG..HANGUL SYLLABLE BBEUH
unicode_hangul_syllable_type(0xC075, 0xC08F, 'LVT'). % Lo [27] HANGUL SYLLABLE BBYIG..HANGUL SYLLABLE BBYIH
unicode_hangul_syllable_type(0xC091, 0xC0AB, 'LVT'). % Lo [27] HANGUL SYLLABLE BBIG..HANGUL SYLLABLE BBIH
unicode_hangul_syllable_type(0xC0AD, 0xC0C7, 'LVT'). % Lo [27] HANGUL SYLLABLE SAG..HANGUL SYLLABLE SAH
unicode_hangul_syllable_type(0xC0C9, 0xC0E3, 'LVT'). % Lo [27] HANGUL SYLLABLE SAEG..HANGUL SYLLABLE SAEH
unicode_hangul_syllable_type(0xC0E5, 0xC0FF, 'LVT'). % Lo [27] HANGUL SYLLABLE SYAG..HANGUL SYLLABLE SYAH
unicode_hangul_syllable_type(0xC101, 0xC11B, 'LVT'). % Lo [27] HANGUL SYLLABLE SYAEG..HANGUL SYLLABLE SYAEH
unicode_hangul_syllable_type(0xC11D, 0xC137, 'LVT'). % Lo [27] HANGUL SYLLABLE SEOG..HANGUL SYLLABLE SEOH
unicode_hangul_syllable_type(0xC139, 0xC153, 'LVT'). % Lo [27] HANGUL SYLLABLE SEG..HANGUL SYLLABLE SEH
unicode_hangul_syllable_type(0xC155, 0xC16F, 'LVT'). % Lo [27] HANGUL SYLLABLE SYEOG..HANGUL SYLLABLE SYEOH
unicode_hangul_syllable_type(0xC171, 0xC18B, 'LVT'). % Lo [27] HANGUL SYLLABLE SYEG..HANGUL SYLLABLE SYEH
unicode_hangul_syllable_type(0xC18D, 0xC1A7, 'LVT'). % Lo [27] HANGUL SYLLABLE SOG..HANGUL SYLLABLE SOH
unicode_hangul_syllable_type(0xC1A9, 0xC1C3, 'LVT'). % Lo [27] HANGUL SYLLABLE SWAG..HANGUL SYLLABLE SWAH
unicode_hangul_syllable_type(0xC1C5, 0xC1DF, 'LVT'). % Lo [27] HANGUL SYLLABLE SWAEG..HANGUL SYLLABLE SWAEH
unicode_hangul_syllable_type(0xC1E1, 0xC1FB, 'LVT'). % Lo [27] HANGUL SYLLABLE SOEG..HANGUL SYLLABLE SOEH
unicode_hangul_syllable_type(0xC1FD, 0xC217, 'LVT'). % Lo [27] HANGUL SYLLABLE SYOG..HANGUL SYLLABLE SYOH
unicode_hangul_syllable_type(0xC219, 0xC233, 'LVT'). % Lo [27] HANGUL SYLLABLE SUG..HANGUL SYLLABLE SUH
unicode_hangul_syllable_type(0xC235, 0xC24F, 'LVT'). % Lo [27] HANGUL SYLLABLE SWEOG..HANGUL SYLLABLE SWEOH
unicode_hangul_syllable_type(0xC251, 0xC26B, 'LVT'). % Lo [27] HANGUL SYLLABLE SWEG..HANGUL SYLLABLE SWEH
unicode_hangul_syllable_type(0xC26D, 0xC287, 'LVT'). % Lo [27] HANGUL SYLLABLE SWIG..HANGUL SYLLABLE SWIH
unicode_hangul_syllable_type(0xC289, 0xC2A3, 'LVT'). % Lo [27] HANGUL SYLLABLE SYUG..HANGUL SYLLABLE SYUH
unicode_hangul_syllable_type(0xC2A5, 0xC2BF, 'LVT'). % Lo [27] HANGUL SYLLABLE SEUG..HANGUL SYLLABLE SEUH
unicode_hangul_syllable_type(0xC2C1, 0xC2DB, 'LVT'). % Lo [27] HANGUL SYLLABLE SYIG..HANGUL SYLLABLE SYIH
unicode_hangul_syllable_type(0xC2DD, 0xC2F7, 'LVT'). % Lo [27] HANGUL SYLLABLE SIG..HANGUL SYLLABLE SIH
unicode_hangul_syllable_type(0xC2F9, 0xC313, 'LVT'). % Lo [27] HANGUL SYLLABLE SSAG..HANGUL SYLLABLE SSAH
unicode_hangul_syllable_type(0xC315, 0xC32F, 'LVT'). % Lo [27] HANGUL SYLLABLE SSAEG..HANGUL SYLLABLE SSAEH
unicode_hangul_syllable_type(0xC331, 0xC34B, 'LVT'). % Lo [27] HANGUL SYLLABLE SSYAG..HANGUL SYLLABLE SSYAH
unicode_hangul_syllable_type(0xC34D, 0xC367, 'LVT'). % Lo [27] HANGUL SYLLABLE SSYAEG..HANGUL SYLLABLE SSYAEH
unicode_hangul_syllable_type(0xC369, 0xC383, 'LVT'). % Lo [27] HANGUL SYLLABLE SSEOG..HANGUL SYLLABLE SSEOH
unicode_hangul_syllable_type(0xC385, 0xC39F, 'LVT'). % Lo [27] HANGUL SYLLABLE SSEG..HANGUL SYLLABLE SSEH
unicode_hangul_syllable_type(0xC3A1, 0xC3BB, 'LVT'). % Lo [27] HANGUL SYLLABLE SSYEOG..HANGUL SYLLABLE SSYEOH
unicode_hangul_syllable_type(0xC3BD, 0xC3D7, 'LVT'). % Lo [27] HANGUL SYLLABLE SSYEG..HANGUL SYLLABLE SSYEH
unicode_hangul_syllable_type(0xC3D9, 0xC3F3, 'LVT'). % Lo [27] HANGUL SYLLABLE SSOG..HANGUL SYLLABLE SSOH
unicode_hangul_syllable_type(0xC3F5, 0xC40F, 'LVT'). % Lo [27] HANGUL SYLLABLE SSWAG..HANGUL SYLLABLE SSWAH
unicode_hangul_syllable_type(0xC411, 0xC42B, 'LVT'). % Lo [27] HANGUL SYLLABLE SSWAEG..HANGUL SYLLABLE SSWAEH
unicode_hangul_syllable_type(0xC42D, 0xC447, 'LVT'). % Lo [27] HANGUL SYLLABLE SSOEG..HANGUL SYLLABLE SSOEH
unicode_hangul_syllable_type(0xC449, 0xC463, 'LVT'). % Lo [27] HANGUL SYLLABLE SSYOG..HANGUL SYLLABLE SSYOH
unicode_hangul_syllable_type(0xC465, 0xC47F, 'LVT'). % Lo [27] HANGUL SYLLABLE SSUG..HANGUL SYLLABLE SSUH
unicode_hangul_syllable_type(0xC481, 0xC49B, 'LVT'). % Lo [27] HANGUL SYLLABLE SSWEOG..HANGUL SYLLABLE SSWEOH
unicode_hangul_syllable_type(0xC49D, 0xC4B7, 'LVT'). % Lo [27] HANGUL SYLLABLE SSWEG..HANGUL SYLLABLE SSWEH
unicode_hangul_syllable_type(0xC4B9, 0xC4D3, 'LVT'). % Lo [27] HANGUL SYLLABLE SSWIG..HANGUL SYLLABLE SSWIH
unicode_hangul_syllable_type(0xC4D5, 0xC4EF, 'LVT'). % Lo [27] HANGUL SYLLABLE SSYUG..HANGUL SYLLABLE SSYUH
unicode_hangul_syllable_type(0xC4F1, 0xC50B, 'LVT'). % Lo [27] HANGUL SYLLABLE SSEUG..HANGUL SYLLABLE SSEUH
unicode_hangul_syllable_type(0xC50D, 0xC527, 'LVT'). % Lo [27] HANGUL SYLLABLE SSYIG..HANGUL SYLLABLE SSYIH
unicode_hangul_syllable_type(0xC529, 0xC543, 'LVT'). % Lo [27] HANGUL SYLLABLE SSIG..HANGUL SYLLABLE SSIH
unicode_hangul_syllable_type(0xC545, 0xC55F, 'LVT'). % Lo [27] HANGUL SYLLABLE AG..HANGUL SYLLABLE AH
unicode_hangul_syllable_type(0xC561, 0xC57B, 'LVT'). % Lo [27] HANGUL SYLLABLE AEG..HANGUL SYLLABLE AEH
unicode_hangul_syllable_type(0xC57D, 0xC597, 'LVT'). % Lo [27] HANGUL SYLLABLE YAG..HANGUL SYLLABLE YAH
unicode_hangul_syllable_type(0xC599, 0xC5B3, 'LVT'). % Lo [27] HANGUL SYLLABLE YAEG..HANGUL SYLLABLE YAEH
unicode_hangul_syllable_type(0xC5B5, 0xC5CF, 'LVT'). % Lo [27] HANGUL SYLLABLE EOG..HANGUL SYLLABLE EOH
unicode_hangul_syllable_type(0xC5D1, 0xC5EB, 'LVT'). % Lo [27] HANGUL SYLLABLE EG..HANGUL SYLLABLE EH
unicode_hangul_syllable_type(0xC5ED, 0xC607, 'LVT'). % Lo [27] HANGUL SYLLABLE YEOG..HANGUL SYLLABLE YEOH
unicode_hangul_syllable_type(0xC609, 0xC623, 'LVT'). % Lo [27] HANGUL SYLLABLE YEG..HANGUL SYLLABLE YEH
unicode_hangul_syllable_type(0xC625, 0xC63F, 'LVT'). % Lo [27] HANGUL SYLLABLE OG..HANGUL SYLLABLE OH
unicode_hangul_syllable_type(0xC641, 0xC65B, 'LVT'). % Lo [27] HANGUL SYLLABLE WAG..HANGUL SYLLABLE WAH
unicode_hangul_syllable_type(0xC65D, 0xC677, 'LVT'). % Lo [27] HANGUL SYLLABLE WAEG..HANGUL SYLLABLE WAEH
unicode_hangul_syllable_type(0xC679, 0xC693, 'LVT'). % Lo [27] HANGUL SYLLABLE OEG..HANGUL SYLLABLE OEH
unicode_hangul_syllable_type(0xC695, 0xC6AF, 'LVT'). % Lo [27] HANGUL SYLLABLE YOG..HANGUL SYLLABLE YOH
unicode_hangul_syllable_type(0xC6B1, 0xC6CB, 'LVT'). % Lo [27] HANGUL SYLLABLE UG..HANGUL SYLLABLE UH
unicode_hangul_syllable_type(0xC6CD, 0xC6E7, 'LVT'). % Lo [27] HANGUL SYLLABLE WEOG..HANGUL SYLLABLE WEOH
unicode_hangul_syllable_type(0xC6E9, 0xC703, 'LVT'). % Lo [27] HANGUL SYLLABLE WEG..HANGUL SYLLABLE WEH
unicode_hangul_syllable_type(0xC705, 0xC71F, 'LVT'). % Lo [27] HANGUL SYLLABLE WIG..HANGUL SYLLABLE WIH
unicode_hangul_syllable_type(0xC721, 0xC73B, 'LVT'). % Lo [27] HANGUL SYLLABLE YUG..HANGUL SYLLABLE YUH
unicode_hangul_syllable_type(0xC73D, 0xC757, 'LVT'). % Lo [27] HANGUL SYLLABLE EUG..HANGUL SYLLABLE EUH
unicode_hangul_syllable_type(0xC759, 0xC773, 'LVT'). % Lo [27] HANGUL SYLLABLE YIG..HANGUL SYLLABLE YIH
unicode_hangul_syllable_type(0xC775, 0xC78F, 'LVT'). % Lo [27] HANGUL SYLLABLE IG..HANGUL SYLLABLE IH
unicode_hangul_syllable_type(0xC791, 0xC7AB, 'LVT'). % Lo [27] HANGUL SYLLABLE JAG..HANGUL SYLLABLE JAH
unicode_hangul_syllable_type(0xC7AD, 0xC7C7, 'LVT'). % Lo [27] HANGUL SYLLABLE JAEG..HANGUL SYLLABLE JAEH
unicode_hangul_syllable_type(0xC7C9, 0xC7E3, 'LVT'). % Lo [27] HANGUL SYLLABLE JYAG..HANGUL SYLLABLE JYAH
unicode_hangul_syllable_type(0xC7E5, 0xC7FF, 'LVT'). % Lo [27] HANGUL SYLLABLE JYAEG..HANGUL SYLLABLE JYAEH
unicode_hangul_syllable_type(0xC801, 0xC81B, 'LVT'). % Lo [27] HANGUL SYLLABLE JEOG..HANGUL SYLLABLE JEOH
unicode_hangul_syllable_type(0xC81D, 0xC837, 'LVT'). % Lo [27] HANGUL SYLLABLE JEG..HANGUL SYLLABLE JEH
unicode_hangul_syllable_type(0xC839, 0xC853, 'LVT'). % Lo [27] HANGUL SYLLABLE JYEOG..HANGUL SYLLABLE JYEOH
unicode_hangul_syllable_type(0xC855, 0xC86F, 'LVT'). % Lo [27] HANGUL SYLLABLE JYEG..HANGUL SYLLABLE JYEH
unicode_hangul_syllable_type(0xC871, 0xC88B, 'LVT'). % Lo [27] HANGUL SYLLABLE JOG..HANGUL SYLLABLE JOH
unicode_hangul_syllable_type(0xC88D, 0xC8A7, 'LVT'). % Lo [27] HANGUL SYLLABLE JWAG..HANGUL SYLLABLE JWAH
unicode_hangul_syllable_type(0xC8A9, 0xC8C3, 'LVT'). % Lo [27] HANGUL SYLLABLE JWAEG..HANGUL SYLLABLE JWAEH
unicode_hangul_syllable_type(0xC8C5, 0xC8DF, 'LVT'). % Lo [27] HANGUL SYLLABLE JOEG..HANGUL SYLLABLE JOEH
unicode_hangul_syllable_type(0xC8E1, 0xC8FB, 'LVT'). % Lo [27] HANGUL SYLLABLE JYOG..HANGUL SYLLABLE JYOH
unicode_hangul_syllable_type(0xC8FD, 0xC917, 'LVT'). % Lo [27] HANGUL SYLLABLE JUG..HANGUL SYLLABLE JUH
unicode_hangul_syllable_type(0xC919, 0xC933, 'LVT'). % Lo [27] HANGUL SYLLABLE JWEOG..HANGUL SYLLABLE JWEOH
unicode_hangul_syllable_type(0xC935, 0xC94F, 'LVT'). % Lo [27] HANGUL SYLLABLE JWEG..HANGUL SYLLABLE JWEH
unicode_hangul_syllable_type(0xC951, 0xC96B, 'LVT'). % Lo [27] HANGUL SYLLABLE JWIG..HANGUL SYLLABLE JWIH
unicode_hangul_syllable_type(0xC96D, 0xC987, 'LVT'). % Lo [27] HANGUL SYLLABLE JYUG..HANGUL SYLLABLE JYUH
unicode_hangul_syllable_type(0xC989, 0xC9A3, 'LVT'). % Lo [27] HANGUL SYLLABLE JEUG..HANGUL SYLLABLE JEUH
unicode_hangul_syllable_type(0xC9A5, 0xC9BF, 'LVT'). % Lo [27] HANGUL SYLLABLE JYIG..HANGUL SYLLABLE JYIH
unicode_hangul_syllable_type(0xC9C1, 0xC9DB, 'LVT'). % Lo [27] HANGUL SYLLABLE JIG..HANGUL SYLLABLE JIH
unicode_hangul_syllable_type(0xC9DD, 0xC9F7, 'LVT'). % Lo [27] HANGUL SYLLABLE JJAG..HANGUL SYLLABLE JJAH
unicode_hangul_syllable_type(0xC9F9, 0xCA13, 'LVT'). % Lo [27] HANGUL SYLLABLE JJAEG..HANGUL SYLLABLE JJAEH
unicode_hangul_syllable_type(0xCA15, 0xCA2F, 'LVT'). % Lo [27] HANGUL SYLLABLE JJYAG..HANGUL SYLLABLE JJYAH
unicode_hangul_syllable_type(0xCA31, 0xCA4B, 'LVT'). % Lo [27] HANGUL SYLLABLE JJYAEG..HANGUL SYLLABLE JJYAEH
unicode_hangul_syllable_type(0xCA4D, 0xCA67, 'LVT'). % Lo [27] HANGUL SYLLABLE JJEOG..HANGUL SYLLABLE JJEOH
unicode_hangul_syllable_type(0xCA69, 0xCA83, 'LVT'). % Lo [27] HANGUL SYLLABLE JJEG..HANGUL SYLLABLE JJEH
unicode_hangul_syllable_type(0xCA85, 0xCA9F, 'LVT'). % Lo [27] HANGUL SYLLABLE JJYEOG..HANGUL SYLLABLE JJYEOH
unicode_hangul_syllable_type(0xCAA1, 0xCABB, 'LVT'). % Lo [27] HANGUL SYLLABLE JJYEG..HANGUL SYLLABLE JJYEH
unicode_hangul_syllable_type(0xCABD, 0xCAD7, 'LVT'). % Lo [27] HANGUL SYLLABLE JJOG..HANGUL SYLLABLE JJOH
unicode_hangul_syllable_type(0xCAD9, 0xCAF3, 'LVT'). % Lo [27] HANGUL SYLLABLE JJWAG..HANGUL SYLLABLE JJWAH
unicode_hangul_syllable_type(0xCAF5, 0xCB0F, 'LVT'). % Lo [27] HANGUL SYLLABLE JJWAEG..HANGUL SYLLABLE JJWAEH
unicode_hangul_syllable_type(0xCB11, 0xCB2B, 'LVT'). % Lo [27] HANGUL SYLLABLE JJOEG..HANGUL SYLLABLE JJOEH
unicode_hangul_syllable_type(0xCB2D, 0xCB47, 'LVT'). % Lo [27] HANGUL SYLLABLE JJYOG..HANGUL SYLLABLE JJYOH
unicode_hangul_syllable_type(0xCB49, 0xCB63, 'LVT'). % Lo [27] HANGUL SYLLABLE JJUG..HANGUL SYLLABLE JJUH
unicode_hangul_syllable_type(0xCB65, 0xCB7F, 'LVT'). % Lo [27] HANGUL SYLLABLE JJWEOG..HANGUL SYLLABLE JJWEOH
unicode_hangul_syllable_type(0xCB81, 0xCB9B, 'LVT'). % Lo [27] HANGUL SYLLABLE JJWEG..HANGUL SYLLABLE JJWEH
unicode_hangul_syllable_type(0xCB9D, 0xCBB7, 'LVT'). % Lo [27] HANGUL SYLLABLE JJWIG..HANGUL SYLLABLE JJWIH
unicode_hangul_syllable_type(0xCBB9, 0xCBD3, 'LVT'). % Lo [27] HANGUL SYLLABLE JJYUG..HANGUL SYLLABLE JJYUH
unicode_hangul_syllable_type(0xCBD5, 0xCBEF, 'LVT'). % Lo [27] HANGUL SYLLABLE JJEUG..HANGUL SYLLABLE JJEUH
unicode_hangul_syllable_type(0xCBF1, 0xCC0B, 'LVT'). % Lo [27] HANGUL SYLLABLE JJYIG..HANGUL SYLLABLE JJYIH
unicode_hangul_syllable_type(0xCC0D, 0xCC27, 'LVT'). % Lo [27] HANGUL SYLLABLE JJIG..HANGUL SYLLABLE JJIH
unicode_hangul_syllable_type(0xCC29, 0xCC43, 'LVT'). % Lo [27] HANGUL SYLLABLE CAG..HANGUL SYLLABLE CAH
unicode_hangul_syllable_type(0xCC45, 0xCC5F, 'LVT'). % Lo [27] HANGUL SYLLABLE CAEG..HANGUL SYLLABLE CAEH
unicode_hangul_syllable_type(0xCC61, 0xCC7B, 'LVT'). % Lo [27] HANGUL SYLLABLE CYAG..HANGUL SYLLABLE CYAH
unicode_hangul_syllable_type(0xCC7D, 0xCC97, 'LVT'). % Lo [27] HANGUL SYLLABLE CYAEG..HANGUL SYLLABLE CYAEH
unicode_hangul_syllable_type(0xCC99, 0xCCB3, 'LVT'). % Lo [27] HANGUL SYLLABLE CEOG..HANGUL SYLLABLE CEOH
unicode_hangul_syllable_type(0xCCB5, 0xCCCF, 'LVT'). % Lo [27] HANGUL SYLLABLE CEG..HANGUL SYLLABLE CEH
unicode_hangul_syllable_type(0xCCD1, 0xCCEB, 'LVT'). % Lo [27] HANGUL SYLLABLE CYEOG..HANGUL SYLLABLE CYEOH
unicode_hangul_syllable_type(0xCCED, 0xCD07, 'LVT'). % Lo [27] HANGUL SYLLABLE CYEG..HANGUL SYLLABLE CYEH
unicode_hangul_syllable_type(0xCD09, 0xCD23, 'LVT'). % Lo [27] HANGUL SYLLABLE COG..HANGUL SYLLABLE COH
unicode_hangul_syllable_type(0xCD25, 0xCD3F, 'LVT'). % Lo [27] HANGUL SYLLABLE CWAG..HANGUL SYLLABLE CWAH
unicode_hangul_syllable_type(0xCD41, 0xCD5B, 'LVT'). % Lo [27] HANGUL SYLLABLE CWAEG..HANGUL SYLLABLE CWAEH
unicode_hangul_syllable_type(0xCD5D, 0xCD77, 'LVT'). % Lo [27] HANGUL SYLLABLE COEG..HANGUL SYLLABLE COEH
unicode_hangul_syllable_type(0xCD79, 0xCD93, 'LVT'). % Lo [27] HANGUL SYLLABLE CYOG..HANGUL SYLLABLE CYOH
unicode_hangul_syllable_type(0xCD95, 0xCDAF, 'LVT'). % Lo [27] HANGUL SYLLABLE CUG..HANGUL SYLLABLE CUH
unicode_hangul_syllable_type(0xCDB1, 0xCDCB, 'LVT'). % Lo [27] HANGUL SYLLABLE CWEOG..HANGUL SYLLABLE CWEOH
unicode_hangul_syllable_type(0xCDCD, 0xCDE7, 'LVT'). % Lo [27] HANGUL SYLLABLE CWEG..HANGUL SYLLABLE CWEH
unicode_hangul_syllable_type(0xCDE9, 0xCE03, 'LVT'). % Lo [27] HANGUL SYLLABLE CWIG..HANGUL SYLLABLE CWIH
unicode_hangul_syllable_type(0xCE05, 0xCE1F, 'LVT'). % Lo [27] HANGUL SYLLABLE CYUG..HANGUL SYLLABLE CYUH
unicode_hangul_syllable_type(0xCE21, 0xCE3B, 'LVT'). % Lo [27] HANGUL SYLLABLE CEUG..HANGUL SYLLABLE CEUH
unicode_hangul_syllable_type(0xCE3D, 0xCE57, 'LVT'). % Lo [27] HANGUL SYLLABLE CYIG..HANGUL SYLLABLE CYIH
unicode_hangul_syllable_type(0xCE59, 0xCE73, 'LVT'). % Lo [27] HANGUL SYLLABLE CIG..HANGUL SYLLABLE CIH
unicode_hangul_syllable_type(0xCE75, 0xCE8F, 'LVT'). % Lo [27] HANGUL SYLLABLE KAG..HANGUL SYLLABLE KAH
unicode_hangul_syllable_type(0xCE91, 0xCEAB, 'LVT'). % Lo [27] HANGUL SYLLABLE KAEG..HANGUL SYLLABLE KAEH
unicode_hangul_syllable_type(0xCEAD, 0xCEC7, 'LVT'). % Lo [27] HANGUL SYLLABLE KYAG..HANGUL SYLLABLE KYAH
unicode_hangul_syllable_type(0xCEC9, 0xCEE3, 'LVT'). % Lo [27] HANGUL SYLLABLE KYAEG..HANGUL SYLLABLE KYAEH
unicode_hangul_syllable_type(0xCEE5, 0xCEFF, 'LVT'). % Lo [27] HANGUL SYLLABLE KEOG..HANGUL SYLLABLE KEOH
unicode_hangul_syllable_type(0xCF01, 0xCF1B, 'LVT'). % Lo [27] HANGUL SYLLABLE KEG..HANGUL SYLLABLE KEH
unicode_hangul_syllable_type(0xCF1D, 0xCF37, 'LVT'). % Lo [27] HANGUL SYLLABLE KYEOG..HANGUL SYLLABLE KYEOH
unicode_hangul_syllable_type(0xCF39, 0xCF53, 'LVT'). % Lo [27] HANGUL SYLLABLE KYEG..HANGUL SYLLABLE KYEH
unicode_hangul_syllable_type(0xCF55, 0xCF6F, 'LVT'). % Lo [27] HANGUL SYLLABLE KOG..HANGUL SYLLABLE KOH
unicode_hangul_syllable_type(0xCF71, 0xCF8B, 'LVT'). % Lo [27] HANGUL SYLLABLE KWAG..HANGUL SYLLABLE KWAH
unicode_hangul_syllable_type(0xCF8D, 0xCFA7, 'LVT'). % Lo [27] HANGUL SYLLABLE KWAEG..HANGUL SYLLABLE KWAEH
unicode_hangul_syllable_type(0xCFA9, 0xCFC3, 'LVT'). % Lo [27] HANGUL SYLLABLE KOEG..HANGUL SYLLABLE KOEH
unicode_hangul_syllable_type(0xCFC5, 0xCFDF, 'LVT'). % Lo [27] HANGUL SYLLABLE KYOG..HANGUL SYLLABLE KYOH
unicode_hangul_syllable_type(0xCFE1, 0xCFFB, 'LVT'). % Lo [27] HANGUL SYLLABLE KUG..HANGUL SYLLABLE KUH
unicode_hangul_syllable_type(0xCFFD, 0xD017, 'LVT'). % Lo [27] HANGUL SYLLABLE KWEOG..HANGUL SYLLABLE KWEOH
unicode_hangul_syllable_type(0xD019, 0xD033, 'LVT'). % Lo [27] HANGUL SYLLABLE KWEG..HANGUL SYLLABLE KWEH
unicode_hangul_syllable_type(0xD035, 0xD04F, 'LVT'). % Lo [27] HANGUL SYLLABLE KWIG..HANGUL SYLLABLE KWIH
unicode_hangul_syllable_type(0xD051, 0xD06B, 'LVT'). % Lo [27] HANGUL SYLLABLE KYUG..HANGUL SYLLABLE KYUH
unicode_hangul_syllable_type(0xD06D, 0xD087, 'LVT'). % Lo [27] HANGUL SYLLABLE KEUG..HANGUL SYLLABLE KEUH
unicode_hangul_syllable_type(0xD089, 0xD0A3, 'LVT'). % Lo [27] HANGUL SYLLABLE KYIG..HANGUL SYLLABLE KYIH
unicode_hangul_syllable_type(0xD0A5, 0xD0BF, 'LVT'). % Lo [27] HANGUL SYLLABLE KIG..HANGUL SYLLABLE KIH
unicode_hangul_syllable_type(0xD0C1, 0xD0DB, 'LVT'). % Lo [27] HANGUL SYLLABLE TAG..HANGUL SYLLABLE TAH
unicode_hangul_syllable_type(0xD0DD, 0xD0F7, 'LVT'). % Lo [27] HANGUL SYLLABLE TAEG..HANGUL SYLLABLE TAEH
unicode_hangul_syllable_type(0xD0F9, 0xD113, 'LVT'). % Lo [27] HANGUL SYLLABLE TYAG..HANGUL SYLLABLE TYAH
unicode_hangul_syllable_type(0xD115, 0xD12F, 'LVT'). % Lo [27] HANGUL SYLLABLE TYAEG..HANGUL SYLLABLE TYAEH
unicode_hangul_syllable_type(0xD131, 0xD14B, 'LVT'). % Lo [27] HANGUL SYLLABLE TEOG..HANGUL SYLLABLE TEOH
unicode_hangul_syllable_type(0xD14D, 0xD167, 'LVT'). % Lo [27] HANGUL SYLLABLE TEG..HANGUL SYLLABLE TEH
unicode_hangul_syllable_type(0xD169, 0xD183, 'LVT'). % Lo [27] HANGUL SYLLABLE TYEOG..HANGUL SYLLABLE TYEOH
unicode_hangul_syllable_type(0xD185, 0xD19F, 'LVT'). % Lo [27] HANGUL SYLLABLE TYEG..HANGUL SYLLABLE TYEH
unicode_hangul_syllable_type(0xD1A1, 0xD1BB, 'LVT'). % Lo [27] HANGUL SYLLABLE TOG..HANGUL SYLLABLE TOH
unicode_hangul_syllable_type(0xD1BD, 0xD1D7, 'LVT'). % Lo [27] HANGUL SYLLABLE TWAG..HANGUL SYLLABLE TWAH
unicode_hangul_syllable_type(0xD1D9, 0xD1F3, 'LVT'). % Lo [27] HANGUL SYLLABLE TWAEG..HANGUL SYLLABLE TWAEH
unicode_hangul_syllable_type(0xD1F5, 0xD20F, 'LVT'). % Lo [27] HANGUL SYLLABLE TOEG..HANGUL SYLLABLE TOEH
unicode_hangul_syllable_type(0xD211, 0xD22B, 'LVT'). % Lo [27] HANGUL SYLLABLE TYOG..HANGUL SYLLABLE TYOH
unicode_hangul_syllable_type(0xD22D, 0xD247, 'LVT'). % Lo [27] HANGUL SYLLABLE TUG..HANGUL SYLLABLE TUH
unicode_hangul_syllable_type(0xD249, 0xD263, 'LVT'). % Lo [27] HANGUL SYLLABLE TWEOG..HANGUL SYLLABLE TWEOH
unicode_hangul_syllable_type(0xD265, 0xD27F, 'LVT'). % Lo [27] HANGUL SYLLABLE TWEG..HANGUL SYLLABLE TWEH
unicode_hangul_syllable_type(0xD281, 0xD29B, 'LVT'). % Lo [27] HANGUL SYLLABLE TWIG..HANGUL SYLLABLE TWIH
unicode_hangul_syllable_type(0xD29D, 0xD2B7, 'LVT'). % Lo [27] HANGUL SYLLABLE TYUG..HANGUL SYLLABLE TYUH
unicode_hangul_syllable_type(0xD2B9, 0xD2D3, 'LVT'). % Lo [27] HANGUL SYLLABLE TEUG..HANGUL SYLLABLE TEUH
unicode_hangul_syllable_type(0xD2D5, 0xD2EF, 'LVT'). % Lo [27] HANGUL SYLLABLE TYIG..HANGUL SYLLABLE TYIH
unicode_hangul_syllable_type(0xD2F1, 0xD30B, 'LVT'). % Lo [27] HANGUL SYLLABLE TIG..HANGUL SYLLABLE TIH
unicode_hangul_syllable_type(0xD30D, 0xD327, 'LVT'). % Lo [27] HANGUL SYLLABLE PAG..HANGUL SYLLABLE PAH
unicode_hangul_syllable_type(0xD329, 0xD343, 'LVT'). % Lo [27] HANGUL SYLLABLE PAEG..HANGUL SYLLABLE PAEH
unicode_hangul_syllable_type(0xD345, 0xD35F, 'LVT'). % Lo [27] HANGUL SYLLABLE PYAG..HANGUL SYLLABLE PYAH
unicode_hangul_syllable_type(0xD361, 0xD37B, 'LVT'). % Lo [27] HANGUL SYLLABLE PYAEG..HANGUL SYLLABLE PYAEH
unicode_hangul_syllable_type(0xD37D, 0xD397, 'LVT'). % Lo [27] HANGUL SYLLABLE PEOG..HANGUL SYLLABLE PEOH
unicode_hangul_syllable_type(0xD399, 0xD3B3, 'LVT'). % Lo [27] HANGUL SYLLABLE PEG..HANGUL SYLLABLE PEH
unicode_hangul_syllable_type(0xD3B5, 0xD3CF, 'LVT'). % Lo [27] HANGUL SYLLABLE PYEOG..HANGUL SYLLABLE PYEOH
unicode_hangul_syllable_type(0xD3D1, 0xD3EB, 'LVT'). % Lo [27] HANGUL SYLLABLE PYEG..HANGUL SYLLABLE PYEH
unicode_hangul_syllable_type(0xD3ED, 0xD407, 'LVT'). % Lo [27] HANGUL SYLLABLE POG..HANGUL SYLLABLE POH
unicode_hangul_syllable_type(0xD409, 0xD423, 'LVT'). % Lo [27] HANGUL SYLLABLE PWAG..HANGUL SYLLABLE PWAH
unicode_hangul_syllable_type(0xD425, 0xD43F, 'LVT'). % Lo [27] HANGUL SYLLABLE PWAEG..HANGUL SYLLABLE PWAEH
unicode_hangul_syllable_type(0xD441, 0xD45B, 'LVT'). % Lo [27] HANGUL SYLLABLE POEG..HANGUL SYLLABLE POEH
unicode_hangul_syllable_type(0xD45D, 0xD477, 'LVT'). % Lo [27] HANGUL SYLLABLE PYOG..HANGUL SYLLABLE PYOH
unicode_hangul_syllable_type(0xD479, 0xD493, 'LVT'). % Lo [27] HANGUL SYLLABLE PUG..HANGUL SYLLABLE PUH
unicode_hangul_syllable_type(0xD495, 0xD4AF, 'LVT'). % Lo [27] HANGUL SYLLABLE PWEOG..HANGUL SYLLABLE PWEOH
unicode_hangul_syllable_type(0xD4B1, 0xD4CB, 'LVT'). % Lo [27] HANGUL SYLLABLE PWEG..HANGUL SYLLABLE PWEH
unicode_hangul_syllable_type(0xD4CD, 0xD4E7, 'LVT'). % Lo [27] HANGUL SYLLABLE PWIG..HANGUL SYLLABLE PWIH
unicode_hangul_syllable_type(0xD4E9, 0xD503, 'LVT'). % Lo [27] HANGUL SYLLABLE PYUG..HANGUL SYLLABLE PYUH
unicode_hangul_syllable_type(0xD505, 0xD51F, 'LVT'). % Lo [27] HANGUL SYLLABLE PEUG..HANGUL SYLLABLE PEUH
unicode_hangul_syllable_type(0xD521, 0xD53B, 'LVT'). % Lo [27] HANGUL SYLLABLE PYIG..HANGUL SYLLABLE PYIH
unicode_hangul_syllable_type(0xD53D, 0xD557, 'LVT'). % Lo [27] HANGUL SYLLABLE PIG..HANGUL SYLLABLE PIH
unicode_hangul_syllable_type(0xD559, 0xD573, 'LVT'). % Lo [27] HANGUL SYLLABLE HAG..HANGUL SYLLABLE HAH
unicode_hangul_syllable_type(0xD575, 0xD58F, 'LVT'). % Lo [27] HANGUL SYLLABLE HAEG..HANGUL SYLLABLE HAEH
unicode_hangul_syllable_type(0xD591, 0xD5AB, 'LVT'). % Lo [27] HANGUL SYLLABLE HYAG..HANGUL SYLLABLE HYAH
unicode_hangul_syllable_type(0xD5AD, 0xD5C7, 'LVT'). % Lo [27] HANGUL SYLLABLE HYAEG..HANGUL SYLLABLE HYAEH
unicode_hangul_syllable_type(0xD5C9, 0xD5E3, 'LVT'). % Lo [27] HANGUL SYLLABLE HEOG..HANGUL SYLLABLE HEOH
unicode_hangul_syllable_type(0xD5E5, 0xD5FF, 'LVT'). % Lo [27] HANGUL SYLLABLE HEG..HANGUL SYLLABLE HEH
unicode_hangul_syllable_type(0xD601, 0xD61B, 'LVT'). % Lo [27] HANGUL SYLLABLE HYEOG..HANGUL SYLLABLE HYEOH
unicode_hangul_syllable_type(0xD61D, 0xD637, 'LVT'). % Lo [27] HANGUL SYLLABLE HYEG..HANGUL SYLLABLE HYEH
unicode_hangul_syllable_type(0xD639, 0xD653, 'LVT'). % Lo [27] HANGUL SYLLABLE HOG..HANGUL SYLLABLE HOH
unicode_hangul_syllable_type(0xD655, 0xD66F, 'LVT'). % Lo [27] HANGUL SYLLABLE HWAG..HANGUL SYLLABLE HWAH
unicode_hangul_syllable_type(0xD671, 0xD68B, 'LVT'). % Lo [27] HANGUL SYLLABLE HWAEG..HANGUL SYLLABLE HWAEH
unicode_hangul_syllable_type(0xD68D, 0xD6A7, 'LVT'). % Lo [27] HANGUL SYLLABLE HOEG..HANGUL SYLLABLE HOEH
unicode_hangul_syllable_type(0xD6A9, 0xD6C3, 'LVT'). % Lo [27] HANGUL SYLLABLE HYOG..HANGUL SYLLABLE HYOH
unicode_hangul_syllable_type(0xD6C5, 0xD6DF, 'LVT'). % Lo [27] HANGUL SYLLABLE HUG..HANGUL SYLLABLE HUH
unicode_hangul_syllable_type(0xD6E1, 0xD6FB, 'LVT'). % Lo [27] HANGUL SYLLABLE HWEOG..HANGUL SYLLABLE HWEOH
unicode_hangul_syllable_type(0xD6FD, 0xD717, 'LVT'). % Lo [27] HANGUL SYLLABLE HWEG..HANGUL SYLLABLE HWEH
unicode_hangul_syllable_type(0xD719, 0xD733, 'LVT'). % Lo [27] HANGUL SYLLABLE HWIG..HANGUL SYLLABLE HWIH
unicode_hangul_syllable_type(0xD735, 0xD74F, 'LVT'). % Lo [27] HANGUL SYLLABLE HYUG..HANGUL SYLLABLE HYUH
unicode_hangul_syllable_type(0xD751, 0xD76B, 'LVT'). % Lo [27] HANGUL SYLLABLE HEUG..HANGUL SYLLABLE HEUH
unicode_hangul_syllable_type(0xD76D, 0xD787, 'LVT'). % Lo [27] HANGUL SYLLABLE HYIG..HANGUL SYLLABLE HYIH
unicode_hangul_syllable_type(0xD789, 0xD7A3, 'LVT'). % Lo [27] HANGUL SYLLABLE HIG..HANGUL SYLLABLE HIH
% Total code points: 10773
% EOF
| LogtalkDotOrg/logtalk3 | library/unicode_data/unicode_hangul_syllable_type.pl | Perl | apache-2.0 | 78,975 |
package VMOMI::ArrayOfDvsApplyOperationFaultFaultOnObject;
use parent 'VMOMI::ComplexType';

use strict;
use warnings;

# No ancestors beyond the ComplexType parent class.
our @class_ancestors = ();

# Member descriptor list for this array wrapper type
# (name, type, and two flags interpreted by the VMOMI serializer).
our @class_members = (
    ['DvsApplyOperationFaultFaultOnObject', 'DvsApplyOperationFaultFaultOnObject', 1, 1],
);

# Return the ancestor list of this class (empty here).
sub get_class_ancestors {
    return @class_ancestors;
}

# Return the parent's members followed by this class's own members.
sub get_class_members {
    my $class = shift;
    return ($class->SUPER::get_class_members(), @class_members);
}

1;
| stumpr/p5-vmomi | lib/VMOMI/ArrayOfDvsApplyOperationFaultFaultOnObject.pm | Perl | apache-2.0 | 483 |
#!/usr/bin/perl -w
#
#
# This perl package contains common functions used in communication
# with the WASP (POI or ED) database.
package PerlWASPTools;
use lib ".";
use PerlTools; # for dbgprint
use PerlCategoriesObject;
use strict;
use DBI;
use File::Find;
use File::Basename;
require Exporter;
our @ISA = qw( Exporter );
our @EXPORT = qw( &getCountryIDFromName
&getCountryNameFromID
&getCountryIDFromISO3166Code
&getISO3166CodeFromCountryID
&getCountryDirNameFromIso3166Code
&getCountryNameFromISO3166Code
&getCountryDirNamesOfPoiSource
&getDetailLevelOfCountryID
&getAllCountryDirNames
&countryWaspNameToDirName
&getLanguageIDFromName
&getLanguageNameFromID
&getLangNameFromDodonaLangCode
&getDodonaLangCodeFromLangName
&getSourceIDFromName
&getSourceNameFromID
&getVersionIDFromName
&getVersionNameFromID
&getPOITypeIDFromTypeName
&getPOITypeNameFromTypeID
&getPOICatFromPOIType
&getPOICatDescriptFromPOICatID
&getProductID
&getProductSupplierName
&getPrevPoiSource
&getNextPoiSource
&getSourcesOfProduct
&getMostFrequentValidFromVersionOfSourceAndCountry
&getInUseFromSourceAndCountry
&getSourceIsMapPoi
&getStaticIDForPOIID
&getCountriesOfSource
&staticIDsPresentInSource
&getNonStaticCountries
&getSourceInfo
&getInUseInfoFromSource
&getNbrPOIsOfSourceID
&getNbrStaticIDsOfSourceID
&getMaxAttributeTypeID
&getMaxAttributeTypeIDLessThanInfo
&getMapVersionByCountryID
&handleMapVerOverrides
&getMapsFromCountryDirs
&mapVersionToEDVersionID
&getDodonaKeysByCategory
&storePOIs
&db_connect &db_disconnect );
#our $VERSION = 1.00;
# Build a map from POI category ID to its Dodona string key, read from
# the POICategoryTypes table. Returns the map as a flat hash.
sub getDodonaKeysByCategory {
    my ($dbh) = @_;

    my $query =
        "select catID, dodonaStringKey from POICategoryTypes;";
    my $sth = $dbh->prepare($query);
    $sth->execute or die "Query execute failed: $query";

    my %dodonaKeyByCatID;
    while ( my ($catID, $dodonaKey) = $sth->fetchrow() ) {
        $dodonaKeyByCatID{$catID} = $dodonaKey;
    }
    return %dodonaKeyByCatID;
}
# Look up POICountries.ID for a country name (case-insensitive LIKE match).
# Returns the ID, or undef when no row matches.
sub getCountryIDFromName {
    my ($dbh, $countryName) = @_;

    # Bind the name instead of interpolating it into the SQL text, so names
    # containing quotes (e.g. "Cote d'Ivoire") cannot break the statement
    # or inject SQL.
    my $countryQuery =
        "select ID from POICountries where country like ?;";
    my $sth = $dbh->prepare($countryQuery);
    $sth->execute($countryName) or die "Query execute failed: $countryQuery";
    my $countryID = $sth->fetchrow();
    return $countryID;
}
# Look up POICountries.country for a numeric country ID.
# Returns the country name, or undef when the ID is unknown.
sub getCountryNameFromID {
    my ($dbh, $countryID) = @_;

    # Bind the ID instead of interpolating it into the SQL text.
    my $countryQuery =
        "select country from POICountries where ID = ?;";
    my $sth = $dbh->prepare($countryQuery);
    $sth->execute($countryID) or die "Query execute failed: $countryQuery";
    my $countryName = $sth->fetchrow();
    return $countryName;
}
# Look up POICountries.ID for an ISO 3166-1 alpha-2 country code.
# Returns the ID, or undef when no row matches.
sub getCountryIDFromISO3166Code {
    my ($dbh, $countryCode) = @_;

    # Bind the code instead of interpolating it into the SQL text.
    my $countryQuery =
        "SELECT ID FROM POICountries WHERE " .
        "iso3166_1_alpha2 = ?";
    my $sth = $dbh->prepare($countryQuery);
    $sth->execute($countryCode) or die "Query execute failed: $countryQuery";
    my $countryID = $sth->fetchrow();
    return $countryID;
}
# Look up the ISO 3166-1 alpha-2 code for a numeric country ID.
# Returns the code, or undef when the ID is unknown.
sub getISO3166CodeFromCountryID {
    my ($dbh, $countryID) = @_;

    # Bind the ID instead of interpolating it into the SQL text.
    my $countryQuery =
        "SELECT iso3166_1_alpha2 FROM POICountries WHERE ID = ?;";
    my $sth = $dbh->prepare($countryQuery);
    $sth->execute($countryID) or die "Query execute failed: $countryQuery";
    my $isoCode = $sth->fetchrow();
    return $isoCode;
}
# Look up POICountries.country for an ISO 3166-1 alpha-2 country code.
# Returns the country name, or undef when no row matches.
sub getCountryNameFromISO3166Code {
    my ($dbh, $countryCode) = @_;

    # Bind the code instead of interpolating it into the SQL text.
    my $countryQuery =
        "SELECT country FROM POICountries WHERE " .
        "iso3166_1_alpha2 = ?";
    my $sth = $dbh->prepare($countryQuery);
    $sth->execute($countryCode) or die "Query execute failed: $countryQuery";
    my $country = $sth->fetchrow();
    return $country;
}
# Look up POINameLanguages.langName for a numeric language ID.
# Returns the name, or undef when the ID is unknown.
sub getLanguageNameFromID {
    my ($dbh, $langID) = @_;

    # Bind the ID instead of interpolating it into the SQL text.
    my $langQuery =
        "SELECT langName FROM POINameLanguages WHERE ID = ?;";
    my $sth = $dbh->prepare($langQuery);
    $sth->execute($langID) or die "Query execute failed: $langQuery";
    my $langName = $sth->fetchrow();
    return $langName;
}
# Look up POINameLanguages.ID for a language name (LIKE match).
# Returns the ID, or undef when no row matches.
sub getLanguageIDFromName {
    my ($dbh, $langName) = @_;

    # Bind the name instead of interpolating it into the SQL text.
    my $langQuery =
        "SELECT ID FROM POINameLanguages WHERE langName LIKE ?;";
    my $sth = $dbh->prepare($langQuery);
    $sth->execute($langName) or die "Query execute failed: $langQuery";
    my $langID = $sth->fetchrow();
    return $langID;
}
# Look up the POINameLanguages.langName whose dodonaLangID matches the
# given Dodona language code. Returns the name, or undef when no match.
sub getLangNameFromDodonaLangCode {
    my ($dbh, $dodonaLangCode) = @_;

    # Bind the code instead of interpolating it into the SQL text.
    my $langQuery =
        "select langName " .
        "from POINameLanguages where ".
        "dodonaLangID = ?;";
    my $sth = $dbh->prepare($langQuery);
    $sth->execute($dodonaLangCode) or die "Query execute failed: $langQuery";
    # Renamed from $langID: the fetched column is the language *name*.
    my $langName = $sth->fetchrow();
    return $langName;
}
# Look up the Dodona language code (POINameLanguages.dodonaLangID) for a
# language name. Returns the code, or undef when no match.
sub getDodonaLangCodeFromLangName {
    my ($dbh, $langName) = @_;

    # Bind the name instead of interpolating it into the SQL text.
    my $langQuery =
        "select dodonaLangID " .
        "from POINameLanguages where ".
        "langName = ?;";
    my $sth = $dbh->prepare($langQuery);
    $sth->execute($langName) or die "Query execute failed: $langQuery";
    my $dodonaLang = $sth->fetchrow();
    return $dodonaLang;
}
# Look up POISources.ID for a source name (LIKE match).
# Returns the ID, or undef when no row matches.
sub getSourceIDFromName {
    my ($dbh, $sourceName) = @_;

    # Bind the name instead of interpolating it into the SQL text.
    my $sourceQuery =
        "SELECT ID FROM POISources WHERE source LIKE ?;";
    my $sth = $dbh->prepare($sourceQuery);
    $sth->execute($sourceName) or die "Query execute failed: $sourceQuery";
    my $sourceID = $sth->fetchrow();
    return $sourceID;
}
# Look up POISources.source for a numeric source ID.
# Returns the name, or undef when the ID is unknown.
sub getSourceNameFromID {
    my ($dbh, $sourceID) = @_;

    # Bind the ID instead of interpolating it into the SQL text.
    my $sourceQuery =
        "SELECT source FROM POISources WHERE ID=?;";
    my $sth = $dbh->prepare($sourceQuery);
    $sth->execute($sourceID) or die "Query execute failed: $sourceQuery";
    my $sourceName = $sth->fetchrow();
    return $sourceName;
}
# Count the POIMain rows belonging to the given POI source ID.
sub getNbrPOIsOfSourceID {
    my ($dbh, $sourceID) = @_;

    # Bind the ID instead of interpolating it into the SQL text.
    my $query =
        "SELECT count(*) FROM POIMain WHERE source = ?";
    my $sth = $dbh->prepare($query);
    $sth->execute($sourceID) or die "Query execute failed: $query";
    my $count = $sth->fetchrow();
    return $count;
}
# Count the POIs of the given source that have a static ID
# (i.e. a matching row in POIStatic).
sub getNbrStaticIDsOfSourceID {
    my ($dbh, $sourceID) = @_;

    # Bind the ID instead of interpolating it into the SQL text.
    my $query =
        "SELECT count(*) FROM POIMain join POIStatic on POIMain.ID = POIStatic.poiID WHERE source = ?";
    my $sth = $dbh->prepare($query);
    $sth->execute($sourceID) or die "Query execute failed: $query";
    my $count = $sth->fetchrow();
    return $count;
}
# Return true when at least one POI of the given source has a static ID.
sub staticIDsPresentInSource {
    my ($dbh, $sourceID) = @_;

    # Bind the ID instead of interpolating it into the SQL text.
    my $query =
        "SELECT * FROM POIMain join POIStatic on POIMain.ID = POIStatic.poiID WHERE source = ? LIMIT 1";
    my $sth = $dbh->prepare($query);
    $sth->execute($sourceID) or die "Query execute failed: $query";
    # NOTE(review): assumes the DBD driver reports rows() for SELECT right
    # after execute (true for DBD::mysql) -- confirm for other drivers.
    return ( $sth->rows > 0 );
}
# List the distinct country names (POICountries.country) that contain
# POIs of the given source lacking a static ID.
sub getNonStaticCountries {
    my ($dbh, $sourceID) = @_;

    # Bind the ID instead of interpolating it into the SQL text.
    my $query =
        "SELECT DISTINCT POICountries.country".
        " FROM ( POIMain left join POIStatic on POIMain.ID = POIStatic.poiID )".
        " join POICountries on POIMain.country = POICountries.ID".
        " WHERE source = ? and POIStatic.staticID is NULL";
    my $sth = $dbh->prepare($query);
    $sth->execute($sourceID) or die "Query execute failed: $query";
    my @resultArray = ();
    while (my $countryDbName = $sth->fetchrow()){
        push @resultArray, $countryDbName;
    }
    return @resultArray;
}
# Look up EDVersion.ID for a map version name (LIKE match).
# Returns the ID, or undef when no row matches.
sub getVersionIDFromName {
    my ($dbh, $versionName) = @_;

    # Bind the name instead of interpolating it into the SQL text.
    my $versionQuery =
        "SELECT ID FROM EDVersion WHERE version LIKE ?;";
    my $sth = $dbh->prepare($versionQuery);
    $sth->execute($versionName) or die "Query execute failed: $versionQuery";
    my $versionID = $sth->fetchrow();
    return $versionID;
}
# Look up EDVersion.version for a numeric version ID.
# Returns the name, or undef when the ID is unknown.
sub getVersionNameFromID {
    my ($dbh, $versionID) = @_;

    # Bind the ID instead of interpolating it into the SQL text.
    my $versionQuery =
        "SELECT version FROM EDVersion WHERE ID=?;";
    my $sth = $dbh->prepare($versionQuery);
    $sth->execute($versionID) or die "Query execute failed: $versionQuery";
    my $versionName = $sth->fetchrow();
    return $versionName;
}
# Look up the POI category ID (POICategoryTypes.catID) for a POI type ID.
# Returns undef if it is not possible to look up a category for the type.
sub getPOICatFromPOIType {
    my ($dbh, $poiType) = @_;

    # Bind the type ID instead of interpolating it into the SQL text.
    my $query = "SELECT catID FROM POICategoryTypes " .
        "WHERE poiTypeID = ?";
    my $sth = $dbh->prepare($query);
    $sth->execute($poiType) or die "Query execute failed: $query\n";
    my $poiCatID = $sth->fetchrow();
    return $poiCatID;
}
# Look up the description of a POI category ID.
# Returns the string "-no match-" when the category is unknown.
sub getPOICatDescriptFromPOICatID {
    my ($dbh, $catID) = @_;

    # Bind the category ID instead of interpolating it into the SQL text.
    my $query = "SELECT description FROM POICategoryTypes " .
        "WHERE catID = ?";
    my $sth = $dbh->prepare($query);
    $sth->execute($catID) or die "Query execute failed: $query\n";
    my $descript = $sth->fetchrow();
    if ( ! defined $descript ) {
        $descript = "-no match-";
    }
    return $descript;
}
# Look up POITypeTypes.ID for a POI type name (LIKE match).
# Returns the ID, or undef when no row matches.
sub getPOITypeIDFromTypeName {
    my ($dbh, $typeName) = @_;

    # Bind the name instead of interpolating it into the SQL text.
    my $query =
        "SELECT ID FROM POITypeTypes where typeName like ?;";
    my $sth = $dbh->prepare($query);
    $sth->execute($typeName) or die "Query execute failed: $query";
    my $poiTypeID = $sth->fetchrow();
    return $poiTypeID;
}
# Look up POITypeTypes.typeName for a numeric POI type ID.
# Returns the name, or undef when the ID is unknown.
sub getPOITypeNameFromTypeID {
    my ($dbh, $typeID) = @_;

    # Bind the ID instead of interpolating it into the SQL text.
    my $query =
        "SELECT typeName FROM POITypeTypes where ID = ?;";
    my $sth = $dbh->prepare($query);
    $sth->execute($typeID) or die "Query execute failed: $query";
    my $poiTypeName = $sth->fetchrow();
    return $poiTypeName;
}
# Return the largest ID present in POIAttributeTypes.
sub getMaxAttributeTypeID {
    my ($dbh) = @_;

    my $query =
        "SELECT max(id) FROM POIAttributeTypes;";
    my $sth = $dbh->prepare($query);
    $sth->execute or die "Query execute failed: $query";
    return $sth->fetchrow();
}
# Return the largest POIAttributeTypes ID below 1000 (the range reserved
# for IDs other than the info attributes, per the query's cutoff).
sub getMaxAttributeTypeIDLessThanInfo {
    my ($dbh) = @_;

    my $query =
        "SELECT max(id) FROM POIAttributeTypes where id < 1000;";
    my $sth = $dbh->prepare($query);
    $sth->execute or die "Query execute failed: $query";
    return $sth->fetchrow();
}
# Return the country directory names (see countryWaspNameToDirName) of all
# countries that contain POIs of the given POI source. Also prints each
# country, flagging whether it is marked detailed in POICountries.
sub getCountryDirNamesOfPoiSource {
    my ($dbh, $poiSourceID) = @_;

    # Bind the source ID instead of interpolating it into the SQL text.
    my $query =
        "SELECT DISTINCT POICountries.country, POICountries.detailLevel " .
        "FROM POICountries inner join POIMain " .
        " on POIMain.country = POICountries.ID " .
        "WHERE POIMain.source = ? " .
        "order by POICountries.country;";
    my $sth = $dbh->prepare($query);
    $sth->execute($poiSourceID) or die "Query execute failed: $query";

    my @resultArray = ();
    dbgprint "Printing countries (only detailed are used) of source:" .
        getSourceNameFromID($dbh, $poiSourceID);
    while ( (my $countryDbName, my $detailed) = $sth->fetchrow() ){
        my $detailedStr = "[DETAILED]";
        if ( !$detailed ){
            $detailedStr = "[NOT DETAILED]";
        }
        dbgprint " " . $countryDbName."\t".$detailedStr;
        push @resultArray, countryWaspNameToDirName($countryDbName);
    }
    return @resultArray;
} # getCountryDirNamesOfPoiSource
# Translate an ISO 3166-1 alpha-2 code into the map generation country
# directory name, going via the WASP country name.
sub getCountryDirNameFromIso3166Code {
    my ($dbh, $iso3166Code) = @_;

    # Report a missing code; execution then continues with the undefined
    # value, matching the established behavior of this helper.
    unless ( defined $iso3166Code ) {
        errprint "PerlWASPTools::iso3166CodeToCountryDirName iso3166Code not ".
            "defined";
    }

    my $countryName = getCountryNameFromISO3166Code($dbh, $iso3166Code);
    return countryWaspNameToDirName($countryName);
} # getCountryDirNameFromIso3166Code
# Convert a country name as stored in WASP (POICountries.country) into the
# corresponding map generation country directory name.
sub countryWaspNameToDirName {
    my ($waspName) = @_;

    # Countries whose directory name is not simply the lower-cased,
    # underscore-separated WASP name.
    my %specDirNameByWaspName = (
        'United Arab Emirates'        => 'uae',
        'Bosnia and Herzegovina'      => 'bosnia',
        'D.R. Congo'                  => 'dr_congo',
        'Georgia'                     => 'georgia_country',
        'Guinea-Bissau'               => 'guinea_bissau',
        'Macedonia, TFYRO'            => 'macedonia',
        'Serbia and Montenegro'       => 'serbia_montenegro',
        'Taiwan Province of China'    => 'taiwan',
        'Timor-Leste'                 => 'timor_leste',
        'United Republic of Tanzania' => 'tanzania',
        'Viet Nam'                    => 'vietnam',
    );

    # Replace special-cased names outright; the generic treatment below is
    # a no-op for the already-lowercase replacement values.
    if ( defined $specDirNameByWaspName{$waspName} ) {
        $waspName = $specDirNameByWaspName{$waspName};
    }

    # Generic rule: lower-case and turn blanks into underscores.
    my $dirName = lc $waspName;
    $dirName =~ tr/ /_/;
    return $dirName;
}
# Map version is the name of the map release in map generation,
# e.g. for Tele Atlas map releases TA_2009_06 (the map release string
# that is written to the before_merge/mapOrigin.txt file in the individual
# country map generation).
# In the WASP database version table the release might have another name,
# e.g. for Tele Atlas map releases TeleAtlas_2009_06, since it uses the
# *long* version of the supplier name, as specified in the variable file.
# So the short mapgen map version must be translated to the long WASP
# map version in order to find the WASP database version id.
#
# This function is used in poiImport. It exits the process when the
# version is missing from, or ambiguous in, the EDVersion table.
sub mapVersionToEDVersionID {
    my ($dbh, $mapVersion) = @_;

    # Rewrite short supplier prefixes to their long WASP supplier names.
    # AND, Monolit, GISrael, CEInfoSystems and DMapas already match the
    # EDVersion naming; unknown suppliers are passed through unmodified.
    if ( $mapVersion =~ /^TA/ ){
        $mapVersion =~ s/^TA/TeleAtlas/;
    }
    elsif ( $mapVersion =~ /^TM/ ){
        $mapVersion =~ s/^TM/TopMap/;
    }

    # Bind the version instead of interpolating it into the SQL text.
    my $query =
        "SELECT ID FROM EDVersion WHERE version = ?";
    my $sth = $dbh->prepare($query) or die "Query prepare failed: $query";
    $sth->execute($mapVersion) or die "Query execute failed: $query";
    if ( $sth->rows == 0 ){
        errprint "Map version not found in database EDVersion: $mapVersion";
        exit 1;
    }
    elsif ( $sth->rows > 1 ){
        errprint "Multiple matches for map version in database: $mapVersion";
        exit 1;
    }
    (my $edVersionID) = $sth->fetchrow();
    return $edVersionID;
} # mapVersionToEDVersionID
# Collect the full paths of all mcm map files (*.mcm or *.mcm.bz2) found
# in the before_merge subdirectory of each relevant country directory.
#
#  $dbh           - connected WASP database handle.
#  $countriesDirs - comma-separated list of countries directories
#                   (the EW,WW generation trees).
#  $poiSourceID   - optional; when defined, only countries that have POIs
#                   of this source are used.
#  $mapDataPOIs   - optional; when true, countries of all detail levels
#                   are used instead of only detailed ones.
#  $okWithMissingCountries - optional; when defined, countries missing
#                   from the directories are tolerated instead of fatal.
#
# Returns a sorted list of full map file paths. Dies on duplicated
# country directories or (unless tolerated) on missing countries.
sub getMapsFromCountryDirs {
my $dbh = $_[0];
my $countriesDirs = $_[1];
my $poiSourceID = $_[2];
my $mapDataPOIs = $_[3];
my $okWithMissingCountries = $_[4];
my @result = (); # Filled in with full paths to all map files.
# The countries where this poiSource have POIs.
my @wantedCountries;
if (defined $poiSourceID ){
@wantedCountries = getCountryDirNamesOfPoiSource($dbh, $poiSourceID);
}
my %allCountryDirs = (); # Country dir path by country dir name.
# Collect all the country directories (from EW,WW).
foreach my $countriesDir (split ",", $countriesDirs){
dbgprint "Checkning: $countriesDir";
opendir (COUNTRIES, "$countriesDir")
or die "Could not open dir $countriesDir";
foreach my $dir (readdir(COUNTRIES)){
#dbgprint($dir);
if ( ( $dir =~ /^\.$/ ) || ( $dir =~ /^\.\.$/ ) ){
# Uninteresting, and always duplicates
}
else {
# A country may only appear in one of the countries directories.
if ( defined( $allCountryDirs{$dir} )){
errprint "Dir $dir already present: $allCountryDirs{$dir}," .
" duplicated in $countriesDir/$dir";
exit 1;
}
$allCountryDirs{$dir} = "$countriesDir/$dir";
}
}
closedir COUNTRIES or die "Could not close dir";
}
# Find the map files of all "good quality" countries in WASP.
# check that all of them are represented in the EW,WW dirs
# If param okWithMissingCountries given, ok for some to be missing
# If we have map data POIs we use map files of all countries, regardless
# of quality
my $nbrCountries = 0;
my $nbrCountriesSub = 0;
my $nbrCountriesMissing = 0;
my $detailedSubQuery = " WHERE detailLevel > 0";
if ( (defined $mapDataPOIs) and $mapDataPOIs ) {
$detailedSubQuery = " WHERE detailLevel >= 0";
}
my $query =
"SELECT ID, country FROM POICountries $detailedSubQuery;";
my $sth = $dbh->prepare($query);
$sth->execute;
while ((my $countryID, my $dbCountry) = $sth->fetchrow()) {
my $dirCountry = countryWaspNameToDirName($dbCountry);
#dbgprint $dirCountry;
$nbrCountries += 1;
# NOTE(review): grep /$dirCountry/ is an unanchored regex match, so a
# country name that is a substring of another (e.g. "niger" in
# "nigeria") would also match -- confirm this is intended.
if ( (defined($poiSourceID) && grep /$dirCountry/, @wantedCountries ) ||
(!defined($poiSourceID) ) )
{
$nbrCountriesSub += 1;
# When poiSource is defined, only use the wanted countries.
if ( defined ($allCountryDirs{$dirCountry}) ){
my $mapDir = "$allCountryDirs{$dirCountry}/before_merge";
# Read the map files of the country dir.
opendir (MAPDIR, "$mapDir")
or die "Could not open dir $mapDir";
foreach my $mapFile (readdir(MAPDIR)){
if ( ( "$mapFile" =~ /\.mcm$/ ) ||
( "$mapFile" =~ /\.mcm.bz2$/ ) ) {
# This is a valid map file, store it with full path.
push(@result, "$mapDir/$mapFile");
}
}
closedir MAPDIR or die "Could not close dir";
}
else {
$nbrCountriesMissing += 1;
if ( defined $okWithMissingCountries ) {
# ok
} else {
errprint "Missing country directory: $dirCountry\n" .
"Try okWithMissingCountries option.";
die;
}
}
}
else {
#warnprint "Not using not wanted country: $dirCountry";
}
}
if ( defined $okWithMissingCountries ) {
dbgprint "From $nbrCountries/$nbrCountriesSub available " .
"POICountries countries, " .
"$nbrCountriesMissing are missing in EW,WW countriesDirs " .
"(running okWithMissingCountries)";
}
return sort(@result); # return an arrray with full paths to all map files.
} # getMapsFromCountryDirs
# Parse a comma-separated string of "isoCode:mapVersion" override pairs,
# validate each pair against the database, and store the map version into
# %$resultHash keyed by the country ID. Dies on an invalid code/version;
# finally prints the accumulated overrides for debugging.
sub handleMapVerOverrides {
    my ($dbh, $iso3166AndMapVerStr, $resultHash) = @_;

    for my $pair ( split /,/, $iso3166AndMapVerStr ) {
        dbgprint $pair;
        my ($iso3166, $mapVer) = split /:/, $pair;
        #dbgprint "($iso3166):($mapVer)";

        # Validate the country code.
        my $countryID = getCountryIDFromISO3166Code($dbh, $iso3166);
        defined $countryID or die "incorrect iso3166 \"$iso3166\"";

        # Validate the map version (the lookup itself exits on unknown
        # versions, so this die is a belt-and-braces check).
        my $edVerID = mapVersionToEDVersionID($dbh, $mapVer);
        defined $edVerID or die "incorrect map version \"$mapVer\"";

        dbgprint "$iso3166:$mapVer:$countryID";
        $resultHash->{$countryID} = $mapVer;
    }

    for my $key ( keys %{$resultHash} ) {
        dbgprint "handleMapVerOverrides $key:$resultHash->{$key}";
    }
}
# handleMapVerOverrides
# Read the map version (mapOrigin.txt contents) of every country found in
# the given countries directories, and fill %$resultHash with the version
# keyed by POICountries.ID.
#
#  $dbh           - connected WASP database handle.
#  $countriesDirs - comma-separated list of countries directories (EW,WW).
#  $resultHash    - hash ref filled in: countryID => map version string.
#  $okWithMissingCountries - optional; when defined, countries without a
#                   map version are tolerated instead of fatal.
#
# Dies on duplicated countries, on (untolerated) missing versions, and
# when some country directory could not be matched to a POICountries row.
sub getMapVersionByCountryID {
my $dbh = $_[0];
my $countriesDirs = $_[1];
my $resultHash = $_[2];
my $okWithMissingCountries = $_[3];
my %mapVerByCountry = ();
# Get mapOrigin from mapOrigin.txt in the EW,WW countries directories
foreach my $countriesDir (split ",", $countriesDirs){
dbgprint "Checking: $countriesDir";
opendir (COUNTRIES, "$countriesDir")
or die "Could not open dir $countriesDir";
foreach my $dir (readdir(COUNTRIES)){
#dbgprint($dir);
my $mapOrigFile = $countriesDir . "/" . $dir .
"/before_merge/mapOrigin.txt";
#dbgprint($mapOrigFile);
if ( -e $mapOrigFile ){
open MAPORIGFILE, "<$mapOrigFile"
or die "Could not open $mapOrigFile.";
# A country may only appear in one countries directory.
if ( defined ($mapVerByCountry{$dir}) ){
errprint "This country already exists in previous country".
" directory, country: $dir";
#print "rm $dir && ";
exit 1;
}
# The map version is the first line of mapOrigin.txt.
($mapVerByCountry{$dir}) =<MAPORIGFILE>;
chomp $mapVerByCountry{$dir};
#dbgprint "$dir: $mapVerByCountry{$dir}";
close MAPORIGFILE or die "Could not close dir";
}
else {
dbgprint "$dir have no mapOrigin.txt";
}
}
}
# Get all countries from POICountries table,
# check that all of them were represented in the EW,WW dirs
# If param okWithMissingCountries given, ok for some to be missing
my $nbrCountries = 0;
my $nbrCountriesMissing = 0;
my $query =
"SELECT ID, country FROM POICountries";
my $sth = $dbh->prepare($query);
$sth->execute;
# %checkHash records which country dirs were successfully matched to a
# POICountries row, so leftovers can be reported below.
my %checkHash = ();
while ((my $countryID, my $dbCountry) = $sth->fetchrow()) {
my $dirCountry = countryWaspNameToDirName($dbCountry);
#dbgprint $dirCountry;
$nbrCountries += 1;
if ( defined ($mapVerByCountry{$dirCountry}) ){
$resultHash->{$countryID}=$mapVerByCountry{$dirCountry};
$checkHash{$dirCountry}=$mapVerByCountry{$dirCountry};
}
else {
$nbrCountriesMissing += 1;
if ( defined $okWithMissingCountries ) {
# ok
} else {
errprint "Missing map version for country: $dbCountry($dirCountry)\n" .
"Try okWithMissingCountries option.";
die;
}
}
}
if ( defined $okWithMissingCountries ) {
dbgprint "From $nbrCountries available POICountries countries, " .
"$nbrCountriesMissing are missing in EW,WW countriesDirs " .
"(running okWithMissingCountries)";
}
# Printing what country directories that have not been used.
# Might be that country dir name does not match the name extracted
# from POICountries table, need to fix!?
my $someNotUsed = 0;
foreach my $countryDir (keys(%mapVerByCountry)){
if ( ! defined $checkHash{$countryDir} ){
warnprint "Not using country dir: $countryDir";
$someNotUsed = 1;
}
}
if ( $someNotUsed ) {
die "Fix country dir names so they match " .
"the name extracted from POICountries table";
}
} # getMapVersionByCountryID
sub getDetailLevelOfCountryID {
    # Return the detailLevel stored in POICountries for $countryID,
    # or undef when no (non-empty) detail level is found.
    my ($dbh, $countryID) = @_;
    my $query =
        "SELECT detailLevel FROM POICountries " .
        "WHERE ID = ?";
    my $sth = $dbh->prepare( $query ) or
        die "Could not prepare query: $query\n";
    # Bind the ID as a placeholder instead of interpolating it into SQL.
    $sth->execute($countryID) or die "Could not execute query: $query\n";
    my ($detailLevel) = $sth->fetchrow();
    if (!defined $detailLevel or $detailLevel eq '') {
        dbgprint "No detail level found\n";
        return undef;
    }
    return $detailLevel;
} # getCountryDetailLevel
sub getSourceIsMapPoi {
    # Return the POIProducts.mapDataPOIs flag for the product of
    # $sourceID (true means the POIs are "map POIs"). Dies when the
    # product cannot be found or the query is not a single row.
    my ($dbh, $sourceID) = @_;
    my $productID = getProductID($dbh, $sourceID);
    if ( ! defined($productID) ){
        die "getSourceIsMapPoi could not find product for source $sourceID";
    }
    # Use a bind value rather than interpolating the ID into the SQL.
    my $query = "SELECT mapDataPOIs from POIProducts WHERE ID = ?";
    my $sth = $dbh->prepare($query) ||
        die "Prepare error in getSourceIsMapPoi".
        ", failed query: $query";
    $sth->execute($productID) || die "Execute error in getSourceIsMapPoi".
        ", failed query: $query";
    if ($sth->rows() == 1){
        (my $isMapPoiValue) = $sth->fetchrow();
        return $isMapPoiValue;
    }
    die "getSourceIsMapPoi unexpected query result from query: $query";
} # getSourceIsMapPoi
sub getProductSupplierName {
    # Return POIProducts.supplierName for $productID, or undef when the
    # product has no (non-empty) supplier name.
    my ($dbh, $productID) = @_;
    my $query =
        "SELECT supplierName FROM POIProducts " .
        "WHERE ID = ?";
    my $sth = $dbh->prepare( $query ) or
        die "Could not prepare query: $query\n";
    # Placeholder binding instead of SQL string interpolation.
    $sth->execute($productID) or die "Could not execute query: $query\n";
    my ($supplierName) = $sth->fetchrow();
    if (!defined $supplierName or $supplierName eq '') {
        dbgprint "No supplierName found\n";
        return undef;
    }
    return $supplierName;
}
sub getProductID {
    # Look up POISources.productID for $sourceID. Returns undef when the
    # value is missing, empty, or 0 (0 is treated as "no product").
    my ($dbh, $sourceID) = @_;
    my $sourceQuery =
        "SELECT productID FROM POISources " .
        "WHERE ID = ?";
    my $sth = $dbh->prepare( $sourceQuery ) or die "Could not prepare query: $sourceQuery\n";
    # Placeholder binding instead of SQL string interpolation.
    $sth->execute($sourceID) or die "Could not execute query: $sourceQuery\n";
    my ($productID) = $sth->fetchrow();
    if (!defined $productID or $productID eq '' or $productID == 0 ) {
        dbgprint "No productID found\n";
        return undef;
    }
    return $productID;
}
sub getPrevPoiSource {
    # Convenience wrapper: the previous source of the same product
    # (highest source ID lower than $sourceID). See getPrevOrNextPoiSource.
    my ($dbh, $sourceID) = @_;
    return getPrevOrNextPoiSource($dbh, $sourceID, 'prev');
} # getPrevPoiSource
sub getNextPoiSource {
    # Convenience wrapper: the next source of the same product
    # (lowest source ID higher than $sourceID). See getPrevOrNextPoiSource.
    my ($dbh, $sourceID) = @_;
    return getPrevOrNextPoiSource($dbh, $sourceID, 'next');
} # getNextPoiSource
sub getPrevOrNextPoiSource {
    # Find the neighbouring source of $sourceID within the same product.
    # 'prev' -> highest source ID lower than $sourceID,
    # 'next' -> lowest source ID higher than $sourceID.
    # Returns ($sourceID, $sourceName) on success, undef when there is no
    # neighbour, no product, or the keyword is invalid.
    # In parameters
    my $dbh = $_[0];
    my $sourceID = $_[1];
    my $prevOrNextKeyword = $_[2];
    if ( ($prevOrNextKeyword ne 'prev') and ($prevOrNextKeyword ne 'next') ) {
        dbgprint "No \"prev\" or \"next\" keyword as input.\n";
        return undef;
    }
    # Result variables;
    my $prevSourceID;
    my $prevSourceName;
    my @sortedSourceIDs;
    dbgprint "source id = $sourceID";
    my $productID = getProductID ($dbh, $sourceID);
    if (!defined $productID or $productID eq '' or $productID == 0 ) {
        dbgprint "No productID found\n";
        return undef;
    }
    my @sourceIDs = getSourcesOfProduct($dbh, $productID);
    # Sort ascending for 'prev' and descending for 'next': in both
    # orderings the wanted neighbour is the element visited immediately
    # before $sourceID in the loop below.
    if ($prevOrNextKeyword eq 'prev') {
        @sortedSourceIDs = sort {$a <=> $b} @sourceIDs;
    } else {
        @sortedSourceIDs = sort {$b <=> $a} @sourceIDs;
    }
    my $tmpPrevSrcID="";
    foreach my $tmpSrcID (@sortedSourceIDs){
        #dbgprint "Candidate: ($tmpSrcID)";
        if ( $tmpSrcID eq $sourceID ){
            $prevSourceID=$tmpPrevSrcID;
        }
        $tmpPrevSrcID=$tmpSrcID;
    }
    # An empty string means $sourceID was first in the ordering, i.e.
    # there is no prev/next neighbour; undef means it was not found.
    # Both are falsy, so both fall through to the undef return.
    if ($prevSourceID) {
        $prevSourceName = getSourceNameFromID( $dbh, $prevSourceID );
        return ($prevSourceID, $prevSourceName);
    }
    else {
        return undef;
    }
} # getPrevOrNextPoiSource
sub getInUseInfoFromSource {
    # Print and collect the per-country inUse/validFromVersion/
    # validToVersion info for all POIs of $sourceID.
    # Returns (\%resultHash, $all_inuse_value) where $all_inuse_value is
    # 0 or 1 when every country agrees on inUse, otherwise undef.
    my ($dbh, $sourceID) = @_;
    timeprint "";
    timeprint "Will print out info about inUse, validTo and validFrom for source";
    # result variables
    my %resultHash;
    my $query = "SELECT country, inUse, validFromVersion, validToVersion " .
        "FROM POIMain " .
        "WHERE source = ? GROUP BY country";
    my $sth = $dbh->prepare($query) or die "Could not prepare query: $query";
    $sth->execute($sourceID) or die "Could not execute query: $query";
    my %all_inuse;
    # BUG FIX: the original "my ($a, $b, $c) = 0;" only initialized the
    # first variable; initialize all three counters to 0.
    my ($inuse_false, $inuse_true, $num_rows) = (0, 0, 0);
    my $all_inuse_value;
    timeprint "";
    dbgprint "country\tinUse\tvalidFromVersion\tvalidToVersion";
    while (my ($c, $i, $vf, $vt) = $sth->fetchrow()) {
        if ($i == 0) {
            $inuse_false++;
            $all_inuse{$i} = $inuse_false;
        } else {
            $inuse_true++;
            $all_inuse{$i} = $inuse_true;
        }
        # Show NULL database values explicitly in the printout.
        if (!defined $vf) {
            $vf = 'NULL';
        }
        if (!defined $vt) {
            $vt = 'NULL';
        }
        $resultHash{$c}{'inUse'} = $i;
        $resultHash{$c}{'validFromVersion'} = $vf;
        $resultHash{$c}{'validToVersion'} = $vt;
        dbgprint "$c \t$i \t$vf \t$vt ";
        $num_rows++;
    }
    timeprint "";
    if ((exists $all_inuse{0}) && ($all_inuse{0} == $num_rows)) {
        $all_inuse_value = 0;
        dbgprint "All inUse are set to false";
    } elsif ((exists $all_inuse{1}) && ($all_inuse{1} == $num_rows)) {
        dbgprint "All inUse are set to true";
        $all_inuse_value = 1;
    } else {
        dbgprint "Countries have different inUse settings";
    }
    return (\%resultHash, $all_inuse_value);
} # getInUseInfoFromSource
sub getSourcesOfProduct {
    # Return all POISources.IDs belonging to $productID, ascending.
    my ($dbh, $productID) = @_;
    # Result variables
    my @sourceIDs;
    my $query =
        "SELECT ID " .
        " FROM POISources WHERE productID = ?" .
        " ORDER BY ID";
    my $sth = $dbh->prepare( $query ) or die "Could not prepare query: $query";
    # Placeholder binding instead of SQL string interpolation.
    $sth->execute($productID) or die "Could not execute query: $query";
    while (my $sourceID = $sth->fetchrow()){
        push @sourceIDs, $sourceID;
    }
    return @sourceIDs;
} # getSourcesOfProduct
sub getMostFrequentValidFromVersionOfSourceAndCountry {
    # Return the validFromVersion id occurring most often in POIMain for
    # the combination of $sourceID and $countryID (undef when no rows).
    my ($dbh, $sourceID, $countryID) = @_;
    my $query = "SELECT validFromVersion, count(*) as number FROM POIMain WHERE " .
        "source = ? and country = ? GROUP BY validFromVersion " .
        "ORDER BY number DESC LIMIT 1";
    my $sth = $dbh->prepare($query) or die "Could not prepare query: $query";
    # Placeholder binding instead of SQL string interpolation.
    $sth->execute($sourceID, $countryID) or die "Could not execute query: $query";
    my ($mostFrequentValidFromVersion) = $sth->fetchrow();
    return $mostFrequentValidFromVersion;
} # getMostFrequentValidFromVersionOfSourceAndCountry
sub getInUseFromSourceAndCountry {
    # Return the distinct inUse values of POIs for the combination of
    # $sourceID and $countryID. More than one element in the result means
    # the rows are inconsistent (which they should not be).
    my ($dbh, $sourceID, $countryID) = @_;
    # result variables
    my @resultValue;
    my $query = "SELECT DISTINCT inUse FROM POIMain WHERE " .
        "source = ? and country = ?";
    my $sth = $dbh->prepare($query) or die "Could not prepare query: $query";
    # Placeholder binding instead of SQL string interpolation.
    $sth->execute($sourceID, $countryID) or die "Could not execute query: $query";
    # List assignment in scalar context: loops while a row was fetched,
    # even when the fetched inUse value itself is 0.
    while ((my $tmpInUseValue) = $sth->fetchrow()) {
        push @resultValue, $tmpInUseValue;
    }
    return @resultValue;
} # getInUseFromSourceAndCountry
sub getStaticIDForPOIID {
    # Return the staticID from POIStatic for POI $poiID (first row only),
    # or undef when the POI has none.
    my ($dbh, $poiID) = @_;
    my $query = "SELECT staticID FROM POIStatic WHERE poiID = ?";
    # Check prepare as well; the original only checked execute.
    my $sth = $dbh->prepare($query) or die "Query prepare failed: $query";
    $sth->execute($poiID) or die "Query execute failed: $query";
    my $staticID = $sth->fetchrow();
    return $staticID;
}
sub storePOIs {
    # Store all POIObjects in the $allPOIs hash (processed in sorted key
    # order): drop nameless POIs, derive categories from POI types,
    # optionally write the POIs to a CPIF file, and optionally insert
    # them into WASP. Returns the number of POIs added to WASP
    # (0 when $addToWasp is not given).
    my $dbh = $_[0];
    my $allPOIs = $_[1];
    my $addSynonyms = $_[2]; # obsolete, no action
    my $addToWasp = $_[3];
    my $cpifFile = $_[4];
    my $categoryObject = $_[5];
    dbgprint "PWT::storePOIs Called PerlWASPTools::storePOIs for " .
        scalar( keys( %{$allPOIs} ) ) . " POIs in allPOIs hash.";
    if ( defined($cpifFile) && ($cpifFile ne "") ) {
        dbgprint("PWT::storePOIs Will write CPIF to: $cpifFile");
    }
    else {
        dbgprint("PWT::storePOIs Will NOT write CPIF.");
    }
    if ( defined($addToWasp) ) {
        dbgprint("PWT::storePOIs Will add POIs to WASP.");
    }
    else {
        dbgprint("PWT::storePOIs Will NOT add POIs to WASP.");
    }
    # Check that all POIs have a name
    foreach my $poiKey ( sort keys( %{$allPOIs} ) ){
        if (!$allPOIs->{$poiKey}->getNbrNamesNonSynonyms()) {
            # poi has no name (except perhaps synonym), don't use
            dbgprint "POI " . $allPOIs->{$poiKey}->getSourceReference();
            dbgprint "has no name except synonym. Will not use this POI.\n";
            delete($allPOIs->{$poiKey});
        }
    }
    # Extract categories from POI types
    dbgprint "Extract categories from POI types";
    my $tmpNbr = 0;
    foreach my $poiKey ( sort keys( %{$allPOIs} ) ){
        $tmpNbr += 1;
        if ( $tmpNbr % 50000 == 0 ){ # Prints progress
            dbgprint " processed $tmpNbr pois";
        }
        foreach my $poiType ($allPOIs->{$poiKey}->getPOITypes()){
            my $catID = getPOICatFromPOIType($dbh, $poiType);
            if ( defined $catID ) {
                $allPOIs->{$poiKey}->addCategory($catID);
            }
        }
        # Remove all categories being parents of other categories of this POI.
        my @newCategories =
            $categoryObject->
            eliminateHigherLevelDoubles($allPOIs->{$poiKey}->getCategoriesRef());
        # If this POI has a category outside the tree, remove all other
        # categories.
        #
        # This is because in this case, the category outside the
        # tree is probably a category that should be below the POI type connected
        # category in the tree, but since it's not, the parent category could not
        # be removed by eliminateHigherLevelDoubles.
        my @catInTree;
        my $rmCatsOutsideTree = 0;
        my $myCats = $allPOIs->{$poiKey}->getCategoriesRef;
        if ( defined $myCats ) {
            foreach my $catID ( @{$myCats} ){
                if (! $categoryObject->catInTree($catID) ){
                    $rmCatsOutsideTree = 1;
                }
                else {
                    push @catInTree, $catID;
                }
            }
        }
        if ( $rmCatsOutsideTree ){
            # We found a category outside the tree, so we remove the others.
            foreach my $catID (@catInTree){
                $allPOIs->{$poiKey}->removeCategory($catID);
            }
        }
    }
    dbgprint "Extracted categories from POI types";
    # Writing CPIF
    my $nbrPois = 0;
    my $writtenPois = 0;
    if ( defined($cpifFile) && ($cpifFile ne "") ) {
        dbgprint "PWT::storePOIs Writing POIs to CPIF";
        # Truncate the outfile first; writeCPIF below appends by file
        # name. Checked, lexical, 3-arg open and an explicit close (the
        # original bareword 2-arg open was unchecked and never closed).
        open( my $cpifFH, '>', $cpifFile )
            or die "PWT::storePOIs Could not open CPIF file $cpifFile: $!";
        print {$cpifFH} "";
        close $cpifFH
            or die "PWT::storePOIs Could not close CPIF file $cpifFile: $!";
        foreach my $poiKey ( sort (keys( %{$allPOIs} ) ) ){
            $nbrPois++;
            if ( $allPOIs->{$poiKey}->writeCPIF($cpifFile) ){
                $writtenPois++;
            }
        }
        dbgprint "PWT::storePOIs Wrote $writtenPois POIs of $nbrPois to CPIF.";
    }
    # Add to WASP
    $nbrPois = 0;
    my $insertedPois = 0;
    if ( ! $addToWasp ) {
        dbgprint "PWT::storePOIs Return without adding to WASP.";
        return 0;
    }
    foreach my $poiKey ( sort keys( %{$allPOIs} ) ){
        $nbrPois++;
        my $newID = $allPOIs->{$poiKey}->addToWASP();
        if ( $newID > 0 ) {
            $insertedPois += 1;
            dbgprint "WASP_INSERT[$insertedPois]: srcRef=".
                $allPOIs->{$poiKey}->getSourceReference().
                " waspID=". $newID;
        }
        elsif( $newID == -2 ) {
            # Special return code for when the POI was not added because it
            # Already exists.
            dbgprint "WASP_PRESENT[$insertedPois]: srcRef=".
                $allPOIs->{$poiKey}->getSourceReference();
        }
        else {
            # Any other return code is a hard failure; abort with context.
            my $srcRef = $allPOIs->{$poiKey}->getSourceReference();
            if (!defined ($srcRef) ){
                $srcRef = "UNKNOWN SOURCE REF";
            }
            my $failedID = $insertedPois+1;
            die ("PWT::storePOIs Failed to add poi[". $failedID . "]".
                " srcRef: ". $srcRef. " Totally added $insertedPois");
        }
    }
    dbgprint "PWT::storePOIs Added $insertedPois POIs of $nbrPois to WASP.";
    dbgprint "PWT::storePOIs Wrote $writtenPois POIs of $nbrPois to CPIF.";
    return $insertedPois;
}
# Returns a list with all country names
sub getAllCountryDirNames{
    # Return the directory-style names of every country in POICountries.
    my ($dbh) = @_;
    my $query = "SELECT country FROM POICountries";
    my $sth = $dbh->prepare($query) || die "Prepare failed, query: $query";
    $sth->execute() || die "Execute failed, query: $query";
    my @countries;
    while ( my ($waspCountry) = $sth->fetchrow() ) {
        # Convert WASP country name to its country-dir form.
        push @countries, countryWaspNameToDirName($waspCountry);
    }
    return @countries;
} # getAllCountryDirNames
sub getCountriesOfSource {
    # Return the distinct country IDs that have POIs of $sourceID,
    # ordered ascending.
    my ($dbh, $sourceID) = @_;
    # result array
    my @countries = ();
    my $query = "SELECT distinct country FROM POIMain WHERE " .
        "source = ? order by country";
    my $sth = $dbh->prepare($query) || die "Prepare failed, query: $query";
    # Placeholder binding instead of SQL string interpolation.
    $sth->execute($sourceID) || die "Execute failed, query: $query";
    while ((my $countryID) = $sth->fetchrow()) {
        push @countries, $countryID;
    }
    return @countries;
} # getCountriesOfSource
sub getSourceInfo {
    # Collect statistics for the POI set of $sourceID from POIMain joined
    # with POIStatic, POICountries and POIEntryPoints. Returns
    # ($nbrTotal, $nbrCountries, $nbrDeleted, $nbrInUse,
    #  $nbrStaticOK, $nbrPosOK, $nbrEtrPtsOK).
    my ($dbh, $sourceID) = @_;
    my $query =
        "SELECT".
        " POICountries.country, deleted, inUse, (POIMain.lat is NULL),".
        " (staticID is NULL), (POIEntryPoints.lat IS NULL), count(distinct POIMain.ID)".
        " FROM (POIMain left join POIStatic on POIMain.ID = POIStatic.poiID)".
        " join POICountries on POIMain.country = POICountries.ID".
        " left join POIEntryPoints on POIMain.ID = POIEntryPoints.poiID".
        " WHERE source = ?".
        " GROUP BY POICountries.country, deleted, inUse,".
        " (POIMain.lat is NULL), (staticID is NULL),".
        " (POIEntryPoints.lat IS NULL);";
    my $sth = $dbh->prepare( $query ) || die "Prepare failed, query: $query";
    # Placeholder binding instead of SQL string interpolation.
    $sth->execute($sourceID) || die "Execute failed, query: $query";
    my $nbrTotal = 0;
    my $nbrDeleted = 0;
    my $nbrStaticOK = 0;
    my $nbrPosOK = 0;
    my $nbrInUse = 0;
    my $nbrEtrPtsOK = 0;
    my %uniqueCountryIDs = ();
    while ( my ( $countryID,
                 $deleted,
                 $inUse,
                 $latIsNull,
                 $staticIsNull,
                 $etrPtsIsNull,
                 $count ) = $sth->fetchrow() ){
        $nbrTotal += $count;
        $uniqueCountryIDs{$countryID} = 1;
        $nbrDeleted  += $count if $deleted;
        $nbrInUse    += $count if $inUse;
        $nbrPosOK    += $count if !$latIsNull;     # POI has a coordinate
        $nbrStaticOK += $count if !$staticIsNull;  # POI has a static ID
        $nbrEtrPtsOK += $count if !$etrPtsIsNull;  # POI has an entry point
    }
    my $nbrCountries = scalar(keys(%uniqueCountryIDs));
    return ( $nbrTotal,
             $nbrCountries,
             $nbrDeleted,
             $nbrInUse,
             $nbrStaticOK,
             $nbrPosOK,
             $nbrEtrPtsOK );
} # getSourceInfo
sub db_connect {
    # Connect to the WASP POI database and return the DBI handle.
    # DBI->connect returns undef on failure -- callers must check.
    # Pass a true value as the first argument to suppress the debug print.
    my $noPrint = 0;
    if ( $_[0] ) {
        $noPrint = 1;
    }
    # Define the settings for connecting to the WASP database here
    # SECURITY NOTE(review): credentials are hard-coded in source;
    # consider moving them to a protected configuration file.
    my $dbname = "poi"; # sql database name
    my $dbhost = "poihost"; # sql host
    my $dbuser = "poi"; # sql user
    my $dbpw = "UghTre6S"; # sql database password
    my $ldbh = DBI->connect("DBI:mysql:$dbname:$dbhost", $dbuser, $dbpw);
    if ( ! $noPrint ) {
        dbgprint "Connecting to db: $dbname on $dbhost";
    }
    return $ldbh;
}
sub db_disconnect {
    # Close a database connection obtained from db_connect().
    my $ldbh = shift;
    $ldbh->disconnect;
}
1;
=head1 NAME
PerlWASPTools
Package with common functions used in communication with
the WASP (POI or ED) database.
=head1 USE
Include into your perl file with the combination of:
use lib "$BASEGENFILESPATH/script/perllib";
use PerlWASPTools;
pointing to the directory where the perl modules are stored
=head1 FUNCTIONS
getCountryIDFromName( $dbh, $countryName )
Asks the database connection $dbh to find countryID for
the $countryName given. If the $countryName is unknown
the countryID returned is undefined.
getCountryIDFromISO3166Code( $dbh, $countryCode )
Asks the database connection $dbh to find countryID for
the $countryCode given. If the $countryCode is unknown
the countryID returned is undefined. $countryCode
should be the two-letter code from ISO3166-1 alpha-2.
getCountryNameFromISO3166Code( $dbh, $countryCode )
Asks the database connection $dbh to find country
name for the $countryCode given. If the $countryCode
is unknown the country returned is undefined.
$countryCode should be the two-letter code from
ISO3166-1 alpha-2.
getCountryDirNameFromIso3166Code
Returns the country directory name, used when storing GDF and mcm maps in
one directory per country.
getLanguageIDFromName( $dbh, $langName )
Asks the database connection $dbh to find langID for
the $langName given. If the $langName is unknown the
langID returned is undefined.
getSourceIDFromName( $dbh, $sourceName )
Asks the database connection $dbh to find sourceID for
the $sourceName given. If the $sourceName is unknown
the sourceID returned is undefined.
getSourceNameFromID( $dbh, $sourceID )
Asks the database connection $dbh to find sourceName
for the $sourceID given. If the $sourceID is unknown
in WASP, the sourceName returned is undefined.
getVersionIDFromName( $dbh, $versionName )
Version ID is the one stored in EDVersion in the database.
Asks the database connection $dbh to find versionID for
the $versionName given. If the $versionName is unknown
the $versionID returned is undefined.
getVersionNameFromID( $dbh, $versionID )
Version ID is the one stored in EDVersion in the database.
Asks the database connection $dbh to find versionName
for the $versionID given. If the $versionID is unknown
in WASP, the versionName returned is undefined.
getPOITypeIDFromTypeName( $dbh, $typeName )
Asks the database connection $dbh to find poiTypeID for
the $typeName given.
getPrevPoiSource ( $dbh, $sourceID )
see getPrevOrNextPoiSource
getNextPoiSource ( $dbh, $sourceID )
see getPrevOrNextPoiSource
getPrevOrNextPoiSource ( $dbh, $sourceID, $prevOrNext )
Using database connection $dbh, in case $prevOrNext keyword
is "prev", returns the highest source ID lower than $sourceID,
and having the same product ID as $sourceID. In case $prevOrNext
keyword is "next", returns the lowest source ID higher than
$sourceID, and having the same product ID as $sourceID.
Returns an array with source ID in element 0 and source
name in element 1, or undef on
failure.
getMostFrequentValidFromVersionOfSourceAndCountry ( $dbh, $sourceID, $countryID )
Will return the most frequent validFromVersion id for the
combination of source $sourceID and country $countryID.
getInUseFromSourceAndCountry ( $dbh, $sourceID, $countryID )
Will return the value of inUse for the combination
of source $sourceID and country $countryID. The result
will come in an array, for the case if in use is both
1 and 0 for a source and country (which it should not be)
getInUseInfoFromSource ( $dbh, $sourceID )
Will print out info about inUse, validFromVersion and
validToVersion for each country in source $sourceID. Returns hash
with this info.
getSourcesOfProduct ( $dbh, $productID )
Using database connection $dbh, returns the sourceIDs of a product
with $productID. Returns an array with sourceIDs, or undef on failure.
getMaxAttributeTypeID( $dbh )
Get the max ID from the POIAttributeTypes table.
getMaxAttributeTypeIDLessThanInfo( $dbh )
Get the max ID from the POIAttributeTypes table where
id is less than poi info.
db_connect ( $noPrint )
Connects to poi db. Returns database handle.
Give $noPrint = 1 if debug printing is not wanted.
db_disconnect ( $dbh )
Disconnects from db.
getMapVersionByCountryID( $dbh, $countriesDirs, $resultHash, $okWithMissingCountries )
Fills in the hash reference in $resultHash with the map versions found
in mapOrigin.txt of countries in $countriesDirs. Using $dbh for finding
database IDs of countries. Use mapVersionToEDVersionID to get EDVersion
from the map version in $resultHash.
handleMapVerOverrides ( $dbh, $iso3166AndMapVerStr, $resultHash )
Changes map version values of the counties that match the countries
given in $iso3166AndMapVerStr of the $resultHash. Give $iso3166AndMapVerStr
on the form iso3166Code:mapVer,iso3166Code:mapVer, ... for example:
sg:TA_2007_02,hk:TA_2006_10,se:TA_2006_10. $resultHash is the both in and out
parameter on the same format as returned by getMapVersionByCountryID.
mapVersionToEDVersionID ($dbh, $mapVersion)
Translate the map version string in $mapVersion to WASP EDVersion.ID using
$dbh.
getStaticIDForPOIID ($dbh, $poiID)
Get the static ID from POIStatic table for a POI with id $poiID
getNbrStaticIDsOfSourceID($dbh, $sourceID)
Get number of static IDs of POIs of $sourceID. Counts multiple static IDs
of the same POI many times.
getNbrPOIsOfSourceID($dbh, $sourceID)
Get number of POIs of the given $sourceID
storePOIs($dbh, $allPOIs, $addSynonyms, $addToWasp, $cpifFile, $categoryObject)
Stores all POIObjects given in the hash $allPOIs sorted on source reference.
The $addSynonyms is obsolete - no action
Storing options are $addToWasp, which makes the POIs get inserted to WASP
and $cpifFile, which if specified writes the POIs to the CPIF file
given in the option.
getSourceIsMapPoi($dbh, $sourceID)
Returns true if POI sets of $sourceID are considered map POIs, that is, POIs
that may have entry points delivered from the map provider.
getSourceInfo($dbh, $sourceID)
Returns an array with different statistical counts of the POI set from
source $sourceID. Returned values:
($nbrTotal, $nbrCountries, $nbrDeleted, $nbrInUse, $nbrStaticOK, $nbrPosOK, $nbrEtrPtsOK)
getCountriesOfSource($dbh, $sourceID)
Returns an array with countryIDs of the source $sourceID.
getAllCountryDirNames($dbh)
Returns an array with the dir name of all countries.
getProductID($dbh, $sourceID)
Returns product ID of $sourceID.
| wayfinder/Wayfinder-Server | Server/bin/Scripts/MapGen/PerlWASPTools.pm | Perl | bsd-3-clause | 46,250 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.