code stringlengths 2 1.05M | repo_name stringlengths 5 101 | path stringlengths 4 991 | language stringclasses 3 values | license stringclasses 5 values | size int64 2 1.05M |
|---|---|---|---|---|---|
#-----------------------------------------------------------
# defbrowser.pl
# Get default browser information - check #1 can apply to HKLM
# as well as to HKCU
#
# Change History:
# 20091116 - Added Check #1
# 20081105 - created
#
# copyright 2009 H. Carvey, keydet89@yahoo.com
#-----------------------------------------------------------
package defbrowser;
use strict;
# Plugin configuration consumed by the RegRipper framework via getConfig().
my %config = (hive => "Software",
osmask => 22,            # OS applicability bitmask -- NOTE(review): meaning defined by the framework, confirm there
hasShortDescr => 1,      # plugin supplies a short description (see getShortDescr below)
hasDescr => 0,           # no long description
hasRefs => 0,            # no external references
version => 20091116);    # date-based version; matches the change history in the header
# --- RegRipper plugin accessor interface ---------------------------------
# Small, fixed accessors the framework calls to interrogate this plugin.
sub getConfig     { return %config; }
sub getShortDescr { return "Gets default browser setting from HKLM"; }
sub getDescr      { }
sub getRefs       { }
sub getHive       { return $config{hive}; }
sub getVersion    { return $config{version}; }

my $VERSION = getVersion();
# Entry point called by RegRipper: report the default browser recorded in
# the supplied Software hive, using two independent registry locations.
sub pluginmain {
    my $class = shift;
    my $hive  = shift;
    ::logMsg("Launching defbrowser v.".$VERSION);
    ::rptMsg("defbrowser v.".$VERSION); # banner
    ::rptMsg("(".$config{hive}.") ".getShortDescr()."\n"); # banner
    my $reg      = Parse::Win32Registry->new($hive);
    my $root_key = $reg->get_root_key;

    # Check #1: the default value of Clients\StartMenuInternet names the
    # default browser (can apply to HKLM as well as HKCU -- see header).
    my $key_path = "Clients\\StartMenuInternet";
    if (my $key = $root_key->get_subkey($key_path)) {
        ::rptMsg("Default Browser Check #1");
        ::rptMsg($key_path);
        ::rptMsg("LastWrite Time ".gmtime($key->get_timestamp())." (UTC)");
        ::rptMsg("");
        # Fix: guard the lookup -- get_value("") returns undef when the key
        # has no default value, and calling get_data() on undef was fatal.
        # This mirrors the eval already used by Check #2 below.
        my $browser;
        eval {
            $browser = $key->get_value("")->get_data();
        };
        if ($@) {
            ::rptMsg("Error locating default browser setting.");
        }
        else {
            ::rptMsg("Default Browser : ".$browser);
        }
    }
    else {
        ::rptMsg($key_path." not found.");
    }
    ::rptMsg("");

    # Check #2: the HTTP protocol handler's command line.
    # Fix: reuse $key_path instead of redeclaring "my" in the same scope
    # (the original triggered a "my masks earlier declaration" warning).
    $key_path = "Classes\\HTTP\\shell\\open\\command";
    if (my $key = $root_key->get_subkey($key_path)) {
        ::rptMsg("Default Browser Check #2");
        ::rptMsg($key_path);
        ::rptMsg("LastWrite Time ".gmtime($key->get_timestamp())." (UTC)");
        ::rptMsg("");
        my $browser;
        eval {
            $browser = $key->get_value("")->get_data();
        };
        if ($@) {
            ::rptMsg("Error locating default browser setting.");
        }
        else {
            ::rptMsg("Default Browser = ".$browser);
        }
    }
    else {
        ::rptMsg($key_path." not found.");
    }
}
1; | mhmdfy/autopsy | RecentActivity/release/rr-full/plugins/defbrowser.pl | Perl | apache-2.0 | 2,071 |
# !!!!!!! DO NOT EDIT THIS FILE !!!!!!!
# This file is machine-generated by mktables from the Unicode
# database, Version 6.1.0. Any changes made here will be lost!
# !!!!!!! INTERNAL PERL USE ONLY !!!!!!!
# This file is for internal use by core Perl only. The format and even the
# name or existence of this file are subject to change without notice. Don't
# use it directly.
return <<'END';
F900 FAFF
END
| Dokaponteam/ITF_Project | xampp/perl/lib/unicore/lib/Blk/CJKComp3.pl | Perl | mit | 421 |
#
# Written by Ryan Kereliuk <ryker@ryker.org>. This file may be
# distributed under the same terms as Perl itself.
#
# The RFC 3261 sip URI is <scheme>:<authority>;<params>?<query>.
#
package URI::sip;
use strict;
use warnings;
use parent qw(URI::_server URI::_userpass);
use URI::Escape qw(uri_unescape);
our $VERSION = '1.71';
$VERSION = eval $VERSION;
sub default_port { 5060 }
# Get/set the authority (userinfo@host:port) portion of a sip: URI.
# Returns the previous authority; when called with an argument, replaces
# the authority in place (escaping characters not valid in a URI).
#
# NOTE(review): statement order here is load-bearing -- $1..$3 from the
# match below must stay valid while $$self is reassigned, so no other
# regex may run between the match and the last read of $3.
sub authority
{
my $self = shift;
# $1 = scheme incl. ":" (may be undef), $2 = authority, $3 = remainder
# (params/query), split on the first ";" or "?".
$$self =~ m,^($URI::scheme_re:)?([^;?]*)(.*)$,os or die;
my $old = $2;
if (@_) {
my $auth = shift;
$$self = defined($1) ? $1 : "";
my $rest = $3;
if (defined $auth) {
# Percent-escape anything outside the allowed URI character set.
$auth =~ s/([^$URI::uric])/ URI::Escape::escape_char($1)/ego;
$$self .= "$auth";
}
$$self .= $rest;
}
$old;
}
# Get/set the sip: URI parameters as a flat list of key/value pairs.
# With arguments (key1, val1, key2, val2, ...) the parameter section of
# the URI is replaced; in both cases the (new) pairs are returned.
sub params_form
{
    my $self = shift;

    # $1 = scheme, $2 = authority, $3 = ";params" (undef when the URI has
    # no parameters), $4 = trailing query part.
    $$self =~ m,^((?:$URI::scheme_re:)?)(?:([^;?]*))?(;[^?]*)?(.*)$,os or die;
    my $paramstr = $3;
    if (@_) {
        my @args = @_;
        $$self = $1 . $2;
        my $rest = $4;
        my @new;
        for (my $i = 0; $i < @args; $i += 2) {
            push(@new, "$args[$i]=$args[$i+1]");
        }
        $paramstr = join(";", @new);
        $$self .= ";" . $paramstr . $rest;
    }

    # Fix: when the URI carries no parameters, $paramstr is undef and the
    # s/// and split below emitted "uninitialized value" warnings under
    # "use warnings". Return the same empty list, warning-free.
    return () unless defined $paramstr;
    $paramstr =~ s/^;//;
    return split(/[;=]/, $paramstr);
}
# Get/set the raw sip: URI parameter string (without the leading ";").
# Returns undef when the URI has no parameter section.
sub params
{
    my $self = shift;

    # $1 = scheme, $2 = authority, $3 = ";params" (undef when absent),
    # $4 = trailing query part.
    $$self =~ m,^((?:$URI::scheme_re:)?)(?:([^;?]*))?(;[^?]*)?(.*)$,os or die;
    my $paramstr = $3;
    if (@_) {
        # NOTE(review): $new is accepted but never used -- the original code
        # reassembles the URI from its OLD parameter string, so this setter
        # is effectively a no-op. Preserved as-is in case callers depend on
        # it; confirm intent before changing.
        my $new = shift;
        $$self = $1 . $2;
        my $rest = $4;
        $$self .= $paramstr . $rest;
    }

    # Fix: avoid s/// on undef (uninitialized-value warning under
    # "use warnings") when the URI has no parameters.
    return $paramstr unless defined $paramstr;
    $paramstr =~ s/^;//;
    return $paramstr;
}
# A sip: URI has no path component (RFC 3261), so the path-related
# accessors inherited from URI are neutered to return nothing, and
# abs/rel are identity operations that hand back the URI unchanged.
sub path           { }
sub path_query     { }
sub path_segments  { }
sub abs            { my $self = shift; return $self; }
sub rel            { my $self = shift; return $self; }
sub query_keywords { }
1;
| jkb78/extrajnm | local/lib/perl5/URI/sip.pm | Perl | mit | 1,735 |
package UI::Health;
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
use UI::Utils;
use Mojo::Base 'Mojolicious::Controller';
use Utils::Rascal;
use Utils::CCR;
use Utils::Helper;
use Data::Dumper;
use Carp qw(cluck confess);
# Render, as JSON, every MID/EDGE server grouped by profile name, together
# with the rascal.properties parameters attached to each profile.
sub healthfull {
	my $self = shift;

	my $data_obj;

	# Collect host FQDNs for every server whose profile is MID* or EDGE*.
	my $server_rs = $self->db->resultset('Server')->search(
		{ 'profile.name' => [ { like => 'MID%' }, { like => 'EDGE%' } ] },
		{ prefetch => [ { 'profile' => undef } ] }
	);
	while ( my $server = $server_rs->next ) {
		my $fqdn = $server->host_name . "." . $server->domain_name;
		push( @{ $data_obj->{ $server->profile->name }->{'servers'} }, $fqdn );
	}

	# Attach every rascal.properties parameter to its profile's entry.
	my $pp_rs = $self->db->resultset('ProfileParameter')->search(
		{ 'parameter.config_file' => 'rascal.properties' },
		{ prefetch => [ { 'parameter' => undef }, { 'profile' => undef } ] }
	);
	while ( my $pp = $pp_rs->next ) {
		push(
			@{ $data_obj->{ $pp->profile->name }->{'parameters'} },
			{
				'name'         => $pp->parameter->name,
				'value'        => $pp->parameter->value,
				'last_updated' => $pp->parameter->last_updated,
			}
		);
	}

	$self->render( json => $data_obj );
}
# Render, as JSON, every rascal.properties parameter grouped by the name
# of the profile it is assigned to.
sub healthprofile {
	my $self = shift;

	my $data_obj;
	my $pp_rs = $self->db->resultset('ProfileParameter')->search(
		{ 'parameter.config_file' => 'rascal.properties' },
		{ prefetch => [ { 'parameter' => undef }, { 'profile' => undef } ] }
	);
	while ( my $pp = $pp_rs->next ) {
		push(
			@{ $data_obj->{ $pp->profile->name } },
			{
				'name'         => $pp->parameter->name,
				'value'        => $pp->parameter->value,
				'last_updated' => $pp->parameter->last_updated,
			}
		);
	}
	$self->render( json => $data_obj );
}
# Render the health (rascal) configuration for the CDN named in the
# 'cdnname' route parameter as JSON.
sub rascal_config {
	my $self = shift;
	$self->render( json => $self->get_health_config( $self->param('cdnname') ) );
}
1;
| rscrimojr/incubator-trafficcontrol | traffic_ops/app/lib/UI/Health.pm | Perl | apache-2.0 | 2,287 |
###########################################################################
#
# This file is auto-generated by the Perl DateTime Suite time locale
# generator (0.04). This code generator comes with the
# DateTime::Locale distribution in the tools/ directory, and is called
# generate_from_cldr.
#
# This file as generated from the CLDR XML locale data. See the
# LICENSE.cldr file included in this distribution for license details.
#
# This file was generated from the source file ss.xml.
# The source file version number was 1.17, generated on
# 2007/07/14 23:02:17.
#
# Do not edit this file directly.
#
###########################################################################
package DateTime::Locale::ss;
use strict;
BEGIN
{
if ( $] >= 5.006 )
{
require utf8; utf8->import;
}
}
use DateTime::Locale::root;
@DateTime::Locale::ss::ISA = qw(DateTime::Locale::root);
my @day_names = (
"uMsombuluko",
"Lesibili",
"Lesitsatfu",
"Lesine",
"Lesihlanu",
"uMgcibelo",
"Lisontfo",
);
my @day_abbreviations = (
"Mso",
"Bil",
"Tsa",
"Ne",
"Hla",
"Mgc",
"Son",
);
my @day_narrows = (
"2",
"3",
"4",
"5",
"6",
"7",
"1",
);
my @month_names = (
"Bhimbidvwane",
"iNdlovana",
"iNdlovu\-lenkhulu",
"Mabasa",
"iNkhwekhweti",
"iNhlaba",
"Kholwane",
"iNgci",
"iNyoni",
"iMphala",
"Lweti",
"iNgongoni",
);
my @month_abbreviations = (
"Bhi",
"Van",
"Vol",
"Mab",
"Nkh",
"Nhl",
"Kho",
"Ngc",
"Nyo",
"Mph",
"Lwe",
"Ngo",
);
my @month_narrows = (
"1",
"2",
"3",
"4",
"5",
"6",
"7",
"8",
"9",
"10",
"11",
"12",
);
my @quarter_names = (
"Q1",
"Q2",
"Q3",
"Q4",
);
my @quarter_abbreviations = (
"Q1",
"Q2",
"Q3",
"Q4",
);
my @am_pms = (
"AM",
"PM",
);
my @era_names = (
"BC",
"AD",
);
my @era_abbreviations = (
"BC",
"AD",
);
my $date_before_time = "1";
my $date_parts_order = "ymd";
sub day_names { \@day_names }
sub day_abbreviations { \@day_abbreviations }
sub day_narrows { \@day_narrows }
sub month_names { \@month_names }
sub month_abbreviations { \@month_abbreviations }
sub month_narrows { \@month_narrows }
sub quarter_names { \@quarter_names }
sub quarter_abbreviations { \@quarter_abbreviations }
sub am_pms { \@am_pms }
sub era_names { \@era_names }
sub era_abbreviations { \@era_abbreviations }
sub full_date_format { "\%A\,\ \%\{ce_year\}\ \%B\ \%d" }
sub long_date_format { "\%\{ce_year\}\ \%B\ \%\{day\}" }
sub medium_date_format { "\%\{ce_year\}\ \%b\ \%\{day\}" }
sub short_date_format { "\%y\/\%m\/\%d" }
sub full_time_format { "\%H\:\%M\:\%S\ v" }
sub long_time_format { "\%H\:\%M\:\%S\ \%\{time_zone_long_name\}" }
sub medium_time_format { "\%H\:\%M\:\%S" }
sub short_time_format { "\%H\:\%M" }
sub date_before_time { $date_before_time }
sub date_parts_order { $date_parts_order }
1;
| carlgao/lenga | images/lenny64-peon/usr/share/perl5/DateTime/Locale/ss.pm | Perl | mit | 3,044 |
#!/usr/bin/perl
# EECS678
# Adopted from CS 241 @ The University of Illinois
use strict;
use warnings;

# For every recorded example output examples/proc<N>-c<C>-<scheme>.out,
# re-run the simulator with the same parameters and diff the last 7 lines
# of its output against the recording.
for my $file (<examples/*>) {
    next unless $file =~ /proc(\d+)-c(\d+)-(\w+)\.out/;
    # Copy the captures before running external commands so later pattern
    # matches (or reader confusion about $1..$3 lifetime) can't bite us.
    my ( $proc, $cores, $scheme ) = ( $1, $2, $3 );
    `./simulator -c $cores -s $scheme examples/proc$proc.csv | tail -7 > output1`;
    `tail -7 $file > output2`;
    my $diff = `diff output1 output2`;
    if ($diff) {
        print "Test file $file differs\n$diff";
    }
}

# cleanup -- unlink instead of shelling out to rm; silently succeeds when
# no example matched and the temp files were never created.
unlink 'output1', 'output2';
| deasley0123/EECS678_SchedulerProject | scheduler/examples.pl | Perl | mit | 417 |
#!/usr/bin/perl
use warnings;
use strict;
# Load hg18 knownGene mRNA sequences from a FASTA file into %mrnaseq,
# keyed by the sequence name (first \w/. token after ">").
my $fastafile = "/users/liype/maf/reference/HumanDB/hg18_knownGeneMrna.fa";
# Fix: 3-arg open with a lexical filehandle instead of 2-arg bareword open.
open( my $fasta_fh, '<', $fastafile ) or die "Error: cannot read from fastafile: $!\n";
my (%mrnaseq);
my ($curname, $curseq);
while (<$fasta_fh>) {
    s/[\r\n]+$//;                     # strip CR/LF line endings
    if (m/^>([\w\.]+)/) {             # header line: flush the previous record
        if ($curseq) {
            $mrnaseq{$curname} = $curseq;
        }
        $curname = $1;
        $curseq  = '';
    }
    else {
        $curseq .= $_;                # sequence line: accumulate
    }
}
# Fix: this must run once AFTER the loop. The original had it inside the
# while body, storing a partial sequence on every input line.
$curseq and $mrnaseq{$curname} = $curseq;    # process the last sequence
close $fasta_fh;
| cancerregulome/gidget | commands/maf_processing/perl/archive/check_fasta.pl | Perl | mit | 473 |
package O2::Dispatch::ModPerlGlobals::Context;
use strict;
use Tie::Scalar;
our @ISA = qw(Tie::StdScalar);
#------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------
# Tie constructor: the backing store is an empty hash mapping request ids
# (see FETCH/STORE) to per-request context objects.
sub TIESCALAR {
  my ($className) = @_;
  my $store = {};
  return bless $store, $className;
}
#------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------
# Return the context for the current request ($ENV{O2REQUESTID}), lazily
# creating and caching one the first time a given request id is seen.
sub FETCH {
  my ($obj) = @_;
  my $requestId = $ENV{O2REQUESTID};
  return $obj->{$requestId} if $obj->{$requestId};
  require O2::Context;
  my $context = O2::Context->new();
  $obj->STORE($context);
  return $context;
}
#------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------
# Cache the given context under the current request's id.
sub STORE {
  my ($obj, $context) = @_;
  my $requestId = $ENV{O2REQUESTID};
  $obj->{$requestId} = $context;
}
#------------------------------------------------------------------------------------------------------------
#------------------------------------------------------------------------------------------------------------
# No per-object cleanup is needed; defined explicitly so destruction does
# not fall through to AUTOLOAD-style handlers.
sub DESTROY {
  my ($obj) = @_;
}
#------------------------------------------------------------------------------------------------------------
1;
| haakonsk/O2-Framework | lib/O2/Dispatch/ModPerlGlobals/Context.pm | Perl | mit | 1,060 |
#!/usr/bin/perl
use strict;
use warnings;
package WWW::Shopify::Tooltips;
use WWW::Shopify;
use Exporter qw(import);
my $tooltips = {
'WWW::Shopify::Model::FulfillmentService' => {
'inventory_management' => 'States if the fulfillment service tracks product inventory and provides updates to Shopify. Valid values are "true" and "false".',
'credential1' => 'Login information of the customer (usually in the form of an email address).',
'name' => 'The name of the fulfillment service as seen by merchants and their customers.',
'callback_url' => 'States the URL endpoint that Shopify needs to retrieve inventory and tracking updates. This field is necessary if either inventory_management or tracking_support is set to "true".',
'provider_id' => 'A unique identifier for the fulfillment service provider.',
'requires_shipping_method' => 'States if the fulfillment service requires products to be physically shipped. Valid values are "true" and "false".',
'tracking_support' => 'States if the fulfillment service provides tracking numbers for packages. Valid values are "true" and "false".',
'credential2_exists' => 'States whether or not there is a password for the login information of the customer. Valid values are "true" or "false".',
'format' => 'Specifies the format of the API output. Valid values are json and xml.',
'handle' => 'A human-friendly unique string for the fulfillment service generated from its title.'
},
'WWW::Shopify::Model::Shop' => {
'source' => '',
'google_apps_domain' => 'Feature is present when a shop has a google app domain. It will be returned as a URL. If the shop does not have this feature enabled it will default to "null."',
'country_code' => 'The two-letter country code corresponding to the shop\'s country.',
'taxes_included' => 'The setting for whether applicable taxes are included in product prices. Valid values are: "true" or "null."',
'money_format' => 'A string representing the way currency is formatted when the currency isn\'t specified.',
'tax_shipping' => 'Specifies wether or not taxes were charged for shipping. Valid values are: "true" or "false."',
'email' => 'The contact email address for the shop.',
'currency' => 'The three-letter code for the currency that the shop accepts.',
'domain' => 'The shop\'s domain.',
'city' => 'The city in which the shop is located.',
'created_at' => 'The date and time when the shop was created. The API returns this value in ISO 8601 format.',
'latitude' => 'Geographic coordinate specifying the north/south location of a shop.',
'public' => '',
'myshopify_domain' => 'The shop\'s \'myshopify.com\' domain.',
'google_apps_login_enabled' => 'Feature is present if a shop has google apps enabled. Those shops with this feature will be able to login to the google apps login. Shops without this feature enabled will default to "null."',
'shop_owner' => 'The username of the shop owner.',
'province_code' => 'The two-letter code for the shop\'s province or state.',
'province' => 'The shop\'s normalized province or state name.',
'id' => 'A unique numeric identifier for the shop.',
'country' => 'The shop\'s country (by default equal to the two-letter country code).',
'country_name' => 'The shop\'s normalized country name.',
'longitude' => 'Geographic coordinate specifying the east/west location of a shop.',
'timezone' => 'The name of the timezone the shop is in.',
'customer_email' => 'The customer\'s email.',
'name' => 'The name of the shop.',
'display_plan_name' => 'The display name of the Shopify plan the shop is on.',
'phone' => 'The contact phone number for the shop.',
'money_with_currency_format' => 'A string representing the way currency is formatted when the currency is specified.',
'zip' => 'The zip or postal code of the shop\'s address.',
'plan_name' => 'The name of the Shopify plan the shop is on.',
'address1' => 'The shop\'s street address.'
},
'WWW::Shopify::Model::Event' => {
'subject_type' => 'The type of the resource that generated the event. This will be one of the following:<ul>',
'message' => 'Human readable text that describes the event.',
'body' => 'A text field containing information about the event.',
'created_at' => 'The date and time when the event was created. The API returns this value in ISO 8601 format.',
'subject_id' => 'The id of the resource that generated the event.',
'verb' => 'The type of event that took place. Different resources generate different types of event; see Resources and their event verbs and messages (below) for details.',
'arguments' => 'Refers to a certain event and its resources.',
'id' => 'The unique numeric identifier for the event.'
},
'WWW::Shopify::Model::ProductSearchEngine' => {},
'WWW::Shopify::Model::Order::LineItem' => {
'sku' => 'A unique identifier of the item in the fulfillment.',
'fulfillment_service' => 'Service provider who is doing the fulfillment. Valid values are either "manual" or the name of the provider. eg: "amazon", "shipwire", etc.',
'gift_card' => 'States wether or not the line_item is a gift card. If so, the item is not taxed or considered for shipping charges.',
'product_id' => 'The unique numeric identifier for the product in the fulfillment. Can be null if the original product associated with the order is deleted at a later date',
'id' => 'The id of the line item.',
'grams' => 'The weight of the item in grams.',
'tax_lines' => 'A list of <code>tax_line</code> objects, each of which details the taxes applicable to this line_item.',
'quantity' => 'The number of products that were purchased.',
'name' => 'The name of the product variant.',
'taxable' => 'States whether or not the product was taxable. Values are: true or false.',
'variant_title' => 'The title of the product variant.',
'fulfillment_status' => 'How far along an order is in terms line items fulfilled. Valid values are: fulfilled, null or partial.',
'variant_id' => 'The id of the product variant.',
'price' => 'The price of the item.',
'title' => 'The title of the product.',
'requires_shipping' => 'States whether or not the fulfillment requires shipping. Values are: true or false.',
'vendor' => 'The name of the supplier of the item.'
},
'WWW::Shopify::Model::Refund' => {
'created_at' => 'The date and time when the refund was created. The API returns this value in ISO 8601 format.',
'refund_line_items' => '',
'transactions' => 'The list of transactions involved in the refund.',
'restock' => 'Whether or not the line items were added back to the store inventory.',
'user_id' => 'The unique identifier of the user who performed the refund.',
'id' => 'The unique numeric identifier for the refund. This one is used for API purposes.',
'note' => 'The optional note attached to a refund.'
},
'WWW::Shopify::Model::Page' => {
'published_at' => 'This can have two different types of values, depending on whether the page has been published (i.e., made visible to the blog\'s readers).<ul>',
'shop_id' => 'The id of the shop to which the page belongs.',
'metafields' => '',
'author' => 'The name of the person who created the page.',
'template_suffix' => 'The suffix of the liquid template being used. By default, the original template is called page.liquid, without any suffix. Any additional templates will be: page.suffix.liquid.',
'created_at' => 'The date and time when the page was created. The API returns this value in ISO 8601 format.',
'handle' => 'A human-friendly unique string for the page automatically generated from its title. This is used in shop themes by the Liquid templating language to refer to the page.',
'updated_at' => 'The date and time when the page was last updated. The API returns this value in ISO 8601 format.',
'title' => 'The title of the page.',
'body_html' => 'Text content of the page, complete with HTML markup.',
'id' => 'The unique numeric identifier for the page.'
},
'WWW::Shopify::Model::Checkout::ShippingLine' => {
'title' => '',
'price' => 'The price of this shipping method.'
},
'WWW::Shopify::Model::SmartCollection' => {
'published_at' => 'This can have two different types of values, depending on whether the smart collection has been published (i.e., made visible to customers):<ul>',
'sort_order' => 'The order in which products in the smart collection appear. Valid values are:<ul>',
'rules' => '',
'published_scope' => 'The sales channels in which the smart collection is visible.',
'image' => 'The collection image.',
'template_suffix' => 'The suffix of the template you are using. By default, the original template is called product.liquid, without any suffix. Any additional templates will be: product.suffix.liquid.',
'handle' => 'A human-friendly unique string for the smart collection automatically generated from its title. This is used in shop themes by the Liquid templating language to refer to the smart collection.',
'updated_at' => 'The date and time when the smart collection was last modified. The API returns this value in ISO 8601 format.',
'title' => 'The name of the smart collection.',
'body_html' => 'The best selling ipod ever',
'id' => 'The unique numeric identifier for the smart collection.'
},
'WWW::Shopify::Model::CustomerGroup' => {
'created_at' => 'The date and time when the customer group was created. The API returns this value in ISO 8601 format.',
'updated_at' => 'The date and time when the customer group was last modified. The API returns this value in ISO 8601 format.',
'query' => 'The set of conditions that determines which customers will go into the customer group. Queries are covered in more detail in Customer group queries.',
'name' => 'The name given by the shop owner to the customer group.',
'id' => 'A unique numeric identifier for the customer group.'
},
'WWW::Shopify::Model::Order::ShippingLine' => {
'source' => 'The source of the shipping method.',
'tax_lines' => 'A list of <code>tax_line</code> objects, each of which details the taxes applicable to this shipping_line.',
'title' => 'The title of the shipping method.',
'price' => 'The price of this shipping method.',
'code' => 'A reference to the shipping method.'
},
'WWW::Shopify::Model::Item' => {},
'WWW::Shopify::Model::Transaction' => {
'test' => 'The option to use the transaction for testing purposes. Valid values are "true" or "false."',
'status' => 'The status of the transaction. Valid values are: pending, failure, success or error.',
'gateway' => 'The name of the gateway the transaction was issued through. A list of gateways can be found on Shopify\'s Payment Gateway page.',
'order_id' => 'A unique numeric identifier for the order.',
'authorization' => 'The authorization code associated with the transaction.',
'device_id' => 'The unique identifier for the device.',
'amount' => 'The amount of money that the transaction was for.',
'created_at' => 'The date and time when the transaction was created. The API returns this value in ISO 8601 format.',
'kind' => 'The kind of transaction:',
'user_id' => 'The unique identifier for the user.',
'id' => 'A unique numeric identifier for the transaction.',
'receipt' => ''
},
'WWW::Shopify::Model::Order::PaymentDetails' => {
'credit_card_company' => 'The name of the company who issued the customer\'s credit card.',
'credit_card_bin' => 'The issuer identification number (IIN), formerly known as bank identification number (BIN) ] of the customer\'s credit card. This is made up of the first few digits of the credit card number.',
'cvv_result_code' => 'The Response code from the credit card company indicating whether the customer entered the card security code, a.k.a. card verification value, correctly. The code is a single letter or empty string; see this chart for the codes and their definitions.',
'credit_card_number' => 'The customer\'s credit card number, with most of the leading digits redacted with Xs.'
},
'WWW::Shopify::Model::Order::ClientDetails' => {
'browser_ip' => 'The browser IP address.',
'session_hash' => 'A hash of the session.',
'user_agent' => '',
'accept_language' => ''
},
'WWW::Shopify::Model::Transaction::Receipt' => {
'authorization' => '',
'testcase' => ''
},
'WWW::Shopify::Model::Order::Fulfillment::LineItem' => {
'sku' => 'A unique identifier of the item in the fulfillment.',
'product_exists' => 'States whether or not the product exists. Valid values are "true" or "false".',
'fulfillment_service' => 'Service provider who is doing the fulfillment. Valid values are: manual, ',
'product_id' => 'The unique numeric identifier for the product in the fulfillment.',
'id' => 'The id of the <code>line_item</code> within the fulfillment.',
'grams' => 'The weight of the item in grams.',
'quantity' => 'The number of items in the fulfillment.',
'name' => 'The name of the product variant.',
'properties' => 'Returns additional properties associated with the line item.',
'variant_title' => 'The title of the product variant being fulfilled.',
'fulfillment_status' => 'Status of an order in terms of the <code>line_items</code> being fulfilled. Valid values are: fulfilled, null or partial.',
'variant_id' => 'The id of the product variant being fulfilled.',
'price' => 'The price of the item.',
'title' => 'The title of the product.',
'variant_inventory_management' => 'Returns the name of the inventory management system.',
'requires_shipping' => 'Specifies whether or not a customer needs to provide a shipping address when placing an order for this product variant. Valid values are: "true" or "false."',
'vendor' => 'The name of the supplier of the item.'
},
'WWW::Shopify::Model::Article' => {
'summary_html' => 'The text of the summary of the article, complete with HTML markup.',
'published_at' => 'The date and time when the article was published. The API returns this value in ISO 8601 format.',
'metafields' => '',
'author' => 'The name of the author of this article',
'tags' => 'Tags are additional short descriptors formatted as a string of comma-separated values. For example, if an article has three tags: tag1, tag2, tag3.',
'published' => 'States whether or not the article is visible. Valid values are "true" for published or "false" for hidden.',
'created_at' => 'The date and time when the article was created. The API returns this value in ISO 8601 format.',
'updated_at' => 'The date and time when the article was last updated. The API returns this value in ISO 8601 format.',
'user_id' => 'A unique numeric identifier for the author of the article.',
'title' => 'The title of the article.',
'blog_id' => 'A unique numeric identifier for the blog containing the article.',
'body_html' => 'The text of the body of the article, complete with HTML markup.',
'id' => 'A unique numeric identifier for the article.'
},
'WWW::Shopify::Model::Checkout' => {
'total_price' => 'The sum of all the prices of all the items in the order, taxes and discounts included.',
'line_items' => '',
'billing_address' => '',
'discount_codes' => '',
'shipping_lines' => '',
'taxes_included' => '',
'buyer_accepts_marketing' => 'Indicates whether or not the person who placed the order would like to receive email updates from the shop. This is set when checking the "I want to receive occasional emails about new products, promotions and other news" checkbox during checkout. Valid values are "true" and "false."',
'abandoned_checkout_url' => 'The full recovery URL to be sent to a customer to recover their abandoned checkout.',
'email' => 'The customer\'s email address.',
'created_at' => 'The date and time when the order was created. The API returns this value in ISO 8601 format.',
'id' => 'The unique numeric identifier for the order. This one is used for API purposes. This is different from the order_number property (see below), which is also a unique numeric identifier for the order, but used by the shop owner and customer.',
'token' => 'Unique identifier for a particular order.',
'total_discounts' => 'The total amount of the discounts to be applied to the price of the order.',
'landing_site' => 'The URL for the page where the buyer landed when entering the shop.',
'total_weight' => 'The sum of all the weights of the line items in the order, in grams.',
'tax_lines' => '',
'cart_token' => 'Unique identifier for a particular cart that is attached to a particular order.',
'referring_site' => 'The website that the customer clicked on to come to the shop.',
'note' => 'The text of an optional note that a shopowner can attach to the order.',
'source_name' => 'Where the checkout originated from. Returned value will be "web" or "pos"',
'total_line_items_price' => 'The sum of all the prices of all the items in the order.',
'updated_at' => 'The date and time when the order was last modified. The API returns this value in ISO 8601 format.',
'customer' => '',
'shipping_address' => '',
'subtotal_price' => 'Price of the order before shipping and taxes',
'total_tax' => 'The sum of all the taxes applied to the line items in the order.'
},
'WWW::Shopify::Model::Comment' => {
'published_at' => 'The date and time when the comment was published. In the case of comments, this is the date and time when the comment was created, meaning that it has the same value as created_at. The API returns this value in ISO 8601 format.',
'article_id' => 'A unique numeric identifier for the article to which the comment belongs.',
'user_agent' => 'The user agent string provided by the software (usually a browser) used to create the comment.',
'status' => 'The status of the comment. The possible values are:<ul>',
'ip' => 'The IP address from which the comment was posted.',
'body' => 'The basic textile markup of a comment.',
'email' => 'The email address of the author of the comment.',
'created_at' => 'The date and time when the comment was created. The API returns this value in ISO 8601 format.',
'updated_at' => 'The date and time when the comment was last modified. When the comment is first created, this is the date and time when the comment was created, meaning that it has the same value as created_at. If the blog requires comments to be approved, this value is updated to the date and time the comment was approved upon approval. The API returns this value in ISO 8601 format.',
'blog_id' => 'A unique numeric identifier for the blog containing the article that the comment belongs to.',
'body_html' => 'The text of the comment, complete with HTML markup.',
'id' => 'A unique numeric identifier for the comment.'
},
'WWW::Shopify::Model::Blog' => {
'feedburner_location' => 'URL to the feedburner location for blogs that have enabled feedburner through their store admin.',
'metafields' => '',
'feedburner' => 'Feedburner is a web feed management provider and can be enabled to provide custom RSS feeds for Shopify bloggers. This property will default to blank or "null" unless feedburner is enabled through the shop admin.',
'tags' => 'Tags are additional short descriptors formatted as a string of comma-separated values. For example, if an article has three tags: tag1, tag2, tag3.',
'template_suffix' => 'States the name of the template a blog is using if it is using an alternate template. If a blog is using the default blog.liquid template, the value returned is "null".',
'created_at' => 'The date and time when the blog was created. The API returns this value in ISO 8601 format.',
'handle' => 'A human-friendly unique string for a blog automatically generated from its title. This handle is used by the Liquid templating language to refer to the blog.',
'updated_at' => 'The date and time when changes were last made to the blog\'s properties. Note that this is not updated when creating, modifying or deleting articles in the blog. The API returns this value in ISO 8601 format.',
'title' => 'The title of the blog.',
'id' => 'A unique numeric identifier for the blog.',
'commentable' => 'Indicates whether readers can post comments to the blog and if comments are moderated or not. Possible values are:<ul>'
},
'WWW::Shopify::Model::Customer' => {
'last_order_name' => 'The name of the customer\'s last order. This is directly related to the Order\'s name field.',
'orders_count' => 'The number of orders associated with this customer.',
'state' => 'The state of the customer in a shop. Customers start out as "disabled." They are invited by email to setup an account with a shop. The customer can then:',
'last_name' => 'The customer\'s last name.',
'email' => 'The email address of the customer.',
'created_at' => 'The date and time when the customer was created. The API returns this value in ISO 8601 format.',
'multipass_identifier' => 'The customer\'s identifier used with Multipass login',
'verified_email' => 'States whether or not the email address has been verified.',
'id' => 'A unique numeric identifier for the customer.',
'last_order_id' => 'The id of the customer\'s last order.',
'metafields' => '',
'accepts_marketing' => 'Indicates whether the customer has consented to be sent marketing material via email. Valid values are "true" and "false."',
'note' => 'A note about the customer.',
'tags' => 'Tags are additional short descriptors formatted as a string of comma-separated values. For example, if an article has three tags: tag1, tag2, tag3.',
'addresses' => '',
'updated_at' => 'The date and time when the customer information was updated. The API returns this value in ISO 8601 format.',
'default_address' => '',
'total_spent' => 'The total amount of money that the customer has spent at the shop.',
'first_name' => 'The customer\'s first name.'
},
'WWW::Shopify::Model::CarrierService' => {
'name' => 'The name of the shipping service as seen by merchants and their customers.',
'callback_url' => 'States the URL endpoint that shopify needs to retrieve shipping rates. This must be a public URL.',
'active' => 'States whether or not this carrier service is active. Valid values are "true" and "false".',
'service_discovery' => 'States if merchants are able to send dummy data to your service through the Shopify admin to see shipping rate examples. Valid values are "true" and "false"',
'carrier_service_type' => 'Distinguishes between api or legacy carrier services.'
},
'WWW::Shopify::Model::ApplicationCharge' => {
'test' => 'States whether or not the application charge is a test transaction. Valid values are "true" or "null".',
'status' => 'The status of the application charge. Valid values are:<ul>',
'name' => 'The name of the one-time application charge.',
'return_url' => 'The URL the customer is sent to once they accept/decline a charge.',
'confirmation_url' => 'The URL that the customer is taken to, to accept or decline the one-time application charge.',
'created_at' => 'The date and time when the one-time application charge was created. The API returns this value in ISO 8601 format.',
'updated_at' => 'The date and time when the charge was last updated. The API returns this value in ISO 8601 format.',
'id' => 'A unique numeric identifier for the one-time application charge.',
'price' => 'The price of the the one-time application charge.'
},
'WWW::Shopify::Model::Order::Fulfillment' => {
'line_items' => '',
'status' => 'The status of the fulfillment. Valid values are:<ul>',
'order_id' => 'The unique numeric identifier for the order.',
'created_at' => 'The date and time when the fulfillment was created. The API returns this value in ISO 8601 format.',
'updated_at' => 'The date and time when the fulfillment was last modified. The API returns this value in ISO 8601 format.',
'id' => 'A unique numeric identifier for the fulfillment.',
'tracking_company' => 'The name of the shipping company.',
'receipt' => ''
},
'WWW::Shopify::Model::Order' => {
'total_price' => 'The sum of all the prices of all the items in the order, taxes and discounts included (must be positive).',
'line_items' => '',
'closed_at' => 'The date and time when the order was closed. If the order was closed, the API returns this value in ISO 8601 format. If the order was not closed, this value is null.',
'billing_address' => '',
'taxes_included' => 'States whether or not taxes are included in the order subtotal. Valid values are "true" or "false".',
'email' => 'The customer\'s email address. Is required when a billing address is present.',
'id' => 'The unique numeric identifier for the order. This one is used for API purposes. This is different from the order_number property (see below), which is also a unique numeric identifier for the order, but used by the shop owner and customer.',
'payment_details' => '',
'total_discounts' => 'The total amount of the discounts to be applied to the price of the order.',
'order_number' => 'A unique numeric identifier for the order. This one is used by the shop owner and customer. This is different from the id property, which is also a unique numeric identifier for the order, but used for API purposes.',
'financial_status' => '<ul>',
'landing_site' => 'The URL for the page where the buyer landed when entering the shop.',
'name' => 'The customer\'s order name as represented by a number.',
'cart_token' => 'Unique identifier for a particular cart that is attached to a particular order.',
'total_line_items_price' => 'The sum of all the prices of all the items in the order.',
'note_attributes' => 'Extra information that is added to the order. Each array entry must contain a hash with "name" and "value" keys as shown above.',
'updated_at' => 'The date and time when the order was last modified. The API returns this value in ISO 8601 format.',
'shipping_address' => '',
'fulfillment_status' => '<ul>',
'subtotal_price' => 'Price of the order before shipping and taxes',
'total_tax' => 'The sum of all the taxes applied to the order (must be positive).',
'number' => 'Numerical identifier unique to the shop. A number is sequential and starts at 1000.',
'discount_codes' => '',
'gateway' => '<strong>Deprecated as of July 14, 2014. This information is instead available on <a href=\'=/api/transaction#properties\'>transactions</strong>',
'shipping_lines' => '',
'buyer_accepts_marketing' => 'Indicates whether or not the person who placed the order would like to receive email updates from the shop. This is set when checking the "I want to receive occasional emails about new products, promotions and other news" checkbox during checkout. Valid values are "true" and "false."',
'cancel_reason' => 'The reason why the order was cancelled. If the order was not cancelled, this value is "null." If the order was cancelled, the value will be one of the following:',
'currency' => 'The three letter code (ISO 4217) for the currency used for the payment.',
'created_at' => 'The date and time when the order was created. The API returns this value in ISO 8601 format.',
'token' => 'Unique identifier for a particular order.',
'total_weight' => 'The sum of all the weights of the line items in the order, in grams.',
'tax_lines' => '',
'client_details' => '',
'cancelled_at' => 'The date and time when the order was cancelled. If the order was cancelled, the API returns this value in ISO 8601 format. If the order was not cancelled, this value is "null."',
'processing_method' => 'States the type of payment processing method. Valid values are: checkout, direct, manual, offsite or express.',
'referring_site' => 'The website that the customer clicked on to come to the shop.',
'tags' => 'Tags are additional short descriptors formatted as a string of comma-separated values. For example, if an order has three tags: tag1, tag2, tag3.',
'note' => 'The text of an optional note that a shop owner can attach to the order.',
'browser_ip' => 'The IP address of the browser used by the customer when placing the order.',
'source_name' => 'Where the order originated from. Returned value will be "web" or "pos"',
'customer' => '',
'fulfillments' => ''
},
'WWW::Shopify::Model::CustomCollection::Collect' => {
'position' => 'A number specifying the order in which the product appears in the custom collection, with 1 denoting the first item in the collection. This value applies only when the custom collection\'s sort-order property is set to manual.',
'sort_value' => 'This is the same value as <tt>position</tt> but padded with leading zeroes to make it alphanumeric-sortable.',
'created_at' => 'The date and time when the collect was created. The API returns this value in ISO 8601 format.',
'featured' => 'States whether or not the collect is featured. Valid values are "true" or "false".',
'product_id' => 'The unique numeric identifier for the product in the custom collection.',
'updated_at' => 'The date and time when the collect was last updated. The API returns this value in ISO 8601 format.',
'collection_id' => 'The id of the custom collection containing the product.',
'id' => 'A unique numeric identifier for the collect.'
},
'WWW::Shopify::Model::RecurringApplicationCharge' => {
'trial_days' => 'Number of days that the customer is eligible for a free trial.',
'test' => 'States whether or not the application charge is a test transaction. Valid values are "true" or "null".',
'name' => 'The name of the recurring application charge.',
'cancelled_on' => 'The date and time when the customer cancelled their recurring application charge. The API returns this value in ISO 8601 format.<br/>Note: If the recurring application charge is not cancelled it will default to "null".',
'trial_ends_on' => 'The date and time when the free trial ends. The API returns this value in ISO 8601 format.',
'return_url' => 'The URL the customer is sent to once they accept/decline a charge.',
'confirmation_url' => 'The URL that the customer is taken to, to accept or decline the recurring application charge.',
'created_at' => 'The date and time when the recurring application charge was created. The API returns this value in ISO 8601 format.',
'updated_at' => 'The date and time when the recurring application charge was last updated. The API returns this value in ISO 8601 format.',
'activated_on' => 'The date and time when the customer activated the recurring application charge. The API returns this value in ISO 8601 format.<br/>Note: The recurring application charge must be activated or the returning value will be "null".',
'id' => 'A unique numeric identifier for the recurring application charge.',
'price' => 'The price of the the recurring application charge.',
'billing_on' => 'The date and time when the customer will be billed. The API returns this value in ISO 8601 format.<br/>Note: The recurring application charge must be accepted or the returning value will be "null".'
},
'WWW::Shopify::Model::Location' => {
'country' => 'The country the location is in',
'location_type' => 'The location type',
'name' => 'The name of the location',
'phone' => 'The phone number of the location, can contain special chars like - and +',
'address2' => 'The second line of the address',
'created_at' => 'The date and time when the location was created. The API returns this value in ISO 8601 format.',
'zip' => 'The zip or postal code',
'city' => 'The city the location is in',
'updated_at' => 'The date and time when the location was last updated. The API returns this value in ISO 8601 format.',
'id' => 'A unique numeric identifier for the location.',
'address1' => 'The first line of the address',
'province' => 'The province the location is in'
},
'WWW::Shopify::Model::SmartCollection::Rule' => {
'relation' => ' The relation between the identifier for the condition and the numeric amount.',
'column' => '',
'condition' => ' Select products for a collection using a condition.'
},
'WWW::Shopify::Model::Order::Risk' => {},
'WWW::Shopify::Model::Checkout::LineItem' => {
'sku' => 'A unique identifier of the item in the fulfillment.',
'grams' => 'The weight of the item in grams.',
'fulfillment_service' => 'Service provider who is doing the fulfillment. Valid values are: manual, ',
'quantity' => 'The number of products that were purchased.',
'variant_title' => 'The title of the product variant.',
'product_id' => 'The unique numeric identifier for the product in the fulfillment.',
'variant_id' => 'The id of the product variant.',
'price' => 'The price of the item.',
'title' => 'The title of the product.',
'vendor' => 'The name of the supplier of the item.',
'requires_shipping' => 'States whether or not the fulfillment requires shipping. Values are: true or false.'
},
'WWW::Shopify::Model::Country::Province' => {
'tax_percentage' => 'The tax value in percent format.',
'tax_type' => 'A tax_type is applied for a compounded sales tax. For example, the Canadian HST is a compounded sales tax of both PST and GST.',
'name' => 'The name of the province or state.',
'tax_name' => 'The name of the tax as it is referred to in the applicable province/state. For example, in Ontario, Canada the tax is referred to as HST.',
'tax' => 'The tax value in decimal format.',
'id' => 'The unique numeric identifier for the particular province or state.',
'code' => 'The two letter province or state code.'
},
'WWW::Shopify::Model::Checkout::TaxLine' => {
'rate' => 'The rate of tax to be applied.',
'title' => 'The name of the tax.',
'price' => 'The amount of tax to be charged.'
},
'WWW::Shopify::Model::Address' => {
'country_code' => 'The two-letter country code corresponding to the customer\'s country.',
'last_name' => 'The customer\'s last name.',
'city' => 'The customer\'s city.',
'latitude' => 'The latitude of the billing address.',
'id' => 'A unique numeric identifier for the address.',
'province_code' => 'The two-letter pcode for the customer\'s province or state.',
'province' => 'The customer\'s province or state name.',
'company' => 'The customer\'s company.',
'country' => 'The customer\'s country.',
'longitude' => 'The longitude of the billing address.',
'name' => 'The customer\'s name.',
'phone' => 'The customer\'s phone number.',
'address2' => 'An additional field for the customer\'s mailing address.',
'zip' => 'The customer\'s zip or postal code.',
'address1' => 'The customer\'s mailing address.',
'first_name' => 'The customer\'s first name.'
},
'WWW::Shopify::Model::Asset' => {
'attachment' => 'An asset attached to a store\'s theme.',
'value' => 'The asset that you are adding.',
'src' => 'Specifies the location of an asset.',
'source_key' => 'The source key copies an asset.',
'public_url' => 'The public facing URL of the asset.',
'size' => 'The asset size in bytes.',
'key' => 'The path to the asset within a shop. For example, the asset bg-body-green.gif is located in the assets folder.',
'content_type' => 'MIME representation of the content, consisting of the type and subtype of the asset.',
'created_at' => 'The date and time when the asset was created. The API returns this value in ISO 8601 format.',
'updated_at' => 'The date and time when an asset was last updated. The API returns this value in ISO 8601 format.'
},
'WWW::Shopify::Model::Order::TaxLine' => {
'rate' => 'The rate of tax to be applied.',
'title' => 'The name of the tax.',
'price' => 'The amount of tax to be charged.'
},
'WWW::Shopify::Model::CustomCollection' => {
'published_at' => 'This can have two different types of values, depending on whether the custom collection has been published (i.e., made visible to customers):<ul>',
'sort_order' => 'The order in which products in the custom collection appear. Valid values are:<ul>',
'metafields' => '',
'published_scope' => 'The sales channels in which the custom collection is visible.',
'image' => '',
'template_suffix' => 'The suffix of the liquid template being used. By default, the original template is called product.liquid, without any suffix. Any additional templates will be: product.suffix.liquid.',
'published' => 'States whether the custom collection is visible. Valid values are "true" for visible and "false" for hidden.',
'handle' => 'A human-friendly unique string for the custom collection automatically generated from its title. This is used in shop themes by the Liquid templating language to refer to the custom collection.',
'updated_at' => 'The date and time when the custom collection was last modified. The API returns this value in ISO 8601 format.',
'title' => 'The name of the custom collection.',
'body_html' => 'The description of the custom collection, complete with HTML markup. Many templates display this on their custom collection pages.',
'id' => 'The unique numeric identifier for the custom collection.'
},
'WWW::Shopify::Model::Cart' => {},
'WWW::Shopify::Model::Metafield' => {
'namespace' => 'Container for a set of metadata. Namespaces help distinguish between metadata you created against metadata created by another individual with a similar namespace (maximum of 20 characters).',
'value' => 'Information to be stored as metadata.',
'description' => 'Additional information about the metafield. This property is optional.',
'key' => 'Identifier for the metafield (maximum of 30 characters).',
'value_type' => 'States whether the information in the value is stored as a \'string\' or \'integer.\'',
'created_at' => 'The date and time when the metafield was created. The API returns this value in ISO 8601 format.',
'updated_at' => 'The date and time when the metafield was published. The API returns this value in ISO 8601 format.',
'id' => 'Unique numeric identifier for the metafield.',
'owner_id' => 'A unique numeric identifier for the metafield\'s owner.',
'owner_resource' => 'Unique id for that particular resource.'
},
'WWW::Shopify::Model::Refund::LineItem' => {
'quantity' => 'The quantity of the associated line item that was returned.',
'line_item' => 'The single line item being returned.',
'id' => 'The unique identifier of the refund line item.',
'line_item_id' => 'The id of the related line item.'
},
'WWW::Shopify::Model::Theme' => {
'created_at' => 'The date and time when the theme was created. The API returns this value in ISO 8601 format.',
'updated_at' => 'The date and time when the theme was last updated. The API returns this value in ISO 8601 format.',
'name' => 'The name of the theme.',
'id' => 'A unique numeric identifier for the theme.',
'role' => 'Specifies how the theme is being used within the shop. Valid values are:'
},
'WWW::Shopify::Model::Country' => {
'name' => 'The full name of the country, in English.',
'tax' => 'The national sales tax rate to be applied to orders made by customers from that country.',
'id' => 'The unique numeric identifier for the country. <br /> It is important to note that the id for a given country in one shop will not be the same as the id for the same country in another shop.',
'provinces' => '',
'code' => 'The ISO 3166-1 alpha-2 two-letter country code for the country. The code for a given country will be the same as the code for the same country in another shop.'
},
'WWW::Shopify::Model::Product' => {
'published_at' => 'The date and time when the product was published. The API returns this value in ISO 8601 format.',
'images' => 'A list of image objects, each one representing an image associated with the product.',
'options' => 'Custom product property names like "Size", "Color", and "Material". products are based on permutations of these options. A product may have a maximum of 3 options.',
'tags' => 'A categorization that a product can be tagged with, commonly used for filtering and searching.',
'published_scope' => 'The sales channels in which the product is visible.',
'variants' => 'A list of variant objects, each one representing a slightly different version of the product. For example, if a product comes in different sizes and colors, each size and color permutation (such as "small black", "medium black", "large blue"), would be a variant.',
'template_suffix' => 'The suffix of the liquid template being used. By default, the original template is called product.liquid, without any suffix. Any additional templates will be: product.suffix.liquid.',
'product_type' => 'A categorization that a product can be tagged with, commonly used for filtering and searching.',
'created_at' => 'The date and time when the product was created. The API returns this value in ISO 8601 format.',
'handle' => 'A human-friendly unique string for the Product automatically generated from its title. They are used by the Liquid templating language to refer to objects.',
'updated_at' => 'The date and time when the product was last modified. The API returns this value in ISO 8601 format.',
'title' => 'The name of the product. In a shop\'s catalog, clicking on a product\'s title takes you to that product\'s page. On a product\'s page, the product\'s title typically appears in a large font.',
'body_html' => 'The description of the product, complete with HTML formatting.',
'id' => 'The unique numeric identifier for the product. Product ids are unique across the entire Shopify system; no two products will have the same id, even if they\'re from different shops.',
'vendor' => 'The name of the vendor of the product.'
},
'WWW::Shopify::Model::Redirect' => {
'target' => 'The "after" path or URL to be redirected to. When the user visits the path specified by path, s/he will be redirected to this path or URL. This property can be set to any path on the shop\'s site, or any URL, even one on a completely different domain.',
'path' => 'The "before" path to be redirected. When the user this path, s/he will be redirected to the path specified by target.',
'id' => 'The unique numeric identifier for the redirect.'
},
'WWW::Shopify::Model::ScriptTag' => {
'created_at' => 'The date and time when the ScriptTag was created. The API returns this value in ISO 8601 format.',
'updated_at' => 'The date and time when the ScriptTag was last updated. The API returns this value in ISO 8601 format.',
'src' => 'Specifies the location of the ScriptTag.',
'id' => 'The unique numeric identifier for the ScriptTag.',
'event' => 'DOM event which triggers the loading of the script. Valid values are: "onload."'
},
'WWW::Shopify::Model::Webhook' => {
'topic' => 'The event that will trigger the webhook. Valid values are: orders/create, orders/delete, orders/updated, orders/paid, orders/cancelled, orders/fulfilled, orders/partially_fulfilled, carts/create, carts/update, checkouts/create, checkouts/update, checkouts/delete, refunds/create, products/create, products/update, products/delete, collections/create, collections/update, collections/delete, customer_groups/create, customer_groups/update, customer_groups/delete, customers/create, customers/enable, customers/disable, customers/update, customers/delete, fulfillments/create, fulfillments/update, shop/update, app/uninstalled',
'created_at' => 'The date and time when the webhook was created. The API returns this value in ISO 8601 format.',
'format' => 'The format in which the webhook should send the data. Valid values are json and xml.',
'updated_at' => 'The date and time when the webhook was updated. The API returns this value in ISO 8601 format.',
'id' => 'The unique numeric identifier for the webhook.',
'address' => 'The URI where the webhook should send the POST request when the event occurs.'
},
'WWW::Shopify::Model::CustomCollection::Image' => {
'attachment' => 'An image attached to a shop\'s theme returned as Base64-encoded binary data.',
'src' => 'Source URL that specifies the location of the image.'
}
};
our @EXPORT_OK = qw(get_tooltip);
# get_tooltip($package, $field_name)
#
# Look up the admin-tooltip text for one field of a Shopify model class.
# $package is first normalized via WWW::Shopify->translate_model so that
# differently-spelled model names resolve to the same hash key.
# Returns the tooltip string on success and nothing otherwise.  Note that
# fields whose tooltip is the empty string (e.g. 'metafields') are
# deliberately treated as "no tooltip" by the truthiness check below.
sub get_tooltip {
	my ($package, $field_name) = @_;
	$package = WWW::Shopify->translate_model($package);
	# Bare "return" instead of "return undef": yields undef in scalar
	# context but an empty list in list context, avoiding a spurious
	# one-element list (Perl::Critic ProhibitExplicitReturnUndef).
	return unless exists $tooltips->{$package} && $tooltips->{$package}->{$field_name};
	return $tooltips->{$package}->{$field_name};
}
1; | gitpan/WWW-Shopify | lib/WWW/Shopify/Tooltips.pm | Perl | mit | 68,283 |
#!/usr/bin/perl
#
# ***** BEGIN LICENSE BLOCK *****
# Zimbra Collaboration Suite Server
# Copyright (C) 2008, 2009, 2010 Zimbra, Inc.
#
# The contents of this file are subject to the Zimbra Public License
# Version 1.3 ("License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.zimbra.com/license.
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
# ***** END LICENSE BLOCK *****
#
# One-shot migration: add a uniqueness constraint on the DKIMSelector
# attribute to the OpenLDAP cn=config overlay configuration.
# NOTE(review): "use warnings" is absent; consider adding it in a code change.
use strict;
use lib '/opt/zimbra/zimbramon/lib';
use Net::LDAP;
use XML::Simple;
use Getopt::Std;
# Sanity check: this migration only makes sense where OpenLDAP is installed.
if ( ! -d "/opt/zimbra/openldap/etc" ) {
print "ERROR: openldap does not appear to be installed - exiting\n";
exit(1);
}
# Must run as the "zimbra" service account, which owns the LDAP socket/files.
my $id = getpwuid($<);
chomp $id;
if ($id ne "zimbra") {
print STDERR "Error: must be run as zimbra user\n";
exit (1);
}
# Pull the LDAP root password and master flag from Zimbra's localconfig.xml.
my $localxml = XMLin("/opt/zimbra/conf/localconfig.xml");
my $ldap_root_password = $localxml->{key}->{ldap_root_password}->{value};
chomp($ldap_root_password);
my $ldap_is_master = $localxml->{key}->{ldap_is_master}->{value};
chomp($ldap_is_master);
# NOTE(review): $zimbra_home is computed but never used below.
my $zimbra_home = $localxml->{key}->{zimbra_home}->{value};
if ($zimbra_home eq "") {
$zimbra_home = "/opt/zimbra";
}
# Bind to the local slapd over the ldapi:// unix socket as cn=config.
my $ldap = Net::LDAP->new('ldapi://%2fopt%2fzimbra%2fopenldap%2fvar%2frun%2fldapi/') or die "$@";
my $mesg = $ldap->bind("cn=config", password=>"$ldap_root_password");
$mesg->code && die "Bind: ". $mesg->error . "\n";
# Default: the main mdb database is olcDatabase={2}mdb.
my $bdn="olcDatabase={2}mdb,cn=config";
if(lc($ldap_is_master) eq "true") {
# On masters an accesslog database may be configured; probe for it with a
# minimal base-scope search ('1.1' = request no attributes).
$mesg = $ldap->search(
base=> "cn=accesslog",
filter=>"(objectClass=*)",
scope => "base",
attrs => ['1.1'],
);
my $size = $mesg->count;
if ($size > 0 ) {
# Accesslog present, so the main database is shifted to index {3}.
$bdn="olcDatabase={3}mdb,cn=config";
}
}
# Locate the "unique" overlay configuration entry under the main database.
$mesg = $ldap ->search(
base=>"$bdn",
filter=>"(objectClass=olcUniqueConfig)",
scope=>"sub",
attrs => ['1.1'],
);
my $size = $mesg->count;
if ($size > 0) {
my $dn= $mesg->entry(0)->dn;
# Enforce DKIMSelector uniqueness across the whole tree.
$mesg = $ldap->modify( "$dn",
add =>{olcUniqueURI => 'ldap:///?DKIMSelector?sub'},
);
# Warn but do not abort: the value may already exist on a re-run.
$mesg->code && warn "failed to add entry: ", $mesg->error ;
}
$ldap->unbind;
| nico01f/z-pec | ZimbraServer/src/ldap/migration/migrate20120507-UniqueDKIMSelector.pl | Perl | mit | 2,397 |
# !!!!!!! DO NOT EDIT THIS FILE !!!!!!!
# This file is machine-generated by lib/unicore/mktables from the Unicode
# database, Version 12.1.0. Any changes made here will be lost!
# !!!!!!! INTERNAL PERL USE ONLY !!!!!!!
# This file is for internal use by core Perl only. The format and even the
# name or existence of this file are subject to change without notice. Don't
# use it directly. Use Unicode::UCD to access the Unicode character data
# base.
# The heredoc below is an inversion list: "V10" announces 10 entries, and
# each following number toggles membership in the property's code-point set
# (presumably the Armenian script ranges, e.g. 1329 = U+0531 — the exact
# semantics are owned by mktables; do not hand-edit the data).
return <<'END';
V10
1329
1367
1369
1417
1418
1419
1421
1424
64275
64280
END
| operepo/ope | client_tools/svc/rc/usr/share/perl5/core_perl/unicore/lib/Sc/Armn.pl | Perl | mit | 543 |
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% CWT-Prolog Server Interface %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% HTTP front end: wires SWI-Prolog's threaded HTTP server to the game
% predicates exported by the local database module.
:- use_module(library(http/thread_httpd)).
:- use_module(library(http/http_dispatch)).
:- use_module(library(http/http_parameters)).
:- use_module(database).
% Basic header for plain text
header('text/plain', 'Content-type: text/plain~n~n').
%--------------------------------------------------------------------------------%
% Main Interface
%--------------------------------------------------------------------------------%
% port/1 records which port the server was started on so that disconnect/0
% can later stop the right server instance.
:- dynamic port/1.
% Route table: each URL path is dispatched to the handler of the same name.
:- http_handler(/, server_status, []).
:- http_handler('/login', login, []).
:- http_handler('/logout', logout, []).
:- http_handler('/ping', ping, []).
:- http_handler('/create_game', create_game, []).
:- http_handler('/join_game', join_game, []).
:- http_handler('/resign_game', resign_game, []).
%% start_server(+File:atom, +Port:between(1, 0xffff)) is semidet.
%
% Attach to the specified database file, and start the server on the specified
% port.
start_server(File, Port) :-
must_be(between(1, 0xffff), Port),
attach_db(File),
% Garbage-collect the persistent database before serving requests.
db_sync(gc),
% Remember the port so disconnect/0 can shut this instance down.
asserta(port(Port)),
http_server(http_dispatch, [port(Port)]).
%% server_status(+Request) is det.
%
% Friendly message to let client know that the server is up.
server_status(_Request) :-
header('text/plain', Header),
format(Header),
format('Server is up.~n').
%% send_status(+Status:string) is det.
%
% Takes a response status and sends the information to the client.
% Emits the plain-text content-type header followed by the status string.
send_status(Status) :-
header('text/plain', Header),
format(Header),
format('~s', [Status]).
%% disconnect is det.
%
% Shut down server on specified port and clean up information from top level.
disconnect :-
port(Port),
http_stop_server(Port, []),
retractall(port(_)).
%--------------------------------------------------------------------------------%
% Queries
%--------------------------------------------------------------------------------%
% Each handler extracts its HTTP parameters, delegates to the corresponding
% database-module predicate, and reports the resulting status to the client.
%% login(+Query:compound) is det.
%
% Attempt login and send status back to client.
login(Query) :-
http_parameters(Query, [name(User, [string])]),
login(user(User), Response),
send_status(Response).
%% login(+Query:compound) is det.
%
% Attempt logout and send status back to client.
logout(Query) :-
http_parameters(Query, [name(User, [string])]),
logout(user(User), Response),
send_status(Response).
%% ping(+Query:compound) is det.
%
% Receive ping from client with username.
ping(Query) :-
http_parameters(Query, [name(User, [string])]),
ping(user(User), Response),
send_status(Response).
%% create_game(+Query:compound) is det.
%
% Create a game if all internal restrictions are met for creation.
create_game(Query) :-
http_parameters(Query, [
user(User, [string]),
pos(Pos, [between(1, 4)]),
game(Game, [string]),
limit(Limit, [between(1, 4)]),
layout(Layout, [string])
]),
% Normalize the board layout to a list of lower-case characters.
string_codes(Layout, Codes),
maplist(code_lower_char, Codes, Chars),
create_game(game(User, Game, limit(Limit), Chars), pos(Pos), Response),
send_status(Response).
%% code_lower_char(+Code:code, -Char:char) is det.
%
% Convert a character code to its lower-case character atom.
code_lower_char(Code, Char) :-
to_lower(Code, Lower),
char_code(Char, Lower).
%% join_game(+Query:compound) is det.
%
% Allow a user to join a game if all internal restrictions are met for admission.
join_game(Query) :-
http_parameters(Query, [
user(User, [string]),
pos(Pos, [between(1, 4)]),
game(Game, [string])
]),
join_game(user(User), pos(Pos), Game, Response),
send_status(Response).
%% resign_game(+Query:compound) is det.
%
% Resign a user from a game.
resign_game(Query) :-
http_parameters(Query, [
user(User, [string]),
game(Game, [string]),
pos(Pos, [between(1, 4)])
]),
resign_game(user(User), Game, pos(Pos), Response),
send_status(Response).
| eazar001/cwt-prolog | interface.pl | Perl | mit | 3,885 |
#!/usr/bin/perl
# Interactively prompt for an admin password and store a salted SHA-1 hash
# of it in .admin_password (one directory above this script), using a
# lock-protected Storable file.
use strict;
use warnings;
use FindBin qw($RealBin);
use Term::ReadKey;
use Digest::SHA1 qw(sha1_hex);;
use Storable qw(lock_store);
$| = 1;
# Read the password with terminal echo disabled, then restore echo.
print "Type your password:";
ReadMode('noecho');
chomp(my $password = <STDIN>);
ReadMode(0);
print "\n";
# Build a 32-hex-digit salt.
# NOTE(review): rand() is not a cryptographically secure RNG; a CSPRNG
# would be preferable for salt generation.
my @set = ('0' ..'9', 'a' .. 'f');
my $salt = join('', map $set[rand @set], 1 .. 32);
# NOTE(review): SHA-1 is deprecated for password hashing (prefer
# bcrypt/scrypt/argon2) — but changing the algorithm here would invalidate
# every existing stored hash, so it must be coordinated with the verifier.
my $password_data = {
salt => $salt,
hash => sha1_hex($salt, $password),
};
lock_store $password_data => "$RealBin/../.admin_password";
| zarfmouse/zgallery | bin/set_password.pl | Perl | cc0-1.0 | 503 |
eval '(exit $?0)' && eval 'exec perl -S $0 ${1+"$@"}'
& eval 'exec perl -S $0 $argv:q'
if 0;
# Portable sh/csh bootstrap: re-executes this script under perl no matter
# which shell invoked it.
# $Id: run_test.pl 96572 2012-12-21 09:23:06Z johnnyw $
# -*- perl -*-
# ACE HTBP Send/Recv test driver: starts the server on target 1, waits for
# its "ready" synch file, runs the client on target 2, then reaps both
# processes and exits non-zero on any failure.
use lib "$ENV{ACE_ROOT}/bin";
use PerlACE::TestTarget;
$status = 0;
$synchbase = "ready";
my $target1 = PerlACE::TestTarget::create_target (1) || die "Create target 1 failed\n";
my $target2 = PerlACE::TestTarget::create_target (2) || die "Create target 2 failed\n";
# The server creates $synchfile once it is listening; its presence is the
# go-signal for launching the client.
$synchfile = $target1->LocalFile ("$synchbase");
my $port = $target1->RandomPort ();
my $host = $target1->HostName();
my $SV = $target1->CreateProcess("server", "-p $port -o $synchfile");
my $CL = $target2->CreateProcess ("client", " -h $host -p $port");
# Remove stale synch files from previous runs before spawning anything.
$target1->DeleteFile ($synchbase);
$target2->DeleteFile ($synchbase);
$server_status = $SV->Spawn ();
if ($server_status != 0) {
print STDERR "ERROR: server returned $server_status\n";
exit 1;
}
# Wait (bounded) for the server's ready file; kill the server on timeout.
if ($target1->WaitForFileTimed ($synchbase,
$target1->ProcessStartWaitInterval()) == -1) {
print STDERR "ERROR: cannot find file <$synchfile>\n";
$SV->Kill (); $SV->TimedWait (1);
exit 1;
}
$target1->DeleteFile ($synchbase);
# Run the client to completion, allowing extra time beyond startup (285s).
$client = $CL->SpawnWaitKill ($target2->ProcessStartWaitInterval() + 285);
if ($client != 0) {
print STDERR "ERROR: client returned $client\n";
$status = 1;
}
$server = $SV->WaitKill ($target1->ProcessStopWaitInterval());
if ($server != 0) {
print STDERR "ERROR: server returned $server\n";
$status = 1;
}
# Collect remote stderr logs and clean up synch files before exiting.
$target1->GetStderrLog();
$target2->GetStderrLog();
$target1->DeleteFile ($synchbase);
$target2->DeleteFile ($synchbase);
exit $status;
| batmancn/TinySDNController | ACE_wrappers/protocols/tests/HTBP/Send_Recv_Tests/run_test.pl | Perl | apache-2.0 | 1,641 |
package Moose::Meta::Method::Accessor::Native::Hash::exists;
our $VERSION = '2.1404';
use strict;
use warnings;
use Moose::Role;

with 'Moose::Meta::Method::Accessor::Native::Reader',
    'Moose::Meta::Method::Accessor::Native::Hash';

# Native "exists" delegation for hash attributes: the generated accessor
# takes exactly one argument, the key to test.
sub _minimum_arguments { 1 }
sub _maximum_arguments { 1 }

# Emit the inline code that checks, at call time, that the single argument
# is a valid hash key.
sub _inline_check_arguments {
    my ($self) = @_;
    return $self->_inline_check_var_is_valid_key('$_[0]');
}

# Produce the inlined expression testing key presence in the attribute slot.
sub _return_value {
    my ($self, $slot_access) = @_;
    return sprintf 'exists %s->{ $_[0] }', $slot_access;
}

no Moose::Role;
1;
| ray66rus/vndrv | local/lib/perl5/x86_64-linux-thread-multi/Moose/Meta/Method/Accessor/Native/Hash/exists.pm | Perl | apache-2.0 | 564 |
# Key/value tag used to categorize or filter Application Discovery Service
# assets.  Both halves are mandatory; the NameInRequest traits make them
# serialize as lowercase "key"/"value" in API requests.
package Paws::Discovery::Tag;
use Moose;
has Key => (is => 'ro', isa => 'Str', request_name => 'key', traits => ['NameInRequest'], required => 1);
has Value => (is => 'ro', isa => 'Str', request_name => 'value', traits => ['NameInRequest'], required => 1);
1;
### main pod documentation begin ###
=head1 NAME
Paws::Discovery::Tag
=head1 USAGE
This class represents one of two things:
=head3 Arguments in a call to a service
Use the attributes of this class as arguments to methods. You shouldn't make instances of this class.
Each attribute should be used as a named argument in the calls that expect this type of object.
As an example, if Att1 is expected to be a Paws::Discovery::Tag object:
$service_obj->Method(Att1 => { Key => $value, ..., Value => $value });
=head3 Results returned from an API call
Use accessors for each attribute. If Att1 is expected to be an Paws::Discovery::Tag object:
$result = $service_obj->Method(...);
$result->Att1->Key
=head1 DESCRIPTION
Metadata that help you categorize IT assets.
=head1 ATTRIBUTES
=head2 B<REQUIRED> Key => Str
The type of tag on which to filter.
=head2 B<REQUIRED> Value => Str
A value for a tag key on which to filter.
=head1 SEE ALSO
This class forms part of L<Paws>, describing an object used in L<Paws::Discovery>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/Discovery/Tag.pm | Perl | apache-2.0 | 1,494 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V10::Common::DisplayCallToAction;

use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);

use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;

# Constructor: copy the recognized fields out of $args, strip unset ones
# for a concise JSON payload, and bless the result into this class.
sub new {
  my ($class, $args) = @_;

  my %fields;
  $fields{$_} = $args->{$_} for qw(text textColor urlCollectionId);
  my $self = \%fields;

  # Delete the unassigned fields in this object for a more concise JSON payload
  remove_unassigned_fields($self, $args);

  return bless $self, $class;
}

1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V10/Common/DisplayCallToAction.pm | Perl | apache-2.0 | 1,124 |
#!perl

use strict;
use warnings;

# Compares qx entries.
# Usage: perl sameq.pl no1 no2
# Reads the two BBO .lin files identified by the numeric IDs, collects the
# qx| board identifiers from each, and reports whether one file's boards
# are a subset of the other's (or prints the symmetric difference).

if ($#ARGV != 1)
{
print "Usage: perl sameq.pl no1 no2\n";
exit;
}

my $lin1 = $ARGV[0];
my $lin2 = $ARGV[1];

# Root of the BBOVG hand archive; it lives in different places on the
# author's laptop vs. PC, distinguished here by the `uname -a` output.
my $DIR;
if (`uname -a` =~ /CDD/)
{
# Laptop
$DIR = "../../../bridgedata/BBOVG";
}
else
{
# PC
$DIR = "../../../bridgedata/hands/BBOVG";
}

my $file1 = no2file($lin1);
my $file2 = no2file($lin2);

# qx identifier => 1, one hash per file.
my (%list1, %list2);
file2list($file1, \%list1);
file2list($file2, \%list2);

# Partition identifiers into "shared" vs. "only in one file".
my (@only1, @only2);
my $overlap1 = 0;
my $overlap2 = 0;

for my $k (keys %list1)
{
if (defined $list2{$k})
{
$overlap1++;
}
else
{
push @only1, $k;
}
}

for my $k (keys %list2)
{
if (defined $list1{$k})
{
$overlap2++;
}
else
{
push @only2, $k;
}
}

# Both counts measure the same intersection, so they must agree.
die "Odd overlaps" unless $overlap1 == $overlap2;

if ($#only1 == -1 && $#only2 == -1)
{
print "Complete qx overlap\n";
}
elsif ($#only1 == -1)
{
print "$lin1 qx's are completely contained in $lin2\n";
}
elsif ($#only2 == -1)
{
print "$lin2 qx's are completely contained in $lin1\n";
}
else
{
# Partial overlap: print each file's unique identifiers, sorted.
print "$lin1 ($overlap1):";
print " $_" for (sort @only1);
print "\n";
print "$lin2 ($overlap2):";
print " $_" for (sort @only2);
print "\n";
}
# Map a numeric tournament ID to its .lin file path under $DIR.
# Files are grouped in directories of 1000, with the directory name
# zero-padded to six digits.  Returns "" for a non-numeric argument.
sub no2file
{
  my ($no) = @_;
  return "" unless $no =~ /^\d+$/;

  return "$DIR/000000/$no.lin" if $no < 1000;

  my $thousands = int($no/1000);
  return "$DIR/00${thousands}000/$no.lin" if $no < 10000;
  return "$DIR/0${thousands}000/$no.lin";
}
# Collect the qx| identifiers from a .lin file into the hash referenced
# by $list_ref (identifier => 1).  A qx token may start the line or
# appear after a '|' separator; the identifier ends at ',' or '|'.
sub file2list
{
  my ($file, $list_ref) = @_;

  open my $fh, '<', $file or die "Can't open $file $!";
  while (my $line = <$fh>)
  {
    next unless $line =~ /^qx\|([^,\|]+)/ || $line =~ /\|qx\|([^,\|]+)/;
    $list_ref->{$1} = 1;
  }
  close $fh;
}
| data-bridge/build | src/sameq.pl | Perl | apache-2.0 | 1,683 |
% Auto-generated constrained Horn clauses (CLP over integers) translated
% from the SpamAssassin loop verification benchmark.  Each new<N> predicate
% encodes a program location; boolean branch outcomes are threaded through
% the extra 0/1 arguments, and arithmetic guards encode the path conditions.
% The safety property is the final query: `false :- new1.` asserts that
% new1 must be unsatisfiable (the error location is unreachable).
% NOTE(review): machine-generated - do not hand-edit individual clauses.
new76(A,B,C,D,E,F,G) :- B=0.
new76(A,B,C,D,E,F,G) :- H=1+E, B=< -1, new4(A,C,D,H,F,G).
new76(A,B,C,D,E,F,G) :- H=1+E, B>=1, new4(A,C,D,H,F,G).
new74(A,B,C,D,E,F) :- G=1, D>=0, new76(A,G,B,C,D,E,F).
new74(A,B,C,D,E,F) :- G=0, D=< -1, new76(A,G,B,C,D,E,F).
new72(A,B,C,D,E,F,G) :- B=0.
new72(A,B,C,D,E,F,G) :- B=< -1, new74(A,C,D,E,F,G).
new72(A,B,C,D,E,F,G) :- B>=1, new74(A,C,D,E,F,G).
new70(A,B,C,D,E,F) :- G=1, D-E=< -1, new72(A,G,B,C,D,E,F).
new70(A,B,C,D,E,F) :- G=0, D-E>=0, new72(A,G,B,C,D,E,F).
new68(A,B,C,D,E,F,G) :- B=0.
new68(A,B,C,D,E,F,G) :- H=1+D, I=1+E, B=< -1, new70(A,C,H,I,F,G).
new68(A,B,C,D,E,F,G) :- H=1+D, I=1+E, B>=1, new70(A,C,H,I,F,G).
new66(A,B,C,D,E,F) :- G=1, D>=0, new68(A,G,B,C,D,E,F).
new66(A,B,C,D,E,F) :- G=0, D=< -1, new68(A,G,B,C,D,E,F).
new64(A,B,C,D,E,F,G) :- B=0.
new64(A,B,C,D,E,F,G) :- B=< -1, new66(A,C,D,E,F,G).
new64(A,B,C,D,E,F,G) :- B>=1, new66(A,C,D,E,F,G).
new62(A,B,C,D,E,F) :- G=1, D-E=< -1, new64(A,G,B,C,D,E,F).
new62(A,B,C,D,E,F) :- G=0, D-E>=0, new64(A,G,B,C,D,E,F).
new60(A,B,C,D,E,F,G) :- B=0.
new60(A,B,C,D,E,F,G) :- B=< -1, new62(A,C,D,E,F,G).
new60(A,B,C,D,E,F,G) :- B>=1, new62(A,C,D,E,F,G).
new58(A,B,C,D,E,F) :- G=1, C>=0, new60(A,G,B,C,D,E,F).
new58(A,B,C,D,E,F) :- G=0, C=< -1, new60(A,G,B,C,D,E,F).
new56(A,B,C,D,E,F,G) :- B=0.
new56(A,B,C,D,E,F,G) :- B=< -1, new58(A,C,D,E,F,G).
new56(A,B,C,D,E,F,G) :- B>=1, new58(A,C,D,E,F,G).
new54(A,B,C,D,E,F) :- G=1, B-C>=1, new56(A,G,B,C,D,E,F).
new54(A,B,C,D,E,F) :- G=0, B-C=<0, new56(A,G,B,C,D,E,F).
new52(A,B,C,D,E,F,G) :- B=0.
new52(A,B,C,D,E,F,G) :- H=1+D, I=1+E, B=< -1, new54(A,C,H,I,F,G).
new52(A,B,C,D,E,F,G) :- H=1+D, I=1+E, B>=1, new54(A,C,H,I,F,G).
new50(A,B,C,D,E,F) :- G=1, D>=0, new52(A,G,B,C,D,E,F).
new50(A,B,C,D,E,F) :- G=0, D=< -1, new52(A,G,B,C,D,E,F).
new48(A,B,C,D,E,F,G) :- B=0.
new48(A,B,C,D,E,F,G) :- B=< -1, new50(A,C,D,E,F,G).
new48(A,B,C,D,E,F,G) :- B>=1, new50(A,C,D,E,F,G).
new46(A,B,C,D,E,F) :- G=1, D-E=< -1, new48(A,G,B,C,D,E,F).
new46(A,B,C,D,E,F) :- G=0, D-E>=0, new48(A,G,B,C,D,E,F).
new44(A,B,C,D,E,F,G) :- B=0.
new44(A,B,C,D,E,F,G) :- B=< -1, new46(A,C,D,E,F,G).
new44(A,B,C,D,E,F,G) :- B>=1, new46(A,C,D,E,F,G).
new42(A,B,C,D,E,F) :- G=1, C>=0, new44(A,G,B,C,D,E,F).
new42(A,B,C,D,E,F) :- G=0, C=< -1, new44(A,G,B,C,D,E,F).
new40(A,B,C,D,E,F,G) :- B=0.
new40(A,B,C,D,E,F,G) :- B=< -1, new42(A,C,D,E,F,G).
new40(A,B,C,D,E,F,G) :- B>=1, new42(A,C,D,E,F,G).
new39(A,B,C,D,E,F) :- G=1, B-C>=1, new40(A,G,B,C,D,E,F).
new39(A,B,C,D,E,F) :- G=0, B-C=<0, new40(A,G,B,C,D,E,F).
new35(A,B,C,D,E,F) :- A=< -1, new12(A,B,C,D,E,F).
new35(A,B,C,D,E,F) :- A>=1, new12(A,B,C,D,E,F).
new35(A,B,C,D,E,F) :- A=0, new39(A,B,C,D,E,F).
new33(A,B,C,D,E,F,G) :- B=0.
new33(A,B,C,D,E,F,G) :- B=< -1, new35(A,C,D,E,F,G).
new33(A,B,C,D,E,F,G) :- B>=1, new35(A,C,D,E,F,G).
new31(A,B,C,D,E,F) :- G=1, C>=0, new33(A,G,B,C,D,E,F).
new31(A,B,C,D,E,F) :- G=0, C=< -1, new33(A,G,B,C,D,E,F).
new29(A,B,C,D,E,F,G) :- B=0.
new29(A,B,C,D,E,F,G) :- B=< -1, new31(A,C,D,E,F,G).
new29(A,B,C,D,E,F,G) :- B>=1, new31(A,C,D,E,F,G).
new25(A,B,C,D,E,F,G) :- B=0.
new25(A,B,C,D,E,F,G) :- H=1+D, I=1+E, B=< -1, new4(A,C,H,I,F,G).
new25(A,B,C,D,E,F,G) :- H=1+D, I=1+E, B>=1, new4(A,C,H,I,F,G).
new23(A,B,C,D,E,F) :- G=1, D>=0, new25(A,G,B,C,D,E,F).
new23(A,B,C,D,E,F) :- G=0, D=< -1, new25(A,G,B,C,D,E,F).
new21(A,B,C,D,E,F,G) :- B=0.
new21(A,B,C,D,E,F,G) :- B=< -1, new23(A,C,D,E,F,G).
new21(A,B,C,D,E,F,G) :- B>=1, new23(A,C,D,E,F,G).
new19(A,B,C,D,E,F) :- G=1, D-E=< -1, new21(A,G,B,C,D,E,F).
new19(A,B,C,D,E,F) :- G=0, D-E>=0, new21(A,G,B,C,D,E,F).
new17(A,B,C,D,E,F,G) :- B=0.
new17(A,B,C,D,E,F,G) :- B=< -1, new19(A,C,D,E,F,G).
new17(A,B,C,D,E,F,G) :- B>=1, new19(A,C,D,E,F,G).
new15(A,B,C,D,E,F) :- G=1, C>=0, new17(A,G,B,C,D,E,F).
new15(A,B,C,D,E,F) :- G=0, C=< -1, new17(A,G,B,C,D,E,F).
new13(A,B,C,D,E,F,G) :- B=0.
new13(A,B,C,D,E,F,G) :- B=< -1, new15(A,C,D,E,F,G).
new13(A,B,C,D,E,F,G) :- B>=1, new15(A,C,D,E,F,G).
new12(A,B,C,D,E,F) :- G=1, B-C>=1, new13(A,G,B,C,D,E,F).
new12(A,B,C,D,E,F) :- G=0, B-C=<0, new13(A,G,B,C,D,E,F).
new11(A,B,C,D,E,F) :- G=1, B-C>=2, new29(A,G,B,C,D,E,F).
new11(A,B,C,D,E,F) :- G=0, B-C=<1, new29(A,G,B,C,D,E,F).
new9(A,B,C,D,E,F) :- new3(A,B,C,D,E,F).
new8(A,B,C,D,E,F) :- B-C>=2, new11(A,B,C,D,E,F).
new8(A,B,C,D,E,F) :- B-C=<1, new12(A,B,C,D,E,F).
new6(A,B,C,D,E,F) :- new3(A,B,C,D,E,F).
new5(A,B,C,D,E,F) :- D-F=< -1, new8(A,B,C,D,E,F).
new5(A,B,C,D,E,F) :- D-F>=0, new9(A,B,C,D,E,F).
new4(A,B,C,D,E,F) :- B-C>=1, new5(A,B,C,D,E,F).
new4(A,B,C,D,E,F) :- B-C=<0, new6(A,B,C,D,E,F).
new3(A,B,C,D,E,F) :- G=0, B-C>=1, new4(A,B,C,G,E,F).
new2(A) :- B=0, C=4+D, new3(A,E,B,F,C,D).
new1 :- new2(A).
false :- new1.
| bishoksan/RAHFT | benchmarks_scp/misc/programs-clp/INVGEN-SpamAssassin-loop.map.c.map.pl | Perl | apache-2.0 | 4,650 |
#! /usr/bin/perl
use strict;
use utf8;
use warnings;

use Bio::SeqIO;

# preDeal4sixPack: emit all six reading frames for every CDS record in a
# FASTA file.  For each record the forward sequence is printed as frames
# F1-F3 and its reverse complement as frames F6-F4, trimming one leading
# base per iteration to shift the frame.
#
# Fix: the argument-count check used the string operator 'eq' on @ARGV;
# numeric '==' is the correct comparison for the element count.
die "perl $0 <cds.fa> > out.fa\n" unless (@ARGV == 1);

my $in = Bio::SeqIO->new(-file => "< $ARGV[0]", -format => "fasta");

while (my $seq = $in->next_seq()) {
    my $reseq = $seq->revcom;
    my $tmp_id = $seq->id;
    my $tmp_seq = $seq->seq;
    my $re_tmp_id = $reseq->id;
    my $re_tmp_seq = $reseq->seq;
    for (my $i = 1; $i <= 3; $i++) {
        my $tag = 7 - $i;    # 6, 5, 4: the reverse-strand frame number
        my $tmp_desc = "F$i";
        my $re_tmp_desc = "F$tag";
        print ">$tmp_id $tmp_desc\n$tmp_seq\n";
        print ">$re_tmp_id $re_tmp_desc\n$re_tmp_seq\n";
        # Drop one leading base from each strand to move to the next frame.
        $tmp_seq =~ s/^\w//;
        $re_tmp_seq =~ s/^\w//;
    }
}
| BaconKwan/Perl_programme | sixPack/preDeal4sixPack.pl | Perl | apache-2.0 | 613 |
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Plugin mode: check VRRP instance status via the standard VRRP-MIB
# (vrrpOperTable).  Keeps the previous operational state in a statefile so
# a state change between runs can trigger the critical threshold.
package snmp_standard::mode::vrrp;

use base qw(centreon::plugins::templates::counter);

use strict;
use warnings;
use Digest::MD5 qw(md5_hex);
use centreon::plugins::templates::catalog_functions qw(catalog_status_threshold);

# Human-readable status line for one VRRP instance.
sub custom_status_output {
    my ($self, %options) = @_;

    my $msg = sprintf("state : %s [admin state : '%s']",
        $self->{result_values}->{operState}, $self->{result_values}->{adminState});
    return $msg;
}

# Pull current and previous values out of the new/old statefile snapshots.
# Returns -2 on the very first run (no previous oper state yet) so the
# check is skipped while the cache is being primed.
sub custom_status_calc {
    my ($self, %options) = @_;

    $self->{result_values}->{adminState} = $options{new_datas}->{$self->{instance} . '_admin_state'};
    $self->{result_values}->{operStateLast} = $options{old_datas}->{$self->{instance} . '_oper_state'};
    $self->{result_values}->{operState} = $options{new_datas}->{$self->{instance} . '_oper_state'};
    $self->{result_values}->{masterIpAddr} = $options{new_datas}->{$self->{instance} . '_master_ip_addr'};
    if (!defined($options{old_datas}->{$self->{instance} . '_oper_state'})) {
        $self->{error_msg} = "buffer creation";
        return -2;
    }

    return 0;
}

# Declare the counter template: one status entry per VRRP instance,
# status evaluated purely through the custom threshold closures.
sub set_counters {
    my ($self, %options) = @_;

    $self->{maps_counters_type} = [
        { name => 'vrrp', type => 1, cb_prefix_output => 'prefix_vrrp_output', message_multiple => 'All VRRP are ok' },
    ];

    $self->{maps_counters}->{vrrp} = [
        { label => 'status', threshold => 0, set => {
                key_values => [ { name => 'master_ip_addr' }, { name => 'admin_state' }, { name => 'oper_state' } ],
                closure_custom_calc => $self->can('custom_status_calc'),
                closure_custom_output => $self->can('custom_status_output'),
                closure_custom_perfdata => sub { return 0; },
                closure_custom_threshold_check => \&catalog_status_threshold,
            }
        },
    ];
}

sub prefix_vrrp_output {
    my ($self, %options) = @_;

    return "VRRP '" . $options{instance_value}->{master_ip_addr} . "' ";
}

# statefile => 1: this mode needs the cache to compare oper states.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options, statefile => 1);
    bless $self, $class;

    $options{options}->add_options(arguments => {
        'warning-status:s'  => { name => 'warning_status', default => '' },
        'critical-status:s' => { name => 'critical_status', default => '%{adminState} eq "up" and %{operState} ne %{operStateLast}' },
    });

    return $self;
}

sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::check_options(%options);

    $self->change_macros(macros => ['warning_status', 'critical_status']);
}

# SNMP integer -> label maps for the VRRP-MIB enumerations.
my %map_admin_state = (1 => 'up', 2 => 'down');
my %map_oper_state = (1 => 'initialize', 2 => 'backup', 3 => 'master');

# vrrpOperTable columns of interest.
my $mapping = {
    vrrpOperState        => { oid => '.1.3.6.1.2.1.68.1.3.1.3', map => \%map_oper_state },
    vrrpOperAdminState   => { oid => '.1.3.6.1.2.1.68.1.3.1.4', map => \%map_admin_state },
    vrrpOperMasterIpAddr => { oid => '.1.3.6.1.2.1.68.1.3.1.7' },
};
my $oid_vrrpOperEntry = '.1.3.6.1.2.1.68.1.3.1';

# Walk the vrrpOperTable and build one entry per instance.
sub manage_selection {
    my ($self, %options) = @_;

    $self->{vrrp} = {};
    my $snmp_result = $options{snmp}->get_table(
        oid => $oid_vrrpOperEntry,
        end => $mapping->{vrrpOperMasterIpAddr}->{oid},
        nothing_quit => 1
    );

    foreach my $oid (keys %{$snmp_result}) {
        next if ($oid !~ /^$mapping->{vrrpOperState}->{oid}\.(.*)$/);
        my $instance = $1;
        my $result = $options{snmp}->map_instance(mapping => $mapping, results => $snmp_result, instance => $instance);

        $self->{vrrp}->{$instance} = {
            master_ip_addr => $result->{vrrpOperMasterIpAddr},
            admin_state => $result->{vrrpOperAdminState},
            oper_state => $result->{vrrpOperState},
        };
    }

    # Statefile key: unique per host/port/filter so caches don't collide.
    $self->{cache_name} = "vrrp_" . $self->{mode} . '_' . $options{snmp}->get_hostname() . '_' . $options{snmp}->get_port() . '_' .
        (defined($self->{option_results}->{filter_counters}) ? md5_hex($self->{option_results}->{filter_counters}) : md5_hex('all'));
}

1;

__END__

=head1 MODE

Check VRRP status (VRRP-MIB).

=over 8

=item B<--warning-status>

Set warning threshold for status.
Can use special variables like: %{adminState}, %{operStateLast}, %{operState}, %{masterIpAddr}

=item B<--critical-status>

Set critical threshold for status (Default: '%{adminState} eq "up" and %{operState} ne %{operStateLast}').
Can use special variables like: %{adminState}, %{operStateLast}, %{operState}, %{masterIpAddr}

=back

=cut
| Tpo76/centreon-plugins | snmp_standard/mode/vrrp.pm | Perl | apache-2.0 | 5,313 |
###########################################$
# Copyright 2008-2010 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License"). You may not
# use this file except in compliance with the License.
# A copy of the License is located at
#
# http://aws.amazon.com/apache2.0
#
# or in the "license" file accompanying this file. This file is distributed on
# an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing permissions
# and limitations under the License.
###########################################$
# __ _ _ ___
# ( )( \/\/ )/ __)
# /__\ \ / \__ \
# (_)(_) \/\/ (___/
#
# Amazon EC2 Perl Library
# API Version: 2010-06-15
# Generated: Wed Jul 21 13:37:54 PDT 2010
#
# Generated request model for the EC2 AttachVpnGateway API call.
# Field storage, hash-ref construction, and (de)serialization live in the
# Amazon::EC2::Model base class; this subclass only declares the fields
# and their accessors.
package Amazon::EC2::Model::AttachVpnGatewayRequest;
use base qw (Amazon::EC2::Model);

#
# Amazon::EC2::Model::AttachVpnGatewayRequest
#
# Properties:
#
#
# VpnGatewayId: string
# VpcId: string
#
#
#
# Constructor: declare the two string fields and optionally populate them
# from a hash reference via the base class's _fromHashRef.
sub new {
    my ($class, $data) = @_;
    my $self = {};
    $self->{_fields} = {

        VpnGatewayId => { FieldValue => undef, FieldType => "string"},
        VpcId => { FieldValue => undef, FieldType => "string"},
    };

    bless ($self, $class);
    if (defined $data) {
        $self->_fromHashRef($data);
    }

    return $self;
}

# --- VpnGatewayId accessors: get / set / fluent with / presence check ---

sub getVpnGatewayId {
    return shift->{_fields}->{VpnGatewayId}->{FieldValue};
}

sub setVpnGatewayId {
    my ($self, $value) = @_;

    $self->{_fields}->{VpnGatewayId}->{FieldValue} = $value;
    return $self;
}

sub withVpnGatewayId {
    my ($self, $value) = @_;
    $self->setVpnGatewayId($value);
    return $self;
}

sub isSetVpnGatewayId {
    return defined (shift->{_fields}->{VpnGatewayId}->{FieldValue});
}

# --- VpcId accessors: get / set / fluent with / presence check ---

sub getVpcId {
    return shift->{_fields}->{VpcId}->{FieldValue};
}

sub setVpcId {
    my ($self, $value) = @_;

    $self->{_fields}->{VpcId}->{FieldValue} = $value;
    return $self;
}

sub withVpcId {
    my ($self, $value) = @_;
    $self->setVpcId($value);
    return $self;
}

sub isSetVpcId {
    return defined (shift->{_fields}->{VpcId}->{FieldValue});
}

1;
| electric-cloud/EC-EC2 | src/main/resources/project/lib/Amazon/EC2/Model/AttachVpnGatewayRequest.pm | Perl | apache-2.0 | 2,464 |
# SMS message payload for Pinpoint sends: body text, message type
# (transactional vs. promotional priority), sender ID, and per-address
# substitution map.  All attributes are optional read-only Moose attrs.
package Paws::Pinpoint::SMSMessage;
use Moose;
has Body => (is => 'ro', isa => 'Str');
has MessageType => (is => 'ro', isa => 'Str');
has SenderId => (is => 'ro', isa => 'Str');
has Substitutions => (is => 'ro', isa => 'Paws::Pinpoint::MapOfListOf__string');
1;
### main pod documentation begin ###
=head1 NAME
Paws::Pinpoint::SMSMessage
=head1 USAGE
This class represents one of two things:
=head3 Arguments in a call to a service
Use the attributes of this class as arguments to methods. You shouldn't make instances of this class.
Each attribute should be used as a named argument in the calls that expect this type of object.
As an example, if Att1 is expected to be a Paws::Pinpoint::SMSMessage object:
$service_obj->Method(Att1 => { Body => $value, ..., Substitutions => $value });
=head3 Results returned from an API call
Use accessors for each attribute. If Att1 is expected to be an Paws::Pinpoint::SMSMessage object:
$result = $service_obj->Method(...);
$result->Att1->Body
=head1 DESCRIPTION
SMS Message.
=head1 ATTRIBUTES
=head2 Body => Str
The message body of the notification, the email body or the text
message.
=head2 MessageType => Str
Is this a transaction priority message or lower priority.
=head2 SenderId => Str
Sender ID of sent message.
=head2 Substitutions => L<Paws::Pinpoint::MapOfListOf__string>
Default message substitutions. Can be overridden by individual address
substitutions.
=head1 SEE ALSO
This class forms part of L<Paws>, describing an object used in L<Paws::Pinpoint>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/Pinpoint/SMSMessage.pm | Perl | apache-2.0 | 1,741 |
package VMOMI::ArrayOfHostMemberHealthCheckResult;
use parent 'VMOMI::ComplexType';

use strict;
use warnings;

# No ancestors beyond the ComplexType base for this array wrapper type.
our @class_ancestors = ( );

# Single member declaration consumed by the VMOMI (de)serializer.
our @class_members = (
    ['HostMemberHealthCheckResult', 'HostMemberHealthCheckResult', 1, 1],
);

sub get_class_ancestors {
    return @class_ancestors;
}

# Combine inherited member declarations with this class's own.
sub get_class_members {
    my $class = shift;
    return ($class->SUPER::get_class_members(), @class_members);
}

1;
| stumpr/p5-vmomi | lib/VMOMI/ArrayOfHostMemberHealthCheckResult.pm | Perl | apache-2.0 | 459 |
% min(+List, -Min): Min is the smallest element of a non-empty List,
% computed with an accumulator seeded from the head.
min([First|Rest], Min) :- accMin(Rest, First, Min).

% accMin(+List, +Acc, -Min): fold the running minimum Acc over List.
accMin([], Min, Min).
accMin([First|Rest], Acc, Min) :-
    ( First < Acc ->
        accMin(Rest, First, Min)
    ;   accMin(Rest, Acc, Min)
    ).
| JoachimVandersmissen/CodingSolutions | prolog/LearnPrologNow/chapter5/practical1.pl | Perl | apache-2.0 | 214 |
%% Ciao Prolog helper module for the tcltk examples: small predicates the
%% Tcl/Tk side calls back into (hello/0, show/1, factorial/2, quit/0).
:- module(tk_test_aux, [
hello/0,
factorial/2,
show/1,
quit/0],
[objects]).

%% Print a fixed greeting followed by a newline.
hello :-
display('Hello !!!!'),
nl.

%% Print any term followed by a newline.
show(X) :-
display(X),
nl.

%% factorial(+N, -X): X is N!.  Delegates to factorial_aux/2.
factorial(N2,X) :-
factorial_aux(N2,X).
%% display(X),
%% nl.

factorial_aux(0,1).
factorial_aux(N,X1) :-
N > 0,
N1 is N - 1,
factorial_aux(N1,X2),
X1 is X2 * N.

%% quit/0 always succeeds; no cleanup is performed here.
quit.
| leuschel/ecce | www/CiaoDE/ciao/library/tcltk/examples/tk_test_aux.pl | Perl | apache-2.0 | 445 |
#!/usr/bin/perl -w
#####
#
# bin2png.pl - convert each listed text dump of byte values into a
# binary <file>.png via process_file().
#
#####

#####
#
# includes
#
#####
use File::Basename;

#####
#
# main: require at least one argument, then process every file given.
#
#####
if ( @ARGV == 0 ) {
    print STDERR "usage: bin2png <file.bin>+\n";
} else {
    process_file($_) for @ARGV;
}
#####
#
# process_file
#
#####
# Convert one input file of decimal byte values (one value at the start of
# each line) into "<file>.png" containing the corresponding raw bytes.
#
# Fixes: the original `open INPUT, $file || die ...` never checked the
# open (|| bound to $file, not to open), used bareword handles with the
# unsafe 2-arg open, never checked the write handle at close, and wrote
# binary data without binmode.
sub process_file {
    my $file = shift;
    my $dest = $file . ".png";

    open my $in, '<', $file or die "Could not open $file: $!";
    open my $out, '>', $dest or die "Could not create $dest: $!";
    binmode $out;    # output is raw binary data

    while ( my $line = <$in> ) {
        chomp $line;
        # Keep only the leading decimal value on the line.
        $line =~ s/^([0-9]+).*$/$1/g;
        print $out chr($line);
    }

    # Buffered write errors surface at close; report them.
    close $out or die "Could not write $dest: $!";
    close $in;
}
| thelonious/svg-2d | utilities/js_png/tests/bin2png.pl | Perl | bsd-3-clause | 666 |
#!/usr/bin/env perl

use Getopt::Long;
use Pod::Usage;
use FindBin;
use lib "$FindBin::Bin/../lib";
use Blast qw (parse_xml revcomp_hsp);
use Genbank qw (parse_genbank get_sequence);
use Subfunctions qw (parse_fasta reverse_complement split_seq find_sequences consensus_str);
use File::Temp qw (tempfile);
use Data::Dumper;
use YAML::Tiny;

# Pipeline overview:
#   1. load the reference plastome and locate its two inverted repeats by
#      self-BLAST, yielding the LSC / IRB / SSC / IRA regions;
#   2. BLAST the candidate contigs against those regions;
#   3. order matching contigs along the reference and meld/scaffold
#      adjacent ones into draft sequences written to <outfile>.draft.fasta.
# Requires external `blastn` and `mafft` binaries on PATH.

my $help = 0;
my $outfile = "";
my $reffile = "";
my $contigfile = "";
my $join = 0;

if (@ARGV == 0) {
    pod2usage(-verbose => 1);
}

GetOptions ('reffile=s' => \$reffile,
            'contigfile=s' => \$contigfile,
            'outfile=s' => \$outfile,
            'join' => \$join,
            'help|?' => \$help) or pod2usage(-msg => "GetOptions failed.", -exitval => 2);

if ($help){
    pod2usage(-verbose => 1);
}

if ($reffile eq "") {
    pod2usage(-verbose => 1, -msg => "need to specify reference file");
}

if ($contigfile eq "") {
    pod2usage(-verbose => 1, -msg => "need to specify contig file");
}

if ($outfile eq "") {
    pod2usage(-verbose => 1, -msg => "need to specify output path");
}

# Load the reference sequence from GenBank or FASTA (first record only).
my $refseq = "";
if ($reffile =~ /\.gb$/) {
    my $gb = parse_genbank($reffile);
    $refseq = get_sequence($gb);
} else {
    my ($ref_hash, $ref_array) = parse_fasta($reffile);
    $refseq = $ref_hash->{@$ref_array[0]};
}

my $reflen = length ($refseq);
my ($reffh, $refseqfile) = tempfile();
print $reffh ">reference\n$refseq\n";
close $reffh;

# Self-BLAST the reference to find the two inverted repeats: identical
# stretches >10kb that are shorter than the whole sequence.
print "finding inverted repeats\n";
my ($fh, $refblast) = tempfile();
system("blastn -query $refseqfile -subject $refseqfile -outfmt 5 -out $refblast.xml -evalue 1e-90");

my $self_array = parse_xml ("$refblast.xml");
my @irs = ();
foreach my $hit (@$self_array) {
    my @hsps = sort order_by_query_start @{$hit->{"hsps"}};
    foreach my $hsp (@hsps) {
        # only look at identical pieces that are smaller than the entire reference
        if ((($hsp->{"query-to"} - $hsp->{"query-from"}) < ($reflen - 1)) && (($hsp->{"query-to"} - $hsp->{"query-from"}) > 10000)) {
            push @irs, $hsp;
        }
    }
}

if (@irs > 2) {
    die "Error! There seem to be more than two inverted repeats. Are you sure this is a plastome sequence?";
}

# Carve the reference into the four canonical plastome regions.
my $curr_pos = 1;
my $regions = ();
my $regions_hash = {};

# LSC goes from 1 to the start of @$irs[0] - 1:
my $region = {};
$regions_hash->{"LSC"} = $region;
$region->{"name"} = "LSC";
$region->{"start"} = 1;
$region->{"end"} = $irs[0]->{"query-from"} - 1;
(undef, $region->{"sequence"}, undef) = split_seq ($refseq, $region->{"start"}, $region->{"end"});
push @$regions, $region;

# IRB goes from the start of @$irs[0] to the end of @$irs[0] (inclusive):
$region = {};
$regions_hash->{"IRB"} = $region;
$region->{"name"} = "IRB";
$region->{"sequence"} = $irs[0]{"hseq"};
$region->{"start"} = $irs[0]->{"query-from"};
$region->{"end"} = $irs[0]->{"query-to"};
push @$regions, $region;

# SSC goes from the end of @$irs[0] + 1 to the start of @$irs[1] - 1:
$region = {};
$regions_hash->{"SSC"} = $region;
$region->{"name"} = "SSC";
$region->{"start"} = $irs[0]->{"query-to"} + 1;
$region->{"end"} = $irs[1]->{"query-from"} - 1;
(undef, $region->{"sequence"}, undef) = split_seq ($refseq, $region->{"start"}, $region->{"end"});
push @$regions, $region;

# IRA goes from the start of @$irs[1] to the end of @$irs[1] (inclusive):
$region = {};
$regions_hash->{"IRA"} = $region;
$region->{"name"} = "IRA";
$region->{"sequence"} = $irs[1]{"hseq"};
$region->{"start"} = $irs[1]->{"query-from"};
$region->{"end"} = $irs[1]->{"query-to"};
push @$regions, $region;

# Write the four regions to a temp FASTA for the contig BLAST.
# NOTE(review): this `my ($fh, ...)` shadows the earlier $fh declaration
# above (and again below) - harmless here, but it provokes a warning.
my ($fh, $refregions) = tempfile();
foreach $region (@$regions) {
    print $fh ">" . $region->{"name"} . "\n" . $region->{"sequence"}. "\n";
    # clean up the region hash for later use.
    delete $region->{"sequence"};
    # set up a hash value to receive the hits when we get them.
    $region->{"hits"} = ();
    $region->{"length"} = $region->{"end"} - $region->{"start"} + 1;
}
close $fh;

if (-e "$outfile.xml") {
    print "skipping blastn\n";
} else {
    print "running blastn\n";
    system("blastn -query $contigfile -subject $refregions -outfmt 5 -out $outfile.xml -culling_limit 1 -evalue 1e-70");
}

print "parsing results\n";
my $hit_array = parse_xml ("$outfile.xml");

# Assign each contig to the region it hit and collapse its HSPs into one
# overall matching span (min..max of the query and hit coordinates).
my @hit_list = ();
foreach my $hit (@$hit_array) {
    # each hit represents a contig that we want to assign to a region.
    my $contig = {};
    $contig->{"name"} = $hit->{"query"}->{"name"};
    $contig->{"length"} = $hit->{"query"}->{"length"};
    push @hit_list, $contig->{"name"};

    # push it into the appropriate region's bucket of hits.
    my $region = $hit->{"subject"}->{"name"};
    push @{$regions_hash->{$region}->{"hits"}}, $contig;
    $contig->{"region"} = $region;

    # each hsp represents a matching segment of this contig to this region.
    foreach my $hsp (@{$hit->{"hsps"}}) {
        if ($hsp->{"hit-from"} > $hsp->{"hit-to"}) {
            # tag this contig as being revcomped, so we can fix it when we deal with whole contigs.
            $contig->{"revcomp"} = " (reverse complement)";
        }
    }

    # consolidate all of the matching segments into one large overall match.
    my @query_ends = ();
    my @hit_ends = ();
    foreach my $hsp (@{$hit->{"hsps"}}) {
        push @query_ends, $hsp->{"query-from"};
        push @query_ends, $hsp->{"query-to"};
        push @hit_ends, $hsp->{"hit-from"};
        push @hit_ends, $hsp->{"hit-to"};
    }
    @query_ends = sort {$a <=> $b} @query_ends;
    @hit_ends = sort {$a <=> $b} @hit_ends;

    # translate region-relative hit coordinates into reference coordinates.
    my $regoffset = $regions_hash->{$region}->{"start"} - 1;
    $contig->{"hit-from"} = $hit_ends[0] + $regoffset;
    $contig->{"hit-to"} = $hit_ends[@hit_ends-1] + $regoffset;
    $contig->{"query-from"} = $query_ends[0];
    $contig->{"query-to"} = $query_ends[@query_ends-1];
}

open OUTFH, ">", "$outfile.raw.yml";
print OUTFH YAML::Tiny->Dump(@$hit_array);
close OUTFH;

# put the sequences for the matching contigs back into the output hash.
my $contig_seqs = find_sequences ($contigfile, \@hit_list);

# write these best seqs out:
open OUTFH, ">", "$outfile.best.fasta";
foreach my $key (keys %$contig_seqs) {
    print "$key\n";
    print OUTFH ">$key\n";
    print OUTFH $contig_seqs->{$key} . "\n";
}
close OUTFH;

# Reverse-complement contigs that matched on the minus strand, then
# project each contig's full length onto reference coordinates and sort
# the hits within every region by reference start.
my @all_hits = ();
foreach $region (@$regions) {
    foreach my $contig (@{$region->{"hits"}}) {
        $contig->{"sequence"} = $contig_seqs->{$contig->{"name"}};
        if (exists $contig->{"revcomp"}) {
            delete $contig->{"revcomp"};
            $contig->{"sequence"} = reverse_complement ($contig->{"sequence"});
            $contig->{"name"} .= "_rc";
            # flip the query's indices: the q-from is now going to be (length - q-from) and the q-to is (length - q-to)
            my $old_qto = $contig->{"query-to"};
            $contig->{"query-to"} = $contig->{"length"} - $contig->{"query-from"};
            $contig->{"query-from"} = $contig->{"length"} - $old_qto;
        }
        # do some cleanup of the hit and query windows.
        # each contig's putative hit span is from the amount of query extending before the start of the match (hit-from), which is (hit-from - query-from), plus whatever portion of its length is beyond that (length - query-from + hit-from)
        $contig->{"hit-to"} = $contig->{"hit-from"} - $contig->{"query-from"} + $contig->{"length"};
        $contig->{"hit-from"} = $contig->{"hit-from"} - $contig->{"query-from"};
        # print "cleaning up " . $contig->{"name"} . ", has length " . $contig->{"length"} . " and its " . $contig->{"query-from"} . "-" . $contig->{"query-to"} . " covers the ref " . $contig->{"hit-from"} . "-" . $contig->{"hit-to"} . "\n";
    }
    my @ordered_hits = sort order_by_hit_start @{$region->{"hits"}};
    push @all_hits, @ordered_hits;
    $region->{"hits"} = \@ordered_hits;
}

open OUTFH, ">", "$outfile.yml";
print OUTFH YAML::Tiny->Dump(@$regions);
close OUTFH;

# do the contigs connect to each other?
# Walk the ordered hits, trying to meld each next contig onto the tail of
# the growing assembly: align the 50 bp junction with mafft and accept if
# the consensus has <5 ambiguities; otherwise scaffold with Ns when the
# gap is <300 bp; otherwise start a new draft contig.
my @final_contigs = ();
my $first_hit = shift @all_hits;
push @final_contigs, $first_hit;

# compare the end of the last contig in final_contigs to the start of the next contig in all_hits
# while there's still anything left in all_hits
while (@all_hits > 0) {
    # first contig to compare is the last one in final_contigs
    my $contig_seq1 = @final_contigs[@final_contigs - 1];
    # second contig to compare is the next unanalyzed one from all_hits
    my $contig_seq2 = shift @all_hits;
    print "comparing " . $contig_seq1->{"name"} . ", maps " . $contig_seq1->{"hit-from"}. "-" . $contig_seq1->{"hit-to"} . ", to " . $contig_seq2->{"name"} . ", maps " . ($contig_seq2->{"hit-to"} - $contig_seq2->{"length"}) ."-". $contig_seq2->{"hit-to"} . "\n";
    # if the second contig's putative hit range is within the first, drop it.
    if ($contig_seq2->{"hit-to"} <= $contig_seq1->{"hit-to"}) {
        next;
    }

    # compare these two contigs' ends
    print "can we meld these contigs? ";
    (my $fh1, my $contig1) = tempfile();
    my $contig_end = $contig_seq1->{"sequence"};
    if ($contig_seq1->{"sequence"} =~ /^(.*)(.{50})$/) {
        $contig_seq1->{"sequence"} = $1;
        $contig_end = $2;
    }
    print $fh1 ">" . $contig_seq1->{"name"} . "_end\n$contig_end\n";
    my $contig_start = $contig_seq2->{"sequence"};
    if ($contig_seq2->{"sequence"} =~ /^(.{50})/) {
        $contig_start = $1;
    }
    print $fh1 ">" . $contig_seq2->{"name"} . "_start\n$contig_start\n";
    close $fh1;

    (undef, my $temp_out) = tempfile(OPEN => 0);
    system ("mafft --retree 2 --maxiterate 0 --op 10 $contig1 > $temp_out 2>/dev/null");

    # the resulting sequences are a match if the consensus sequence has few ambiguous characters.
    (my $aligned_bits, my $alignarray) = parse_fasta($temp_out);
    my @seqs = ();
    foreach my $k (@$alignarray) {
        push @seqs, $aligned_bits->{$k};
    }
    my $cons_seq = consensus_str(\@seqs);
    my @ambigs = $cons_seq =~ m/[NMRWSYKVHDB]/g;
    if (@ambigs < 5) { # if there are less than 5 ambiguities when we align them...
        # meld the sequence:
        $contig_seq1->{"sequence"} = $contig_seq1->{"sequence"} . $cons_seq . $contig_seq2->{"sequence"};

        # update the parameters for the newly-melded contig.
        $contig_seq1->{"name"} = $contig_seq1->{"name"} . "+" . $contig_seq2->{"name"};
        $contig_seq1->{"region"} = $contig_seq1->{"region"} . "+" . $contig_seq2->{"region"};
        $contig_seq1->{"length"} = length $contig_seq1->{"sequence"};
        $contig_seq1->{"hit-to"} = $contig_seq2->{"hit-to"};
        $contig_seq1->{"query-to"} = "" . ($contig_seq1->{"length"} - $contig_seq1->{"query-from"});
        print "yes, new length is " . $contig_seq1->{"length"} . ", covers " . $contig_seq1->{"hit-from"} . "-" . $contig_seq1->{"hit-to"} . "\n";
    } elsif (($contig_seq2->{"hit-from"} - $contig_seq1->{"hit-to"}) < 300) {
        # if the two contigs' hit ends are within 100 bp of each other, scaffold them together by adding Ns
        # NOTE(review): the comment above says 100 bp but the code uses 300.
        $contig_seq1->{"sequence"} .= "N" x ($contig_seq2->{"hit-from"} - $contig_seq1->{"hit-to"}) . $contig_seq2->{"sequence"};

        # update the parameters for the newly-melded contig.
        $contig_seq1->{"name"} = $contig_seq1->{"name"} . "+" . $contig_seq2->{"name"};
        $contig_seq1->{"region"} = $contig_seq1->{"region"} . "+" . $contig_seq2->{"region"};
        $contig_seq1->{"length"} = length $contig_seq1->{"sequence"};
        $contig_seq1->{"hit-to"} = $contig_seq2->{"hit-to"};
        $contig_seq1->{"query-to"} = "" . ($contig_seq1->{"length"} - $contig_seq1->{"query-from"});
        print "maybe? new length is " . $contig_seq1->{"length"} . ", covers " . $contig_seq1->{"hit-from"} . "-" . $contig_seq1->{"hit-to"} . "\n";
    } else {
        # if not meldable, push the second contig onto the final contigs too.
        $contig_seq1->{"sequence"} .= $contig_end;
        $contig_seq2->{"sequence"} = $contig_start . $contig_seq2->{"sequence"};
        push @final_contigs, $contig_seq2;
        print "no, span too large: " . $contig_seq2->{"hit-from"} ."-". $contig_seq1->{"hit-to"} . " = ". ($contig_seq2->{"hit-from"} - $contig_seq1->{"hit-to"}) . "\n";
    }
}

my $final_len = 0;
foreach my $c (@final_contigs) {
    $final_len += $c->{"length"};
}
print "final assembly has " . @final_contigs . " contigs, total length $final_len\n";

open OUTFH, ">", "$outfile.final.yml";
print OUTFH YAML::Tiny->Dump(@final_contigs);
close OUTFH;

# Write the draft: either one joined record (contigs separated by Ns)
# or one FASTA record per draft contig.
open OUTFH, ">", "$outfile.draft.fasta";
if ($join) {
    print OUTFH ">$outfile.draft.fasta\n";
    foreach my $c (@final_contigs) {
        print OUTFH $c->{"sequence"} . "NNNNNNNNNNN";
    }
} else {
    foreach my $c (@final_contigs) {
        print OUTFH ">" . $c->{"name"} . "\n" . $c->{"sequence"} . "\n";
    }
}
close OUTFH;
### Sorting functions
# if $a starts earlier than $b, return -1
# Comparator for sort(): ascending by the contig's "hit-from" position.
sub order_by_hit_start {
    return $a->{"hit-from"} <=> $b->{"hit-from"};
}
# Comparator for sort(): ascending by "hit-from".
# NOTE(review): identical to order_by_hit_start despite the name; it never
# reads a "ref"-specific field.  Kept because callers may use either name.
sub order_by_ref_start {
    return $a->{"hit-from"} <=> $b->{"hit-from"};
}
# Comparator for sort(): ascending by the HSP's "query-from" position.
sub order_by_query_start {
    return $a->{"query-from"} <=> $b->{"query-from"};
}
__END__
=head1 NAME
contigs_to_cp.pl
=head1 SYNOPSIS
contigs_to_cp.pl [-reffile reffile] [-contigfile contigfile] [-outputfile outputfile]
=head1 OPTIONS
-reffile: genbank or fasta file of reference plastome
-contigfile: fasta file of putative cp contigs
-outputfile: name of output file
=head1 DESCRIPTION
Aligns a list of putative cp contigs along a reference plastome. Outputs a YAML file of the best-matching contigs, in order.
=cut
| daisieh/phylogenomics | plastome/contigs_to_cp.pl | Perl | bsd-3-clause | 13,177 |
<?php
include 'CommonAssetsInit.lang.pl';
$APPLICATION_REFERENCE_IS_WRONG = 'Nie przekazałeś referencji do modułu [app::$application].';
$PASS_VALID_REFERENCE = 'Przekaż prawidłową reference do modułu [app::$application]. Aktualnie znajduje się tam <b>%s</b>.';
$CORRECT_REFERENCE_HERE = 'Popraw przekazywaną referencje.'; | deArcane/framework | src/Assets/_Exception/Init/RequireApplication.lang.pl | Perl | mit | 332 |
#------------------------------------------------------------------------------
# File: Leaf.pm
#
# Description: Read Creo Leaf EXIF meta information
#
# Revisions: 09/28/2005 - P. Harvey Created
#------------------------------------------------------------------------------
package Image::ExifTool::Leaf;
use strict;
use vars qw($VERSION);
use Image::ExifTool qw(:DataAccess :Utils);
use Image::ExifTool::Exif;
$VERSION = '1.07';
sub ProcessLeaf($$$);
%Image::ExifTool::Leaf::Main = (
PROCESS_PROC => \&ProcessLeaf,
GROUPS => { 0 => 'Leaf', 2 => 'Camera' },
NOTES => q{
These tags are found in .MOS images from Leaf digital camera backs as
written by Creo Leaf Capture. They exist within the Leaf-specific directory
structure of EXIF tag 0x8606. The tables below list observed Leaf tags,
however ExifTool will extract any tags found in the Leaf directories even if
they don't appear in these tables.
},
icc_camera_profile => {
Name => 'ICC_Profile',
SubDirectory => {
TagTable => 'Image::ExifTool::ICC_Profile::Main',
},
},
icc_rgb_ws_profile => {
Name => 'RGB_Profile',
SubDirectory => {
TagTable => 'Image::ExifTool::ICC_Profile::Main',
},
},
camera_profile => {
Name => 'CameraProfile',
SubDirectory => {
TagTable => 'Image::ExifTool::Leaf::CameraProfile',
},
},
JPEG_preview_data => {
%Image::ExifTool::previewImageTagInfo,
Groups => { 2 => 'Preview' },
},
JPEG_preview_info => 'PreviewInfo',
icc_camera_to_tone_space_flow => {
Name => 'ToneSpaceFlow',
Description => 'ICC To Tone Space Flow',
Format => 'int16u',
},
icc_camera_to_tone_matrix => {
Name => 'ToneMatrix',
Description => 'ICC To Tone Matrix',
Format => 'int8u',
Binary => 1,
},
PDA_histogram_data => {
Name => 'PDAHistogram',
Binary => 1,
},
pattern_ratation_angle => {
Name => 'PatternAngle',
Description => 'Pattern Rotation Angle',
Format => 'int16u',
Notes => '"ratation" is not a typo',
},
back_serial_number => {
Name => 'BackSerial',
Description => 'Back Serial Number',
PrintConv => '$val =~ s/ .*//s; $val',
},
image_offset => { Format => 'int16u' },
);
# Leaf camera profile tag table.
# FIX: the original listed CamProf_back_type twice (once as a plain string,
# once as a hash ref). In a Perl list-to-hash assignment the later key wins
# silently, so only the hash-ref form ever took effect; the dead duplicate
# has been removed. Effective behavior is unchanged.
%Image::ExifTool::Leaf::CameraProfile = (
    PROCESS_PROC => \&ProcessLeaf,
    GROUPS => { 0 => 'Leaf', 2 => 'Camera' },
    CamProf_version => 'CameraProfileVersion',
    CamProf_name => 'CameraName',
    CamProf_type => 'CameraType',
    CamProf_back_type => {
        Name => 'CameraBackType',
    },
    CamProf_capture_profile => {
        SubDirectory => {
            TagTable => 'Image::ExifTool::Leaf::CaptureProfile',
        },
    },
    CamProf_image_profile => {
        SubDirectory => {
            TagTable => 'Image::ExifTool::Leaf::ImageProfile',
        },
    },
);
%Image::ExifTool::Leaf::CaptureProfile = (
PROCESS_PROC => \&ProcessLeaf,
GROUPS => { 0 => 'Leaf', 2 => 'Image' },
CaptProf_version => {},
CaptProf_name => {},
CaptProf_type => {},
CaptProf_back_type => {},
CaptProf_serial_number => {
Name => 'CaptureSerial',
Description => 'Capture Serial Number',
PrintConv => '$val =~ s/ .*//s; $val',
},
CaptProf_image_offset => {},
CaptProf_luminance_consts => {},
CaptProf_xy_offset_info => 'XYOffsetInfo',
CaptProf_color_matrix => {},
CaptProf_reconstruction_type=> {},
CaptProf_image_fields => {},
CaptProf_image_bounds => {},
CaptProf_number_of_planes => {},
CaptProf_raw_data_rotation => {},
CaptProf_color_averages => {},
CaptProf_mosaic_pattern => {},
CaptProf_dark_correction_type=>{},
CaptProf_right_dark_rect => {},
CaptProf_left_dark_rect => {},
CaptProf_center_dark_rect => {},
CaptProf_CCD_rect => {},
CaptProf_CCD_valid_rect => {},
CaptProf_CCD_video_rect => {},
);
%Image::ExifTool::Leaf::ImageProfile = (
PROCESS_PROC => \&ProcessLeaf,
GROUPS => { 0 => 'Leaf', 2 => 'Image' },
ImgProf_version => {},
ImgProf_name => {},
ImgProf_type => {},
ImgProf_back_type => {},
ImgProf_shoot_setup => {
SubDirectory => {
TagTable => 'Image::ExifTool::Leaf::ShootSetup',
},
},
ImgProf_image_status => {},
ImgProf_rotation_angle => {},
);
%Image::ExifTool::Leaf::ShootSetup = (
PROCESS_PROC => \&ProcessLeaf,
GROUPS => { 0 => 'Leaf', 2 => 'Image' },
ShootObj_version => {},
ShootObj_name => {},
ShootObj_type => {},
ShootObj_back_type => {},
ShootObj_capture_setup => {
SubDirectory => {
TagTable => 'Image::ExifTool::Leaf::CaptureSetup',
},
},
ShootObj_color_setup => {
SubDirectory => {
TagTable => 'Image::ExifTool::Leaf::ColorSetup',
},
},
ShootObj_save_setup => {
SubDirectory => {
TagTable => 'Image::ExifTool::Leaf::SaveSetup',
},
},
ShootObj_camera_setup => {
SubDirectory => {
TagTable => 'Image::ExifTool::Leaf::CameraSetup',
},
},
ShootObj_look_header => {
SubDirectory => {
TagTable => 'Image::ExifTool::Leaf::LookHeader',
},
},
);
%Image::ExifTool::Leaf::CaptureSetup = (
PROCESS_PROC => \&ProcessLeaf,
GROUPS => { 0 => 'Leaf', 2 => 'Image' },
CaptureObj_version => {},
CaptureObj_name => {},
CaptureObj_type => {},
CaptureObj_back_type => {},
CaptureObj_neutals => {
SubDirectory => {
TagTable => 'Image::ExifTool::Leaf::Neutrals',
},
},
CaptureObj_selection => {
SubDirectory => {
TagTable => 'Image::ExifTool::Leaf::Selection',
},
},
CaptureObj_tone_curve => {
SubDirectory => {
TagTable => 'Image::ExifTool::Leaf::ToneCurve',
},
},
CaptureObj_sharpness => {
SubDirectory => {
TagTable => 'Image::ExifTool::Leaf::Sharpness',
},
},
CaptureObj_single_quality => {},
CaptureObj_Multi_quality => {},
);
%Image::ExifTool::Leaf::Neutrals = (
PROCESS_PROC => \&ProcessLeaf,
GROUPS => { 0 => 'Leaf', 2 => 'Image' },
NeutObj_version => {},
NeutObj_name => {},
NeutObj_type => {},
NeutObj_back_type => {},
NeutObj_neutrals => {},
NeutObj_color_casts => {},
NeutObj_shadow_end_points => {},
NeutObj_highlight_end_points => {},
);
%Image::ExifTool::Leaf::Selection = (
PROCESS_PROC => \&ProcessLeaf,
GROUPS => { 0 => 'Leaf', 2 => 'Image' },
SelObj_version => {},
SelObj_name => {},
SelObj_type => {},
SelObj_back_type => {},
SelObj_rect => {},
SelObj_resolution => {},
SelObj_scale => {},
SelObj_locks => {},
SelObj_orientation => {},
);
%Image::ExifTool::Leaf::ToneCurve = (
PROCESS_PROC => \&ProcessLeaf,
GROUPS => { 0 => 'Leaf', 2 => 'Image' },
ToneObj_version => {},
ToneObj_name => {},
ToneObj_type => {},
ToneObj_back_type => {},
ToneObj_npts => {},
ToneObj_tones => {},
ToneObj_gamma => {},
);
%Image::ExifTool::Leaf::Sharpness = (
PROCESS_PROC => \&ProcessLeaf,
GROUPS => { 0 => 'Leaf', 2 => 'Image' },
SharpObj_version => {},
SharpObj_name => {},
SharpObj_type => {},
SharpObj_back_type => {},
SharpObj_sharp_method => {},
SharpObj_data_len => {},
SharpObj_sharp_info => {},
);
%Image::ExifTool::Leaf::ColorSetup = (
PROCESS_PROC => \&ProcessLeaf,
GROUPS => { 0 => 'Leaf', 2 => 'Image' },
ColorObj_version => {},
ColorObj_name => {},
ColorObj_type => {},
ColorObj_back_type => {},
ColorObj_has_ICC => {},
ColorObj_input_profile => {},
ColorObj_output_profile => {},
ColorObj_color_mode => {},
ColorObj_color_type => {},
);
%Image::ExifTool::Leaf::SaveSetup = (
PROCESS_PROC => \&ProcessLeaf,
GROUPS => { 0 => 'Leaf', 2 => 'Other' },
SaveObj_version => {},
SaveObj_name => {},
SaveObj_type => {},
SaveObj_back_type => {},
SaveObj_leaf_auto_active=> {},
SaveObj_leaf_hot_folder => {},
SaveObj_leaf_output_file_type => {},
SaveObj_leaf_auto_base_name => {},
SaveObj_leaf_save_selection => {},
SaveObj_leaf_open_proc_HDR => {},
SaveObj_std_auto_active => {},
SaveObj_std_hot_folder => {},
SaveObj_std_output_file_type => {},
SaveObj_std_output_color_mode => {},
SaveObj_std_output_bit_depth => {},
SaveObj_std_base_name => {},
SaveObj_std_save_selection => {},
SaveObj_std_oxygen => {},
SaveObj_std_open_in_photoshop => {},
SaveObj_std_scaled_output => {},
SaveObj_std_sharpen_output => {},
);
%Image::ExifTool::Leaf::CameraSetup = (
PROCESS_PROC => \&ProcessLeaf,
GROUPS => { 0 => 'Leaf', 2 => 'Camera' },
CameraObj_version => {},
CameraObj_name => {},
CameraObj_type => {},
CameraObj_back_type => {},
CameraObj_ISO_speed => {},
CameraObj_strobe => {},
CameraObj_camera_type => {},
CameraObj_lens_type => {},
CameraObj_lens_ID => {},
);
%Image::ExifTool::Leaf::LookHeader = (
PROCESS_PROC => \&ProcessLeaf,
GROUPS => { 0 => 'Leaf', 2 => 'Other' },
LookHead_version => {},
LookHead_name => {},
LookHead_type => {},
LookHead_back_type => {},
);
# tag table for any unknown Leaf directories
%Image::ExifTool::Leaf::Unknown = (
PROCESS_PROC => \&ProcessLeaf,
GROUPS => { 0 => 'Leaf', 2 => 'Unknown' },
);
# table for Leaf SubIFD entries
%Image::ExifTool::Leaf::SubIFD = (
GROUPS => { 0 => 'MakerNotes', 1 => 'LeafSubIFD', 2 => 'Image'},
WRITE_PROC => \&Image::ExifTool::Exif::WriteExif,
NOTES => q{
Leaf writes a TIFF-format sub-IFD inside IFD0 of a MOS image. No tags in
this sub-IFD are currently known, except for tags 0x8602 and 0x8606 which
really shouldn't be here anyway (so they don't appear in the table below)
because they duplicate references to the same data from tags with the same
ID in IFD0.
},
);
# prepare Leaf tables by generating tag 'Name' and table 'NOTES'
# This runs once at module load: it walks all Leaf tag tables reachable
# from Main, derives a CamelCase 'Name' for any tag that doesn't define
# one, and documents the common "Prefix_" that is stripped from tag IDs.
{
    # breadth-first worklist of table names, seeded with the Main table
    my @tableList = ( 'Image::ExifTool::Leaf::Main' );
    my ($tag, %doneTable);
    # keep prefix in tag name of common tags (Version/Name/Type/BackType
    # exist in every sub-table, so they would collide without the prefix)
    my %keepPrefix = ( Version=>1, Name=>1, Type=>1, BackType=>1 );
    while (@tableList) {
        my $table = shift @tableList;
        next if $doneTable{$table};   # each table is processed only once
        # Main has no common prefix; sub-tables discover theirs lazily
        # ('' = prefix not yet determined, undef = no prefix stripping)
        my $prefix = ($table =~ /::Main$/) ? undef : '';
        $doneTable{$table} = 1;
        # convert table name to a hash reference via a symbolic reference
        no strict 'refs';
        $table = \%$table;
        use strict 'refs';
        foreach $tag (keys %$table) {
            my $tagInfo = $$table{$tag};
            next unless ref $tagInfo eq 'HASH';   # skip simple string entries
            next if $tag eq 'GROUPS';             # table metadata, not a tag
            if (defined $prefix and not $prefix) {
                # first tag seen determines the table's common prefix
                # (everything before the first underscore)
                ($prefix = $tag) =~ s/_.*//;
            }
            unless ($$tagInfo{Name}) {
                # derive Name from tag ID: underscore_words -> CamelCase
                my $name;
                ($name = $tag) =~ s/_(.)/\U$1/g;
                if ($prefix) {
                    $name =~ s/^$prefix//;
                    # re-attach prefix for names that would otherwise collide
                    $name = $prefix . $name if $keepPrefix{$name};
                }
                $$tagInfo{Name} = ucfirst($name);
            }
            # queue referenced Leaf sub-tables for processing
            next unless $$tagInfo{SubDirectory};
            my $subTable = $tagInfo->{SubDirectory}->{TagTable};
            next unless $subTable =~ /::Leaf::/;
            push @tableList, $subTable;
        }
        next unless $prefix;
        # document the stripped prefix in the generated POD for this table
        $$table{NOTES} = "All B<Tag ID>'s in the following table have a " .
                         "leading '${prefix}_' which\nhas been removed.\n";
    }
}
#------------------------------------------------------------------------------
# Process Leaf information
# Inputs: 0) ExifTool object reference
# 1) Reference to directory information hash
# 2) Pointer to tag table for this directory
# Returns: 1 on success, otherwise returns 0 and sets a Warning
sub ProcessLeaf($$$)
{
    my ($et, $dirInfo, $tagTablePtr) = @_;
    my $dataPt = $$dirInfo{DataPt};
    my $dirStart = $$dirInfo{DirStart} || 0;
    my $dirLen = $$dirInfo{DirLen} || $$dirInfo{DataLen} - $dirStart;
    my $dirEnd = $dirStart + $dirLen;
    my $verbose = $et->Options('Verbose');
    my $pos = $dirStart;
    my $hdrLen = 52;    # header length for PKTS information
    my $success;
    $verbose and $et->VerboseDir('Leaf');
    # the directory is a sequence of PKTS records: a 52-byte header
    # ('PKTS' magic, tag name at offset 8, data size at offset 48)
    # followed by $size bytes of data
    for (;;) {
        last if $pos + $hdrLen > $dirEnd;
        my $header = substr($$dataPt, $pos, $hdrLen);
        last unless substr($header, 0, 4) eq 'PKTS';
        $success = 1;   # found at least one valid PKTS record
        my $size = Get32u(\$header, 48);
        $pos += $hdrLen;
        if ($pos + $size > $dirEnd) {
            $et->Warn('Truncated Leaf data');
            last;
        }
        # tag name: up to 40 bytes at offset 8, null-terminated
        my $tag = substr($header, 8, 40);
        $tag =~ s/\0.*//s;
        next unless $tag;
        my $tagInfo = $et->GetTagInfo($tagTablePtr, $tag);
        # generate tag info for unknown tags
        my $val;
        if ($tagInfo and $$tagInfo{Format}) {
            $val = ReadValue($dataPt, $pos, $$tagInfo{Format}, undef, $size);
        } else {
            # no declared format: take the raw bytes
            $val = substr($$dataPt, $pos, $size);
        }
        unless ($tagInfo) {
            # derive a tag name: underscore_words -> CamelCase
            my $name = ucfirst($tag);
            $name =~ s/_(.)/\U$1/g;
            if ($val =~ /^PKTS\0\0\0\x01/) {
                # also unpack unknown directories
                $tagInfo = {
                    Name => $name,
                    SubDirectory => { TagTable => 'Image::ExifTool::Leaf::Unknown' },
                };
            } elsif ($tagTablePtr ne \%Image::ExifTool::Leaf::Main or
                $et->Options('Unknown'))
            {
                $tagInfo = {
                    Name => $name,
                    Writable => 0,
                    # truncate long values for display
                    PrintConv => 'length($val) > 60 ? substr($val,0,55) . "[...]" : $val',
                };
                # make tags in main table unknown because they tend to be binary
                $$tagInfo{Unknown} = 1 if $tagTablePtr eq \%Image::ExifTool::Leaf::Main;
            }
            $tagInfo and AddTagToTable($tagTablePtr, $tag, $tagInfo);
        }
        if ($verbose) {
            $et->VerboseInfo($tag, $tagInfo,
                Table => $tagTablePtr,
                Value => $val,
                DataPt => $dataPt,
                DataPos => $$dirInfo{DataPos},
                Size => $size,
                Start => $pos,
            );
        }
        if ($tagInfo) {
            if ($$tagInfo{SubDirectory}) {
                # recurse into nested PKTS directory
                my %subdirInfo = (
                    DataPt => $dataPt,
                    DirLen => $size,
                    DirStart => $pos,
                    DataPos => $$dirInfo{DataPos},
                    DirName => 'Leaf PKTS',
                );
                my $subTable = GetTagTable($tagInfo->{SubDirectory}->{TagTable});
                $et->ProcessDirectory(\%subdirInfo, $subTable);
            } else {
                $val =~ tr/\n/ /;   # translate newlines to spaces
                $val =~ s/\0+$//;   # remove null terminators
                $et->FoundTag($tagInfo, $val);
            }
        }
        # advance past this record's data to the next PKTS header
        $pos += $size;
    }
    $success or $et->Warn('Bad format Leaf data');
    return $success;
}
1; # end
__END__
=head1 NAME
Image::ExifTool::Leaf - Read Creo Leaf EXIF meta information
=head1 SYNOPSIS
This module is loaded automatically by Image::ExifTool when required.
=head1 DESCRIPTION
This module contains definitions required by Image::ExifTool to interpret
meta information from Leaf digital camera backs written by Creo Leaf
Capture.
=head1 AUTHOR
Copyright 2003-2019, Phil Harvey (phil at owl.phy.queensu.ca)
This library is free software; you can redistribute it and/or modify it
under the same terms as Perl itself.
=head1 SEE ALSO
L<Image::ExifTool::TagNames/Leaf Tags>,
L<Image::ExifTool(3pm)|Image::ExifTool>
=cut
| philmoz/Focus-Points | focuspoints.lrdevplugin/bin/exiftool/lib/Image/ExifTool/Leaf.pm | Perl | apache-2.0 | 16,738 |
package OpenXPKI::Server::Workflow::Validator::PasswordQuality;
use strict;
use warnings;
use base qw( Workflow::Validator );
use Workflow::Exception qw( validation_error );
use OpenXPKI::Server::Context qw( CTX );
use OpenXPKI::Debug;
use English;
use Data::Password qw(:all);
# Copy validator configuration parameters into the Data::Password
# package globals that control the password-quality checks. Each
# global is only touched when the corresponding parameter is present.
sub _init {
    my ( $self, $params ) = @_;
    # map configuration keys to the Data::Password scalar globals
    my %target_for = (
        DICTIONARY         => \$DICTIONARY,
        FOLLOWING          => \$FOLLOWING,
        FOLLOWING_KEYBOARD => \$FOLLOWING_KEYBOARD,
        GROUPS             => \$GROUPS,
        MINLEN             => \$MINLEN,
        MAXLEN             => \$MAXLEN,
    );
    for my $key (keys %target_for) {
        next unless exists $params->{$key};
        ${ $target_for{$key} } = $params->{$key};
    }
    # DICTIONARIES is special: a comma-separated list fed into an array
    if (exists $params->{DICTIONARIES}) {
        @DICTIONARIES = split(/,/, $params->{DICTIONARIES});
    }
}
# Workflow validator hook: reject the workflow value if Data::Password
# considers it a weak password. Returns 1 when the password is acceptable;
# throws a validation error (i18n key) otherwise. The specific reason is
# only surfaced in debug output, not in the user-visible error.
sub validate {
    my ( $self, $wf, $password ) = @_;
    my $reason = IsBadPassword($password);
    if ($reason) {
        ##! 16: 'bad password entered: ' . $reason
        validation_error("I18N_OPENXPKI_SERVER_WORKFLOW_VALIDATOR_PASSWORD_QUALITY_BAD_PASSWORD");
    }
    return 1;
}
1;
__END__
=head1 NAME
OpenXPKI::Server::Workflow::Validator::PasswordQuality
=head1 SYNOPSIS
<action name="CreateCSR">
<validator name="PasswordQuality"
class="OpenXPKI::Server::Workflow::Validator::PasswordQuality">
<arg value="$_password"/>
</validator>
</action>
=head1 DESCRIPTION
This validator checks a password for its quality using the
Data::Password module. All configuration that is possible for
Data::Password can be done using the validator config file as well.
Based on this data, the validator fails if it believes the password
to be bad.
| mrscotty/openxpki | core/server/OpenXPKI/Server/Workflow/Validator/PasswordQuality.pm | Perl | apache-2.0 | 1,940 |
#------------------------------------------------------------------------------
# File: PDF.pm
#
# Description: Read PDF meta information
#
# Revisions: 07/11/2005 - P. Harvey Created
# 07/25/2005 - P. Harvey Add support for encrypted documents
#
# References: 1) http://www.adobe.com/devnet/pdf/pdf_reference.html
# 2) http://search.cpan.org/dist/Crypt-RC4/
# 3) http://www.adobe.com/devnet/acrobat/pdfs/PDF32000_2008.pdf
# 4) http://www.adobe.com/content/dam/Adobe/en/devnet/pdf/pdfs/adobe_supplement_iso32000.pdf
# 5) http://tools.ietf.org/search/rfc3454
# 6) http://www.armware.dk/RFC/rfc/rfc4013.html
#------------------------------------------------------------------------------
package Image::ExifTool::PDF;
use strict;
use vars qw($VERSION $AUTOLOAD $lastFetched);
use Image::ExifTool qw(:DataAccess :Utils);
require Exporter;
$VERSION = '1.49';
sub FetchObject($$$$);
sub ExtractObject($$;$$);
sub ReadToNested($;$);
sub ProcessDict($$$$;$$);
sub ProcessAcroForm($$$$;$$);
sub ExpandArray($);
sub ReadPDFValue($);
sub CheckPDF($$$);
# $lastFetched - last fetched object reference (used for decryption)
# (undefined if fetched object was already decrypted, eg. object from stream)
my $cryptInfo; # encryption object reference (plus additional information)
my $cryptString; # flag that strings are encrypted
my $cryptStream; # flag that streams are encrypted
my $lastOffset; # last fetched object offset
my %streamObjs; # hash of stream objects
my %fetched; # dicts fetched in verbose mode (to avoid cyclical recursion)
my $pdfVer; # version of PDF file being processed
# filters supported in DecodeStream()
my %supportedFilter = (
'/FlateDecode' => 1,
'/Crypt' => 1,
'/Identity' => 1, # (not filtered)
'/DCTDecode' => 1, # (JPEG image - not filtered)
'/JPXDecode' => 1, # (Jpeg2000 image - not filtered)
'/LZWDecode' => 1, # (usually a bitmapped image)
'/ASCIIHexDecode' => 1,
'/ASCII85Decode' => 1,
# other standard filters that we currently don't support
#'/JBIG2Decode' => 0, # (JBIG2 image format not supported)
#'/CCITTFaxDecode' => 0,
#'/RunLengthDecode' => 0,
);
# tags in main PDF directories
%Image::ExifTool::PDF::Main = (
GROUPS => { 2 => 'Document' },
VARS => { CAPTURE => ['Main','Prev'] },
Info => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::Info' },
# Adobe Acrobat 10.1.5 will create a duplicate Info dictionary with
# a different object number when metadata is edited. This flag
# is part of a patch to ignore this duplicate information (unless
# the IgnoreMinorErrors option is used)
IgnoreDuplicates => 1,
},
Root => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::Root' },
},
Encrypt => {
NoProcess => 1, # don't process normally (processed in advance)
SubDirectory => { TagTable => 'Image::ExifTool::PDF::Encrypt' },
},
_linearized => {
Name => 'Linearized',
Notes => 'flag set if document is linearized for fast web display; not a real Tag ID',
PrintConv => { 'true' => 'Yes', 'false' => 'No' },
},
);
# tags in PDF Info dictionary
%Image::ExifTool::PDF::Info = (
GROUPS => { 2 => 'Document' },
VARS => { CAPTURE => ['Info'] },
EXTRACT_UNKNOWN => 1, # extract all unknown tags in this directory
WRITE_PROC => \&Image::ExifTool::DummyWriteProc,
CHECK_PROC => \&CheckPDF,
WRITABLE => 'string',
# set PRIORITY to 0 so most recent Info dictionary takes precedence
# (Acrobat Pro bug? doesn't use same object/generation number for
# new Info dictionary when doing incremental update)
PRIORITY => 0,
NOTES => q{
As well as the tags listed below, the PDF specification allows for
user-defined tags to exist in the Info dictionary. These tags, which should
have corresponding XMP-pdfx entries in the XMP of the PDF XML Metadata
object, are also extracted by ExifTool.
B<Writable> specifies the value format, and may be C<string>, C<date>,
C<integer>, C<real>, C<boolean> or C<name> for PDF tags.
},
Title => { },
Author => { Groups => { 2 => 'Author' } },
Subject => { },
Keywords => { List => 'string' }, # this is a string list
Creator => { },
Producer => { },
CreationDate => {
Name => 'CreateDate',
Writable => 'date',
Groups => { 2 => 'Time' },
Shift => 'Time',
PrintConv => '$self->ConvertDateTime($val)',
PrintConvInv => '$self->InverseDateTime($val)',
},
ModDate => {
Name => 'ModifyDate',
Writable => 'date',
Groups => { 2 => 'Time' },
Shift => 'Time',
PrintConv => '$self->ConvertDateTime($val)',
PrintConvInv => '$self->InverseDateTime($val)',
},
Trapped => {
Protected => 1,
# remove leading '/' from '/True' or '/False'
ValueConv => '$val=~s{^/}{}; $val',
ValueConvInv => '"/$val"',
},
'AAPL:Keywords' => { #PH
Name => 'AppleKeywords',
List => 'array', # this is an array of values
Notes => q{
keywords written by Apple utilities, although they seem to use PDF:Keywords
when reading
},
},
);
# tags in the PDF Root document catalog
%Image::ExifTool::PDF::Root = (
GROUPS => { 2 => 'Document' },
# note: can't capture previous versions of Root since they are not parsed
VARS => { CAPTURE => ['Root'] },
NOTES => 'This is the PDF document catalog.',
MarkInfo => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::MarkInfo' },
},
Metadata => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::Metadata' },
},
Pages => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::Pages' },
},
Perms => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::Perms' },
},
AcroForm => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::AcroForm' },
},
Lang => 'Language',
PageLayout => { },
PageMode => { },
Version => 'PDFVersion',
);
# tags extracted from the PDF Encrypt dictionary
%Image::ExifTool::PDF::Encrypt = (
GROUPS => { 2 => 'Document' },
NOTES => 'Tags extracted from the document Encrypt dictionary.',
Filter => {
Name => 'Encryption',
Notes => q{
extracted value is actually a combination of the Filter, SubFilter, V, R and
Length information from the Encrypt dictionary
},
},
P => {
Name => 'UserAccess',
ValueConv => '$val & 0x0f3c', # ignore reserved bits
PrintConvColumns => 2,
PrintConv => { BITMASK => {
2 => 'Print',
3 => 'Modify',
4 => 'Copy',
5 => 'Annotate',
8 => 'Fill forms',
9 => 'Extract',
10 => 'Assemble',
11 => 'Print high-res',
}},
},
);
# tags in PDF Pages dictionary
%Image::ExifTool::PDF::Pages = (
GROUPS => { 2 => 'Document' },
Count => 'PageCount',
Kids => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::Kids' },
},
);
# tags in PDF Perms dictionary
%Image::ExifTool::PDF::Perms = (
NOTES => 'Additional document permissions imposed by digital signatures.',
DocMDP => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::Signature' },
},
FieldMDP => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::Signature' },
},
UR3 => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::Signature' },
},
);
# tags in PDF AcroForm dictionary
%Image::ExifTool::PDF::AcroForm = (
PROCESS_PROC => \&ProcessAcroForm,
_has_xfa => {
Name => 'HasXFA',
Notes => q{
this tag is defined if a document contains form fields, and is true if it
uses XML Forms Architecture; not a real Tag ID
},
PrintConv => { 'true' => 'Yes', 'false' => 'No' },
},
);
# tags in PDF Kids dictionary
%Image::ExifTool::PDF::Kids = (
Metadata => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::Metadata' },
},
PieceInfo => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::PieceInfo' },
},
Resources => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::Resources' },
},
Kids => {
Condition => '$self->Options("ExtractEmbedded")',
SubDirectory => { TagTable => 'Image::ExifTool::PDF::Kids' },
},
);
# tags in PDF Resources dictionary
%Image::ExifTool::PDF::Resources = (
ColorSpace => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::ColorSpace' },
},
XObject => {
Condition => '$self->Options("ExtractEmbedded")',
SubDirectory => { TagTable => 'Image::ExifTool::PDF::XObject' },
},
Properties => {
Condition => '$self->Options("ExtractEmbedded")',
SubDirectory => { TagTable => 'Image::ExifTool::PDF::Properties' },
},
);
# tags in PDF ColorSpace dictionary
%Image::ExifTool::PDF::ColorSpace = (
DefaultRGB => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::DefaultRGB' },
ConvertToDict => 1, # (not seen yet, but just in case)
},
DefaultCMYK => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::DefaultRGB' },
# hack: this is stored as an array instead of a dictionary in my
# sample, so convert to a dictionary to extract the ICCBased element
ConvertToDict => 1,
},
Cs1 => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::Cs1' },
},
);
# tags in PDF DefaultRGB dictionary
%Image::ExifTool::PDF::DefaultRGB = (
ICCBased => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::ICCBased' },
},
);
# tags in PDF Cs1 dictionary
%Image::ExifTool::PDF::Cs1 = (
_stream => {
SubDirectory => { TagTable => 'Image::ExifTool::ICC_Profile::Main' },
},
);
# tags in PDF ICCBased dictionary
%Image::ExifTool::PDF::ICCBased = (
_stream => {
SubDirectory => { TagTable => 'Image::ExifTool::ICC_Profile::Main' },
},
);
# tags in PDF XObject dictionary (parsed only if ExtractEmbedded is enabled)
%Image::ExifTool::PDF::XObject = (
EXTRACT_UNKNOWN => 0, # extract known but numbered tags (Im1, Im2, etc)
Im => {
Notes => q{
the L<ExtractEmbedded|../ExifTool.html#ExtractEmbedded> option enables information to be extracted from these
embedded images
},
SubDirectory => { TagTable => 'Image::ExifTool::PDF::Im' },
},
);
# tags in PDF Im# dictionary
%Image::ExifTool::PDF::Im = (
NOTES => q{
Information extracted from embedded images with the L<ExtractEmbedded|../ExifTool.html#ExtractEmbedded> option.
The EmbeddedImage and its metadata are extracted only for JPEG and Jpeg2000
image formats.
},
Width => 'EmbeddedImageWidth',
Height => 'EmbeddedImageHeight',
Filter => { Name => 'EmbeddedImageFilter', List => 1 },
ColorSpace => {
Name => 'EmbeddedImageColorSpace',
List => 1,
RawConv => 'ref $val ? undef : $val', # (ignore color space data)
},
Image_stream => {
Name => 'EmbeddedImage',
Groups => { 2 => 'Preview' },
Binary => 1,
},
);
# tags in PDF Properties dictionary
%Image::ExifTool::PDF::Properties = (
EXTRACT_UNKNOWN => 0, # extract known but numbered tags (MC0, MC1, etc)
MC => {
Notes => q{
the L<ExtractEmbedded|../ExifTool.html#ExtractEmbedded> option enables information to be extracted from these
embedded metadata dictionaries
},
SubDirectory => { TagTable => 'Image::ExifTool::PDF::MC' },
}
);
# tags in PDF MC# dictionary
%Image::ExifTool::PDF::MC = (
Metadata => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::Metadata' },
}
);
# tags in PDF PieceInfo dictionary
%Image::ExifTool::PDF::PieceInfo = (
AdobePhotoshop => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::AdobePhotoshop' },
},
Illustrator => {
# assume this is an illustrator file if it contains this directory
# and doesn't have a ".PDF" extension
Condition => q{
$self->OverrideFileType("AI") unless $$self{FILE_EXT} and $$self{FILE_EXT} eq 'PDF';
return 1;
},
SubDirectory => { TagTable => 'Image::ExifTool::PDF::Illustrator' },
},
);
# tags in PDF AdobePhotoshop dictionary
%Image::ExifTool::PDF::AdobePhotoshop = (
Private => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::Private' },
},
);
# tags in PDF Illustrator dictionary
%Image::ExifTool::PDF::Illustrator = (
Private => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::AIPrivate' },
},
);
# tags in PDF Private dictionary
%Image::ExifTool::PDF::Private = (
ImageResources => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::ImageResources' },
},
);
# tags in PDF AI Private dictionary
%Image::ExifTool::PDF::AIPrivate = (
GROUPS => { 2 => 'Document' },
EXTRACT_UNKNOWN => 0, # extract known but numbered tags
AIMetaData => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::AIMetaData' },
},
AIPrivateData => {
Notes => q{
the L<ExtractEmbedded|../ExifTool.html#ExtractEmbedded> option enables information to be extracted from embedded
PostScript documents in the AIPrivateData# and AIPDFPrivateData# streams
},
JoinStreams => 1, # join streams from numbered tags and process as one
SubDirectory => { TagTable => 'Image::ExifTool::PostScript::Main' },
},
AIPDFPrivateData => {
JoinStreams => 1, # join streams from numbered tags and process as one
SubDirectory => { TagTable => 'Image::ExifTool::PostScript::Main' },
},
RoundTripVersion => { },
ContainerVersion => { },
CreatorVersion => { },
);
# tags in PDF AIMetaData dictionary
%Image::ExifTool::PDF::AIMetaData = (
_stream => {
SubDirectory => { TagTable => 'Image::ExifTool::PostScript::Main' },
},
);
# tags in PDF ImageResources dictionary
%Image::ExifTool::PDF::ImageResources = (
_stream => {
SubDirectory => { TagTable => 'Image::ExifTool::Photoshop::Main' },
},
);
# tags in PDF MarkInfo dictionary
%Image::ExifTool::PDF::MarkInfo = (
GROUPS => { 2 => 'Document' },
Marked => {
Name => 'TaggedPDF',
Notes => "not a Tagged PDF if this tag is missing",
PrintConv => { 'true' => 'Yes', 'false' => 'No' },
},
);
# tags in PDF Metadata dictionary
%Image::ExifTool::PDF::Metadata = (
GROUPS => { 2 => 'Document' },
XML_stream => { # this is the stream for a Subtype /XML dictionary (not a real tag)
Name => 'XMP',
SubDirectory => { TagTable => 'Image::ExifTool::XMP::Main' },
},
);
# tags in PDF signature directories (DocMDP, FieldMDP or UR3)
%Image::ExifTool::PDF::Signature = (
GROUPS => { 2 => 'Document' },
ContactInfo => 'SignerContactInfo',
Location => 'SigningLocation',
M => {
Name => 'SigningDate',
Format => 'date',
Groups => { 2 => 'Time' },
PrintConv => '$self->ConvertDateTime($val)',
},
Name => 'SigningAuthority',
Reason => 'SigningReason',
Reference => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::Reference' },
},
Prop_AuthTime => {
Name => 'AuthenticationTime',
PrintConv => 'ConvertTimeSpan($val) . " ago"',
},
Prop_AuthType => 'AuthenticationType',
);
# tags in PDF Reference dictionary
%Image::ExifTool::PDF::Reference = (
TransformParams => {
SubDirectory => { TagTable => 'Image::ExifTool::PDF::TransformParams' },
},
);
# tags in PDF TransformParams dictionary
%Image::ExifTool::PDF::TransformParams = (
GROUPS => { 2 => 'Document' },
Annots => {
Name => 'AnnotationUsageRights',
Notes => q{
possible values are Create, Delete, Modify, Copy, Import and Export;
additional values for UR3 signatures are Online and SummaryView
},
List => 1,
},
Document => {
Name => 'DocumentUsageRights',
Notes => 'only possible value is FullSave',
List => 1,
},
Form => {
Name => 'FormUsageRights',
Notes => q{
possible values are FillIn, Import, Export, SubmitStandalone and
SpawnTemplate; additional values for UR3 signatures are BarcodePlaintext and
Online
},
List => 1,
},
FormEX => {
Name => 'FormExtraUsageRights',
Notes => 'UR signatures only; only possible value is BarcodePlaintext',
List => 1,
},
Signature => {
Name => 'SignatureUsageRights',
Notes => 'only possible value is Modify',
List => 1,
},
EF => {
Name => 'EmbeddedFileUsageRights',
Notes => 'possible values are Create, Delete, Modify and Import',
List => 1,
},
Msg => 'UsageRightsMessage',
P => {
Name => 'ModificationPermissions',
Notes => q{
1-3 for DocMDP signatures, default 2; true/false for UR3 signatures, default
false
},
PrintConv => {
1 => 'No changes permitted',
2 => 'Fill forms, Create page templates, Sign',
3 => 'Fill forms, Create page templates, Sign, Create/Delete/Edit annotations',
'true' => 'Restrict all applications to reader permissions',
'false' => 'Do not restrict applications to reader permissions',
},
},
Action => {
Name => 'FieldPermissions',
Notes => 'FieldMDP signatures only',
PrintConv => {
'All' => 'Disallow changes to all form fields',
'Include' => 'Disallow changes to specified form fields',
'Exclude' => 'Allow changes to specified form fields',
},
},
Fields => {
Notes => 'FieldMDP signatures only',
Name => 'FormFields',
List => 1,
},
);
# unknown tags for use in verbose option
%Image::ExifTool::PDF::Unknown = (
GROUPS => { 2 => 'Unknown' },
);
#------------------------------------------------------------------------------
# AutoLoad our writer routines when necessary
#
sub AUTOLOAD
{
    # Called for any undefined sub in this package: DoAutoLoad loads the
    # corresponding autoload file (the PDF writer code) and re-dispatches
    # the original call with the original arguments.
    return Image::ExifTool::DoAutoLoad($AUTOLOAD, @_);
}
#------------------------------------------------------------------------------
# Convert from PDF to EXIF-style date/time
# Inputs: 0) PDF date/time string (D:YYYYmmddHHMMSS+HH'MM')
# Returns: EXIF date string (YYYY:mm:dd HH:MM:SS+HH:MM)
# Convert a PDF date string (D:YYYYmmddHHMMSS+HH'MM') to EXIF style
# (YYYY:mm:dd HH:MM:SS+HH:MM). Missing trailing components default to
# Jan 1, midnight. Returns the input unchanged if it doesn't look like
# a date; tolerates sloppy timezone formatting and a bare 'Z'.
sub ConvertPDFDate($)
{
    my $date = shift;
    $date =~ s/^D://;   # the 'D:' prefix is optional in the PDF spec
    # pad short dates out to full YYYYmmddHHMMSS with default values
    my $default = '00000101000000';
    if (length($date) < length($default)) {
        $date .= substr($default, length $date);
    }
    return $date unless $date =~ /^(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})(\d{2})(.*)/;
    my $tzStr = $7;
    $date = "$1:$2:$3 $4:$5:$6";
    if ($tzStr) {
        if ($tzStr =~ /^\s*Z/i) {
            # ignore any "HH'mm'" after the Z (OS X 10.6 does this)
            $date .= 'Z';
        } elsif ($tzStr =~ /^\s*([-+])\s*(\d+)[': ]+(\d*)/) {
            # tolerate some improper formatting in timezone specification
            $date .= $1 . $2 . ':' . ($3 || '00');
        }
    }
    return $date;
}
#------------------------------------------------------------------------------
# Locate any object in the XRef tables (including compressed objects)
# Inputs: 0) XRef reference, 1) object reference string (or free object number)
# Returns: offset to object in file or compressed object reference string,
#          0 if object is free, or undefined on error
sub LocateAnyObject($$)
{
    my ($xref, $ref) = @_;
    return undef unless $xref;
    # return cached entry if this reference was already resolved
    return $$xref{$ref} if exists $$xref{$ref};
    # get the object number
    return undef unless $ref =~ /^(\d+)/;
    my $objNum = $1;
    # return 0 if the object number has been reused (old object is free)
    return 0 if defined $$xref{$objNum};
#
# scan our XRef stream dictionaries for this object
#
    return undef unless $$xref{dicts};
    my $dict;
    foreach $dict (@{$$xref{dicts}}) {
        # quick check to see if the object is in the range for this xref stream
        next if $objNum >= $$dict{Size};
        my $index = $$dict{Index};
        next if $objNum < $$index[0];
        # scan the tables for the specified object
        # (Index holds start/count pairs; _entry_size is bytes per table entry)
        my $size = $$dict{_entry_size};
        my $num = scalar(@$index) / 2;
        my $tot = 0;
        my $i;
        for ($i=0; $i<$num; ++$i) {
            my $start = $$index[$i*2];
            my $count = $$index[$i*2+1];
            # table is in ascending order, so quit if we have passed the object
            last if $objNum < $start;
            if ($objNum < $start + $count) {
                # found the subsection containing our object; compute its
                # byte offset within the decoded xref stream
                my $offset = $size * ($objNum - $start + $tot);
                last if $offset + $size > length $$dict{_stream};
                my @c = unpack("x$offset C$size", $$dict{_stream});
                # extract values from this table entry
                # (can be 1, 2, 3, 4, etc.. bytes per value)
                my (@t, $j, $k);
                my $w = $$dict{W};
                for ($j=0; $j<3; ++$j) {
                    # use default value if W entry is 0 (as per spec)
                    # - 0th element defaults to 1, others default to 0
                    $$w[$j] or $t[$j] = ($j ? 0 : 1), next;
                    # accumulate big-endian multi-byte field value
                    $t[$j] = shift(@c);
                    for ($k=1; $k < $$w[$j]; ++$k) {
                        $t[$j] = 256 * $t[$j] + shift(@c);
                    }
                }
                # by default, use "o g R" as the xref key
                # (o = object number, g = generation number)
                my $ref2 = "$objNum $t[2] R";
                if ($t[0] == 1) {
                    # normal object reference:
                    # $t[1]=offset of object from start, $t[2]=generation number
                    $$xref{$ref2} = $t[1];
                } elsif ($t[0] == 2) {
                    # compressed object reference:
                    # $t[1]=stream object number, $t[2]=index of object in stream
                    $ref2 = "$objNum 0 R";
                    $$xref{$ref2} = "I$t[2] $t[1] 0 R";
                } elsif ($t[0] == 0) {
                    # free object:
                    # $t[1]=next free object in linked list, $t[2]=generation number
                    $$xref{$ref2} = 0;
                } else {
                    # treat as a null object
                    $$xref{$ref2} = undef;
                }
                $$xref{$objNum} = $t[1];    # remember offsets by object number too
                return $$xref{$ref} if $ref eq $ref2;
                return 0;   # object is free or was reused
            }
            $tot += $count;
        }
    }
    return undef;
}
#------------------------------------------------------------------------------
# Locate a regular object in the XRef tables (does not include compressed objects)
# Inputs: 0) XRef reference, 1) object reference string (or free object number)
# Returns: offset to object in file, 0 if object is free,
#          or undef on error or if object was compressed
sub LocateObject($$)
{
    my ($xrefTable, $objRef) = @_;
    my $loc = LocateAnyObject($xrefTable, $objRef);
    # compressed-object locations begin with "I" -- not regular objects
    return undef if $loc and $loc =~ /^I/;
    return $loc;
}
#------------------------------------------------------------------------------
# Check that the correct object is located at the specified file offset
# Inputs: 0) ExifTool ref, 1) object name, 2) object reference string, 3) file offset
# Returns: first non-blank line at start of object, or undef on error
sub CheckObject($$$$)
{
    my ($et, $tag, $ref, $offset) = @_;
    my ($data, $obj, $dat, $pat);
    my $raf = $$et{RAF};
    # seek to the object (PDFBase accounts for data preceding the PDF header)
    $raf->Seek($offset+$$et{PDFBase}, 0) or $et->Warn("Bad $tag offset"), return undef;
    # verify that we are reading the expected object
    $raf->ReadLine($data) or $et->Warn("Error reading $tag data"), return undef;
    # build the expected "n g obj" header from the "n g R" reference string
    ($obj = $ref) =~ s/R/obj/;
    unless ($data =~ s/^$obj//) {
        # handle cases where other whitespace characters are used in the object ID string
        # (keep reading while the line still looks like a partial "n g" header)
        while ($data =~ /^\d+(\s+\d+)?\s*$/) {
            $raf->ReadLine($dat);
            $data .= $dat;
        }
        # retry the match with flexible whitespace between header tokens
        ($pat = $obj) =~ s/ /\\s+/g;
        unless ($data =~ s/$pat//) {
            $tag = ucfirst $tag;
            $et->Warn("$tag object ($obj) not found at offset $offset");
            return undef;
        }
    }
    # read the first line of data from the object (ignoring blank lines and comments)
    for (;;) {
        last if $data =~ /\S/ and $data !~ /^\s*%/;
        $raf->ReadLine($data) or $et->Warn("Error reading $tag data"), return undef;
    }
    return $data;
}
#------------------------------------------------------------------------------
# Fetch indirect object from file (from inside a stream if required)
# Inputs: 0) ExifTool object reference, 1) object reference string,
#         2) xref lookup, 3) object name (for warning messages)
# Returns: object data or undefined on error
# Notes: sets $lastFetched to the object reference, or undef if the object
#        was extracted from an encrypted stream
sub FetchObject($$$$)
{
    my ($et, $ref, $xref, $tag) = @_;
    $lastFetched = $ref;    # save this for decoding if necessary
    my $offset = LocateAnyObject($xref, $ref);
    $lastOffset = $offset;
    unless ($offset) {
        # (no warning for offset 0: the object is simply free)
        $et->Warn("Bad $tag reference") unless defined $offset;
        return undef;
    }
    my ($data, $obj);
    # an "I<index> <ref>" offset means the object lives inside an object stream
    if ($offset =~ s/^I(\d+) //) {
        my $index = $1; # object index in stream
        my ($objNum) = split ' ', $ref; # save original object number
        $ref = $offset; # now a reference to the containing stream object
        $obj = $streamObjs{$ref};
        unless ($obj) {
            # don't try to load the same object stream twice
            # (an empty-string placeholder marks a stream already being loaded)
            return undef if defined $obj;
            $streamObjs{$ref} = '';
            # load the parent object stream (recursive call)
            $obj = FetchObject($et, $ref, $xref, $tag);
            # make sure it contains everything we need
            return undef unless defined $obj and ref($obj) eq 'HASH';
            return undef unless $$obj{First} and $$obj{N};
            return undef unless DecodeStream($et, $obj);
            # add a special '_table' entry to this dictionary which contains
            # the list of object number/offset pairs from the stream header
            my $num = $$obj{N} * 2;
            my @table = split ' ', $$obj{_stream}, $num;
            return undef unless @table == $num;
            # remove everything before first object in stream
            $$obj{_stream} = substr($$obj{_stream}, $$obj{First});
            $table[$num-1] =~ s/^(\d+).*/$1/s;  # trim excess from last number
            $$obj{_table} = \@table;
            # save the object stream so we don't have to re-load it later
            $streamObjs{$ref} = $obj;
        }
        # verify that we have the specified object
        my $i = 2 * $index;
        my $table = $$obj{_table};
        unless ($index < $$obj{N} and $$table[$i] == $objNum) {
            $et->Warn("Bad index for stream object $tag");
            return undef;
        }
        # extract the object at the specified index in the stream
        # (offsets in table are in sequential order, so we can subtract from
        # the next offset to get the object length)
        $offset = $$table[$i + 1];
        my $len = ($$table[$i + 3] || length($$obj{_stream})) - $offset;
        $data = substr($$obj{_stream}, $offset, $len);
        # avoid re-decrypting data in already decrypted streams
        undef $lastFetched if $cryptStream;
        return ExtractObject($et, \$data);
    }
    # load the start of the object directly from the file
    $data = CheckObject($et, $tag, $ref, $offset);
    return undef unless defined $data;
    return ExtractObject($et, \$data, $$et{RAF}, $xref);
}
#------------------------------------------------------------------------------
# Convert PDF value to something readable
# Inputs: 0) PDF object data
# Returns: converted object (decrypting strings if $cryptString is set)
sub ReadPDFValue($)
{
    my $str = shift;
    # decode all strings in an array
    if (ref $str eq 'ARRAY') {
        # create new list to not alter the original data when rewriting
        my ($val, @vals);
        foreach $val (@$str) {
            push @vals, ReadPDFValue($val);
        }
        return \@vals;
    }
    length $str or return $str;
    # the leading delimiter identifies the value type
    my $delim = substr($str, 0, 1);
    if ($delim eq '(') {    # literal string
        $str = $1 if $str =~ /^.*?\((.*)\)/s;   # remove brackets
        # decode escape sequences in literal strings
        # (edit in place, tracking pos() so replacements aren't re-scanned)
        while ($str =~ /\\(.)/sg) {
            my $n = pos($str) - 2;  # offset of the backslash
            my $c = $1;
            my $r;
            if ($c =~ /[0-7]/) {
                # get up to 2 more octal digits
                $c .= $1 if $str =~ /\G([0-7]{1,2})/g;
                # convert octal escape code
                $r = chr(oct($c) & 0xff);
            } elsif ($c eq "\x0d") {
                # the string is continued if the line ends with '\'
                # (also remove "\x0d\x0a")
                $c .= $1 if $str =~ /\G(\x0a)/g;
                $r = '';
            } elsif ($c eq "\x0a") {
                # escaped newline is a line continuation -- remove it
                $r = '';
            } else {
                # convert escaped characters
                ($r = $c) =~ tr/nrtbf/\n\r\t\b\f/;
            }
            substr($str, $n, length($c)+1) = $r;
            # continue search after this character
            pos($str) = $n + length($r);
        }
        Crypt(\$str, $lastFetched) if $cryptString;
    } elsif ($delim eq '<') {   # hex string
        # decode hex data (drop all non-hex characters first)
        $str =~ tr/0-9A-Fa-f//dc;
        $str .= '0' if length($str) & 0x01; # (by the spec)
        $str = pack('H*', $str);
        Crypt(\$str, $lastFetched) if $cryptString;
    } elsif ($delim eq '/') {   # name
        $str = substr($str, 1);
        # convert escape codes (PDF 1.2 or later)
        $str =~ s/#([0-9a-f]{2})/chr(hex($1))/sgei if $pdfVer >= 1.2;
    }
    return $str;
}
#------------------------------------------------------------------------------
# Extract PDF object from combination of buffered data and file
# Inputs: 0) ExifTool object reference, 1) data reference,
#         2) optional raf reference, 3) optional xref table
# Returns: converted PDF object or undef on error
#     a) dictionary object --> hash reference
#     b) array object --> array reference
#     c) indirect reference --> scalar reference
#     d) string, name, integer, boolean, null --> scalar value
# - updates $$dataPt on return to contain unused data
# - creates two bogus entries ('_stream' and '_tags') in dictionaries to represent
#   the stream data and a list of the tags (not including '_stream' and '_tags')
#   in their original order
sub ExtractObject($$;$$)
{
    my ($et, $dataPt, $raf, $xref) = @_;
    my (@tags, $data, $objData);
    my $dict = { };
    my $delim;
    # first, isolate the raw text of the object, reading more data as needed
    for (;;) {
        if ($$dataPt =~ /^\s*(<{1,2}|\[|\()/s) {
            # delimited object: dict (<<), hex string (<), array ([) or literal (()
            $delim = $1;
            $$dataPt =~ s/^\s+//;   # remove leading white space
            $objData = ReadToNested($dataPt, $raf);
            return undef unless defined $objData;
            last;
        } elsif ($$dataPt =~ s{^\s*(\S[^[(/<>\s]*)\s*}{}s) {
#
# extract boolean, numerical, string, name, null object or indirect reference
#
            $objData = $1;
            # look for an indirect reference ("n g R")
            if ($objData =~ /^\d+$/ and $$dataPt =~ s/^(\d+)\s+R//s) {
                $objData .= " $1 R";
                $objData = \$objData;   # return scalar reference
            }
            return $objData;    # return simple scalar or scalar reference
        }
        # nothing usable buffered yet -- read another line from the file
        $raf and $raf->ReadLine($data) or return undef;
        $$dataPt .= $data;
    }
#
# return literal string or hex string without parsing
#
    if ($delim eq '(' or $delim eq '<') {
        return $objData;
#
# extract array
#
    } elsif ($delim eq '[') {
        $objData =~ /^.*?\[(.*)\]/s or return undef;
        my $data = $1;  # brackets removed
        my @list;
        # walk the array contents token by token
        for (;;) {
            last unless $data =~ m{\s*(\S[^[(/<>\s]*)}sg;
            my $val = $1;
            if ($val =~ /^(<{1,2}|\[|\()/) {
                my $pos = pos($data) - length($val);
                # nested dict, array, literal string or hex string
                my $buff = substr($data, $pos);
                $val = ReadToNested(\$buff);
                last unless defined $val;
                pos($data) = $pos + length($val);
                $val = ExtractObject($et, \$val);
            } elsif ($val =~ /^\d/) {
                # a number may begin an "n g R" indirect reference
                my $pos = pos($data);
                if ($data =~ /\G\s+(\d+)\s+R/g) {
                    $val = \ "$val $1 R";   # make a reference
                } else {
                    pos($data) = $pos;
                }
            }
            push @list, $val;
        }
        return \@list;
    }
#
# extract dictionary
#
    # Note: entries are not necessarily separated by whitespace (doh!)
    # eg) "/Tag/Name", "/Tag(string)", "/Tag[array]", etc are legal!
    # Also, they may be separated by a comment (eg. "/Tag%comment\nValue"),
    # but comments have already been removed
    while ($objData =~ m{(\s*)/([^/[\]()<>{}\s]+)\s*(\S[^[(/<>\s]*)}sg) {
        my $tag = $2;
        my $val = $3;
        if ($val =~ /^(<{1,2}|\[|\()/) {
            # nested dict, array, literal string or hex string
            $objData = substr($objData, pos($objData)-length($val));
            $val = ReadToNested(\$objData, $raf);
            last unless defined $val;
            $val = ExtractObject($et, \$val);
            pos($objData) = 0;
        } elsif ($val =~ /^\d/) {
            # a number may begin an "n g R" indirect reference
            my $pos = pos($objData);
            if ($objData =~ /\G\s+(\d+)\s+R/sg) {
                $val = \ "$val $1 R";   # make a reference
            } else {
                pos($objData) = $pos;
            }
        }
        if ($$dict{$tag}) {
            # duplicate dictionary entries are not allowed
            $et->Warn("Duplicate '${tag}' entry in dictionary (ignored)");
        } else {
            # save the entry
            push @tags, $tag;
            $$dict{$tag} = $val;
        }
    }
    return undef unless @tags;
    $$dict{_tags} = \@tags;
    return $dict unless $raf;   # direct objects can not have streams
#
# extract the stream object
#
    # dictionary must specify stream Length
    my $length = $$dict{Length} or return $dict;
    if (ref $length) {
        # Length is an indirect reference -- fetch the actual value
        $length = $$length;
        my $oldpos = $raf->Tell();
        # get the location of the object specifying the length
        # (compressed objects are not allowed)
        my $offset = LocateObject($xref, $length) or return $dict;
        $offset or $et->Warn('Bad stream Length object'), return $dict;
        $data = CheckObject($et, 'stream Length', $length, $offset);
        defined $data or return $dict;
        $data =~ /^\s*(\d+)/ or $et->Warn('Stream Length not found'), return $dict;
        $length = $1;
        $raf->Seek($oldpos, 0); # restore position to start of stream
    }
    # extract the trailing stream data
    for (;;) {
        # find the stream token
        if ($$dataPt =~ /(\S+)/) {
            last unless $1 eq 'stream';
            # read an extra line because it may contain our \x0a
            $$dataPt .= $data if $raf->ReadLine($data);
            # remove our stream header
            $$dataPt =~ s/^\s*stream(\x0a|\x0d\x0a)//s;
            my $more = $length - length($$dataPt);
            if ($more > 0) {
                # stream extends beyond buffered data -- read the remainder
                unless ($raf->Read($data, $more) == $more) {
                    $et->Warn('Error reading stream data');
                    $$dataPt = '';
                    return $dict;
                }
                $$dict{_stream} = $$dataPt . $data;
                $$dataPt = '';
            } elsif ($more < 0) {
                # buffered data extends beyond the stream -- keep the excess
                $$dict{_stream} = substr($$dataPt, 0, $length);
                $$dataPt = substr($$dataPt, $length);
            } else {
                $$dict{_stream} = $$dataPt;
                $$dataPt = '';
            }
            last;
        }
        $raf->ReadLine($data) or last;
        $$dataPt .= $data;
    }
    return $dict;
}
#------------------------------------------------------------------------------
# Read to nested delimiter
# Inputs: 0) data reference, 1) optional raf reference
# Returns: data up to and including matching delimiter (or undef on error)
# - updates data reference with trailing data
# - unescapes characters in literal strings
my %closingDelim = (    # lookup for matching delimiter
    '(' => ')',         # literal string
    '[' => ']',         # array
    '<' => '>',         # hex string
    '<<' => '>>',       # dictionary
);
sub ReadToNested($;$)
{
    my ($dataPt, $raf) = @_;
    my @delim = ('');   # closing delimiter list, most deeply nested first
    pos($$dataPt) = 0;  # begin at start of data
    for (;;) {
        # scan for the next delimiter (with any preceding backslashes) or comment
        unless ($$dataPt =~ /(\\*)(\(|\)|<{1,2}|>{1,2}|\[|\]|%)/g) {
            # must read some more data
            my $buff;
            last unless $raf and $raf->ReadLine($buff);
            $$dataPt .= $buff;
            # resume scanning at the start of the newly-appended data
            pos($$dataPt) = length($$dataPt) - length($buff);
            next;
        }
        # are we in a literal string?
        if ($delim[0] eq ')') {
            # ignore escaped delimiters (preceded by odd number of \'s)
            next if length($1) & 0x01;
            # ignore all delimiters but unescaped braces
            next unless $2 eq '(' or $2 eq ')';
        } elsif ($2 eq '%') {
            # ignore the comment
            my $pos = pos($$dataPt) - 1;
            # remove everything from '%' up to but not including newline
            $$dataPt =~ /.*/g;
            my $end = pos($$dataPt);
            $$dataPt = substr($$dataPt, 0, $pos) . substr($$dataPt, $end);
            pos($$dataPt) = $pos;
            next;
        }
        if ($closingDelim{$2}) {
            # push the corresponding closing delimiter
            unshift @delim, $closingDelim{$2};
            next;
        }
        unless ($2 eq $delim[0]) {
            # handle the case where we find a ">>>" and interpret it
            # as ">> >" instead of "> >>"
            next unless $2 eq '>>' and $delim[0] eq '>';
            pos($$dataPt) = pos($$dataPt) - 1;
        }
        shift @delim;           # remove from nesting list
        next if $delim[0];      # keep going if we have more nested delimiters
        # outermost delimiter closed: split off and return the matched span
        my $pos = pos($$dataPt);
        my $buff = substr($$dataPt, 0, $pos);
        $$dataPt = substr($$dataPt, $pos);
        return $buff;   # success!
    }
    return undef;   # didn't find matching delimiter
}
#------------------------------------------------------------------------------
# Decode LZW-encoded data (ref 1)
# Inputs: 0) data reference
# Returns: true on success and data is decoded, or false and data is untouched
sub DecodeLZW($)
{
    my $dataPt = shift;
    return 0 if length $$dataPt < 4;
    # initialize the code table with all 256 single-byte sequences
    # (entries 256/257 are placeholders for the clear/EOD marker codes)
    my @table = (map(chr, 0..255), undef, undef);
    my $codeMask = 0x01ff;      # mask for least-significant 9 bits
    my @words = unpack 'n*', $$dataPt . "\0";
    my $reg = ($words[0] << 16) | $words[1];    # 32-bit shift register
    my ($bitPos, $wordPos, $codeLen, $output) = (0, 2, 9, '');
    my $prevSeq;
    for (;;) {
        # bits are packed MSB first in PDF LZW (the PDF spec doesn't mention this)
        my $downShift = 32 - ($bitPos + $codeLen);
        if ($downShift < 0) {
            return 0 if $wordPos >= @words;     # missing EOD marker
            # shift the next 16-bit word into the register
            $reg = (($reg & 0xffff) << 16) | $words[$wordPos++];
            $bitPos -= 16;
            $downShift += 16;
        };
        my $code = ($reg >> $downShift) & $codeMask;
        $bitPos += $codeLen;
        my $seq = $table[$code];
        if (defined $seq) {
            # store new code as previous sequence plus 1st char of new sequence
            push @table, $prevSeq . substr($seq, 0, 1) if defined $prevSeq;
        } elsif ($code == @table) { # new code
            return 0 unless defined $prevSeq;
            # we are using the code that we are about to generate, so the last
            # character in the new sequence must be the same as the first
            # character in the previous sequence (makes sense if you think about it)
            $seq = $prevSeq . substr($prevSeq, 0, 1);
            push @table, $seq;
        } elsif ($code == 256) {    # clear table
            splice @table, 258;
            $codeLen = 9;
            $codeMask = 0x1ff;
            undef $prevSeq;
            next;
        } elsif ($code == 257) {    # EOD marker
            last;   # all done!
        } else {
            return 0;
        }
        $output .= $seq;    # add this byte sequence to the output
        # we added a new entry to the LZW table, so we must increase
        # the bit width if necessary, up to a maximum of 12
        @table >= $codeMask and $codeLen < 12 and ++$codeLen, $codeMask |= $codeMask << 1;
        $prevSeq = $seq;
    }
    $$dataPt = $output; # return decompressed data
    return 1;
}
#------------------------------------------------------------------------------
# Decode filtered stream
# Inputs: 0) ExifTool object reference, 1) dictionary reference
# Returns: true if stream has been decoded OK
# Notes: decrypts the stream (if necessary) before applying the filters listed
#        in the Filter entry; sets _filtered/_decrypted flags in the dictionary
sub DecodeStream($$)
{
    local $_;
    my ($et, $dict) = @_;
    return 0 unless $$dict{_stream}; # no stream to decode
    # get list of filters
    my (@filters, @decodeParms, $filter);
    if (ref $$dict{Filter} eq 'ARRAY') {
        @filters = @{$$dict{Filter}};
    } elsif (defined $$dict{Filter}) {
        @filters = ($$dict{Filter});
    }
    # be sure we can process all the filters before we take the time to do the decryption
    foreach $filter (@filters) {
        next if $supportedFilter{$filter};
        $et->WarnOnce("Unsupported Filter $filter");
        return 0;
    }
    # apply decryption first if required (and if the default encryption
    # has not been overridden by a Crypt filter.  Note: the Crypt filter
    # must be first in the Filter array: ref 3, page 38)
    unless (defined $$dict{_decrypted} or ($filters[0] and $filters[0] eq '/Crypt')) {
        CryptStream($dict, $lastFetched);
    }
    return 1 unless $$dict{Filter};         # Filter entry is mandatory
    return 0 if defined $$dict{_filtered};  # avoid double-filtering
    $$dict{_filtered} = 1;  # set flag to prevent double-filtering
    # get array of DecodeParms dictionaries
    if (ref $$dict{DecodeParms} eq 'ARRAY') {
        @decodeParms = @{$$dict{DecodeParms}};
    } else {
        @decodeParms = ($$dict{DecodeParms});
    }
    # apply each filter in order (with its corresponding DecodeParms)
    foreach $filter (@filters) {
        my $decodeParms = shift @decodeParms;
        if ($filter eq '/FlateDecode') {
            # make sure we support the predictor (if used) before decoding
            my $pre;
            if (ref $decodeParms eq 'HASH') {
                $pre = $$decodeParms{Predictor};
                if ($pre and $pre ne '1' and $pre ne '12') {
                    $et->WarnOnce("FlateDecode Predictor $pre currently not supported");
                    return 0;
                }
            }
            if (eval { require Compress::Zlib }) {
                my $inflate = Compress::Zlib::inflateInit();
                my ($buff, $stat);
                $inflate and ($buff, $stat) = $inflate->inflate($$dict{_stream});
                if ($inflate and $stat == Compress::Zlib::Z_STREAM_END()) {
                    $$dict{_stream} = $buff;
                } else {
                    $et->Warn('Error inflating stream');
                    return 0;
                }
            } else {
                $et->WarnOnce('Install Compress::Zlib to process filtered streams');
                return 0;
            }
            next unless $pre and $pre eq '12';  # 12 = 'up' prediction
            # apply anti-predictor
            my $cols = $$decodeParms{Columns};
            unless ($cols) {
                # currently only support 'up' prediction
                $et->WarnOnce('No Columns for decoding stream');
                return 0;
            }
            my @bytes = unpack('C*', $$dict{_stream});
            my @pre = (0) x $cols;  # initialize predictor array
            my $buff = '';
            while (@bytes > $cols) {
                unless (($_ = shift @bytes) == 2) {
                    $et->WarnOnce("Unsupported PNG filter $_"); # (yes, PNG)
                    return 0;
                }
                foreach (@pre) {
                    $_ = ($_ + shift(@bytes)) & 0xff;
                }
                $buff .= pack('C*', @pre);
            }
            $$dict{_stream} = $buff;
        } elsif ($filter eq '/Crypt') {
            # (we shouldn't have to check the _decrypted flag since we
            #  already checked the _filtered flag, but what the heck...)
            next if defined $$dict{_decrypted};
            # assume Identity filter (the default) if DecodeParms are missing
            next unless ref $decodeParms eq 'HASH';
            my $name = $$decodeParms{Name};
            # an undefined or explicit /Identity crypt filter is a no-op
            # (was "next unless defined $name or $name eq 'Identity'", which
            #  wrongly rejected an explicit Identity filter as unsupported)
            next unless defined $name and $name ne 'Identity';
            if ($name ne 'StdCF') {
                $et->WarnOnce("Unsupported Crypt Filter $name");
                return 0;
            }
            unless ($cryptInfo) {
                $et->WarnOnce('Missing Encrypt StdCF entry');
                return 0;
            }
            # decrypt the stream manually because we want to:
            # 1) ignore $cryptStream (StmF) setting
            # 2) ignore EncryptMetadata setting (I can't find mention of how to
            #    reconcile this in the spec., but this would make sense)
            # 3) avoid adding the crypt key extension (ref 3, page 58, Algorithm 1b)
            # 4) set _decrypted flag so we will recrypt according to StmF when
            #    writing (since we don't yet write Filter'd streams)
            Crypt(\$$dict{_stream}, 'none');
            $$dict{_decrypted} = ($cryptStream ? 1 : 0);
        } elsif ($filter eq '/LZWDecode') {
            # make sure we don't have any unsupported decoding parameters
            if (ref $decodeParms eq 'HASH') {
                if ($$decodeParms{Predictor}) {
                    $et->WarnOnce("LZWDecode Predictor $$decodeParms{Predictor} currently not supported");
                    return 0;
                } elsif ($$decodeParms{EarlyChange}) {
                    $et->WarnOnce("LZWDecode EarlyChange currently not supported");
                    return 0;
                }
            }
            unless (DecodeLZW(\$$dict{_stream})) {
                $et->WarnOnce('LZW decompress error');
                return 0;
            }
        } elsif ($filter eq '/ASCIIHexDecode') {
            $$dict{_stream} =~ s/>.*//;             # truncate at '>' (end of data mark)
            # remove illegal (non-hex) characters
            # (was "tr/0-9a-zA-Z//d", which deleted the hex digits themselves
            #  instead of everything else -- see matching code in ReadPDFValue)
            $$dict{_stream} =~ tr/0-9A-Fa-f//dc;
            $$dict{_stream} = pack 'H*', $$dict{_stream};
        } elsif ($filter eq '/ASCII85Decode') {
            my ($err, @out, $i);
            my ($n, $val) = (0, 0);
            foreach (split //, $$dict{_stream}) {
                if ($_ ge '!' and $_ le 'u') {
                    # accumulate base-85 digits, 5 at a time
                    $val = 85 * $val + ord($_) - 33;
                    next unless ++$n == 5;
                } elsif ($_ eq '~') {
                    $n == 1 and $err = 1;   # error to have a single char in the last group of 5
                    for ($i=$n; $i<5; ++$i) { $val *= 85; }
                } elsif ($_ eq 'z') {
                    $n and $err = 2, last;  # error if 'z' isn't the first char
                    $n = 5;                 # 'z' is shorthand for 4 zero bytes
                } else {
                    next if /^\s$/;         # ignore white space
                    $err = 3, last;         # any other character is an error
                }
                $val = unpack('V', pack('N', $val));    # reverse byte order
                while (--$n > 0) {
                    push @out, $val & 0xff;
                    $val >>= 8;
                }
                last if $_ eq '~';
                # (both $n and $val are zero again now)
            }
            $err and $et->WarnOnce("ASCII85Decode error $err");
            $$dict{_stream} = pack('C*', @out);
        }
    }
    return 1;
}
#------------------------------------------------------------------------------
# Initialize state for RC4 en/decryption (ref 2)
# Inputs: 0) RC4 key string
# Returns: RC4 key hash reference (State = 256-byte permutation, XY = indices)
sub RC4Init($)
{
    my @keyBytes = unpack('C*', shift);
    my @perm = (0 .. 255);
    my ($i, $j) = (0, 0);
    # standard RC4 key-scheduling algorithm (KSA)
    while ($i < 256) {
        my $tmp = $perm[$i];
        $j = ($j + $tmp + $keyBytes[$i % scalar(@keyBytes)]) & 0xff;
        $perm[$i++] = $perm[$j];
        $perm[$j] = $tmp;
    }
    return { State => \@perm, XY => [ 0, 0 ] };
}
#------------------------------------------------------------------------------
# Apply RC4 en/decryption (ref 2)
# Inputs: 0) data reference, 1) RC4 key hash reference or RC4 key string
# - can call this method directly with a key string, or with the key
#   reference returned by RC4Init (state is updated for chained calls)
# - RC4 is a symmetric algorithm, so encryption is the same as decryption
sub RC4Crypt($$)
{
    my ($dataPt, $rc4) = @_;
    # accept either a raw key string or a prepared state hash
    $rc4 = RC4Init($rc4) unless ref $rc4 eq 'HASH';
    my $perm = $$rc4{State};
    my ($i, $j) = @{$$rc4{XY}};
    my @bytes = unpack('C*', $$dataPt);
    # standard RC4 pseudo-random generation algorithm (PRGA)
    foreach my $byte (@bytes) {
        $i = ($i + 1) & 0xff;
        my $a = $$perm[$i];
        $j = ($a + $j) & 0xff;
        my $b = $$perm[$i] = $$perm[$j];    # swap state entries
        $$perm[$j] = $a;
        $byte ^= $$perm[($a + $b) & 0xff];  # XOR with keystream byte
    }
    $$rc4{XY} = [ $i, $j ];     # save indices for a subsequent call
    $$dataPt = pack('C*', @bytes);
}
#------------------------------------------------------------------------------
# Update AES cipher with a bit of data
# Inputs: 0) data
# Returns: encrypted data (whole 16-byte AES blocks only)
# Notes: any trailing partial block is held in $cipherMore and prepended to
#        the data on the next call (GetHash resets $cipherMore between runs)
my $cipherMore = '';    # residual partial block from the previous call
sub CipherUpdate($)
{
    my $dat = shift;
    my $pos = 0;
    # prepend residue from the previous call
    # (was "if length $dat", which silently discarded any pending residue
    #  when called with an empty string, and concatenated undef on first use)
    $dat = $cipherMore . $dat if length $cipherMore;
    # encrypt as many complete 16-byte blocks as we have
    while ($pos + 16 <= length($dat)) {
        substr($dat,$pos,16) = Image::ExifTool::AES::Cipher(substr($dat,$pos,16));
        $pos += 16;
    }
    if ($pos < length $dat) {
        $cipherMore = substr($dat,$pos);    # hold partial block for next call
        $dat = substr($dat,0,$pos);
    } else {
        $cipherMore = '';
    }
    return $dat;
}
#------------------------------------------------------------------------------
# Get encrypted hash
# Inputs: 0) Password, 1) salt, 2) vector, 3) encryption revision
# Returns: hash (32 bytes)
sub GetHash($$$$)
{
    my ($password, $salt, $vector, $rev) = @_;
    # return Rev 5 hash
    return Digest::SHA::sha256($password, $salt, $vector) if $rev == 5;
    # compute Rev 6 hardened hash
    # (ref http://code.google.com/p/origami-pdf/source/browse/lib/origami/encryption.rb)
    # NOTE(review): this appears to follow the AES-256 "Algorithm 2.B" iterated
    # hash of ISO 32000-2 -- verify against the spec before modifying
    my $blockSize = 32;
    my $input = Digest::SHA::sha256($password, $salt, $vector) . ("\0" x 32);
    my $key = substr($input, 0, 16);
    my $iv = substr($input, 16, 16);
    my $h;
    my $x = '';
    my $i = 0;
    # iterate at least 64 rounds, then continue until the last byte of the
    # final AES output is small enough (data-dependent termination)
    while ($i < 64 or $i < ord(substr($x,-1,1))+32) {
        my $block = substr($input, 0, $blockSize);
        $x = '';
        # initialize the AES-CBC cipher state with the current key/iv
        Image::ExifTool::AES::Crypt(\$x, $key, $iv, 1);
        $cipherMore = '';
        my ($j, $digest);
        for ($j=0; $j<64; ++$j) {
            $x = '';
            $x .= CipherUpdate($password) if length $password;
            $x .= CipherUpdate($block);
            $x .= CipherUpdate($vector) if length $vector;
            if ($j == 0) {
                # the first 16 encrypted bytes select the digest algorithm
                my @a = unpack('C16', $x);
                my $sum = 0;
                $sum += $_ foreach @a;
                # set SHA block size (32, 48 or 64 bytes = SHA-256, 384 or 512)
                $blockSize = 32 + ($sum % 3) * 16;
                $digest = Digest::SHA->new($blockSize * 8);
            }
            $digest->add($x);
        }
        # feed this round's digest back in as the next key/iv/input
        $h = $digest->digest();
        $key = substr($h, 0, 16);
        substr($input,0,16) = $h;
        $iv = substr($h, 16, 16);
        ++$i;
    }
    return substr($h, 0, 32);
}
#------------------------------------------------------------------------------
# Initialize decryption
# Inputs: 0) ExifTool object reference, 1) Encrypt dictionary reference,
#         2) ID from file trailer dictionary
# Returns: error string or undef on success (and sets $cryptInfo)
# Notes: validates the Encrypt dictionary, derives the file-level decryption
#        key (trying an empty password first, then the Password option), and
#        sets the $cryptString/$cryptStream flags for later use
sub DecryptInit($$$)
{
    local $_;
    my ($et, $encrypt, $id) = @_;
    undef $cryptInfo;
    unless ($encrypt and ref $encrypt eq 'HASH') {
        return 'Error loading Encrypt object';
    }
    my $filt = $$encrypt{Filter};
    unless ($filt and $filt =~ s/^\///) {
        return 'Encrypt dictionary has no Filter!';
    }
    # extract some interesting tags
    my $ver = $$encrypt{V} || 0;
    my $rev = $$encrypt{R} || 0;
    # build a human-readable encryption description for the Filter tag
    my $enc = "$filt V$ver";
    $enc .= ".$rev" if $filt eq 'Standard';
    $enc .= " ($1)" if $$encrypt{SubFilter} and $$encrypt{SubFilter} =~ /^\/(.*)/;
    $enc .= ' (' . ($$encrypt{Length} || 40) . '-bit)' if $filt eq 'Standard';
    my $tagTablePtr = GetTagTable('Image::ExifTool::PDF::Encrypt');
    $et->HandleTag($tagTablePtr, 'Filter', $enc);
    if ($filt ne 'Standard') {
        return "Encryption filter $filt currently not supported";
    } elsif (not defined $$encrypt{R}) {
        return 'Standard security handler missing revision';
    }
    # O/U (owner/user password hashes) and P (permissions) are mandatory
    unless ($$encrypt{O} and $$encrypt{P} and $$encrypt{U}) {
        return 'Incomplete Encrypt specification';
    }
    if ("$ver.$rev" >= 5.6) {
        # apologize for poor performance (AES is a pure Perl implementation)
        $et->Warn('Decryption is very slow for encryption V5.6 or higher', 3);
    }
    $et->HandleTag($tagTablePtr, 'P', $$encrypt{P});
    my %parm;   # optional parameters extracted from Encrypt dictionary
    if ($ver == 1 or $ver == 2) {
        # V1/V2: everything is encrypted with the file key
        $cryptString = $cryptStream = 1;
    } elsif ($ver == 4 or $ver == 5) {
        # initialize our $cryptString and $cryptStream flags
        # (StrF/StmF name the crypt filters for strings/streams)
        foreach ('StrF', 'StmF') {
            my $flagPt = $_ eq 'StrF' ? \$cryptString : \$cryptStream;
            $$flagPt = $$encrypt{$_};
            undef $$flagPt if $$flagPt and $$flagPt eq '/Identity';
            return "Unsupported $_ encryption $$flagPt" if $$flagPt and $$flagPt ne '/StdCF';
        }
        if ($cryptString or $cryptStream) {
            return 'Missing or invalid Encrypt StdCF entry' unless ref $$encrypt{CF} eq 'HASH' and
                ref $$encrypt{CF}{StdCF} eq 'HASH' and $$encrypt{CF}{StdCF}{CFM};
            my $cryptMeth = $$encrypt{CF}{StdCF}{CFM};
            unless ($cryptMeth =~ /^\/(V2|AESV2|AESV3)$/) {
                return "Unsupported encryption method $cryptMeth";
            }
            # set "_aesv2" or "_aesv3" flag in %$encrypt hash if AES encryption was used
            $$encrypt{'_' . lc($1)} = 1 if $cryptMeth =~ /^\/(AESV2|AESV3)$/;
        }
        if ($ver == 5) {
            # validate OE and UE entries (encrypted owner/user keys, AES-256)
            foreach ('OE', 'UE') {
                return "Missing Encrypt $_ entry" unless $$encrypt{$_};
                $parm{$_} = ReadPDFValue($$encrypt{$_});
                return "Invalid Encrypt $_ entry" unless length $parm{$_} == 32;
            }
            require Image::ExifTool::AES; # will need this later
        }
    } else {
        return "Encryption version $ver currently not supported";
    }
    $id or return "Can't decrypt (no document ID)";
    # make sure we have the necessary libraries available
    if ($ver < 5) {
        unless (eval { require Digest::MD5 }) {
            return "Install Digest::MD5 to process encrypted PDF";
        }
    } else {
        unless (eval { require Digest::SHA }) {
            return "Install Digest::SHA to process AES-256 encrypted PDF";
        }
    }
    # calculate file-level en/decryption key
    # (the standard 32-byte padding string from the PDF specification)
    my $pad = "\x28\xBF\x4E\x5E\x4E\x75\x8A\x41\x64\x00\x4E\x56\xFF\xFA\x01\x08".
              "\x2E\x2E\x00\xB6\xD0\x68\x3E\x80\x2F\x0C\xA9\xFE\x64\x53\x69\x7A";
    my $o = ReadPDFValue($$encrypt{O});
    my $u = ReadPDFValue($$encrypt{U});
    # set flag indicating whether metadata is encrypted
    # (in version 4 and higher, metadata streams may not be encrypted)
    if ($ver < 4 or not $$encrypt{EncryptMetadata} or $$encrypt{EncryptMetadata} !~ /false/i) {
        $$encrypt{_meta} = 1;
    }
    # try no password first, then try provided password if available
    my ($try, $key);
    for ($try=0; ; ++$try) {
        my $password;
        if ($try == 0) {
            $password = '';
        } elsif ($try == 1) {
            $password = $et->Options('Password');
            return 'Document is password protected (use Password option)' unless defined $password;
            # make sure there is no UTF-8 flag on the password
            if ($] >= 5.006 and (eval { require Encode; Encode::is_utf8($password) } or $@)) {
                # repack by hand if Encode isn't available
                $password = $@ ? pack('C*',unpack($] < 5.010000 ? 'U0C*' : 'C0C*',$password)) : Encode::encode('utf8',$password);
            }
        } else {
            return 'Incorrect password';
        }
        if ($ver < 5) {
            # RC4/AES-128 key derivation (MD5 based)
            if (length $password) {
                # password must be encoding in PDFDocEncoding (ref iso32000)
                $password = $et->Encode($password, 'PDFDoc');
                # truncate or pad the password to exactly 32 bytes
                if (length($password) > 32) {
                    $password = substr($password, 0, 32);
                } elsif (length($password) < 32) {
                    $password .= substr($pad, 0, 32-length($password));
                }
            } else {
                $password = $pad;
            }
            $key = $password . $o . pack('V', $$encrypt{P}) . $id;
            my $rep = 1;
            if ($rev == 3 or $rev == 4) {
                # must add this if metadata not encrypted
                $key .= "\xff\xff\xff\xff" unless $$encrypt{_meta};
                $rep += 50; # repeat MD5 50 more times if revision is 3 or greater
            }
            my ($len, $i, $dat);
            if ($ver == 1) {
                $len = 5;
            } else {
                $len = $$encrypt{Length} || 40;
                $len >= 40 or return 'Bad Encrypt Length';
                $len = int($len / 8);   # convert bits to bytes
            }
            for ($i=0; $i<$rep; ++$i) {
                $key = substr(Digest::MD5::md5($key), 0, $len);
            }
            # decrypt U to see if a user password is required
            if ($rev >= 3) {
                $dat = Digest::MD5::md5($pad . $id);
                RC4Crypt(\$dat, $key);
                # 19 additional RC4 passes with the key XOR'd by the pass number
                for ($i=1; $i<=19; ++$i) {
                    my @key = unpack('C*', $key);
                    foreach (@key) { $_ ^= $i; }
                    RC4Crypt(\$dat, pack('C*', @key));
                }
                $dat .= substr($u, 16);
            } else {
                $dat = $pad;
                RC4Crypt(\$dat, $key);
            }
            last if $dat eq $u; # all done if this was the correct key
        } else {
            # AES-256 (V5) key derivation (SHA based)
            return 'Invalid O or U Encrypt entries' if length($o) < 48 or length($u) < 48;
            if (length $password) {
                # Note: this should be good for passwords containing reasonable characters,
                # but to be bullet-proof we need to apply the SASLprep (IETF RFC 4013) profile
                # of stringprep (IETF RFC 3454) to the password before encoding in UTF-8
                $password = $et->Encode($password, 'UTF8');
                $password = substr($password, 0, 127) if length($password) > 127;
            }
            # test for the owner password
            my $sha = GetHash($password, substr($o,32,8), substr($u,0,48), $rev);
            if ($sha eq substr($o, 0, 32)) {
                $key = GetHash($password, substr($o,40,8), substr($u,0,48), $rev);
                my $dat = ("\0" x 16) . $parm{OE};
                # decrypt with no padding
                my $err = Image::ExifTool::AES::Crypt(\$dat, $key, 0, 1);
                return $err if $err;
                $key = $dat;    # use this as the file decryption key
                last;
            }
            # test for the user password
            $sha = GetHash($password, substr($u,32,8), '', $rev);
            if ($sha eq substr($u, 0, 32)) {
                $key = GetHash($password, substr($u,40,8), '', $rev);
                my $dat = ("\0" x 16) . $parm{UE};
                my $err = Image::ExifTool::AES::Crypt(\$dat, $key, 0, 1);
                return $err if $err;
                $key = $dat;    # use this as the file decryption key
                last;
            }
        }
    }
    $$encrypt{_key} = $key; # save the file-level encryption key
    $cryptInfo = $encrypt;  # save reference to the file-level Encrypt object
    return undef;           # success!
}
#------------------------------------------------------------------------------
# Decrypt/Encrypt data
# Inputs: 0) data ref
#         1) PDF object reference to use as crypt key extension (may be 'none' to
#            avoid extending the encryption key, as for streams with Crypt Filter)
#         2) encrypt flag (false for decryption)
# Notes: operates on the data in place; errors are reported by setting
#        $$cryptInfo{_error} rather than by a return value
sub Crypt($$;$)
{
    return unless $cryptInfo;
    my ($dataPt, $keyExt, $encrypt) = @_;
    # do not decrypt if the key extension object is undefined
    # (this doubles as a flag to disable decryption/encryption)
    return unless defined $keyExt;
    my $key = $$cryptInfo{_key};
    # apply the necessary crypt key extension
    # (AESV3 uses the file key directly, with no per-object extension)
    unless ($$cryptInfo{_aesv3}) {
        unless ($keyExt eq 'none') {
            # extend crypt key using object and generation number
            # (low 3 bytes of object number plus low 2 bytes of generation number)
            unless ($keyExt =~ /^(I\d+ )?(\d+) (\d+)/) {
                $$cryptInfo{_error} = 'Invalid object reference for encryption';
                return;
            }
            $key .= substr(pack('V', $2), 0, 3) . substr(pack('V', $3), 0, 2);
        }
        # add AES-128 salt if necessary (this little gem is conveniently
        # omitted from the Adobe PDF 1.6 documentation, causing me to
        # waste 12 hours trying to figure out why this wasn't working --
        # it appears in ISO32000 though, so I should have been using that)
        $key .= 'sAlT' if $$cryptInfo{_aesv2};
        my $len = length($key);
        $key = Digest::MD5::md5($key); # get 16-byte MD5 digest
        $key = substr($key, 0, $len) if $len < 16; # trim if necessary
    }
    # perform the decryption/encryption
    if ($$cryptInfo{_aesv2} or $$cryptInfo{_aesv3}) {
        require Image::ExifTool::AES;
        my $err = Image::ExifTool::AES::Crypt($dataPt, $key, $encrypt);
        $err and $$cryptInfo{_error} = $err;
    } else {
        RC4Crypt($dataPt, $key);
    }
}
#------------------------------------------------------------------------------
# Decrypt/Encrypt stream data
# Inputs: 0) dictionary ref, 1) PDF object reference to use as crypt key extension
sub CryptStream($$)
{
    # nothing to do unless stream encryption is active for this file
    return unless $cryptStream;
    my ($dict, $keyExt) = @_;
    my $objType = $$dict{Type} || '';
    # XRef streams are never encrypted (ref 3, page 50), and Metadata
    # streams are encrypted only when the _meta flag is set
    if ($cryptInfo and $objType ne '/XRef' and
        ($$cryptInfo{_meta} or $objType ne '/Metadata'))
    {
        Crypt(\$$dict{_stream}, $keyExt, $$dict{_decrypted});
        # flip the decrypted/encrypted state now that the data was converted
        $$dict{_decrypted} = $$dict{_decrypted} ? undef : 1;
    } else {
        # flag this stream as one which must never be encrypted
        $$dict{_decrypted} = 0;
    }
}
#------------------------------------------------------------------------------
# Generate a new PDF tag (based on its ID) and add it to a tag table
# Inputs: 0) tag table ref, 1) tag ID
# Returns: tag info ref
sub NewPDFTag($$)
{
    my ($table, $tag) = @_;
    # derive a legal, readable tag name from the raw PDF tag ID
    my $tagName = $tag;
    $tagName =~ s/#([0-9a-f]{2})/chr(hex($1))/ige;  # decode URL-style #xx escapes
    $tagName =~ s/[^-\w]+/_/g;                      # squash invalid characters to an underline
    $tagName =~ s/(^|_)([a-z])/\U$2/g;              # capitalize the start of each word
    my $info = { Name => $tagName };
    AddTagToTable($table, $tag, $info);
    return $info;
}
#------------------------------------------------------------------------------
# Process AcroForm dictionary to set HasXMLFormsArchitecture flag
# Inputs: Same as ProcessDict
sub ProcessAcroForm($$$$;$$)
{
    my ($et, $tagTablePtr, $dict, $xref, $nesting, $type) = @_;
    # report whether or not this form uses the XML Forms Architecture
    my $hasXFA = $$dict{XFA} ? 'true' : 'false';
    $et->HandleTag($tagTablePtr, '_has_xfa', $hasXFA);
    # then continue processing this as an ordinary dictionary
    return ProcessDict($et, $tagTablePtr, $dict, $xref, $nesting, $type);
}
#------------------------------------------------------------------------------
# Expand array into a string
# Inputs: 0) array ref
# Return: string
sub ExpandArray($)
{
    my $arrayRef = shift;
    # work on a copy so the caller's array is left untouched
    my @elements = @$arrayRef;
    foreach my $elem (@elements) {
        if (ref $elem eq 'SCALAR') {
            $elem = "ref($$elem)";      # indirect object reference
        } elsif (ref $elem eq 'ARRAY') {
            $elem = ExpandArray($elem); # nested array (recurse)
        } elsif (not defined $elem) {
            $elem = '<undef>';
        }
    }
    return '[' . join(',', @elements) . ']';
}
#------------------------------------------------------------------------------
# Process PDF dictionary and extract tag values
# Inputs: 0) ExifTool object reference, 1) tag table reference
#         2) dictionary reference, 3) cross-reference table reference,
#         4) nesting depth, 5) dictionary capture type
# Notes: recurses into subdirectories; also captures dictionaries for
#        rewriting and processes any stream attached to the dictionary
sub ProcessDict($$$$;$$)
{
    my ($et, $tagTablePtr, $dict, $xref, $nesting, $type) = @_;
    my $verbose = $et->Options('Verbose');
    my $unknown = $$tagTablePtr{EXTRACT_UNKNOWN};
    my $embedded = (defined $unknown and not $unknown and $et->Options('ExtractEmbedded'));
    my @tags = @{$$dict{_tags}};
    # ($next defers processing of any 'Next' link until all other tags are
    #  done; %join collects numbered stream data to concatenate by tag name)
    my ($next, %join);
    my $index = 0;
    $nesting = ($nesting || 0) + 1;
    if ($nesting > 50) {
        $et->WarnOnce('Nesting too deep (directory ignored)');
        return;
    }
    # save entire dictionary for rewriting if specified
    if ($$et{PDF_CAPTURE} and $$tagTablePtr{VARS} and
        $tagTablePtr->{VARS}->{CAPTURE})
    {
        my $name;
        foreach $name (@{$tagTablePtr->{VARS}->{CAPTURE}}) {
            next if $$et{PDF_CAPTURE}{$name};
            # make sure we load the right type if indicated
            next if $type and $type ne $name;
            $$et{PDF_CAPTURE}{$name} = $dict;
            last;
        }
    }
#
# extract information from all tags in the dictionary
#
    for (;;) {
        my ($tag, $isSubDoc);
        if (@tags) {
            $tag = shift @tags;
        } elsif (defined $next and not $next) {
            $tag = 'Next';
            $next = 1;
        } else {
            last;
        }
        my $val = $$dict{$tag};
        my $tagInfo = $et->GetTagInfo($tagTablePtr, $tag);
        if ($tagInfo) {
            undef $tagInfo if $$tagInfo{NoProcess};
        } elsif ($embedded and $tag =~ /^(.*?)(\d+)$/ and
            $$tagTablePtr{$1} and (ref $val ne 'SCALAR' or not $fetched{$$val}))
        {
            my ($name, $num) = ($1, $2);
            $tagInfo = $et->GetTagInfo($tagTablePtr, $name);
            if (ref $tagInfo eq 'HASH' and $$tagInfo{JoinStreams}) {
                $fetched{$$val} = 1;
                my $obj = FetchObject($et, $$val, $xref, $tag);
                $join{$name} = [] unless $join{$name};
                next unless ref $obj eq 'HASH' and $$obj{_stream};
                # save all the stream data to join later
                DecodeStream($et, $obj);
                $join{$name}->[$num] = $$obj{_stream};
                undef $tagInfo; # don't process
            } else {
                $isSubDoc = 1; # treat as a sub-document
            }
        }
        if ($verbose) {
            my ($val2, $extra);
            if (ref $val eq 'SCALAR') {
                $extra = ", indirect object ($$val)";
                if ($fetched{$$val}) {
                    $val2 = "ref($$val)";
                } elsif ($tag eq 'Next' and not $next) {
                    # handle 'Next' links after all others
                    $next = 0;
                    next;
                } else {
                    $fetched{$$val} = 1;
                    $val = FetchObject($et, $$val, $xref, $tag);
                    unless (defined $val) {
                        my $str;
                        if (defined $lastOffset) {
                            $val2 = '<free>';
                            $str = 'Object was freed';
                        } else {
                            $val2 = '<err>';
                            $str = 'Error reading object';
                        }
                        $et->VPrint(0, "$$et{INDENT}${str}:\n");
                    }
                }
            } elsif (ref $val eq 'HASH') {
                $extra = ', direct dictionary';
            } elsif (ref $val eq 'ARRAY') {
                $extra = ', direct array of ' . scalar(@$val) . ' objects';
            } else {
                $extra = ', direct object';
            }
            my $isSubdir;
            if (ref $val eq 'HASH') {
                $isSubdir = 1;
            } elsif (ref $val eq 'ARRAY') {
                # recurse into objects in arrays only if they are lists of
                # dictionaries or indirect objects which could be dictionaries
                $isSubdir = 1 if @$val;
                foreach (@$val) {
                    next if ref $_ eq 'HASH' or ref $_ eq 'SCALAR';
                    undef $isSubdir;
                    last;
                }
            }
            if ($isSubdir) {
                # create bogus subdirectory to recurse into this dict
                $tagInfo or $tagInfo = {
                    Name => $tag,
                    SubDirectory => { TagTable => 'Image::ExifTool::PDF::Unknown' },
                };
            } else {
                $val2 = ExpandArray($val) if ref $val eq 'ARRAY';
                # generate tag info if we will use it later
                if (not $tagInfo and defined $val and $unknown) {
                    $tagInfo = NewPDFTag($tagTablePtr, $tag);
                }
            }
            $et->VerboseInfo($tag, $tagInfo,
                Value => $val2 || $val,
                Extra => $extra,
                Index => $index++,
            );
            next unless defined $val; # (all done unless the tag has a value)
        }
        unless ($tagInfo) {
            # add any tag found in Info dictionary to table
            next unless $unknown;
            $tagInfo = NewPDFTag($tagTablePtr, $tag);
        }
        # increment document number if necessary
        my ($oldDocNum, $oldNumTags);
        if ($isSubDoc) {
            $oldDocNum = $$et{DOC_NUM};
            $oldNumTags = $$et{NUM_FOUND};
            $$et{DOC_NUM} = ++$$et{DOC_COUNT};
        }
        if ($$tagInfo{SubDirectory}) {
            # process the subdirectory
            my @subDicts;
            if (ref $val eq 'ARRAY') {
                # hack to convert array to dictionary if necessary
                if ($$tagInfo{ConvertToDict} and @$val == 2 and not ref $$val[0]) {
                    my $tg = $$val[0];
                    $tg =~ s(^/)(); # remove name
                    my %dict = ( _tags => [ $tg ], $tg => $$val[1] );
                    @subDicts = ( \%dict );
                } else {
                    @subDicts = @{$val};
                }
            } else {
                @subDicts = ( $val );
            }
            # loop through all values of this tag
            for (;;) {
                my $subDict = shift @subDicts or last;
                # save last fetched object in case we fetch another one here
                my $prevFetched = $lastFetched;
                if (ref $subDict eq 'SCALAR') {
                    # only fetch once (other copies are obsolete)
                    next if $fetched{$$subDict};
                    if ($$tagInfo{IgnoreDuplicates}) {
                        my $flag = "ProcessedPDF_$tag";
                        if ($$et{$flag}) {
                            next if $et->WarnOnce("Ignored duplicate $tag dictionary", 2);
                        } else {
                            $$et{$flag} = 1;
                        }
                    }
                    # load dictionary via an indirect reference
                    $fetched{$$subDict} = 1;
                    my $obj = FetchObject($et, $$subDict, $xref, $tag);
                    unless (defined $obj) {
                        unless (defined $lastOffset) {
                            $et->Warn("Error reading $tag object ($$subDict)");
                        }
                        next;
                    }
                    $subDict = $obj;
                }
                if (ref $subDict eq 'ARRAY') {
                    # convert array of key/value pairs to a hash
                    next if @$subDict < 2;
                    my %hash = ( _tags => [] );
                    while (@$subDict >= 2) {
                        my $key = shift @$subDict;
                        $key =~ s/^\///;
                        push @{$hash{_tags}}, $key;
                        $hash{$key} = shift @$subDict;
                    }
                    $subDict = \%hash;
                } else {
                    next unless ref $subDict eq 'HASH';
                }
                # set flag to re-crypt all strings when rewriting if the dictionary
                # came from an encrypted stream
                $$subDict{_needCrypt}{'*'} = 1 unless $lastFetched;
                my $subTablePtr = GetTagTable($tagInfo->{SubDirectory}->{TagTable});
                if (not $verbose) {
                    my $proc = $$subTablePtr{PROCESS_PROC} || \&ProcessDict;
                    &$proc($et, $subTablePtr, $subDict, $xref, $nesting);
                } elsif ($next) {
                    # handle 'Next' links at this level to avoid deep recursion
                    undef $next;
                    $index = 0;
                    $tagTablePtr = $subTablePtr;
                    $dict = $subDict;
                    @tags = @{$$subDict{_tags}};
                    $et->VerboseDir($tag, scalar(@tags));
                } else {
                    my $oldIndent = $$et{INDENT};
                    my $oldDir = $$et{DIR_NAME};
                    $$et{INDENT} .= '| ';
                    $$et{DIR_NAME} = $tag;
                    $et->VerboseDir($tag, scalar(@{$$subDict{_tags}}));
                    ProcessDict($et, $subTablePtr, $subDict, $xref, $nesting);
                    $$et{INDENT} = $oldIndent;
                    $$et{DIR_NAME} = $oldDir;
                }
                $lastFetched = $prevFetched;
            }
        } else {
            # fetch object if necessary
            # (OS X 10.6 writes indirect objects in the Info dictionary!)
            if (ref $val eq 'SCALAR') {
                my $prevFetched = $lastFetched;
                # (note: fetching the same object multiple times is OK here)
                $val = FetchObject($et, $$val, $xref, $tag);
                if (defined $val) {
                    $val = ReadPDFValue($val);
                    # set flag to re-encrypt if necessary if rewritten
                    $$dict{_needCrypt}{$tag} = ($lastFetched ? 0 : 1) if $cryptString;
                    $lastFetched = $prevFetched; # restore last fetched object reference
                }
            } else {
                $val = ReadPDFValue($val);
            }
            # convert from UTF-16 (big endian) to UTF-8 or Latin if necessary
            # unless this is binary data (hex-encoded strings would not have been converted)
            if (ref $val) {
                if (ref $val eq 'ARRAY') {
                    my $v;
                    foreach $v (@$val) {
                        $et->FoundTag($tagInfo, $v);
                    }
                }
            } elsif (defined $val) {
                my $format = $$tagInfo{Format} || $$tagInfo{Writable} || 'string';
                $val = ConvertPDFDate($val) if $format eq 'date';
                if (not $$tagInfo{Binary} and $val =~ /[\x18-\x1f\x80-\xff]/) {
                    # text string is already in Unicode if it starts with "\xfe\xff",
                    # otherwise we must first convert from PDFDocEncoding
                    $val = $et->Decode($val, ($val=~s/^\xfe\xff// ? 'UCS2' : 'PDFDoc'), 'MM');
                }
                if ($$tagInfo{List} and not $$et{OPTIONS}{NoPDFList}) {
                    # separate tokens in comma or whitespace delimited lists
                    my @values = ($val =~ /,/) ? split /,+\s*/, $val : split ' ', $val;
                    foreach $val (@values) {
                        $et->FoundTag($tagInfo, $val);
                    }
                } else {
                    # a simple tag value
                    $et->FoundTag($tagInfo, $val);
                }
            }
        }
        if ($isSubDoc) {
            # restore original document number
            $$et{DOC_NUM} = $oldDocNum;
            --$$et{DOC_COUNT} if $oldNumTags == $$et{NUM_FOUND};
        }
    }
#
# extract information from joined streams if necessary
#
    if (%join) {
        my ($tag, $i);
        foreach $tag (sort keys %join) {
            my $list = $join{$tag};
            last unless defined $$list[1] and $$list[1] =~ /^%.*?([\x0d\x0a]*)/;
            my $buff = "%!PS-Adobe-3.0$1"; # add PS header with same line break
            for ($i=1; defined $$list[$i]; ++$i) {
                $buff .= $$list[$i];
                undef $$list[$i]; # free memory
            }
            # increment document number for tags extracted from embedded EPS
            my $oldDocNum = $$et{DOC_NUM};
            my $oldNumTags = $$et{NUM_FOUND};
            $$et{DOC_NUM} = ++$$et{DOC_COUNT};
            # extract PostScript information
            $et->HandleTag($tagTablePtr, $tag, $buff);
            $$et{DOC_NUM} = $oldDocNum;
            # revert document counter if we didn't add any new tags
            --$$et{DOC_COUNT} if $oldNumTags == $$et{NUM_FOUND};
            delete $$et{DOC_NUM};
        }
    }
#
# extract information from stream object if it exists (eg. Metadata stream)
#
    for (;;) { # (cheap goto)
        last unless $$dict{_stream};
        my $tag = '_stream';
        # add Subtype (if it exists) to stream name and remove leading '/'
        ($tag = $$dict{Subtype} . $tag) =~ s/^\/// if $$dict{Subtype};
        last unless $$tagTablePtr{$tag};
        my $tagInfo = $et->GetTagInfo($tagTablePtr, $tag) or last;
        unless ($$tagInfo{SubDirectory}) {
            # don't build filter lists across different images
            delete $$et{LIST_TAGS}{$$tagTablePtr{Filter}};
            # we arrive here only when extracting embedded images
            # - only extract known image types and ignore others
            my $filter = $$dict{Filter} || '';
            $filter = @$filter[-1] if ref $filter eq 'ARRAY'; # (get last Filter type)
            my $result;
            if ($filter eq '/DCTDecode' or $filter eq '/JPXDecode') {
                DecodeStream($et, $dict) or last;
                # save the image itself
                $et->FoundTag($tagInfo, \$$dict{_stream});
                # extract information from embedded image
                $result = $et->ExtractInfo(\$$dict{_stream}, { ReEntry => 1 });
            }
            unless ($result) {
                $et->FoundTag('FileType', defined $result ? '(unknown)' : '(unsupported)');
            }
            last;
        }
        # decode stream if necessary
        DecodeStream($et, $dict) or last;
        if ($verbose > 2) {
            $et->VPrint(2,"$$et{INDENT}$$et{DIR_NAME} stream data\n");
            $et->VerboseDump(\$$dict{_stream});
        }
        # extract information from stream
        my %dirInfo = (
            DataPt => \$$dict{_stream},
            DataLen => length $$dict{_stream},
            DirStart => 0,
            DirLen => length $$dict{_stream},
            Parent => 'PDF',
        );
        my $subTablePtr = GetTagTable($tagInfo->{SubDirectory}->{TagTable});
        unless ($et->ProcessDirectory(\%dirInfo, $subTablePtr)) {
            $et->Warn("Error processing $$tagInfo{Name} information");
        }
        last;
    }
}
#------------------------------------------------------------------------------
# Extract information from PDF file
# Inputs: 0) ExifTool object reference, 1) dirInfo reference
# Returns: 0 if not a PDF file, 1 on success, otherwise a negative error number
# Notes: initializes file-level decryption (via DecryptInit) if the file is
#        encrypted, then processes each main dictionary found in the file
sub ReadPDF($$)
{
    my ($et, $dirInfo) = @_;
    my $raf = $$dirInfo{RAF};
    my $verbose = $et->Options('Verbose');
    my ($buff, $encrypt, $id);
#
# validate PDF file
#
    # (linearization dictionary must be in the first 1024 bytes of the file)
    $raf->Read($buff, 1024) >= 8 or return 0;
    $buff =~ /^(\s*)%PDF-(\d+\.\d+)/ or return 0;
    # (remember the length of any leading junk before the %PDF header)
    $$et{PDFBase} = length $1 and $et->Warn('PDF header is not at start of file',1);
    $pdfVer = $2;
    $et->SetFileType(); # set the FileType tag
    $et->Warn("The PDF $pdfVer specification is held hostage by the ISO") if $pdfVer >= 2.0;
    # store PDFVersion tag
    my $tagTablePtr = GetTagTable('Image::ExifTool::PDF::Root');
    $et->HandleTag($tagTablePtr, 'Version', $pdfVer);
    $tagTablePtr = GetTagTable('Image::ExifTool::PDF::Main');
#
# check for a linearized PDF (only if reading)
#
    my $capture = $$et{PDF_CAPTURE};
    unless ($capture) {
        my $lin = 'false';
        if ($buff =~ /<</g) {
            $buff = substr($buff, pos($buff) - 2);
            my $dict = ExtractObject($et, \$buff);
            if (ref $dict eq 'HASH' and $$dict{Linearized} and $$dict{L}) {
                if (not $$et{VALUE}{FileSize}) {
                    undef $lin; # can't determine if it is linearized
                } elsif ($$dict{L} == $$et{VALUE}{FileSize} - $$et{PDFBase}) {
                    $lin = 'true';
                }
            }
        }
        $et->HandleTag($tagTablePtr, '_linearized', $lin) if $lin;
    }
#
# read the xref tables referenced from startxref at the end of the file
#
    my @xrefOffsets;
    $raf->Seek(0, 2) or return -2;
    # the %%EOF must occur within the last 1024 bytes of the file (PDF spec, appendix H)
    my $len = $raf->Tell();
    $len = 1024 if $len > 1024;
    $raf->Seek(-$len, 2) or return -2;
    $raf->Read($buff, $len) == $len or return -3;
    # find the LAST xref table in the file (may be multiple %%EOF marks,
    # and comments between "startxref" and "%%EOF")
    $buff =~ /^.*startxref(\s+)(\d+)(\s+)(%[^\x0d\x0a]*\s+)*%%EOF/s or return -4;
    my $ws = $1 . $3; # (whitespace used below to determine the newline sequence)
    my $xr = $2;
    push @xrefOffsets, $xr, 'Main';
    # set input record separator
    local $/ = $ws =~ /(\x0d\x0a|\x0d|\x0a)/ ? $1 : "\x0a";
    my (%xref, @mainDicts, %loaded, $mainFree);
    # initialize variables to capture when rewriting
    if ($capture) {
        $capture->{startxref} = $xr;
        $capture->{xref} = \%xref;
        $capture->{newline} = $/;
        $capture->{mainFree} = $mainFree = { };
    }
XRef:
    while (@xrefOffsets) {
        my $offset = shift @xrefOffsets;
        my $type = shift @xrefOffsets;
        next if $loaded{$offset}; # avoid infinite recursion
        unless ($raf->Seek($offset+$$et{PDFBase}, 0)) {
            %loaded or return -5;
            $et->Warn('Bad offset for secondary xref table');
            next;
        }
        # Note: care must be taken because ReadLine may read more than we want if
        # the newline sequence for this table is different than the rest of the file
        for (;;) {
            unless ($raf->ReadLine($buff)) {
                %loaded or return -6;
                $et->Warn('Bad offset for secondary xref table');
                next XRef;
            }
            last if $buff =~/\S/; # skip blank lines
        }
        my $loadXRefStream;
        if ($buff =~ s/^\s*xref\s+//s) {
            # load xref table
            for (;;) {
                # read another line if necessary (skipping blank lines)
                $raf->ReadLine($buff) or return -6 until $buff =~ /\S/;
                last if $buff =~ s/^\s*trailer([\s<[(])/$1/s;
                $buff =~ s/^\s*(\d+)\s+(\d+)\s+//s or return -4;
                my ($start, $num) = ($1, $2);
                $raf->Seek(-length($buff), 1) or return -4;
                my $i;
                for ($i=0; $i<$num; ++$i) {
                    $raf->Read($buff, 20) == 20 or return -6;
                    $buff =~ /^\s*(\d{10}) (\d{5}) (f|n)/s or return -4;
                    my $num = $start + $i;
                    # locate object to generate entry from stream if necessary
                    # (must do this before we test $xref{$num})
                    LocateAnyObject(\%xref, $num) if $xref{dicts};
                    # save offset for newest copy of all objects
                    # (or next object number for free objects)
                    unless (defined $xref{$num}) {
                        my ($offset, $gen) = (int($1), int($2));
                        $xref{$num} = $offset;
                        if ($3 eq 'f') {
                            # save free objects in last xref table for rewriting
                            $$mainFree{$num} = [ $offset, $gen, 'f' ] if $mainFree;
                            next;
                        }
                        # also save offset keyed by object reference string
                        $xref{"$num $gen R"} = $offset;
                    }
                }
                # (I have a sample from Adobe which has an empty xref table)
                # %xref or return -4; # xref table may not be empty
                $buff = '';
            }
            undef $mainFree; # only do this for the last xref table
        } elsif ($buff =~ s/^\s*(\d+)\s+(\d+)\s+obj//s) {
            # this is a PDF-1.5 cross-reference stream dictionary
            $loadXRefStream = 1;
        } else {
            %loaded or return -4;
            $et->Warn('Invalid secondary xref table');
            next;
        }
        my $mainDict = ExtractObject($et, \$buff, $raf, \%xref);
        unless (ref $mainDict eq 'HASH') {
            %loaded or return -8;
            $et->Warn('Error loading secondary dictionary');
            next;
        }
        if ($loadXRefStream) {
            # decode and save our XRef stream from PDF-1.5 file
            # (but parse it later as required to save time)
            # Note: this technique can potentially result in an old object
            # being used if the file was incrementally updated and an older
            # object from an xref table was replaced by a newer object in an
            # xref stream. But doing so isn't a good idea (if allowed at all)
            # because a PDF 1.4 consumer would also make this same mistake.
            if ($$mainDict{Type} eq '/XRef' and $$mainDict{W} and
                @{$$mainDict{W}} > 2 and $$mainDict{Size} and
                DecodeStream($et, $mainDict))
            {
                # create Index entry if it doesn't exist
                $$mainDict{Index} or $$mainDict{Index} = [ 0, $$mainDict{Size} ];
                # create '_entry_size' entry for internal use
                my $w = $$mainDict{W};
                my $size = 0;
                foreach (@$w) { $size += $_; }
                $$mainDict{_entry_size} = $size;
                # save this stream dictionary to use later if required
                $xref{dicts} = [] unless $xref{dicts};
                push @{$xref{dicts}}, $mainDict;
            } else {
                %loaded or return -9;
                $et->Warn('Invalid xref stream in secondary dictionary');
            }
        }
        $loaded{$offset} = 1;
        # load XRef stream in hybrid file if it exists
        push @xrefOffsets, $$mainDict{XRefStm}, 'XRefStm' if $$mainDict{XRefStm};
        $encrypt = $$mainDict{Encrypt} if $$mainDict{Encrypt};
        if ($$mainDict{ID} and ref $$mainDict{ID} eq 'ARRAY') {
            $id = ReadPDFValue($mainDict->{ID}->[0]);
        }
        push @mainDicts, $mainDict, $type;
        # load previous xref table if it exists
        push @xrefOffsets, $$mainDict{Prev}, 'Prev' if $$mainDict{Prev};
    }
#
# extract encryption information if necessary
#
    if ($encrypt) {
        if (ref $encrypt eq 'SCALAR') {
            $encrypt = FetchObject($et, $$encrypt, \%xref, 'Encrypt');
        }
        # generate Encryption tag information
        my $err = DecryptInit($et, $encrypt, $id);
        if ($err) {
            $et->Warn($err);
            $$capture{Error} = $err if $capture;
            return -1;
        }
    }
#
# extract the information beginning with each of the main dictionaries
#
    my $i = 0;
    my $num = (scalar @mainDicts) / 2;
    while (@mainDicts) {
        my $dict = shift @mainDicts;
        my $type = shift @mainDicts;
        if ($verbose) {
            ++$i;
            my $n = scalar(@{$$dict{_tags}});
            $et->VPrint(0, "PDF dictionary ($i of $num) with $n entries:\n");
        }
        ProcessDict($et, $tagTablePtr, $dict, \%xref, 0, $type);
    }
    # handle any decryption errors
    if ($encrypt) {
        my $err = $$encrypt{_error};
        if ($err) {
            $et->Warn($err);
            $$capture{Error} = $err if $capture;
            return -1;
        }
    }
    return 1;
}
#------------------------------------------------------------------------------
# ReadPDF() warning strings for each error return value
# (keys are the negative codes returned by ReadPDF; used by ProcessPDF)
my %pdfWarning = (
    # -1 is reserved as error return value with no associated warning
    -2 => 'Error seeking in file',
    -3 => 'Error reading file',
    -4 => 'Invalid xref table',
    -5 => 'Invalid xref offset',
    -6 => 'Error reading xref table',
    -7 => 'Error reading trailer',
    -8 => 'Error reading main dictionary',
    -9 => 'Invalid xref stream in main dictionary',
);
#------------------------------------------------------------------------------
# Extract information from PDF file
# Inputs: 0) ExifTool object reference, 1) dirInfo reference
# Returns: 1 if this was a valid PDF file
sub ProcessPDF($$)
{
    my ($et, $dirInfo) = @_;
    # reset the file-level encryption state
    # (can't delete $cryptInfo after returning because the writer may use it)
    undef $cryptInfo;
    undef $cryptStream;
    undef $cryptString;
    my $rtnVal = ReadPDF($et, $dirInfo);
    if ($rtnVal < 0) {
        # translate the negative error code into a user warning
        my $warnStr = $pdfWarning{$rtnVal};
        $et->Warn($warnStr) if $warnStr;
        $rtnVal = 1;    # (it was a valid PDF file even though we couldn't read it)
    }
    # free the caches used while parsing, and return
    undef %streamObjs;
    undef %fetched;
    return $rtnVal;
}
1; # end
__END__
=head1 NAME
Image::ExifTool::PDF - Read PDF meta information
=head1 SYNOPSIS
This module is loaded automatically by Image::ExifTool when required.
=head1 DESCRIPTION
This code reads meta information from PDF (Adobe Portable Document Format)
files. It supports object streams introduced in PDF-1.5 but only with a
limited set of Filter and Predictor algorithms, however all standard
encryption methods through PDF-1.7 extension level 3 are supported,
including AESV2 (AES-128) and AESV3 (AES-256).
=head1 AUTHOR
Copyright 2003-2020, Phil Harvey (philharvey66 at gmail.com)
This library is free software; you can redistribute it and/or modify it
under the same terms as Perl itself.
=head1 REFERENCES
=over 4
=item L<http://partners.adobe.com/public/developer/pdf/index_reference.html>
=item L<Crypt::RC4|Crypt::RC4>
=item L<http://www.adobe.com/devnet/acrobat/pdfs/PDF32000_2008.pdf>
=item L<http://www.adobe.com/content/dam/Adobe/en/devnet/pdf/pdfs/adobe_supplement_iso32000.pdf>
=item L<http://tools.ietf.org/search/rfc3454>
=item L<http://www.armware.dk/RFC/rfc/rfc4013.html>
=back
=head1 SEE ALSO
L<Image::ExifTool::TagNames/PDF Tags>,
L<Image::ExifTool(3pm)|Image::ExifTool>
=cut
| mkjanke/Focus-Points | focuspoints.lrdevplugin/bin/exiftool/lib/Image/ExifTool/PDF.pm | Perl | apache-2.0 | 92,260 |
# Response class for the Paws::RDS ModifyDBSubnetGroup API call.
package Paws::RDS::ModifyDBSubnetGroupResult;
use Moose;
# the modified DB subnet group description returned by the RDS service
has DBSubnetGroup => (is => 'ro', isa => 'Paws::RDS::DBSubnetGroup');
# AWS request ID associated with this response (internal; useful for support)
has _request_id => (is => 'ro', isa => 'Str');
1;
### main pod documentation begin ###
=head1 NAME
Paws::RDS::ModifyDBSubnetGroupResult
=head1 ATTRIBUTES
=head2 DBSubnetGroup => L<Paws::RDS::DBSubnetGroup>
=head2 _request_id => Str
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/RDS/ModifyDBSubnetGroupResult.pm | Perl | apache-2.0 | 385 |
# Machine-generated by SOAP::WSDL from the AdWords v201409 WSDL -- prefer
# regenerating over editing by hand (see POD below).
package Google::Ads::AdWords::v201409::AdGroupAdLabelOperation;
use strict;
use warnings;
__PACKAGE__->_set_element_form_qualified(1);
# XML namespace of this complexType
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201409' };
our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;
# this complexType carries no XML attributes, so no attribute class is defined
sub __get_attr_class {
    return $XML_ATTRIBUTE_CLASS;
}
use base qw(Google::Ads::AdWords::v201409::Operation);
# Variety: sequence
use Class::Std::Fast::Storable constructor => 'none';
use base qw(Google::Ads::SOAP::Typelib::ComplexType);
{ # BLOCK to scope variables
# inside-out attribute storage (Class::Std::Fast), one hash per element
my %operator_of :ATTR(:get<operator>);
my %Operation__Type_of :ATTR(:get<Operation__Type>);
my %operand_of :ATTR(:get<operand>);
# register element order, storage, Perl types and XML names with the typelib
__PACKAGE__->_factory(
    [ qw( operator
        Operation__Type
        operand
    ) ],
    {
        'operator' => \%operator_of,
        'Operation__Type' => \%Operation__Type_of,
        'operand' => \%operand_of,
    },
    {
        'operator' => 'Google::Ads::AdWords::v201409::Operator',
        'Operation__Type' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
        'operand' => 'Google::Ads::AdWords::v201409::AdGroupAdLabel',
    },
    {
        'operator' => 'operator',
        'Operation__Type' => 'Operation.Type',
        'operand' => 'operand',
    }
);
} # end BLOCK
1;
=pod
=head1 NAME
Google::Ads::AdWords::v201409::AdGroupAdLabelOperation
=head1 DESCRIPTION
Perl data type class for the XML Schema defined complexType
AdGroupAdLabelOperation from the namespace https://adwords.google.com/api/adwords/cm/v201409.
Operations for adding/removing labels from AdGroupAds.
=head2 PROPERTIES
The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:
=over
=item * operand
=back
=head1 METHODS
=head2 new
Constructor. The following data structure may be passed to new():
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| gitpan/GOOGLE-ADWORDS-PERL-CLIENT | lib/Google/Ads/AdWords/v201409/AdGroupAdLabelOperation.pm | Perl | apache-2.0 | 1,877 |
# Class representing DLV (DNSSEC Lookaside Validation) resource records.
package Zonemaster::LDNS::RR::DLV;
use strict;
use warnings;
# inherits all generic RR behavior; no DLV-specific RDATA accessors yet (see POD)
use parent 'Zonemaster::LDNS::RR';
1;
=head1 NAME
Zonemaster::LDNS::RR::DLV - Type DLV record
=head1 DESCRIPTION
A subclass of L<Zonemaster::LDNS::RR>, so it has all the methods of that class available in addition to the ones documented here.
=head1 METHODS
No RDATA methods implemented yet.
=cut
| dotse/net-ldns | lib/Zonemaster/LDNS/RR/DLV.pm | Perl | bsd-2-clause | 368 |
# !!!!!!! DO NOT EDIT THIS FILE !!!!!!!
# This file is machine-generated by lib/unicore/mktables from the Unicode
# database, Version 6.2.0.  Any changes made here will be lost!
# !!!!!!! INTERNAL PERL USE ONLY !!!!!!!
# This file is for internal use by core Perl only.  The format and even the
# name or existence of this file are subject to change without notice.  Don't
# use it directly.
# The returned heredoc lists code-point ranges (hexadecimal, inclusive; a
# line with a single value is a one-code-point range) whose characters have
# the Script_Extensions property value 'Arab'.
return <<'END';
0600 0604
0606 061B
061E 06DC
06DE 06FF
0750 077F
08A0
08A2 08AC
08E4 08FE
FB50 FBC1
FBD3 FD3D
FD50 FD8F
FD92 FDC7
FDF0 FDFD
FE70 FE74
FE76 FEFC
10E60 10E7E
1EE00 1EE03
1EE05 1EE1F
1EE21 1EE22
1EE24
1EE27
1EE29 1EE32
1EE34 1EE37
1EE39
1EE3B
1EE42
1EE47
1EE49
1EE4B
1EE4D 1EE4F
1EE51 1EE52
1EE54
1EE57
1EE59
1EE5B
1EE5D
1EE5F
1EE61 1EE62
1EE64
1EE67 1EE6A
1EE6C 1EE72
1EE74 1EE77
1EE79 1EE7C
1EE7E
1EE80 1EE89
1EE8B 1EE9B
1EEA1 1EEA3
1EEA5 1EEA9
1EEAB 1EEBB
1EEF0 1EEF1
END
| Bjay1435/capstone | rootfs/usr/share/perl/5.18.2/unicore/lib/Scx/Arab.pl | Perl | mit | 926 |
package Moose::Object;
BEGIN {
$Moose::Object::AUTHORITY = 'cpan:STEVAN';
}
{
$Moose::Object::VERSION = '2.0602';
}
use strict;
use warnings;
use Carp ();
use Devel::GlobalDestruction ();
use MRO::Compat ();
use Scalar::Util ();
use Try::Tiny ();
use if ( not our $__mx_is_compiled ), 'Moose::Meta::Class';
use if ( not our $__mx_is_compiled ), metaclass => 'Moose::Meta::Class';
sub new {
    my $invocant = shift;
    # allow new() to be called on an instance as well as on a class name
    my $class_name = Scalar::Util::blessed($invocant) || $invocant;
    # let the class massage the constructor arguments into a hash ref
    my $args = $class_name->BUILDARGS(@_);
    my $meta = Class::MOP::Class->initialize($class_name);
    return $meta->new_object($args);
}
sub BUILDARGS {
    my $class = shift;
    # a single argument must be a hash reference, which we shallow-copy
    if ( scalar @_ == 1 ) {
        unless ( defined $_[0] && ref $_[0] eq 'HASH' ) {
            Class::MOP::class_of($class)->throw_error(
                "Single parameters to new() must be a HASH ref",
                data => $_[0] );
        }
        return { %{ $_[0] } };
    }
    # an odd-length key/value list is almost certainly a caller bug
    if ( @_ % 2 ) {
        Carp::carp(
            "The new() method for $class expects a hash reference or a key/value list."
            . " You passed an odd number of arguments" );
        return { @_, undef };
    }
    # otherwise the arguments are a plain key/value list
    return {@_};
}
sub BUILDALL {
    # bail out before any meta-level work if no class in the
    # hierarchy defines a BUILD method
    return unless $_[0]->can('BUILD');
    my ($self, $params) = @_;
    # run BUILD methods from the most basal class down to the most derived
    my @build_methods = reverse Class::MOP::class_of($self)->find_all_methods_by_name('BUILD');
    foreach my $build (@build_methods) {
        $build->{code}->execute($self, $params);
    }
}
sub DEMOLISHALL {
    my $self = shift;
    # flag telling DEMOLISH methods whether we are in global destruction
    my ($in_global_destruction) = @_;
    # NOTE: we ask Perl if we even
    # need to do this first, to avoid
    # extra meta level calls
    return unless $self->can('DEMOLISH');
    my @isa;
    if ( my $meta = Class::MOP::class_of($self ) ) {
        @isa = $meta->linearized_isa;
    } else {
        # We cannot count on being able to retrieve a previously made
        # metaclass, _or_ being able to make a new one during global
        # destruction. However, we should still be able to use mro at
        # that time (at least tests suggest so ;)
        my $class_name = ref $self;
        @isa = @{ mro::get_linear_isa($class_name) }
    }
    foreach my $class (@isa) {
        no strict 'refs';
        # look up DEMOLISH directly in each package's symbol table so that
        # every class in the hierarchy gets its own DEMOLISH called exactly once
        my $demolish = *{"${class}::DEMOLISH"}{CODE};
        $self->$demolish($in_global_destruction)
            if defined $demolish;
    }
}
sub DESTROY {
    my $self = shift;
    # localize $? so destructor work cannot clobber the program's exit status
    local $?;
    Try::Tiny::try {
        $self->DEMOLISHALL(Devel::GlobalDestruction::in_global_destruction);
    }
    Try::Tiny::catch {
        # rethrow so errors raised by DEMOLISH methods are not swallowed
        die $_;
    };
    return;
}
# support for UNIVERSAL::DOES ...
BEGIN {
    # delegate to the real UNIVERSAL::DOES when this perl provides one
    # (5.10+), otherwise fall back to isa(); in either case the generated
    # DOES() also consults Moose roles via does()
    my $does = UNIVERSAL->can("DOES") ? "SUPER::DOES" : "isa";
    # (string eval so the chosen parent method is baked into the sub's source)
    eval 'sub DOES {
        my ( $self, $class_or_role_name ) = @_;
        return $self->'.$does.'($class_or_role_name)
            || $self->does($class_or_role_name);
    }';
}
# new does() methods will be created
# as appropriate; see Moose::Meta::Role
sub does {
    my ($self, $role_name) = @_;
    # works as both an instance and a class method
    my $class = Scalar::Util::blessed($self) || $self;
    my $meta = Class::MOP::Class->initialize($class);
    defined $role_name
        or $meta->throw_error("You must supply a role name to does()");
    # a metaclass without does_role() cannot consume roles at all
    return ($meta->can('does_role') && $meta->does_role($role_name)) ? 1 : 0;
}
sub dump {
    my $obj = shift;
    require Data::Dumper;
    # an optional argument limits how deeply the dump recurses
    local $Data::Dumper::Maxdepth = shift if @_;
    return Data::Dumper::Dumper($obj);
}
1;
# ABSTRACT: The base object for Moose
=pod
=head1 NAME
Moose::Object - The base object for Moose
=head1 VERSION
version 2.0602
=head1 DESCRIPTION
This class is the default base class for all Moose-using classes. When
you C<use Moose> in this class, your class will inherit from this
class.
It provides a default constructor and destructor, which run all of the
C<BUILD> and C<DEMOLISH> methods in the inheritance hierarchy,
respectively.
You don't actually I<need> to inherit from this in order to use Moose,
but it makes it easier to take advantage of all of Moose's features.
=head1 METHODS
=over 4
=item B<< Moose::Object->new(%params|$params) >>
This method calls C<< $class->BUILDARGS(@_) >>, and then creates a new
instance of the appropriate class. Once the instance is created, it
calls C<< $instance->BUILD($params) >> for each C<BUILD> method in the
inheritance hierarchy.
=item B<< Moose::Object->BUILDARGS(%params|$params) >>
The default implementation of this method accepts a hash or hash
reference of named parameters. If it receives a single argument that
I<isn't> a hash reference it throws an error.
You can override this method in your class to handle other types of
options passed to the constructor.
This method should always return a hash reference of named options.
=item B<< $object->does($role_name) >>
This returns true if the object does the given role.
=item B<< $object->DOES($class_or_role_name) >>
This is a Moose role-aware implementation of L<UNIVERSAL/DOES>.
This is effectively the same as writing:
$object->does($name) || $object->isa($name)
This method will work with Perl 5.8, which did not implement
C<UNIVERSAL::DOES>.
=item B<< $object->dump($maxdepth) >>
This is a handy utility for C<Data::Dumper>ing an object. By default,
the maximum depth is 1, to avoid making a mess.
=item B<< $object->DESTROY >>
A default destructor is provided, which calls
C<< $instance->DEMOLISH($in_global_destruction) >> for each C<DEMOLISH>
method in the inheritance hierarchy.
=back
=head1 BUGS
See L<Moose/BUGS> for details on reporting bugs.
=head1 AUTHOR
Moose is maintained by the Moose Cabal, along with the help of many contributors. See L<Moose/CABAL> and L<Moose/CONTRIBUTORS> for details.
=head1 COPYRIGHT AND LICENSE
This software is copyright (c) 2012 by Infinity Interactive, Inc..
This is free software; you can redistribute it and/or modify it under
the same terms as the Perl 5 programming language system itself.
=cut
__END__
| leighpauls/k2cro4 | third_party/perl/perl/vendor/lib/Moose/Object.pm | Perl | bsd-3-clause | 6,025 |
package Module::Build;
# This module doesn't do much of anything itself, it inherits from the
# modules that do the real work. The only real thing it has to do is
# figure out which OS-specific module to pull in. Many of the
# OS-specific modules don't do anything either - most of the work is
# done in Module::Build::Base.
use strict;
use File::Spec ();
use File::Path ();
use File::Basename ();
use Perl::OSType ();
use Module::Build::Base;
use vars qw($VERSION @ISA);
# All real work is inherited from Module::Build::Base (possibly via an
# OS-specific platform class interposed below).
@ISA = qw(Module::Build::Base);
$VERSION = '0.39_01';
# The underscore marks a developer release; eval turns the string into a
# plain number so version comparisons work.
$VERSION = eval $VERSION;
# Inserts the given module into the @ISA hierarchy between
# Module::Build and its immediate parent
sub _interpose_module {
my ($self, $mod) = @_;
# String eval is unavoidable here: $mod is a module name only known at
# run time.  Re-throw any load failure.
eval "use $mod";
die $@ if $@;
no strict 'refs';
# Walk up $mod's own inheritance chain until we reach the class whose
# immediate parent is Module::Build's current parent ($ISA[0], i.e.
# Module::Build::Base).
my $top_class = $mod;
while (@{"${top_class}::ISA"}) {
last if ${"${top_class}::ISA"}[0] eq $ISA[0];
$top_class = ${"${top_class}::ISA"}[0];
}
# Splice the chain: the top of $mod's hierarchy now inherits what
# Module::Build used to, and Module::Build now inherits from $mod.
@{"${top_class}::ISA"} = @ISA;
@ISA = ($mod);
}
# Choose the OS-specific platform subclass at load time.  Prefer an
# exact match on $^O when a Module::Build::Platform::<osname> module
# exists anywhere in @INC; otherwise fall back to the generic OS family
# reported by Perl::OSType (e.g. 'Unix', 'Windows', 'VMS'); otherwise
# warn and run with the default (Module::Build::Base) behaviour.
if (grep {-e File::Spec->catfile($_, qw(Module Build Platform), $^O) . '.pm'} @INC) {
__PACKAGE__->_interpose_module("Module::Build::Platform::$^O");
} elsif ( my $ostype = os_type() ) {
__PACKAGE__->_interpose_module("Module::Build::Platform::$ostype");
} else {
warn "Unknown OS type '$^O' - using default settings\n";
}
# Generic OS family (e.g. 'Unix', 'Windows') for the current $^O.
sub os_type { Perl::OSType::os_type() }
# True when the current OS belongs to the VMS family.
sub is_vmsish { Perl::OSType::is_os_type('VMS') }
# True when the current OS belongs to the Windows family.
sub is_windowsish { Perl::OSType::is_os_type('Windows') }
# True when the current OS belongs to the Unix family.
sub is_unixish { Perl::OSType::is_os_type('Unix') }
1;
__END__
=for :stopwords
bindoc binhtml destdir distcheck distclean distdir distmeta distsign disttest
fakeinstall html installdirs installsitebin installsitescript installvendorbin
installvendorscript libdoc libhtml pardist ppd ppmdist realclean skipcheck
testall testcover testdb testpod testpodcoverage versioninstall
=head1 NAME
Module::Build - Build and install Perl modules
=head1 SYNOPSIS
Standard process for building & installing modules:
perl Build.PL
./Build
./Build test
./Build install
Or, if you're on a platform (like DOS or Windows) that doesn't require
the "./" notation, you can do this:
perl Build.PL
Build
Build test
Build install
=head1 DESCRIPTION
C<Module::Build> is a system for building, testing, and installing
Perl modules. It is meant to be an alternative to
C<ExtUtils::MakeMaker>. Developers may alter the behavior of the
module through subclassing in a much more straightforward way than
with C<MakeMaker>. It also does not require a C<make> on your system
- most of the C<Module::Build> code is pure-perl and written in a very
cross-platform way. In fact, you don't even need a shell, so even
platforms like MacOS (traditional) can use it fairly easily. Its only
prerequisites are modules that are included with perl 5.6.0, and it
works fine on perl 5.005 if you can install a few additional modules.
See L<"MOTIVATIONS"> for more comparisons between C<ExtUtils::MakeMaker>
and C<Module::Build>.
To install C<Module::Build>, and any other module that uses
C<Module::Build> for its installation process, do the following:
perl Build.PL # 'Build.PL' script creates the 'Build' script
./Build # Need ./ to ensure we're using this "Build" script
./Build test # and not another one that happens to be in the PATH
./Build install
This illustrates initial configuration and the running of three
'actions'. In this case the actions run are 'build' (the default
action), 'test', and 'install'. Other actions defined so far include:
build manifest
clean manifest_skip
code manpages
config_data pardist
diff ppd
dist ppmdist
distcheck prereq_data
distclean prereq_report
distdir pure_install
distinstall realclean
distmeta retest
distsign skipcheck
disttest test
docs testall
fakeinstall testcover
help testdb
html testpod
install testpodcoverage
installdeps versioninstall
You can run the 'help' action for a complete list of actions.
=head1 GUIDE TO DOCUMENTATION
The documentation for C<Module::Build> is broken up into sections:
=over
=item General Usage (L<Module::Build>)
This is the document you are currently reading. It describes basic
usage and background information. Its main purpose is to assist the
user who wants to learn how to invoke and control C<Module::Build>
scripts at the command line.
=item Authoring Reference (L<Module::Build::Authoring>)
This document describes the structure and organization of
C<Module::Build>, and the relevant concepts needed by authors who are
writing F<Build.PL> scripts for a distribution or controlling
C<Module::Build> processes programmatically.
=item API Reference (L<Module::Build::API>)
This is a reference to the C<Module::Build> API.
=item Cookbook (L<Module::Build::Cookbook>)
This document demonstrates how to accomplish many common tasks. It
covers general command line usage and authoring of F<Build.PL>
scripts. Includes working examples.
=back
=head1 ACTIONS
There are some general principles at work here. First, each task when
building a module is called an "action". These actions are listed
above; they correspond to the building, testing, installing,
packaging, etc., tasks.
Second, arguments are processed in a very systematic way. Arguments
are always key=value pairs. They may be specified at C<perl Build.PL>
time (i.e. C<perl Build.PL destdir=/my/secret/place>), in which case
their values last for the lifetime of the C<Build> script. They may
also be specified when executing a particular action (i.e.
C<Build test verbose=1>), in which case their values last only for the
lifetime of that command. Per-action command line parameters take
precedence over parameters specified at C<perl Build.PL> time.
The build process also relies heavily on the C<Config.pm> module.
If the user wishes to override any of the
values in C<Config.pm>, she may specify them like so:
perl Build.PL --config cc=gcc --config ld=gcc
The following build actions are provided by default.
=over 4
=item build
[version 0.01]
If you run the C<Build> script without any arguments, it runs the
C<build> action, which in turn runs the C<code> and C<docs> actions.
This is analogous to the C<MakeMaker> I<make all> target.
=item clean
[version 0.01]
This action will clean up any files that the build process may have
created, including the C<blib/> directory (but not including the
C<_build/> directory and the C<Build> script itself).
=item code
[version 0.20]
This action builds your code base.
By default it just creates a C<blib/> directory and copies any C<.pm>
and C<.pod> files from your C<lib/> directory into the C<blib/>
directory. It also compiles any C<.xs> files from C<lib/> and places
them in C<blib/>. Of course, you need a working C compiler (probably
the same one that built perl itself) for the compilation to work
properly.
The C<code> action also runs any C<.PL> files in your F<lib/>
directory. Typically these create other files, named the same but
without the C<.PL> ending. For example, a file F<lib/Foo/Bar.pm.PL>
could create the file F<lib/Foo/Bar.pm>. The C<.PL> files are
processed first, so any C<.pm> files (or other kinds that we deal
with) will get copied correctly.
=item config_data
[version 0.26]
...
=item diff
[version 0.14]
This action will compare the files about to be installed with their
installed counterparts. For .pm and .pod files, a diff will be shown
(this currently requires a 'diff' program to be in your PATH). For
other files like compiled binary files, we simply report whether they
differ.
A C<flags> parameter may be passed to the action, which will be passed
to the 'diff' program. Consult your 'diff' documentation for the
parameters it will accept - a good one is C<-u>:
./Build diff flags=-u
=item dist
[version 0.02]
This action is helpful for module authors who want to package up their
module for source distribution through a medium like CPAN. It will create a
tarball of the files listed in F<MANIFEST> and compress the tarball using
GZIP compression.
By default, this action will use the C<Archive::Tar> module. However, you can
force it to use binary "tar" and "gzip" executables by supplying an explicit
C<tar> (and optional C<gzip>) parameter:
./Build dist --tar C:\path\to\tar.exe --gzip C:\path\to\zip.exe
=item distcheck
[version 0.05]
Reports which files are in the build directory but not in the
F<MANIFEST> file, and vice versa. (See L<manifest> for details.)
=item distclean
[version 0.05]
Performs the 'realclean' action and then the 'distcheck' action.
=item distdir
[version 0.05]
Creates a "distribution directory" named C<$dist_name-$dist_version>
(if that directory already exists, it will be removed first), then
copies all the files listed in the F<MANIFEST> file to that directory.
This directory is what the distribution tarball is created from.
=item distinstall
[version 0.37]
Performs the 'distdir' action, then switches into that directory and runs a
C<perl Build.PL>, followed by the 'build' and 'install' actions in that
directory. Use PERL_MB_OPT or F<.modulebuildrc> to set options that should be
applied during subprocesses
=item distmeta
[version 0.21]
Creates the F<META.yml> file that describes the distribution.
F<META.yml> is a file containing various bits of I<metadata> about the
distribution. The metadata includes the distribution name, version,
abstract, prerequisites, license, and various other data about the
distribution. This file is created as F<META.yml> in a simplified YAML format.
F<META.yml> file must also be listed in F<MANIFEST> - if it's not, a
warning will be issued.
The current version of the F<META.yml> specification can be found
on CPAN as L<CPAN::Meta::Spec>.
=item distsign
[version 0.16]
Uses C<Module::Signature> to create a SIGNATURE file for your
distribution, and adds the SIGNATURE file to the distribution's
MANIFEST.
=item disttest
[version 0.05]
Performs the 'distdir' action, then switches into that directory and runs a
C<perl Build.PL>, followed by the 'build' and 'test' actions in that directory.
Use PERL_MB_OPT or F<.modulebuildrc> to set options that should be applied
during subprocesses
=item docs
[version 0.20]
This will generate documentation (e.g. Unix man pages and HTML
documents) for any installable items under B<blib/> that
contain POD. If there are no C<bindoc> or C<libdoc> installation
targets defined (as will be the case on systems that don't support
Unix manpages) no action is taken for manpages. If there are no
C<binhtml> or C<libhtml> installation targets defined no action is
taken for HTML documents.
=item fakeinstall
[version 0.02]
This is just like the C<install> action, but it won't actually do
anything, it will just report what it I<would> have done if you had
actually run the C<install> action.
=item help
[version 0.03]
This action will simply print out a message that is meant to help you
use the build process. It will show you a list of available build
actions too.
With an optional argument specifying an action name (e.g. C<Build help
test>), the 'help' action will show you any POD documentation it can
find for that action.
=item html
[version 0.26]
This will generate HTML documentation for any binary or library files
under B<blib/> that contain POD. The HTML documentation will only be
installed if the install paths can be determined from values in
C<Config.pm>. You can also supply or override install paths on the
command line by specifying C<install_path> values for the C<binhtml>
and/or C<libhtml> installation targets.
=item install
[version 0.01]
This action will use C<ExtUtils::Install> to install the files from
C<blib/> into the system. See L<"INSTALL PATHS">
for details about how Module::Build determines where to install
things, and how to influence this process.
If you want the installation process to look around in C<@INC> for
other versions of the stuff you're installing and try to delete it,
you can use the C<uninst> parameter, which tells C<ExtUtils::Install> to
do so:
./Build install uninst=1
This can be a good idea, as it helps prevent multiple versions of a
module from being present on your system, which can be a confusing
situation indeed.
=item installdeps
[version 0.36]
This action will use the C<cpan_client> parameter as a command to install
missing prerequisites. You will be prompted whether to install
optional dependencies.
The C<cpan_client> option defaults to 'cpan' but can be set as an option or in
F<.modulebuildrc>. It must be a shell command that takes a list of modules to
install as arguments (e.g. 'cpanp -i' for CPANPLUS). If the program part is a
relative path (e.g. 'cpan' or 'cpanp'), it will be located relative to the perl
program that executed Build.PL.
/opt/perl/5.8.9/bin/perl Build.PL
./Build installdeps --cpan_client 'cpanp -i'
# installs to 5.8.9
=item manifest
[version 0.05]
This is an action intended for use by module authors, not people
installing modules. It will bring the F<MANIFEST> up to date with the
files currently present in the distribution. You may use a
F<MANIFEST.SKIP> file to exclude certain files or directories from
inclusion in the F<MANIFEST>. F<MANIFEST.SKIP> should contain a bunch
of regular expressions, one per line. If a file in the distribution
directory matches any of the regular expressions, it won't be included
in the F<MANIFEST>.
The following is a reasonable F<MANIFEST.SKIP> starting point, you can
add your own stuff to it:
^_build
^Build$
^blib
~$
\.bak$
^MANIFEST\.SKIP$
CVS
See the L<distcheck> and L<skipcheck> actions if you want to find out
what the C<manifest> action would do, without actually doing anything.
=item manifest_skip
[version 0.3608]
This is an action intended for use by module authors, not people
installing modules. It will generate a boilerplate MANIFEST.SKIP file
if one does not already exist.
=item manpages
[version 0.28]
This will generate man pages for any binary or library files under
B<blib/> that contain POD. The man pages will only be installed if the
install paths can be determined from values in C<Config.pm>. You can
also supply or override install paths by specifying their values on
the command line with the C<bindoc> and C<libdoc> installation
targets.
=item pardist
[version 0.2806]
Generates a PAR binary distribution for use with L<PAR> or L<PAR::Dist>.
It requires that the PAR::Dist module (version 0.17 and up) is
installed on your system.
=item ppd
[version 0.20]
Build a PPD file for your distribution.
This action takes an optional argument C<codebase> which is used in
the generated PPD file to specify the (usually relative) URL of the
distribution. By default, this value is the distribution name without
any path information.
Example:
./Build ppd --codebase "MSWin32-x86-multi-thread/Module-Build-0.21.tar.gz"
=item ppmdist
[version 0.23]
Generates a PPM binary distribution and a PPD description file. This
action also invokes the C<ppd> action, so it can accept the same
C<codebase> argument described under that action.
This uses the same mechanism as the C<dist> action to tar & zip its
output, so you can supply C<tar> and/or C<gzip> parameters to affect
the result.
=item prereq_data
[version 0.32]
This action prints out a Perl data structure of all prerequisites and the versions
required. The output can be loaded again using C<eval()>. This can be useful for
external tools that wish to query a Build script for prerequisites.
=item prereq_report
[version 0.28]
This action prints out a list of all prerequisites, the versions required, and
the versions actually installed. This can be useful for reviewing the
configuration of your system prior to a build, or when compiling data to send
for a bug report.
=item pure_install
[version 0.28]
This action is identical to the C<install> action. In the future,
though, when C<install> starts writing to the file
F<$(INSTALLARCHLIB)/perllocal.pod>, C<pure_install> won't, and that
will be the only difference between them.
=item realclean
[version 0.01]
This action is just like the C<clean> action, but also removes the
C<_build> directory and the C<Build> script. If you run the
C<realclean> action, you are essentially starting over, so you will
have to re-create the C<Build> script again.
=item retest
[version 0.2806]
This is just like the C<test> action, but doesn't actually build the
distribution first, and doesn't add F<blib/> to the load path, and
therefore will test against a I<previously> installed version of the
distribution. This can be used to verify that a certain installed
distribution still works, or to see whether newer versions of a
distribution still pass the old regression tests, and so on.
=item skipcheck
[version 0.05]
Reports which files are skipped due to the entries in the
F<MANIFEST.SKIP> file (See L<manifest> for details)
=item test
[version 0.01]
This will use C<Test::Harness> or C<TAP::Harness> to run any regression
tests and report their results. Tests can be defined in the standard
places: a file called C<test.pl> in the top-level directory, or several
files ending with C<.t> in a C<t/> directory.
If you want tests to be 'verbose', i.e. show details of test execution
rather than just summary information, pass the argument C<verbose=1>.
If you want to run tests under the perl debugger, pass the argument
C<debugger=1>.
If you want to have Module::Build find test files with different file
name extensions, pass the C<test_file_exts> argument with an array
of extensions, such as C<[qw( .t .s .z )]>.
If you want test to be run by C<TAP::Harness>, rather than C<Test::Harness>,
pass the argument C<tap_harness_args> as an array reference of arguments to
pass to the TAP::Harness constructor.
In addition, if a file called C<visual.pl> exists in the top-level
directory, this file will be executed as a Perl script and its output
will be shown to the user. This is a good place to put speed tests or
other tests that don't use the C<Test::Harness> format for output.
To override the choice of tests to run, you may pass a C<test_files>
argument whose value is a whitespace-separated list of test scripts to
run. This is especially useful in development, when you only want to
run a single test to see whether you've squashed a certain bug yet:
./Build test --test_files t/something_failing.t
You may also pass several C<test_files> arguments separately:
./Build test --test_files t/one.t --test_files t/two.t
or use a C<glob()>-style pattern:
./Build test --test_files 't/01-*.t'
=item testall
[version 0.2807]
[Note: the 'testall' action and the code snippets below are currently
in alpha stage, see
L<"http://www.nntp.perl.org/group/perl.module.build/2007/03/msg584.html"> ]
Runs the C<test> action plus each of the C<test$type> actions defined by
the keys of the C<test_types> parameter.
Currently, you need to define the ACTION_test$type method yourself and
enumerate them in the test_types parameter.
my $mb = Module::Build->subclass(
code => q(
sub ACTION_testspecial { shift->generic_test(type => 'special'); }
sub ACTION_testauthor { shift->generic_test(type => 'author'); }
)
)->new(
...
test_types => {
special => '.st',
author => ['.at', '.pt' ],
},
...
=item testcover
[version 0.26]
Runs the C<test> action using C<Devel::Cover>, generating a
code-coverage report showing which parts of the code were actually
exercised during the tests.
To pass options to C<Devel::Cover>, set the C<$DEVEL_COVER_OPTIONS>
environment variable:
DEVEL_COVER_OPTIONS=-ignore,Build ./Build testcover
=item testdb
[version 0.05]
This is a synonym for the 'test' action with the C<debugger=1>
argument.
=item testpod
[version 0.25]
This checks all the files described in the C<docs> action and
produces C<Test::Harness>-style output. If you are a module author,
this is useful to run before creating a new release.
=item testpodcoverage
[version 0.28]
This checks the pod coverage of the distribution and
produces C<Test::Harness>-style output. If you are a module author,
this is useful to run before creating a new release.
=item versioninstall
[version 0.16]
** Note: since C<only.pm> is so new, and since we just recently added
support for it here too, this feature is to be considered
experimental. **
If you have the C<only.pm> module installed on your system, you can
use this action to install a module into the version-specific library
trees. This means that you can have several versions of the same
module installed and C<use> a specific one like this:
use only MyModule => 0.55;
To override the default installation libraries in C<only::config>,
specify the C<versionlib> parameter when you run the C<Build.PL> script:
perl Build.PL --versionlib /my/version/place/
To override which version the module is installed as, specify the
C<version> parameter when you run the C<Build.PL> script:
perl Build.PL --version 0.50
See the C<only.pm> documentation for more information on
version-specific installs.
=back
=head1 OPTIONS
=head2 Command Line Options
The following options can be used during any invocation of C<Build.PL>
or the Build script, during any action. For information on other
options specific to an action, see the documentation for the
respective action.
NOTE: There is some preliminary support for options to use the more
familiar long option style. Most options can be preceded with the
C<--> long option prefix, and the underscores changed to dashes
(e.g. C<--use-rcfile>). Additionally, the argument to boolean options is
optional, and boolean options can be negated by prefixing them with
C<no> or C<no-> (e.g. C<--noverbose> or C<--no-verbose>).
=over 4
=item quiet
Suppress informative messages on output.
=item verbose
Display extra information about the Build on output. C<verbose> will
turn off C<quiet>
=item cpan_client
Sets the C<cpan_client> command for use with the C<installdeps> action.
See C<installdeps> for more details.
=item use_rcfile
Load the F<~/.modulebuildrc> option file. This option can be set to
false to prevent the custom resource file from being loaded.
=item allow_mb_mismatch
Suppresses the check upon startup that the version of Module::Build
we're now running under is the same version that was initially invoked
when building the distribution (i.e. when the C<Build.PL> script was
first run). As of 0.3601, a mismatch results in a warning instead of
a fatal error, so this option effectively just suppresses the warning.
=item debug
Prints Module::Build debugging information to STDOUT, such as a trace of
executed build actions.
=back
=head2 Default Options File (F<.modulebuildrc>)
[version 0.28]
When Module::Build starts up, it will look first for a file,
F<$ENV{HOME}/.modulebuildrc>. If it's not found there, it will look
in the F<.modulebuildrc> file in the directories referred to by
the environment variables C<HOMEDRIVE> + C<HOMEDIR>, C<USERPROFILE>,
C<APPDATA>, C<WINDIR>, C<SYS$LOGIN>. If the file exists, the options
specified there will be used as defaults, as if they were typed on the
command line. The defaults can be overridden by specifying new values
on the command line.
The action name must come at the beginning of the line, followed by any
amount of whitespace and then the options. Options are given the same
as they would be on the command line. They can be separated by any
amount of whitespace, including newlines, as long there is whitespace at
the beginning of each continued line. Anything following a hash mark (C<#>)
is considered a comment, and is stripped before parsing. If more than
one line begins with the same action name, those lines are merged into
one set of options.
Besides the regular actions, there are two special pseudo-actions: the
key C<*> (asterisk) denotes any global options that should be applied
to all actions, and the key 'Build_PL' specifies options to be applied
when you invoke C<perl Build.PL>.
* verbose=1 # global options
diff flags=-u
install --install_base /home/ken
--install_path html=/home/ken/docs/html
installdeps --cpan_client 'cpanp -i'
If you wish to locate your resource file in a different location, you
can set the environment variable C<MODULEBUILDRC> to the complete
absolute path of the file containing your options.
=head2 Environment variables
=over
=item MODULEBUILDRC
[version 0.28]
Specifies an alternate location for a default options file as described above.
=item PERL_MB_OPT
[version 0.36]
Command line options that are applied to Build.PL or any Build action. The
string is split as the shell would (e.g. whitespace) and the result is
prepended to any actual command-line arguments.
=back
=head1 INSTALL PATHS
[version 0.19]
When you invoke Module::Build's C<build> action, it needs to figure
out where to install things. The nutshell version of how this works
is that default installation locations are determined from
F<Config.pm>, and they may be overridden by using the C<install_path>
parameter. An C<install_base> parameter lets you specify an
alternative installation root like F</home/foo>, and a C<destdir> lets
you specify a temporary installation directory like F</tmp/install> in
case you want to create bundled-up installable packages.
Natively, Module::Build provides default installation locations for
the following types of installable items:
=over 4
=item lib
Usually pure-Perl module files ending in F<.pm>.
=item arch
"Architecture-dependent" module files, usually produced by compiling
XS, L<Inline>, or similar code.
=item script
Programs written in pure Perl. In order to improve reuse, try to make
these as small as possible - put the code into modules whenever
possible.
=item bin
"Architecture-dependent" executable programs, i.e. compiled C code or
something. Pretty rare to see this in a perl distribution, but it
happens.
=item bindoc
Documentation for the stuff in C<script> and C<bin>. Usually
generated from the POD in those files. Under Unix, these are manual
pages belonging to the 'man1' category.
=item libdoc
Documentation for the stuff in C<lib> and C<arch>. This is usually
generated from the POD in F<.pm> files. Under Unix, these are manual
pages belonging to the 'man3' category.
=item binhtml
This is the same as C<bindoc> above, but applies to HTML documents.
=item libhtml
This is the same as C<libdoc> above, but applies to HTML documents.
=back
Four other parameters let you control various aspects of how
installation paths are determined:
=over 4
=item installdirs
The default destinations for these installable things come from
entries in your system's C<Config.pm>. You can select from three
different sets of default locations by setting the C<installdirs>
parameter as follows:
'installdirs' set to:
core site vendor
uses the following defaults from Config.pm:
lib => installprivlib installsitelib installvendorlib
arch => installarchlib installsitearch installvendorarch
script => installscript installsitebin installvendorbin
bin => installbin installsitebin installvendorbin
bindoc => installman1dir installsiteman1dir installvendorman1dir
libdoc => installman3dir installsiteman3dir installvendorman3dir
binhtml => installhtml1dir installsitehtml1dir installvendorhtml1dir [*]
libhtml => installhtml3dir installsitehtml3dir installvendorhtml3dir [*]
* Under some OS (eg. MSWin32) the destination for HTML documents is
determined by the C<Config.pm> entry C<installhtmldir>.
The default value of C<installdirs> is "site". If you're creating
vendor distributions of module packages, you may want to do something
like this:
perl Build.PL --installdirs vendor
or
./Build install --installdirs vendor
If you're installing an updated version of a module that was included
with perl itself (i.e. a "core module"), then you may set
C<installdirs> to "core" to overwrite the module in its present
location.
(Note that the 'script' line is different from C<MakeMaker> -
unfortunately there's no such thing as "installsitescript" or
"installvendorscript" entry in C<Config.pm>, so we use the
"installsitebin" and "installvendorbin" entries to at least get the
general location right. In the future, if C<Config.pm> adds some more
appropriate entries, we'll start using those.)
=item install_path
Once the defaults have been set, you can override them.
On the command line, that would look like this:
perl Build.PL --install_path lib=/foo/lib --install_path arch=/foo/lib/arch
or this:
./Build install --install_path lib=/foo/lib --install_path arch=/foo/lib/arch
=item install_base
You can also set the whole bunch of installation paths by supplying the
C<install_base> parameter to point to a directory on your system. For
instance, if you set C<install_base> to "/home/ken" on a Linux
system, you'll install as follows:
lib => /home/ken/lib/perl5
arch => /home/ken/lib/perl5/i386-linux
script => /home/ken/bin
bin => /home/ken/bin
bindoc => /home/ken/man/man1
libdoc => /home/ken/man/man3
binhtml => /home/ken/html
libhtml => /home/ken/html
Note that this is I<different> from how C<MakeMaker>'s C<PREFIX>
parameter works. C<install_base> just gives you a default layout under the
directory you specify, which may have little to do with the
C<installdirs=site> layout.
The exact layout under the directory you specify may vary by system -
we try to do the "sensible" thing on each platform.
=item destdir
If you want to install everything into a temporary directory first
(for instance, if you want to create a directory tree that a package
manager like C<rpm> or C<dpkg> could create a package from), you can
use the C<destdir> parameter:
perl Build.PL --destdir /tmp/foo
or
./Build install --destdir /tmp/foo
This will effectively install to "/tmp/foo/$sitelib",
"/tmp/foo/$sitearch", and the like, except that it will use
C<File::Spec> to make the pathnames work correctly on whatever
platform you're installing on.
=item prefix
Provided for compatibility with C<ExtUtils::MakeMaker>'s PREFIX argument.
C<prefix> should be used when you wish Module::Build to install your
modules, documentation and scripts in the same place
C<ExtUtils::MakeMaker> does.
The following are equivalent.
perl Build.PL --prefix /tmp/foo
perl Makefile.PL PREFIX=/tmp/foo
Because of the very complex nature of the prefixification logic, the
behavior of PREFIX in C<MakeMaker> has changed subtly over time.
Module::Build's --prefix logic is equivalent to the PREFIX logic found
in C<ExtUtils::MakeMaker> 6.30.
If you do not need to retain compatibility with C<ExtUtils::MakeMaker> or
are starting a fresh Perl installation we recommend you use
C<install_base> instead (and C<INSTALL_BASE> in C<ExtUtils::MakeMaker>).
See L<Module::Build::Cookbook/Installing in the same location as
ExtUtils::MakeMaker> for further information.
=back
=head1 MOTIVATIONS
There are several reasons I wanted to start over, and not just fix
what I didn't like about C<MakeMaker>:
=over 4
=item *
I don't like the core idea of C<MakeMaker>, namely that C<make> should be
involved in the build process. Here are my reasons:
=over 4
=item +
When a person is installing a Perl module, what can you assume about
their environment? Can you assume they have C<make>? No, but you can
assume they have some version of Perl.
=item +
When a person is writing a Perl module for intended distribution, can
you assume that they know how to build a Makefile, so they can
customize their build process? No, but you can assume they know Perl,
and could customize that way.
=back
For years, these things have been a barrier to people getting the
build/install process to do what they want.
=item *
There are several architectural decisions in C<MakeMaker> that make it
very difficult to customize its behavior. For instance, when using
C<MakeMaker> you do C<use ExtUtils::MakeMaker>, but the object created in
C<WriteMakefile()> is actually blessed into a package name that's
created on the fly, so you can't simply subclass
C<ExtUtils::MakeMaker>. There is a workaround C<MY> package that lets
you override certain C<MakeMaker> methods, but only certain explicitly
preselected (by C<MakeMaker>) methods can be overridden. Also, the method
of customization is very crude: you have to modify a string containing
the Makefile text for the particular target. Since these strings
aren't documented, and I<can't> be documented (they take on different
values depending on the platform, version of perl, version of
C<MakeMaker>, etc.), you have no guarantee that your modifications will
work on someone else's machine or after an upgrade of C<MakeMaker> or
perl.
=item *
It is risky to make major changes to C<MakeMaker>, since it does so many
things, is so important, and generally works. C<Module::Build> is an
entirely separate package so that I can work on it all I want, without
worrying about backward compatibility.
=item *
Finally, Perl is said to be a language for system administration.
Could it really be the case that Perl isn't up to the task of building
and installing software? Even if that software is a bunch of stupid
little C<.pm> files that just need to be copied from one place to
another? My sense was that we could design a system to accomplish
this in a flexible, extensible, and friendly manner. Or die trying.
=back
=head1 TO DO
The current method of relying on time stamps to determine whether a
derived file is out of date isn't likely to scale well, since it
requires tracing all dependencies backward, it runs into problems on
NFS, and it's just generally flimsy. It would be better to use an MD5
signature or the like, if available. See C<cons> for an example.
- append to perllocal.pod
- add a 'plugin' functionality
=head1 AUTHOR
Ken Williams <kwilliams@cpan.org>
Development questions, bug reports, and patches should be sent to the
Module-Build mailing list at <module-build@perl.org>.
Bug reports are also welcome at
<http://rt.cpan.org/NoAuth/Bugs.html?Dist=Module-Build>.
The latest development version is available from the Git
repository at <https://github.com/dagolden/module-build/>
=head1 COPYRIGHT
Copyright (c) 2001-2006 Ken Williams. All rights reserved.
This library is free software; you can redistribute it and/or
modify it under the same terms as Perl itself.
=head1 SEE ALSO
perl(1), L<Module::Build::Cookbook>, L<Module::Build::Authoring>,
L<Module::Build::API>, L<ExtUtils::MakeMaker>
F<META.yml> Specification:
L<CPAN::Meta::Spec>
L<http://www.dsmit.com/cons/>
L<http://search.cpan.org/dist/PerlBuildSystem/>
=cut
| efortuna/AndroidSDKClone | ndk_experimental/prebuilt/linux-x86_64/lib/perl5/5.16.2/Module/Build.pm | Perl | apache-2.0 | 35,464 |
#!/usr/bin/perl
use strict;
use warnings;
use Getopt::Long;
use File::Basename;
# Author: Dean Wilson ; License: GPL
# Project Home: http://www.unixdaemon.net/
# For documentation look at the bottom of this file, or run with '-h'
# Version 0.5 - Tidied up arg handling. Added usage
# Changes:
# Usage information corrected, thanks to Bartlomiej Konarski
# Nagios treats exit code 3 as UNKNOWN, so convert any die() into an
# UNKNOWN result instead of a plain non-zero exit.
$SIG{__DIE__} = sub {
print @_;
exit 3;
};
my $app = basename($0);
# Thresholds are percentages of free memory. Defaults: warn below 30%,
# critical below 20%. Values may be given with a trailing '%'.
GetOptions(
"w|warn=s" => \( my $warn_percent = 30 ),
"c|crit=s" => \( my $crit_percent = 20 ),
"h|help" => \&usage,
);
# remove any % passed in
$warn_percent =~ s/%//;
$crit_percent =~ s/%//;
# NOTE(review): the condition accepts warn == crit although the message
# says "larger than" -- confirm whether equality should be rejected.
die "Warning value must be larger than critical value\n"
unless $warn_percent >= $crit_percent;
my $memory_stats_ref = get_mem();
my $percentage_free = get_percentage($memory_stats_ref);
# Standard Nagios exit codes: 0 = OK, 1 = WARNING, 2 = CRITICAL.
if ($percentage_free <= $crit_percent) {
print "CRIT: Only $percentage_free% ($memory_stats_ref->{free_cache}M) of memory free!\n";
exit 2;
} elsif ($percentage_free <= $warn_percent) {
print "WARN: Only $percentage_free% ($memory_stats_ref->{free_cache}M) of memory free!\n";
exit 1;
} else {
print "OK: $percentage_free% ($memory_stats_ref->{free_cache}M) free memory.\n";
exit 0;
}
#########################################
# Run "free -m" and parse the "buffers/cache:" line, returning a hash
# ref with:
#   used_cache - MB used, excluding buffers/cache
#   free_cache - MB free, including buffers/cache
# Dies (and so exits 3 via the __DIE__ handler) if "free" cannot be
# started. If the expected line is absent the hash is returned empty,
# which get_percentage() reports as a parse failure.
sub get_mem {
    # List-form pipe open avoids the shell and replaces the old bareword
    # handle FREEPIPE with a lexical one.
    open( my $free_fh, '-|', 'free', '-m' )
        || die "Failed to open 'free'\n$!\n";
    my %memory_stats;
    while ( my $line = <$free_fh> ) {
        chomp $line;
        next unless $line =~ m!buffers/cache:!;
        # Only use $1/$2 when the match succeeded; the old code read
        # them unconditionally, risking stale captures.
        if ( $line =~ m/[^\d]+(\d+)\s+(\d+)$/ ) {
            $memory_stats{'used_cache'} = $1;
            $memory_stats{'free_cache'} = $2;
        }
    }
    close $free_fh;
    return \%memory_stats;
}
#------------------------------------------#
# Compute the integer-truncated percentage of free memory from the hash
# ref produced by get_mem().
#
# Dies with a clear message when the totals are missing or zero (for
# example when the output format of "free" changed and get_mem() could
# not parse it), instead of crashing with "Illegal division by zero".
sub get_percentage {
    my $mem_stats_ref = shift;
    my $used = $mem_stats_ref->{'used_cache'};
    my $free = $mem_stats_ref->{'free_cache'};
    die "Unable to parse memory statistics from 'free' output\n"
        unless defined $used && defined $free && ( $used + $free ) > 0;
    my $total = $used + $free;
    return int( ( $free / $total ) * 100 );
}
#------------------------------------------#
# Print the help/usage text and exit with Nagios status 3 (UNKNOWN),
# the conventional code for "not a check result". Fixes the user-facing
# typo "threshholds" -> "thresholds"; all other text is unchanged.
sub usage {
print<<EOU;
$app - Copyright (c) 2006 Dean Wilson. Licensed under the GPL
This script reports the percentage of memory that's still free along
with a warning or a critical based upon user defined thresholds.
This script was written to be used in conjunction with Nagios.
Usage Examples:
$app -w 20 -c 10
$app -warn 30 -crit 15
$app -h # shows this information
Options:
-w | -warn
Warn if less than this percentage is free.
-c | -crit
Crit if less than this percentage is free.
-h
This help and usage information
Notes:
The output format of "free" (which this script wraps) can change
between releases. Please double check the outputs before you deploy
this script.
EOU
exit 3;
}
| saimonmoore/deprec | lib/deprec/templates/nagios/check_linux_free_memory.pl | Perl | mit | 2,810 |
=head1 NAME
memcached_analyze - Analyze server information
=head1 LIBRARY
C Client Library for memcached (libmemcached, -lmemcached)
=head1 SYNOPSIS
#include <memcached.h>
memcached_analysis_st *
memcached_analyze (memcached_st *ptr,
memcached_stat_st *stat,
memcached_return_t *error);
=head1 DESCRIPTION
libmemcached(3) has the ability to query a memcached server (or collection
of servers) for their current state. Queries to find state return a
C<memcached_analysis_st> structure. You are responsible for freeing this structure.
memcached_analyze() analyzes useful information based on the provided servers
and stores the result in the C<memcached_analysis_st> structure. The return value
must be freed by the calling application.
A command line tool, memstat(1) with the option --analyze, is provided so that
you do not have to write an application to use this method.
=head1 RETURN
A pointer to the allocated C<memcached_analysis_st> structure on success and
a NULL pointer on failure. You may inspect the error detail by checking the
C<memcached_return_t> value.
Any method returning a C<memcached_analysis_st> expects you to free the
memory allocated for it.
=head1 HOME
To find out more information please check:
L<https://launchpad.net/libmemcached>
=head1 AUTHOR
Toru Maesaka, E<lt>dev@torum.netE<gt>
=head1 SEE ALSO
memcached(1) libmemcached(3) memcached_strerror(3)
=cut
| trondn/libmemcached | docs/memcached_analyze.pod | Perl | bsd-3-clause | 1,456 |
# Test fixture definition for the Asn (Autonomous System Number) table,
# consumed by DBIx::Class::EasyFixture during Traffic Ops test runs.
package Fixtures::Asn;
#
# Copyright 2015 Comcast Cable Communications Management, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
use Moose;
extends 'DBIx::Class::EasyFixture';
use namespace::autoclean;
# Maps fixture name -> EasyFixture spec: which result class ('new') and
# which column values ('using') to insert.
my %definition_for = (
test_cran_1 => {
new => 'Asn',
using => {
id => 1,
asn => 9939,
cachegroup => 1,
},
},
);
# Human-readable name of this fixture set.
sub name {
return "Asn";
}
# Return the spec for a single named fixture (undef if unknown).
sub get_definition {
my ( $self, $name ) = @_;
return $definition_for{$name};
}
# All fixture names defined in this package.
sub all_fixture_names {
return keys %definition_for;
}
__PACKAGE__->meta->make_immutable;
1;
| smalenfant/traffic_control | traffic_ops/app/lib/Fixtures/Asn.pm | Perl | apache-2.0 | 1,065 |
# Thin wrapper around "git update-index -z --index-info": feeds index
# entries down a pipe opened at construction time and closes it on
# destruction. Used by git-svn to batch index updates.
package Git::IndexInfo;
use strict;
use warnings;
use Git qw/command_input_pipe command_close_pipe/;
# Open the update-index pipe; the handle and its context are kept in the
# object together with a count of entries written so far.
sub new {
my ($class) = @_;
my ($gui, $ctx) = command_input_pipe(qw/update-index -z --index-info/);
bless { gui => $gui, ctx => $ctx, nr => 0}, $class;
}
# Queue removal of $path: mode 0 with the all-zero (null) SHA-1 tells
# update-index to delete the entry. Returns the running entry count on
# success, undef if the pipe write failed.
sub remove {
my ($self, $path) = @_;
if (print { $self->{gui} } '0 ', 0 x 40, "\t", $path, "\0") {
return ++$self->{nr};
}
undef;
}
# Queue an add/update of $path with the given file mode and object hash.
# Returns the running entry count on success, undef on write failure.
sub update {
my ($self, $mode, $hash, $path) = @_;
if (print { $self->{gui} } $mode, ' ', $hash, "\t", $path, "\0") {
return ++$self->{nr};
}
undef;
}
# Flush and close the pipe when the object goes away; this is what
# actually commits the queued entries to the index.
sub DESTROY {
my ($self) = @_;
command_close_pipe($self->{gui}, $self->{ctx});
}
1;
| ianyh/heroku-buildpack-python-opencv | vendor/.heroku/share/perl/5.10.1/Git/IndexInfo.pm | Perl | mit | 644 |
#!/usr/bin/perl -w
# Generate LSF bsub job scripts for a GBS pipeline: QC (Trimmomatic),
# alignment (bowtie2), post-processing, and variant calling (GATK
# UnifiedGenotyper). Run with -h (via the help() sub) for usage.
use strict;
use Getopt::Long;
use File::Basename;
# Command-line options; undefined values fall back to defaults later.
my $fastq_file;
my $qc_pl;
my $aln_pl;
my $call_pl;
my $REF;
my $REFINDEX;
my $num_jobs;
my $job_name;
# NOTE(review): option is spelled "fq_List" here but "-fq_list" in the
# help text; Getopt::Long matches case-insensitively by default, so both
# work -- confirm this is intentional.
GetOptions (
"fq_List=s" => \$fastq_file,
"numJobs=i" => \$num_jobs,
"jobName=s" => \$job_name,
"qc_pl=s" => \$qc_pl,
"aln_pl=s" => \$aln_pl,
"call_pl=s" => \$call_pl,
"ref=s" => \$REF,
"refindex=s" => \$REFINDEX
);
# Print usage information to STDOUT. Called when required options are
# missing; the caller exits afterwards.
sub help {
print qq(
perl $0
-fq_list fastq_list.txt # list of fastq files, first column is the accession name, second column is the full path to the fastq file (fastq or gzfastq);
-numJobs 10 # how many jobs to be submitted to the HPC
-jobName myJob # The name of job
-aln_pl /home/$ENV{USER}/pl_scripts/align.pl
-call_pl /home/$ENV{USER}/pl_scripts/unifiedgenotyper.pl
-ref /home/$ENV{USER}/scratch_fast/ref_data/Dgenome/Dgenome.fa
-refindex /home/$ENV{USER}/scratch_fast/ref_data/Dgenome/Dgenome_bt2_index
Once the bsub scripts are generated, run "ls *.bsub |perl submit_sequential_jobs.pl" to submit all the jobs.
);
}
# Require the fastq list and reference arguments; otherwise show help.
unless ($fastq_file and $REF and $REFINDEX) {
&help();
exit;
}
open (IN, $fastq_file) or die "Error: can not open $fastq_file\n";
$num_jobs = 10 unless $num_jobs;
$job_name = "bsubjob" unless $job_name;
# %h maps each fastq file path -> its accession name (first column).
my %h;
while(<IN>){
chomp;
my @t = split /\s+/,$_;
unless ($t[1] =~ /^\//){die "Error: require absolute path for fastq files\n"}
map{ $h{$_} = $t[0] }@t[1..$#t];
}
close IN;
time_stamp("done reading fastq list file");
#
$qc_pl = "/home/$ENV{USER}/Tools/NGSQCToolkit_v2.3.3/QC/IlluQC.pl" unless defined $qc_pl;
# Treat every non-R2 fastq as the forward read; if an R2 mate exists in
# %h, build a paired-end Trimmomatic command, otherwise single-end.
# Output goes to a QC/ subdirectory next to the input fastq.
my @arr = grep{!/R2/} keys %h;
my @params;
foreach my $f (@arr) {
my $f2=$f;
$f2=~s/R1/R2/;
my $folder = dirname($f);
my $qc_folder = $folder . "/QC";
my $acc = $h{$f};
mkdir $qc_folder unless -d $qc_folder;
if(exists $h{$f2}) {
#push @params, "-pe $f $f2 2 5";
my $cmd="java -jar \$EBROOTTRIMMOMATIC/trimmomatic-0.38.jar PE -threads 4 $f $f2 $qc_folder/${acc}_F.fq.gz $qc_folder/${acc}_FU.fq.gz $qc_folder/${acc}_R.fq.gz $qc_folder/${acc}_RU.fq.gz ILLUMINACLIP:\$EBROOTTRIMMOMATIC/adapters/TruSeq3-PE.fa:2:30:10 LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 MINLEN:36";
push @params, $cmd;
}
else{
#push @params, "-se $f 1 5"
my $cmd="java -jar \$EBROOTTRIMMOMATIC/trimmomatic-0.38.jar SE -threads 4 $f $qc_folder/${acc}_F.fq.gz ILLUMINACLIP:\$EBROOTTRIMMOMATIC/adapters/TruSeq3-PE.fa:2:30:10 LEADING:3 TRAILING:3 SLIDINGWINDOW:4:15 MINLEN:36";
push @params, $cmd;
}
}
my $header = qq(#!/bin/bash
#
# See "man bsub" for more information on rusage
#
#BSUB -W 70:05 # wall-clock time (hrs:mins)
#BSUB -L /bin/bash # login shell
#BSUB -n 1 # number of tasks in job
#BSUB -R "span[ptile=1]" # run one MPI task per node
#BSUB -R "rusage[mem=5000]" # memory to reserve, in MB
#BSUB -J myjob # job name
#BSUB -o myjob.%J.%I.out # output file name in which %J is replaced by the job ID
#BSUB -e myjob.%J.%I.err # error file name in which %J is replaced by the job ID
module load zlib/1.2.8-intel-2015B
module load Java/1.8.0_181
export PERL5LIB="/home/wangsc/perl5/lib/perl5/x86_64-linux-thread-multi___/" # modify this path to reflect your own setup
);
my $qc_header = $header;
# Generate bsub job script for QC
my $qc_bsub = "0.qc.bsub";
open(QB, ">$qc_bsub") or die $!;
my $num_param = int ( (scalar @params) / $num_jobs ) + 1;
$num_param++ if (scalar @params) % $num_jobs ;
$qc_header =~ s/-J\s+myjob/"-J ". $job_name . "_QC[1-$num_jobs]"/e;
$qc_header =~ s/myjob/$job_name . "_QC"/eg;
$qc_header =~ s/-n 1/-n $num_param/;
$qc_header =~ s/ptile=1/ptile=$num_param/;
print QB $qc_header, "\nmodule load Trimmomatic/0.38-Java-1.8.0\ndate\n";
print STDERR '$num_jobs: ', $num_jobs, "\n";
print STDERR '$num_param: ', $num_param, "\n";
foreach my $ind (1 .. $num_jobs){
my $start = ($ind - 1) * $num_param;
my $end = $start + $num_param - 1;
$end = $#params if $end > $#params;
my $cmd = join("\n", @params[$start .. $end]);
print QB "if [ \$LSB_JOBINDEX == $ind ]; then\n", $cmd, "\nfi\n\n";
print STDERR "if [ \$LSB_JOBINDEX == $ind ]; then\n", $cmd, "\nfi\n\n";
last if $start >= $#params or $end >= $#params;
}
print QB "date\n echo \"Done \$LSB_JOBINDEX\"\n";
close QB;
time_stamp("generated $qc_bsub");
## Generate bsub job script for Alignment
my $aln_header = $header;
$aln_header =~ s/-n\s+\d+/-n 10/;
$aln_header =~ s/ptile=\d+/ptile=10/;
$aln_header =~ s/mem=\d+/mem=2000/;
my $aln_bsub = "1.aln.bsub";
open(ALN, ">$aln_bsub") or die $!;
# Recover, per accession, the QC-ed read files from the Trimmomatic
# command strings built earlier: fields 8-11 for PE, field 7 for SE.
my %acc_qc;
foreach (@params){
my @p = split /\s+/,$_;
my @fs = / PE /?@p[8..11]:$p[7];
my $dir = dirname($fs[0]);
my $acc = $h{$p[6]};
if(@fs == 1){push @{$acc_qc{$acc}}, $fs[0]; next}
# Keep only the paired outputs (_F and _R), dropping unpaired reads.
my @filtered = @fs[0,2];
#my $single_HQ = $dir . "/IlluQC_Filtered_files/" . basename($fs[0]) . "_" . basename($fs[1]) . "_unPaired_HQReads";
push @{$acc_qc{$acc}}, join(",", @filtered);
#push @{$acc_qc{$acc}}, $single_HQ if -e $single_HQ and not -z $single_HQ;
}
$aln_pl = "/home/$ENV{USER}/pl_scripts/align.pl" unless defined $aln_pl;
$REF="/home/$ENV{USER}/scratch_fast/Projects/GBS/wheat/ref_data/wheat_concate/Dgenome/Dgenome.fa" unless $REF;
$REFINDEX="/home/$ENV{USER}/scratch_fast/Projects/GBS/wheat/ref_data/wheat_concate/Dgenome/Dgenome.fa" unless $REFINDEX;
my $outdir = "Alignments"; mkdir($outdir) unless -d $outdir;
# NOTE(review): backtick output of "which bowtie2" includes a trailing
# newline, which is interpolated into the command string below.
my $b2 = `module load Bowtie2/2.3.4.2-foss-2018b; which bowtie2`;
my @aln_cmds;
map{
my $acc = $_;
my @fs = @{$acc_qc{$acc}};
push @aln_cmds, "perl $aln_pl -acc $acc -reads " . join(" ", @fs) . " -ref $REF -refindex $REFINDEX -outdir $outdir -MAQ 5 -CPU 10 -bowtie2 $b2";
}keys %acc_qc;
my $start = 0;
my $index = 0;
# Ten alignment commands per array index.
# NOTE(review): the "+1 when remainder" bump uses "/ 10" where "% 10"
# looks intended; with the current code $total is always incremented
# when any commands exist -- confirm against the generator's output.
my $total = int ((scalar @aln_cmds) / 10);
$total ++ if (scalar @aln_cmds) / 10;
$aln_header =~ s/myjob/$job_name."_ALN[1-$total]"/e;
$aln_header =~ s/myjob/$job_name."_ALN"/eg;
print ALN $aln_header, "\nmodule load Bowtie2/2.3.4.2-foss-2018b\ndate\n";
while($start <= $#aln_cmds){
$index ++;
print ALN "if [ \$LSB_JOBINDEX == $index ]; then\n";
my $end = $start + 9;
$end = $#aln_cmds if $end > $#aln_cmds;
print ALN join("\n", @aln_cmds[$start .. $end]), "\nfi\n\n";
last if $end >= $#aln_cmds;
$start = $end + 1;
}
print ALN "date\necho \"done \$LSB_JOBINDEX\"\n";
close ALN;
time_stamp("generated $aln_bsub");
## generate bsub job script for processing
# The processing script is the alignment script with resources shrunk,
# align.pl swapped for process_gbs.pl, and the job name retagged _PROC.
my $proc_bsub = "2.proc.bsub";
open(PB, ">$proc_bsub") or die $!;
open(IN, $aln_bsub) or die $!;
while(<IN>){
s/-n\s+\d+/-n 1/;
s/ptile=\d+/ptile=1/;
s/mem=\d+/mem=8000/;
s/align.pl/process_gbs.pl/;
s/_ALN/_PROC/ if /^\#/;
print PB $_;
}
close IN;
close PB;
time_stamp("generated $proc_bsub");
## generate bsub job script for calling variations
# One array index per reference sequence: each runs UnifiedGenotyper on
# a single chromosome/contig region over all QC-ed BAMs.
$call_pl = "/home/$ENV{USER}/pl_scripts/unifiedgenotyper.pl" unless $call_pl;
my $call_bsub = "3.call.bsub";
open (CB, ">$call_bsub") or die $!;
my $call_header = $header;
$call_header =~ s/-n\s+\d+/-n 1/;
$call_header =~ s/ptile=\d+/ptile=1/;
# Sequence names come from the reference's samtools .fai index.
my @chrs = get_chrs_from_fai($REF.".fai");
my $t = scalar @chrs;
$call_header =~ s/myjob/$job_name."_VAR[1-$t]"/e;
$call_header =~ s/myjob/$job_name."_VAR"/eg;
print CB $call_header, "\ndate\n";
my $var_dir = "Variations"; mkdir($var_dir) unless -d $var_dir;
# Glob pattern expanded by the shell inside the generated script.
my @bams = ("$outdir/*QC.bam");
foreach my $ind (0 .. $#chrs){
my $job = $ind + 1;
print CB "if [ \$LSB_JOBINDEX == $job ]; then\n ";
print CB "perl $call_pl -ref $REF -out_prefix Variations/$chrs[$ind] -region $chrs[$ind] -bam " . join(" ", @bams), "\n";
print CB "fi\n\n";
}
print CB "date\necho \"done \$LSB_JOBINDEX\"\n";
close CB;
time_stamp("generated $call_bsub");
# Read a samtools .fai index file and return the list of sequence names
# (the first whitespace-delimited column of each line), in file order.
# Dies with the file name if the index cannot be opened.
sub get_chrs_from_fai {
    my $fai_file = shift;
    # Three-arg open with a lexical handle: the old code did a two-arg
    # open on the bareword IN, which is unsafe with untrusted names and
    # clashes with the IN handle already used earlier in this script.
    open( my $fai_fh, '<', $fai_file )
        or die "Can not open $fai_file: $!\n";
    my @chrs;
    while ( my $line = <$fai_fh> ) {
        push @chrs, $1 if $line =~ /^(\S+)/;
    }
    close $fai_fh;
    return @chrs;
}
# Emit a timestamped progress message on STDERR: "<localtime>\t<msg>\n".
sub time_stamp {
    my $message = join ' ', @_;
    my $now = localtime(time);
    print STDERR "$now\t$message\n";
}
| swang8/Perl_scripts_misc | generate_bsub.pl | Perl | mit | 7,991 |
# This file is auto-generated by the Perl DateTime Suite time zone
# code generator (0.07) This code generator comes with the
# DateTime::TimeZone module distribution in the tools/ directory
#
# Generated from debian/tzdata/asia. Olson data version 2008c
#
# Do not edit this file directly.
#
package DateTime::TimeZone::Asia::Yerevan;
use strict;
use Class::Singleton;
use DateTime::TimeZone;
use DateTime::TimeZone::OlsonDB;
@DateTime::TimeZone::Asia::Yerevan::ISA = ( 'Class::Singleton', 'DateTime::TimeZone' );
my $spans =
[
[
DateTime::TimeZone::NEG_INFINITY,
60694520520,
DateTime::TimeZone::NEG_INFINITY,
60694531200,
10680,
0,
'LMT'
],
[
60694520520,
61730542800,
60694531320,
61730553600,
10800,
0,
'YERT'
],
[
61730542800,
62490600000,
61730557200,
62490614400,
14400,
0,
'YERT'
],
[
62490600000,
62506407600,
62490618000,
62506425600,
18000,
1,
'YERST'
],
[
62506407600,
62522136000,
62506422000,
62522150400,
14400,
0,
'YERT'
],
[
62522136000,
62537943600,
62522154000,
62537961600,
18000,
1,
'YERST'
],
[
62537943600,
62553672000,
62537958000,
62553686400,
14400,
0,
'YERT'
],
[
62553672000,
62569479600,
62553690000,
62569497600,
18000,
1,
'YERST'
],
[
62569479600,
62585294400,
62569494000,
62585308800,
14400,
0,
'YERT'
],
[
62585294400,
62601026400,
62585312400,
62601044400,
18000,
1,
'YERST'
],
[
62601026400,
62616751200,
62601040800,
62616765600,
14400,
0,
'YERT'
],
[
62616751200,
62632476000,
62616769200,
62632494000,
18000,
1,
'YERST'
],
[
62632476000,
62648200800,
62632490400,
62648215200,
14400,
0,
'YERT'
],
[
62648200800,
62663925600,
62648218800,
62663943600,
18000,
1,
'YERST'
],
[
62663925600,
62679650400,
62663940000,
62679664800,
14400,
0,
'YERT'
],
[
62679650400,
62695375200,
62679668400,
62695393200,
18000,
1,
'YERST'
],
[
62695375200,
62711100000,
62695389600,
62711114400,
14400,
0,
'YERT'
],
[
62711100000,
62726824800,
62711118000,
62726842800,
18000,
1,
'YERST'
],
[
62726824800,
62742549600,
62726839200,
62742564000,
14400,
0,
'YERT'
],
[
62742549600,
62758274400,
62742567600,
62758292400,
18000,
1,
'YERST'
],
[
62758274400,
62773999200,
62758288800,
62774013600,
14400,
0,
'YERT'
],
[
62773999200,
62790328800,
62774017200,
62790346800,
18000,
1,
'YERST'
],
[
62790328800,
62806053600,
62790343200,
62806068000,
14400,
0,
'YERT'
],
[
62806053600,
62821252800,
62806068000,
62821267200,
14400,
1,
'YERST'
],
[
62821252800,
62821782000,
62821267200,
62821796400,
14400,
1,
'AMST'
],
[
62821782000,
62837496000,
62821792800,
62837506800,
10800,
0,
'AMT'
],
[
62837496000,
62853217200,
62837510400,
62853231600,
14400,
1,
'AMST'
],
[
62853217200,
62868956400,
62853228000,
62868967200,
10800,
0,
'AMT'
],
[
62868956400,
62884681200,
62868970800,
62884695600,
14400,
1,
'AMST'
],
[
62884681200,
62900406000,
62884692000,
62900416800,
10800,
0,
'AMT'
],
[
62900406000,
62916130800,
62900420400,
62916145200,
14400,
1,
'AMST'
],
[
62916130800,
62931855600,
62916141600,
62931866400,
10800,
0,
'AMT'
],
[
62931855600,
62947580400,
62931870000,
62947594800,
14400,
1,
'AMST'
],
[
62947580400,
62987745600,
62947594800,
62987760000,
14400,
0,
'AMT'
],
[
62987745600,
62995356000,
62987760000,
62995370400,
14400,
0,
'AMT'
],
[
62995356000,
63013500000,
62995374000,
63013518000,
18000,
1,
'AMST'
],
[
63013500000,
63026805600,
63013514400,
63026820000,
14400,
0,
'AMT'
],
[
63026805600,
63044949600,
63026823600,
63044967600,
18000,
1,
'AMST'
],
[
63044949600,
63058255200,
63044964000,
63058269600,
14400,
0,
'AMT'
],
[
63058255200,
63077004000,
63058273200,
63077022000,
18000,
1,
'AMST'
],
[
63077004000,
63089704800,
63077018400,
63089719200,
14400,
0,
'AMT'
],
[
63089704800,
63108453600,
63089722800,
63108471600,
18000,
1,
'AMST'
],
[
63108453600,
63121154400,
63108468000,
63121168800,
14400,
0,
'AMT'
],
[
63121154400,
63139903200,
63121172400,
63139921200,
18000,
1,
'AMST'
],
[
63139903200,
63153208800,
63139917600,
63153223200,
14400,
0,
'AMT'
],
[
63153208800,
63171352800,
63153226800,
63171370800,
18000,
1,
'AMST'
],
[
63171352800,
63184658400,
63171367200,
63184672800,
14400,
0,
'AMT'
],
[
63184658400,
63202802400,
63184676400,
63202820400,
18000,
1,
'AMST'
],
[
63202802400,
63216108000,
63202816800,
63216122400,
14400,
0,
'AMT'
],
[
63216108000,
63234856800,
63216126000,
63234874800,
18000,
1,
'AMST'
],
[
63234856800,
63247557600,
63234871200,
63247572000,
14400,
0,
'AMT'
],
[
63247557600,
63266306400,
63247575600,
63266324400,
18000,
1,
'AMST'
],
[
63266306400,
63279007200,
63266320800,
63279021600,
14400,
0,
'AMT'
],
[
63279007200,
63297756000,
63279025200,
63297774000,
18000,
1,
'AMST'
],
[
63297756000,
63310456800,
63297770400,
63310471200,
14400,
0,
'AMT'
],
[
63310456800,
63329205600,
63310474800,
63329223600,
18000,
1,
'AMST'
],
[
63329205600,
63342511200,
63329220000,
63342525600,
14400,
0,
'AMT'
],
[
63342511200,
63360655200,
63342529200,
63360673200,
18000,
1,
'AMST'
],
[
63360655200,
63373960800,
63360669600,
63373975200,
14400,
0,
'AMT'
],
[
63373960800,
63392104800,
63373978800,
63392122800,
18000,
1,
'AMST'
],
[
63392104800,
63405410400,
63392119200,
63405424800,
14400,
0,
'AMT'
],
[
63405410400,
63424159200,
63405428400,
63424177200,
18000,
1,
'AMST'
],
[
63424159200,
63436860000,
63424173600,
63436874400,
14400,
0,
'AMT'
],
[
63436860000,
63455608800,
63436878000,
63455626800,
18000,
1,
'AMST'
],
[
63455608800,
63468309600,
63455623200,
63468324000,
14400,
0,
'AMT'
],
[
63468309600,
63487058400,
63468327600,
63487076400,
18000,
1,
'AMST'
],
[
63487058400,
63500364000,
63487072800,
63500378400,
14400,
0,
'AMT'
],
[
63500364000,
63518508000,
63500382000,
63518526000,
18000,
1,
'AMST'
],
[
63518508000,
63531813600,
63518522400,
63531828000,
14400,
0,
'AMT'
],
[
63531813600,
63549957600,
63531831600,
63549975600,
18000,
1,
'AMST'
],
[
63549957600,
63563263200,
63549972000,
63563277600,
14400,
0,
'AMT'
],
[
63563263200,
63581407200,
63563281200,
63581425200,
18000,
1,
'AMST'
],
[
63581407200,
63594712800,
63581421600,
63594727200,
14400,
0,
'AMT'
],
[
63594712800,
63613461600,
63594730800,
63613479600,
18000,
1,
'AMST'
],
[
63613461600,
63626162400,
63613476000,
63626176800,
14400,
0,
'AMT'
],
[
63626162400,
63644911200,
63626180400,
63644929200,
18000,
1,
'AMST'
],
[
63644911200,
63657612000,
63644925600,
63657626400,
14400,
0,
'AMT'
],
[
63657612000,
63676360800,
63657630000,
63676378800,
18000,
1,
'AMST'
],
[
63676360800,
63689666400,
63676375200,
63689680800,
14400,
0,
'AMT'
],
[
63689666400,
63707810400,
63689684400,
63707828400,
18000,
1,
'AMST'
],
];
# Version of the Olson tz database this module was generated from.
sub olson_version { '2008c' }
# Number of DST transitions encoded in the span table above.
sub has_dst_changes { 39 }
# Last year for which spans were pre-generated; later dates fall back
# to the rules/observance data below.
sub _max_year { 2018 }
# Singleton constructor hook (via Class::Singleton): initialise with
# the pre-generated span table.
sub _new_instance
{
return shift->_init( @_, spans => $spans );
}
# UTC offset (seconds) in effect after the last pre-generated span.
sub _last_offset { 14400 }
my $last_observance = bless( {
'format' => 'AM%sT',
'gmtoff' => '4:00',
'local_start_datetime' => bless( {
'formatter' => undef,
'local_rd_days' => 729025,
'local_rd_secs' => 0,
'offset_modifier' => 0,
'rd_nanosecs' => 0,
'tz' => bless( {
'name' => 'floating',
'offset' => 0
}, 'DateTime::TimeZone::Floating' ),
'utc_rd_days' => 729025,
'utc_rd_secs' => 0,
'utc_year' => 1998
}, 'DateTime' ),
'offset_from_std' => 0,
'offset_from_utc' => 14400,
'until' => [],
'utc_start_datetime' => bless( {
'formatter' => undef,
'local_rd_days' => 729024,
'local_rd_secs' => 72000,
'offset_modifier' => 0,
'rd_nanosecs' => 0,
'tz' => bless( {
'name' => 'floating',
'offset' => 0
}, 'DateTime::TimeZone::Floating' ),
'utc_rd_days' => 729024,
'utc_rd_secs' => 72000,
'utc_year' => 1997
}, 'DateTime' )
}, 'DateTime::TimeZone::OlsonDB::Observance' )
;
sub _last_observance { $last_observance }
my $rules = [
bless( {
'at' => '2:00s',
'from' => '1996',
'in' => 'Oct',
'letter' => '',
'name' => 'RussiaAsia',
'offset_from_std' => 0,
'on' => 'lastSun',
'save' => '0',
'to' => 'max',
'type' => undef
}, 'DateTime::TimeZone::OlsonDB::Rule' ),
bless( {
'at' => '2:00s',
'from' => '1993',
'in' => 'Mar',
'letter' => 'S',
'name' => 'RussiaAsia',
'offset_from_std' => 3600,
'on' => 'lastSun',
'save' => '1:00',
'to' => 'max',
'type' => undef
}, 'DateTime::TimeZone::OlsonDB::Rule' )
]
;
sub _rules { $rules }
1;
| carlgao/lenga | images/lenny64-peon/usr/share/perl5/DateTime/TimeZone/Asia/Yerevan.pm | Perl | mit | 8,905 |
# Perl-level wrappers around the XS implementation: each public function
# delegates to its *_xs counterpart loaded by bootstrap() below.
package Data::Structure::Util;
use 5.008;
use strict;
use warnings::register;
use vars qw($VERSION @ISA @EXPORT @EXPORT_OK);
use Storable qw( freeze );
use Digest::MD5 qw( md5_hex );
require Exporter;
require DynaLoader;
require AutoLoader;
@ISA = qw( Exporter DynaLoader );
$VERSION = '0.16';
@EXPORT_OK = qw(
unbless get_blessed get_refs has_circular_ref circular_off signature
);
# The utf8 helpers rely on perl 5.8+ string internals, so only offer
# them when running on such a perl.
if ( $] >= 5.008 ) {
push @EXPORT_OK, qw(
has_utf8 utf8_off utf8_on _utf8_on _utf8_off
);
}
bootstrap Data::Structure::Util $VERSION;
# Return the argument if any scalar inside it has the utf8 flag set,
# undef otherwise.
sub has_utf8 {
has_utf8_xs( $_[0] ) ? $_[0] : undef;
}
# Downgrade all utf8-flagged scalars in place; returns the argument on
# success, undef on failure.
sub utf8_off {
utf8_off_xs( $_[0] ) ? $_[0] : undef;
}
# Upgrade all scalars to utf8 in place; returns the argument on
# success, undef on failure.
sub utf8_on {
utf8_on_xs( $_[0] ) ? $_[0] : undef;
}
# Clear the utf8 flag without re-encoding (dangerous; see POD).
sub _utf8_off {
_utf8_off_xs( $_[0] ) ? $_[0] : undef;
}
# Set the utf8 flag without re-encoding (dangerous; see POD).
sub _utf8_on {
_utf8_on_xs( $_[0] ) ? $_[0] : undef;
}
# Remove the blessing from every object found in the structure.
sub unbless {
unbless_xs( $_[0] );
}
# Depth-first list of every blessed object in the structure; an empty
# array ref for a false argument.
sub get_blessed {
$_[0] or return [];
get_blessed_xs( $_[0] );
}
# Depth-first list of every reference in the structure; an empty array
# ref for a false argument.
sub get_refs {
$_[0] or return [];
get_refs_xs( $_[0] );
}
# Return a reference inside a detected cycle, or false if none.
sub has_circular_ref {
$_[0] or return $_[0];
has_circular_ref_xs( $_[0] );
}
# Need to hold another reference to the passed in value to avoid this
# pathological case throwing an error
# my $obj8 = [];
# $obj8->[0] = \$obj8;
# circular_off($obj8); # Used to throw an error
sub circular_off {
my $r = $_[0];
$r or return $r;
circular_off_xs( $r );
}
# MD5 fingerprint of the structure (layout, addresses, flags); a fixed
# all-zero digest when called with no arguments.
sub signature {
return @_
? md5_hex( freeze( [ $_[0], signature_xs( $_[0] ) ] ) )
: '0' x 32;
}
1;
__END__
=head1 NAME
Data::Structure::Util - Change nature of data within a structure
=head1 SYNOPSIS
use Data::Structure::Util qw(
has_utf8 utf8_off utf8_on unbless get_blessed get_refs
has_circular_ref circular_off signature
);
# get the objects in the data structure
my $objects_arrayref = get_blessed( $data );
# unbless all objects
unbless( $data );
if ( has_circular_ref( $data ) ) {
print "Removing circular ref!\n";
circular_off( $data );
}
# convert back to latin1 if needed and possible
utf8_off( $data ) if defined has_utf8( $data );
=head1 DESCRIPTION
C<Data::Structure::Util> is a toolbox to manipulate the data inside a
data structure. It can process an entire tree and perform the operation
requested on each appropriate element.
For example: It can transform all strings within a data structure to
utf8 or transform any utf8 string back to the default encoding. It can
remove the blessing on any reference. It can collect all the objects or
detect if there is a circular reference.
It is written in C for decent speed.
=head1 FUNCTIONS
All Data::Structure::Util functions operate on a whole tree. If you pass
them a simple scalar then they will operate on that one scalar. However,
if you pass them a reference to a hash, array, or scalar then they will
iterate though that structure and apply the manipulation to all
elements, and in turn if they are references to hashes, arrays or
scalars to all their elements and so on, recursively.
For speed reasons, all manipulations that alter the data structure are done
in place: rather than returning an altered copy, these functions return the
data structure that was passed in, which has itself been altered.
=head2 Manipulating Data Structures
=over 4
=item has_circular_ref($ref)
This function detects if the passed data structure has a circular
reference, that is to say if it is possible by following references
contained in the structure to return to a part of the data structure you
have already visited. Data structures that have circular references will
not be automatically reclaimed by Perl's garbage collector.
If a circular reference is detected the function returns a reference
to an element within circuit, otherwise the function will return a
false value.
If the version of perl that you are using supports weak references then
any weak references found within the data structure will not be
traversed, meaning that circular references that have had links
successfully weakened will not be returned by this function.
=item circular_off($ref)
Detects circular references in $ref (as above) and weakens a link in
each so that they can be properly garbage collected when no external
references to the data structure are left.
This means that one (or more) of the references in the data structure
will be told that the should not count towards reference counting. You
should be aware that if you later modify the data structure and leave
parts of it only 'accessible' via weakened references that those parts
of the data structure will be immediately garbage collected as the
weakened references will not be strong enough to maintain the connection
on their own.
The number of references weakened is returned.
=item get_refs($ref)
Examine the data structure and return a reference to flat array that
contains one copy of every reference in the data structure you passed.
For example:
my $foo = {
first => [ "inner", "array", { inmost => "hash" } ],
second => \"refed scalar",
};
use Data::Dumper;
# tell Data::Dumper to show nodes multiple times
$Data::Dumper::Deepcopy = 1;
print Dumper get_refs( $foo );
$VAR1 = [
{ 'inmost' => 'hash' },
[ 'inner', 'array', { 'inmost' => 'hash' } ],
\'refed scalar',
{
'first' => [ 'inner', { 'inmost' => 'hash' }, 'array' ],
'second' => \'refed scalar'
}
];
As you can see, the data structure is traversed depth first, so the
top most references should be the last elements of the array. See
L<get_blessed($ref)> below for a similar function for blessed objects.
=item signature($ref)
Returns a md5 of the passed data structure. Any change at all to the
data structure will cause a different md5 to be returned.
The function examines the structure, addresses, value types and flags
to generate the signature, meaning that even data structures that
would look identical when dumped with Data::Dumper produce different
signatures:
$ref1 = { key1 => [] };
$ref2 = $ref1;
$ref2->{key1} = [];
# this produces the same result, as they look the same
# even though they are different data structures
use Data::Dumper;
use Digest::MD5 qw(md5_hex);
print md5_hex( Dumper( $ref1 ) ), " ", md5_hex( Dumper( $ref2 ) ), "\n";
# cb55d41da284a5869a0401bb65ab74c1 cb55d41da284a5869a0401bb65ab74c1
# this produces differing results
use Data::Structure::Util qw(signature);
print signature( $ref1 ), " ", signature( $ref2 ), "\n";
# 5d20c5e81a53b2be90521167aefed9db 8b4cba2cbae0fec4bab263e9866d3911
=back
=head2 Object Blessing
=over 4
=item unbless($ref)
Remove the blessing from any objects found within the passed data
structure. For example:
my $foo = {
'a' => bless( { 'b' => bless( {}, "c" ), }, "d" ),
'e' => [ bless( [], "f" ), bless( [], "g" ), ]
};
use Data::Dumper;
use Data::Structure::Util qw(unbless);
print Dumper( unbless( $foo ) );
$VAR1 = {
'a' => { 'b' => {} },
'e' => [ [], [] ]
};
Note that the structure looks inside blessed objects for other
objects to unbless.
=item get_blessed($ref)
Examine the data structure and return a reference to flat array that
contains every object in the data structure you passed. For example:
my $foo = {
'a' => bless( { 'b' => bless( {}, "c" ), }, "d" ),
'e' => [ bless( [], "f" ), bless( [], "g" ), ]
};
use Data::Dumper;
# tell Data::Dumper to show nodes multiple times
$Data::Dumper::Deepcopy = 1;
use Data::Structure::Util qw(get_blessed);
print Dumper( get_blessed( $foo ) );
$VAR1 = [
bless( {}, 'c' ),
bless( { 'b' => bless( {}, 'c' ) }, 'd' ),
bless( [], 'f' ),
bless( [], 'g' )
];
This function is essentially the same as C<get_refs> but only returns
blessed objects rather than all objects. As with that function the
data structure is traversed depth first, so the top most objects
should be the last elements of the array. Note also (as shown in the
above example shows) that objects within objects are returned.
=back
=head2 utf8 Manipulation Functions
These functions allow you to manipulate the state of the utf8 flags in
the scalars contained in the data structure. Information on the utf8
flag and it's significance can be found in L<Encode>.
=over 4
=item has_utf8($var)
Returns C<$var> if the utf8 flag is enabled for C<$var> or any scalar
that a data structure passed in C<$var> contains.
print "this will be printed" if defined has_utf8( "\x{1234}" );
print "this won't be printed" if defined has_utf8( "foo bar" );
Note that you should not check the truth of the return value of this
function when calling it with a single scalar as it is possible to
have a string "0" or "" for which the utf8 flag set; Since C<undef>
can never have the utf8 flag set the function will never return a
defined value if the data structure does not contain a utf8 flagged
scalar.
=item _utf8_off($var)
Recursively disables the utf8 flag on all scalars within $var. This
is the same the C<_utf8_off> function of L<Encode> but applies to any
string within C<$var>. The data structure is converted in-place, and
as a convenience the passed variable is returned from the function.
This function makes no attempt to do any character set conversion to
the strings stored in any of the scalars in the passed data structure.
This means that if perl was internally storing any character as
sequence of bytes in the utf8 encoding each byte in that sequence will
then be henceforth treated as a character in it's own right.
For example:
my $emoticons = { smile => "\x{236a}" };
use Data::Structure::Util qw(_utf8_on);
print length( $emoticons->{smile} ), "\n"; # prints 1
_utf8_off( $emoticons );
print length( $emoticons->{smile} ), "\n"; # prints 3
=item _utf8_on($var)
Recursively enables the utf8 flag on all scalars within $var. This is
the same the C<_utf8_on> function of L<Encode> but applies to any
string within C<$var>. The data structure is converted in-place and as
a convenience the passed variable is returned from the function.
As above, this makes no attempt to do any character set conversion
meaning that unless your string contains the valid utf8 byte sequences
for the characters you want you are in trouble. B<In some cases
incorrect byte sequences can segfault perl>. In particular, the
regular expression engine has significant problems with invalid utf8
that has been incorrectly marked as utf8. You should know what you
are doing if you are using this function; Consider using the Encode
module as an alternative.
Contrary example to the above:
my $emoticons = { smile => "\342\230\272" };
use Data::Structure::Util qw(_utf8_on);
print length( $emoticons->{smile} ), "\n"; # prints 3
_utf8_on( $emoticons );
print length( $emoticons->{smile} ), "\n"; # prints 1
=item utf8_on($var)
This routine performs a C<sv_utf8_upgrade> on each scalar string in
the passed data structure that does not have the utf8 flag turned on.
This will cause the perl to change the method it uses internally to
store the string from the native encoding (normally Latin-1 unless
locales come into effect) into a utf8 encoding and set the utf8 flag
for that scalar. This means that single byte letters will now be
represented by multi-byte sequences. However, as long as the C<use
bytes> pragma is not in effect the string will be the same length as
before, because as far as perl is concerned the string still contains
the same number of characters (but not bytes).
This routine is significantly different from C<_utf8_on>; That routine
assumes that your string is encoded in utf8 but was marked (wrongly)
in the native encoding. This routine assumes that your string is
encoded in the native encoding and is marked that way, but you'd
rather it be encoded and marked as utf8.
=item utf8_off($var)
This routine performs a C<sv_utf8_downgrade> on each scalar string in
the passed data structure that has the utf8 flag turned on. This will
cause the perl to change the method it uses internally to store the
string from the utf8 encoding into a the native encoding (normally
Latin-1 unless locales are used) and disable the utf8 flag for that
scalar. This means that multiple byte sequences that represent a
single character will be replaced by one byte per character. However,
as long as the C<use bytes> pragma is not in effect the string will be
the same length as before, because as far as perl is concerned the
string still contains the same number of characters (but not bytes).
Please note that not all strings can be converted from utf8 to the
native encoding; In the case that the utf8 character has no
corresponding character in the native encoding Perl will die with
"Wide character in subroutine entry" exception.
This routine is significantly different from C<_utf8_off>; That
routine assumes that your string is encoded in utf8 and that you want
to simply mark it as being in the native encoding so that perl will
treat every byte that makes up the character sequences as a character
in its own right in the native encoding. This routine assumes that
your string is encoded in utf8, but you want each character that is
currently represented by multi-byte strings to be replaced by the
single byte representation of the same character.
=back
=head1 SEE ALSO
L<Encode>, L<Scalar::Util>, L<Devel::Leak>, L<Devel::LeakTrace>
See the excellent article
http://www.perl.com/pub/a/2002/08/07/proxyobject.html from Matt
Sergeant for more info on circular references.
=head1 REPOSITORY
https://github.com/AndyA/Data--Structure--Util
=head1 BUGS
C<signature()> is sensitive to the hash randomisation algorithm.
This module only recurses through basic hashes, lists and scalar
references. It doesn't attempt anything more complicated.
=head1 THANKS TO
James Duncan and Arthur Bergman who helped me and found a name for
this module. Leon Brocard and Richard Clamp have provided invaluable
help to debug this module. Mark Fowler rewrote large chunks of the
documentation and patched a few bugs.
=head1 AUTHOR
This release by Andy Armstrong <andy@hexten.net>
Originally by Pierre Denis <pdenis@fotango.com>
http://opensource.fotango.com/
=head1 COPYRIGHT
Copyright 2003, 2004 Fotango - All Rights Reserved.
This module is released under the same license as Perl itself.
=cut
| jkb78/extrajnm | local/lib/perl5/darwin-2level/Data/Structure/Util.pm | Perl | mit | 14,572 |
/*************************************************************************
name: lexicon_player_english.pl
date: 2004-10-25
author: Andreas Wallentin
*************************************************************************/
:- module( lexicon_player_english, [output_form/2,
input_form/2,
yn_answer/1]).
:- multifile synset/2.
:- discontiguous output_form/2, input_form/2.
:- use_module( library(lists), [ member/2, select/3, append/3, is_list/1 ] ).
%%:- use_module( library(charsio), [ format_to_chars/3 ] ).
%% för mer variation av output
:- use_module( library(random) ).
:- use_module( dbase ).
:- ensure_loaded( digits_svenska_player ).
:- ensure_loaded( semsort_player ).
:- ensure_loaded( groups ).
/*----------------------------------------------------------------------
   output_form( +Move, -String )
   -- Canned output
----------------------------------------------------------------------*/
/*
  For more variation in the output, different phrases are chosen at
  random.  The same goes for the farewells.
*/
% Candidate greeting phrases; one is picked at random on startup.
greetings(['The music application is ready to use.','Welcome to the audio player']).
% Candidate farewell phrases; one is picked at random on quit.
byes(['Good bye!','Hope you enjoyed the stay','Bye bye']).
% getNoNInList(+Position, +List, -SelectedAnswer).
% 1-based positional lookup: unify SelectedAnswer with the N-th element.
getNoNInList(1,[X|_],X).
getNoNInList(Num, [_|Xs], Svar):-
    N is Num-1,
    getNoNInList(N,Xs,Svar).
output_form( action(top), ['top'] ).
%% Called the first time the program is running.
%% random(1,3,N) yields N in {1,2}, matching the 2-element greetings/1 list.
output_form( greet, [Greeting] ):-
    random(1,3,N),
    greetings(List),
    getNoNInList(N,List,Greeting).
% random(1,4,N) yields N in {1,2,3}, matching the 3-element byes/1 list.
output_form( quit, [Ends] ):-
    random(1,4,N),
    byes(List),
    getNoNInList(N,List,Ends).
% ask-moves
% System wh-question prompts: map an ask/1 dialogue move to the word
% list that is rendered to the user.
output_form( ask(X^(action(X))), ['What can I do for you?'] ).
% Generic action question: wraps the action's own phrase in
% "Do you want to ... ?".
output_form( ask(action(T)), Str ):-
    output_form(action(T), StrT ),
    append( ['Do you want to '], StrT, Str0 ),
    append( Str0, ['?'], Str).
%% elicit information from the user
output_form( ask(X^playlist(X)),
    ['Which playlist do you want to open?'] ).
output_form( ask(X^itemAdd(X)),
    ['What song do you want to add to the playlist?'] ).
output_form( ask(X^itemRem(X)),
    ['What song(index no) do you want to remove from the playlist?'] ).
output_form( ask(X^groupToAdd(X)),
    ['What group do you mean?'] ).
output_form( ask(X^station(X)),
    ['What radio station do you want to listen to?'] ).
output_form( ask(X^listenTo(X)),
    ['Do you want to listen to radio or songs?'] ).
output_form( ask(X^artist(X)),
    ['What artist do you mean?'] ).
output_form( ask(X^song(X)),
    ['What song do you mean?'] ).
output_form( ask(X^album(X)),
    ['What album do you mean?'] ).
output_form( ask(X^song_artist(X)),
    ['What group do you mean?'] ).
output_form( ask(X^group(X)),
    ['What group do you mean?'] ).
output_form( ask(X^item(X)),
    ['What song do you mean?'] ).
output_form( ask(X^what_to_play(X)),
    ['What song in the playlist do you want to listen to?'] ).
% Verbalization of database answers.
% answer(path/1): empty atom '' encodes "no match" from the DB layer.
output_form( answer(path(Path)), Ans ):-
    ( Path = ''
    ->
      Ans = ['There is no path that matches the search criterion.']
    ;
      Ans = ['The path to the song is:',Path]
    ).
output_form( answer(fail(Path^path(Path),no_matches)), Ans ):-
    Ans = ['The path to the song is not:',Path].
output_form( answer(artists_song(Artist)), ['The following artists have done it:',Artist] ).
output_form( answer(artists_album(Artist)), ['The album was made by',Artist] ).
% Same ''-means-empty convention as for path/1 above.
output_form( answer(albums_by_artist(Albums)), Answer ):-
    ( Albums = ''
    -> Answer = ['There are no albums']
    ;  Answer = ['These albums exist:',Albums]
    ).
output_form( answer(current_song([A,B])), Answer ):-
    Answer = ['You are listening to',A,'-',B].
output_form( answer(songs_by_artist(Songs)), ['They have made:',Songs] ).
output_form( issue(path(_)), ['ask what path'] ).
% Action names as user-facing phrases (used e.g. inside "Do you want to ...?").
output_form( action(handle_player), ['handle the player'] ).
output_form( action(handle_playlist), ['handle playlists'] ).
output_form( action(handle_stations), ['handle radio stations'] ).
output_form( action(start), ['start the player'] ).
output_form( action(start_specific), ['play a specific song'] ).
output_form( action(stop), ['stop the player'] ).
output_form( action(pause), ['pause the music'] ).
output_form( action(resume), ['resume the music'] ).
output_form( action(fast_rewind), ['wind/rewind'] ).
output_form( action(start_playlist), ['play a certain playlist'] ).
output_form( action(fast_forward), ['wind'] ).
output_form( action(rewind), ['rewind'] ).
output_form( action(next_song), ['to next'] ).
output_form( action(previous_song), ['to previous'] ).
output_form( action(playlist_add), ['add a song to the playlist'] ).
output_form( action(playlist_del_specific), ['remove a song from the playlist'] ).
output_form( action(playlist_del), ['delete the playlist'] ).
output_form( action(playlist_shuffle), ['shuffle the playlist'] ).
output_form( action(show_list), ['show the playlist'] ).
%%% confirming actions
% Confirmation phrases emitted after an action has been carried out.
output_form( confirm(handle_player), ['done handling player'] ).
output_form( confirm(handle_playlist), ['done handling playlist'] ).
output_form( confirm(handle_stations), ['done handling stations'] ).
output_form( confirm(start), ['Starting the music'] ).
output_form( confirm(start_specific), ['Starting the music'] ).
output_form( confirm(stop), ['The music is stopped'] ).
output_form( confirm(pause), ['Pausing the player'] ).
output_form( confirm(resume), ['Resuming the music'] ).
%output_form( confirm(fast_rewind), ['soplar åt nåt håll'] ).
output_form( confirm(start_playlist), ['Playing playlist'] ).
output_form( confirm(fast_forward), ['Winding'] ).
output_form( confirm(rewind), ['Rewinding'] ).
output_form( confirm(playlist_add), ['The playlist is increased'] ).
output_form( confirm(playlist_del_specific), ['The playlist is reduced'] ).
output_form( confirm(playlist_del), ['The playlist is cleared'] ).
output_form( confirm(playlist_shuffle), ['The playlist is shuffled'] ).
output_form( confirm(show_list), ['The playlist is shown'] ).
output_form( confirm(vol_up), ['Increasing volume'] ).
output_form( confirm(vol_down), ['Lowering volume'] ).
output_form( confirm(next_song), ['To next song'] ).
output_form( confirm(previous_song), ['To previous song'] ).
% Failure reports for actions.
% make_name/2 is presumably defined in dbase (loaded above) -- converts a
% symbol list to a printable name; TODO confirm.
output_form( report('PlaylistAdd', failed(G,S)), Ans ):-
    make_name(G,Group),
    make_name(S,Song),
    Ans = ['Sorry,',Song,'by',Group,'does not exist'].
% NOTE(review): the two reports below still produce Swedish surface text
% ("The player was on <Status>, so no resume" / "... resume must be run")
% in this otherwise English lexicon -- candidates for translation.
output_form( report('Resume', failed(Status) ),
    ['Spelaren stod på',Status,'så därför ingen resume'] ).
output_form( report('Start', failed(Status) ), %%% player was paused
    ['Spelaren stod på',Status,'Då måste resume köras'] ).
% altlist2altstr_and(+Alts, -Str)
% Render a list of alternatives as a character-code string joined with
% ", " and " and " before the last element.
% NOTE(review): the single-element base clause prepends " and ", so the
% result reads "A,  and B" -- double space before "and"; verify intended.
altlist2altstr_and( [D], Str ):-
    alt2altstr( D, Str1 ),
    append( " and ", Str1, Str ).
altlist2altstr_and( [D|Ds], Str ):-
    alt2altstr( D, Str1 ),
    altlist2altstr_and( Ds, Str2 ),
    append( Str1, ", ", Str3 ),
    append(Str3, Str2, Str ).
% Same as above, with " or " as the final connective.
altlist2altstr_or( [D], Str ):-
    alt2altstr( D, Str1 ),
    append( " or ", Str1, Str ).
altlist2altstr_or( [D|Ds], Str ):-
    alt2altstr( D, Str1 ),
    altlist2altstr_or( Ds, Str2 ),
    append( Str1, ", ", Str3 ),
    append(Str3, Str2, Str ).
% alt2altstr(+Alt, -Str): canned output if available, otherwise the
% printed name of the term (name/2).
alt2altstr( D, Str ):-
    output_form( D, Str ).
alt2altstr( D, Str ):-
    name( D, Str ).
%%% used in output_form/2 with ask(set(...))
% Word-list variant of the "or"-join above (tokens, not code strings).
altlist2alts_or( [Alt], ['or'|OutputAlt] ):-
    output_form(Alt, OutputAlt ).
altlist2alts_or( [Alt|Alts], [','|Output] ):-
    output_form(Alt, OutputAlt ),
    altlist2alts_or(Alts, AltsOr),
    append( OutputAlt, AltsOr, Output).
% object-level clarification and grounding questions
% Turn a positive-understanding ICM phrase into a "..., is that correct?"
% confirmation question.
output_form( ask(C), Output ):-
    output_form( icm:und*pos:_*C, IcmPos ),
    append( IcmPos0,['.'],IcmPos),
    append( IcmPos0, [', is that correct?'], Output ).
% Alternative-set question: "Do you want A, B, or C?".
output_form( ask(set([Alt0|Alts])), Output):-
    output_form(Alt0, Alt0out),
    altlist2alts_or( Alts, AltsOr ),
    append(['Do you want '|Alt0out], AltsOr, Output0 ),
    append(Output0, ['?'], Output).
% Fallback: reuse the input lexicon as surface form for an answer term.
output_form( Alt, OutputAlt ):-
    input_form( OutputAlt, answer( Alt ) ).
output_form( answer(notexist(X,Q)), ['Sorry, there is nothing matching your request about '|InputQDot]):-
    input_form( InputQ, ask(X^Q) ),
    append( InputQ, ['.'], InputQDot ).
output_form( answer(unknown(Q)), ['Sorry, there is nothing matching your request about '|InputQDot]):-
    input_form( InputQ, ask(Q) ),
    append( InputQ, ['.'], InputQDot ).
% for asking metaissue clarification question
output_form( issue(Q), ['to ask about'|Out] ):-
    input_form( Out, ask( Q ) ).
% for asking metaissue clarification question
%output_form( action(Action), ['to '|Out] ):-
%    input_form( Out, request( Action ) ).
% ICM
% Interactive Communication Management moves: feedback phrases at the
% contact / perception / semantic / pragmatic understanding levels.
% contact
output_form( icm:con*neg, ['Hello?'] ).
% perception
output_form( icm:per*int, ['Pardon?'] ).
output_form( icm:per*int, ['What did you say?'] ).
output_form( icm:per*neg, ['Sorry, I didnt hear what you said.'] ).
% Echo the recognized string back to the user.
output_form( icm:per*pos:String, ['I heard you say',Name,'. '] ):-
    name( Name, String ).
output_form( icm:sem*int, ['What do you mean'] ).
output_form( icm:sem*neg, ['Sorry, I dont understand.'] ).
output_form( icm:sem*pos:Move, InputDot ):-
    input_form( Input, Move ),
    append( Input, ['.'], InputDot ).
% understanding(pragmatic)
output_form( icm:und*neg, ['I dont quite understand.'] ).
% first clause added 021120 SL
output_form( icm:und*pos:usr*issue(Q), ['You want to know '|AnsPDot] ):-
    output_form( Q, AnsP ),
    append(AnsP,['.'],AnsPDot).
output_form( icm:und*pos:usr*issue(Q), ['You want to know about'|AnsPDot] ):-
    input_form( AnsP, ask( Q ) ),
    append(AnsP,['.'],AnsPDot).
output_form( icm:und*pos:usr*(not issue(Q)), ['You did not ask about'|AnsPDot] ):-
    input_form( AnsP, ask( Q ) ),
    append(AnsP,['.'],AnsPDot).
% Negated proposition: prefix the positive phrase with 'not'.
output_form( icm:und*pos:usr*(not P), AnsNotPDot ):-
    output_form( icm:und*pos:usr*P, AnsPDot ),
    append( ['not'],AnsPDot,AnsNotPDot ).
output_form( icm:und*pos:usr*P, AnsPDot ):-
    ( output_form(P, AnsP);
      input_form( AnsP, answer(P) ) ),
    append(AnsP,['.'],AnsPDot).
% special cases; could make use of isa-hierarchy
%output_form( icm:und*pos:usr*channel_to_store(X), IcmPos ):-
%    output_form( icm:und*pos:usr*channel(X), IcmPos ).
%output_form( icm:und*pos:usr*new_channel(X), IcmPos ):-
%    output_form( icm:und*pos:usr*channel(X), IcmPos ).
% 020702 SL
% Generalize via the isa/2 sort hierarchy: rephrase P(X) as its
% supersort P1(X).
output_form( icm:und*pos:usr*PX, IcmPos ):-
    PX =.. [P,X],
    isa( P, P1 ),
    P1X =.. [P1,X],
    output_form( icm:und*pos:usr*P1X, IcmPos ).
output_form( icm:und*int:usr*C, IcmInt ):-
    output_form( ask(C), IcmInt ).
%output_form( icm:und*pos:C, IcmPos ),
%append( IcmPos0,['.'],IcmPos),
%append( IcmPos0, [', is that correct?'], IcmInt ).
%output_form( icm:und*int:usr*C, IcmInt ):-
%    input_form( answer(C), IcmInt ).
output_form( icm:und*int:usr*C, Output ):-
    output_form( icm:und*pos:_*C, IcmPos ),
    append( IcmPos0,['.'],IcmPos),
    append( IcmPos0, [', is that correct?'], Output ).
% clarification question
output_form( icm:und*int:usr*AltQ, Output):-
    output_form( ask(AltQ), Output).
% "acceptance"/integration
% icm-Type(-Polarity(-Args))
output_form( icm:acc*pos, ['Okay.'] ).
% reject(issue(Q))
output_form( icm:acc*neg:issue(Q), ['Sorry, I cannot answer questions about'|InputQDot]):-
    input_form( InputQ, ask(Q) ),
    append( InputQ, ['.'], InputQDot ).
% reject proposition P
output_form( icm:acc*neg:P, ['Sorry, '|Rest]):-
    input_form( InputP, answer(P) ),
    append( InputP, [' is not a valid parameter.'], Rest ).
% indicate loading a plan (pushed by findPlan)
%output_form( icm:loadplan, ['I need some information.'] ).
output_form( icm:loadplan, ['Lets see.'] ).
% reraise issue explicitly (feedback on user reraise, or system-initiated)
output_form( icm:reraise:Q, ['Returning to the issue of '|InputQDot]):-
    ( input_form( InputQ, ask(Q) ); output_form( ask(Q), InputQ ) ),
    append( InputQ, ['.'], InputQDot ).
% reraise action explicitly (feedback on user reraise, or system-initiated)
output_form( icm:reraise:A, ['Returning to '|InputQDot]):-
    ( input_form( InputQ, request(A) ); output_form( action(A), InputQ ) ),
    append( InputQ, ['.'], InputQDot ).
% reraise issue (system-initiated, where question follows immediately after)
output_form( icm:reraise, ['So,']).
% accommodation
output_form( icm:accommodate:_, ['Alright.'] ).
output_form( icm:reaccommodate:Q, ['Returning to the issue of'|AnsPDot] ):-
    input_form( AnsP, ask( Q ) ),
    append(AnsP,['.'],AnsPDot).
output_form( not C, ['Not'|S] ):- output_form( C, S ).
%
/*----------------------------------------------------------------------
   input_form( +Phrase, -Move )
   -- Almost canned input
----------------------------------------------------------------------*/
% Negation wrapper: "not <phrase>" maps to answer(not(C)).
input_form( [not|S], answer(not(C))):- input_form(S,answer(C)).
input_form( [yes], answer(yes) ).
input_form( [no], answer(no) ).
% simple stuff
input_form( [hello], greet ).
input_form( [good,bye], quit ).
input_form( [quit], quit ).
% NOTE(review): move name 'quita' looks like a typo for 'quit' -- verify
% against the dialogue-move inventory before changing.
input_form( [abort], quita ).
% ICM
input_form( [sorry], icm:per*neg ).
input_form( [okey], icm:acc*pos ).
input_form( [ok], icm:acc*pos ).
input_form( [dont,know], icm:acc*neg:issue ).
/******************************
       ACTIONS
******************************/
%%%%% Requests %%%%%
% User phrases that map to request/1 moves (possibly bundled with
% answer/1 moves when the phrase already carries a parameter).
input_form( [restart], request(restart) ).
input_form( [top], request(top) ).
input_form( [go,up], request(up) ).
input_form( Player, request(handle_player) ) :- lexsem( Player, player ).
input_form( Playlist, request(handle_playlist) ) :- lexsem( Playlist, playlist ).
input_form( [choose], request(listen_to) ).
% "play <X>": sort of X (index / group / song) decides the move bundle.
input_form( [play|X], [request(start_specific),answer(index(X))] ):-
    sem_sort(X,index).
input_form( [play|Group], [request(start),request(playlist_add),answer(group(Group))] ):-
    sem_sort(Group,group).
input_form( [play|Song], [request(start),request(playlist_add),answer(item(Song))] ):-
    sem_sort(Song,item).
input_form( [Station], request(handle_stations) ) :- lexsem( Station, station ).
input_form( [Play], request(start) ) :- lexsem( Play, play ).
input_form( [Stop], request(stop) ) :- lexsem( Stop, stop ).
input_form( [Pause], request(pause) ) :- lexsem( Pause, pause ).
input_form( [Resume], request(resume) ) :- lexsem( Resume, resume ).
input_form( [wind], request(fast_rewind) ).
input_form( [back], request(rewind) ).
input_form( [forward], request(fast_forward) ).
input_form( [next], request(next_song) ).
input_form( [previous], request(previous_song) ).
input_form( [play,playlist], request(start_playlist) ).
input_form( [a,playlist], request(start_playlist) ).
input_form( [add], request(playlist_add) ).
%input_form( [lägg,till], request(playlist_add) ).
input_form( [listen,to], request(playlist_add) ).
input_form( [show,the,list], request(show_list) ).
input_form( [show,the,playlist], request(show_list) ).
%input_form( [höra,på], request(listen_to) ).
input_form( [want,to,hear], request(listen_to) ).
input_form( [shuffle], request(playlist_shuffle) ).
%input_form( [List], request(playlist_del) ) :- lexsem(List,list).
%input_form( [låt], request(playlist_del_specific) ).
%% new plan that asks what the user wants to remove
%input_form( [ta,bort], request(remove) ).
input_form( [remove|List], request(playlist_del) ) :- lexsem(List,list).
input_form( [delete|List], request(playlist_del) ) :- lexsem(List,list).
input_form( [remove|X], [request(playlist_del_specific),
                         answer(index(X)) ] ):-
    sem_sort(X,index).
input_form( [delete|X], [request(playlist_del_specific),
                         answer(index(X)) ] ):-
    sem_sort(X,index).
input_form( [remove,a,song], request(playlist_del_specific) ).
input_form( [Inc], request(vol_up) ) :- lexsem(Inc,increase).
input_form( [Dec], request(vol_down) ) :- lexsem(Dec,decrease).
%%%%% Answers %%%%%
% Bare phrases classified as answers by their semantic sort (semsort_player).
input_form( X, answer(index(X)) ):- sem_sort(X,index).
input_form( Station, answer(station(Station)) ):- sem_sort(Station,station).
input_form( Group, answer(group(Group)) ):- sem_sort(Group,group).
input_form( Playlist, answer(playlist(Playlist)) ):- sem_sort(Playlist,playlist).
%%input_form( [Year], answer(year(Year)) ):- sem_sort(Year,year).
input_form( SongRadio, answer(item(SongRadio)) ):- sem_sort(SongRadio,item).
input_form( Album, answer(album(Album)) ):- sem_sort(Album,album).
% Spoken digit sequence interpreted as an IP address (longNum/2 from
% digits_svenska_player -- TODO confirm).
input_form( Station, answer(station(IP)) ):-
    longNum(Station,IP),
    sem_sort(IP,station).
%%%%% Questions to DB %%%%%
input_form( [what,album], ask(A^albums_by_artist(A)) ).
input_form( [search,albums], ask(A^albums_by_artist(A)) ).
input_form( [current,song], ask(X^current_song(X)) ).
input_form( [who,made,the,album], ask(A^artists_album(A)) ).
input_form( [who,made,the,song], ask(A^artists_song(A)) ).
input_form( [what,search,path], ask(A^path(A)) ).
%%% for more explicit input
input_form( [who,made,the,song|Song], [ask(A^artists_song(A)),answer(item(Song))] ):-
    sem_sort(Song,item).
input_form( [who,made,the,album|Album], [ask(A^artists_album(A)),answer(album(Album))] ):-
    sem_sort(Album,album).
%%% more general
input_form( [who,wrote|Song], [ask(A^artists_song(A)),answer(item(Song))] ):-
    sem_sort(Song,item).
input_form( [who,made|Song], [ask(A^artists_song(A)),answer(item(Song))] ):-
    sem_sort(Song,item).
input_form( [who,wrote|Album], [ask(A^artists_album(A)),answer(album(Album))] ):-
    sem_sort(Album,album).
input_form( [who,made|Album], [ask(A^artists_album(A)),answer(album(Album))] ):-
    sem_sort(Album,album).
input_form( [what,songs], ask(Songs^songs_by_artist(Songs)) ).
input_form( [xxxxxd], ask(X^what_to_play(X)) ).
%%% input_form( [vilka,grupper], ask(Groups^all_groups(Groups)) ).
/*
  Upcoming predicates...
input_form( [med,Group], answer(group(Group)) ):- sem_sort(Group,group).
input_form( [någonting,med], request(find_group) ).
*/
/*----------------------------------------------------------------------
   yn_answer( ?YN )
   -- Holds for the two valid yes/no answer atoms; also usable as a
   generator, enumerating yes before no.
----------------------------------------------------------------------*/
yn_answer(yes).
yn_answer(no).
/*----------------------------------------------------------------------
   lexsem( ?Word, ?Concept )
   -- Lexical semantics
----------------------------------------------------------------------*/
% use semantics as surface forms (only possible for english)
% A word denotes Concept iff it occurs in that concept's synonym set.
lexsem( Word, Concept ):-
    synset( Words, Concept ),
    member( Word, Words ).
% synset(+Synonyms, ?Concept): synonym sets; entries are either atoms
% (single words) or word lists (multi-word phrases).
synset( [[the,player],[the,music]], player ).
synset( [[the,playlist],[the,playlists],[the,list]], playlist ).
synset( [[the,list]], list ).
synset( [start,play], play ).
synset( [radio,station], station ).
synset( [stop,halt], stop ).
synset( [resume], resume ).
synset( [pause], pause ).
synset( [increase], increase ).
synset( [decrease,lower], decrease ).
| TeamSPoon/logicmoo_workspace | packs_sys/logicmoo_nlu/ext/SIRIDUS/UGOT-D31/godis-apps/domain-player/player-old/Resources/lexicon_player_english.pl | Perl | mit | 19,557 |
# Copyright 2018 Tomas Brabec
#
# See LICENSE for license details.
# -----------------------------------------------------------------------------
# Description:
# Quotes stock and currency symbols from DB.
#
# The list of stocks and currencies is taken from DB. Currencies are
# quoted only when the base currency is given (`-b` option).
#
# Quotes are obtained from the following sources with decreasing priority:
# - Cached quotes from DB
# - Yahoo Finance
# - AlphaVantage.
# -----------------------------------------------------------------------------
use DBI;
use Getopt::Long::Descriptive;
#use Switch;
#use Data::Dumper;
use Finance::Quote;
use Array::Utils;
use xfrs;
use strict;
use warnings;
# Command-line options: --db (SQLite file with transactions/symbols) and
# --base (report currency; currency conversion is skipped when empty).
my ($opt, $usage) = describe_options(
    '%c %o',
    [ 'db|d=s', "Sqlite3 DB file to import into", { default => "xfrs.sqlite3.db" } ],
    [ 'base|b=s', "base currency", { default => "" } ],
    [],
    [ 'help|h', "print usage message and exit", { shortcircuit => 1 } ],
);
# RaiseError makes DBI die on failures after a successful connect.
my $dbh = DBI->connect("dbi:SQLite:dbname=".$opt->db,"","",{ RaiseError => 1 }) or die $DBI::errstr;;
# Symbols to quote come from the DB (xfrs helper module).
my @stocks = xfrs::getStocks($dbh);
my @currencies = xfrs::getCurrencies($dbh);
my $q = Finance::Quote->new;
$q->timeout(30);
# Quote attributes we care about, in Finance::Quote naming.
my @attrs = ("last","currency");
# symbol -> { price => ..., currency => ... }
my %quotes;
# Remaps attributes from the info structure returned by a quote.
my %attrMap = (
    'last' => 'price',
    'currency' => 'currency'
);
# Quote stocks
# ------------
# obtain cached quotes
my %qtCacheStocks = xfrs::getCachedQuote($dbh,'',@stocks);
foreach my $s (keys %qtCacheStocks) {
    $quotes{$s} = $qtCacheStocks{$s};
    print "$s (cache) ....\n";
    foreach my $a (@attrs) {
        print "\t$s.$a = ".$quotes{$s}->{$attrMap{$a}}."\n";
    }
}
# obtain additional quotes (if needed) from
# - Yahoo finance
# - AlphaVantage
# Sources are tried in order; each pass only fetches the symbols that
# are still missing from %quotes.
foreach my $qtSrc ('yahoo_json', 'alphavantage') {
    if (scalar @stocks > scalar keys %quotes) {
        my @syms;
        foreach my $s (@stocks) {
            push(@syms,$s) unless (exists($quotes{$s}));
        }
        my %qs = $q->fetch($qtSrc,@syms);
        foreach my $s (@syms) {
            # $qs{$s,'success'} is Finance::Quote's flat multi-key hash
            # (keys joined with $;); success==1 marks a usable quote.
            next unless (exists($qs{$s,'success'}) && $qs{$s,'success'} == 1);
            print "$s ($qtSrc) ....\n";
            foreach my $a (@attrs) {
                if (exists($qs{$s,$a})) {
                    $quotes{$s}->{$attrMap{$a}} = $qs{$s,$a};
                    print "\t$s.$a = ".$qs{$s,$a}."\n";
                }
            }
        }
    }
}
# report failed stock quotes
if (scalar @stocks > scalar keys %quotes) {
    foreach my $s (@stocks) {
        print "$s ... failed\n" unless (exists($quotes{$s}));
    }
}
# Quote currencies
# ----------------
# currency symbol -> conversion rate into the base currency
my %curconv;
if ($opt->base ne "") {
    my %curQuotes;
    my @curs = @currencies;
    my $base = $opt->base;
    # add currencies of the stocks
    # (every stock's trading currency must also be convertible to base)
    foreach my $s (keys %quotes) {
        my $rec = $quotes{$s};
        if (exists $rec->{'currency'}) {
            push(@curs,$rec->{'currency'});
        }
    }
    @curs = Array::Utils::unique(@curs);
    # get cached quotes first
    # (cache key is the currency pair, e.g. "EURUSD")
    my @cacheSyms;
    foreach my $c (@curs) {
        push(@cacheSyms,$c.$base);
    }
    my %qtCacheCurs = xfrs::getCachedQuote($dbh,'',@cacheSyms);
    foreach my $c (@curs) {
        if (exists($qtCacheCurs{$c.$base})) {
            $curQuotes{$c} = $qtCacheCurs{$c.$base};
            print "$c (cache) ....\n";
            foreach my $a (@attrs) {
                print "\t$c.$a = ".$curQuotes{$c}->{$attrMap{$a}}."\n";
            }
        }
    }
    # append the base currency (if needed)
    # -- identity rate, so base-denominated positions pass through as-is
    if (!exists($curQuotes{$base})) {
        $curQuotes{$base} = {
            'price' => 1.0,
            'currency' => $base
        };
    }
    # quote conversion rates at Yahoo Finance (if needed)
    # Yahoo FX symbols have the form "<FROM><TO>=X".
    if (scalar @curs > scalar keys %curQuotes) {
        my %syms;
        foreach my $s (@curs) {
            $syms{$s}=$s.$base."=X" unless (exists($curQuotes{$s}));
        }
        my %qs = $q->fetch("yahoo_json",values %syms);
        #print Dumper(\%qs);
        foreach my $s (keys %syms) {
            next unless (exists($qs{$syms{$s},'success'}) && $qs{$syms{$s},'success'} == 1);
            print "$s (yahoo_json) ....\n";
            foreach my $a (@attrs) {
                if (exists($qs{$syms{$s},$a})) {
                    $curQuotes{$s}->{$attrMap{$a}} = $qs{$syms{$s},$a};
                    print "\t$s.$a = ".$qs{$syms{$s},$a}."\n";
                }
            }
        }
    }
# quote conversion rates (if needed) using the default API (AlphaVantage as of Finance::Quote 1.47)
if (scalar @curs > scalar keys %curQuotes) {
my @syms;
foreach my $s (@curs) {
push(@syms,$s) unless (exists($curQuotes{$s}));
}
foreach my $c (@syms) {
if ($base eq $c) {
$curQuotes{$c} = {
'price' => 1.0,
'currency' => $c
};
} else {
my $convrate = $q->currency($c,$base);
if (! defined $convrate ) { next; }
$curQuotes{$c} = {
'price' => $convrate,
'currency' => $base
};
}
print "$c/$base (defualt) ....\n";
foreach my $a (@attrs) {
print "\t$c.$a = ".$curQuotes{$c}->{$attrMap{$a}}."\n";
}
}
}
    # populate the conversion rate hash
    # (flatten %curQuotes into currency -> numeric rate-to-base)
    foreach my $c (keys %curQuotes) {
        $curconv{$c} = $curQuotes{$c}->{'price'};
    }
}
# Print balance
# --------------
# get the actual balance
# (symbol -> held amount; populated by the xfrs helper module)
my %balance;
xfrs::getBalance($dbh, \%balance);
# collect NAV (net asset value)
my $nav = 0;
# print the cash balance
# CSV-ish row: symbol, amount, currency[, base value, base currency]
foreach my $c (@currencies) {
    print "$c,$balance{$c},$c";
    if (exists($curconv{$c})) {
        my $v = $balance{$c}*$curconv{$c};
        print ",".$v.",".$opt->base;
        $nav += $v;
    }
    print "\n";
}
# print the equity balance
# Missing quotes degrade gracefully: price defaults to 0, currency to "".
foreach my $s (@stocks) {
    my $p = $quotes{$s}->{'price'} || 0;
    my $c = $quotes{$s}->{'currency'} || "";
    print "$s,".($balance{$s}*$p).",$c";
    if (exists $curconv{$c} && defined $curconv{$c}) {
        my $v = $balance{$s}*$p*$curconv{$c};
        print ",".$v.",".$opt->base;
        $nav += $v;
    }
    print "\n";
}
print "\nnav = $nav".$opt->base."\n";
#my $conversion_rate = $q->currency("AUD","USD");
#$q->set_currency("EUR"); # Return all info in Euros.
#
#$q->require_labels(qw/price date high low volume/);
#
#$q->failover(1); # Set failover support (on by default).
#
#my @stocks = ["RGR", "AOBC"];
#
#my %quotes = $q->fetch("nasdaq",@stocks);
##my $hashref = $q->fetch("nyse",@stocks);
#
#foreach my $k (keys %quotes) {
# print "$k = $quotes{$k}\n";
#}
| brabect1/invest-utils-perl | get-quotes.pl | Perl | mit | 6,775 |
#!/usr/bin/perl
# Doxygen input filter: translates a MATLAB .m file (given as the single
# command-line argument) into C++-like pseudo-code on stdout.
if ($#ARGV != 0)
{
    die "Argument must contain filename $#ARGV"
}
else
{
    $fname=$ARGV[0];
}
# If we have a .m file inside a (@)-folder with the same name :
# we will read each file of this folder
# (MATLAB class folder "@Name/Name.m": process the classdef file first,
# then every other method .m file in the folder, so the generated class
# body contains all methods)
if ($fname =~ /^(.*)\@([\d\w-_]*)[\/\\](\2)\.m/)
{
    $name = $2;
    $nameExt = $name.".m";
    $dir = $1."@".$name."/\*.m";
    @fic = glob($dir);
    $i = 0;
    # element assignment: scalar sigil, not a one-element slice @listeFic[0]
    $listeFic[0] = $fname;
    foreach $my_test (@fic)
    {
        # NOTE(review): $nameExt is used as a regex pattern, so '.' matches
        # any character -- works for skipping the classdef file itself,
        # but is not an exact filename comparison.
        if (!($my_test =~ $nameExt))
        {
            $i++;
            $listeFic[$i] = $my_test;
        }
    }
}
# otherwise @-folder, but .m with a different name : ignore it
elsif ($fname =~ /^(.*)\@([\d\w-_]*)[\/\\](.*)\.m/)
{
}
# otherwise
else
{
    $listeFic[0] = $fname;
}
# Main translation loop: scan each queued .m file line by line, tracking
# a small state machine (inside class / properties / enumeration /
# events / abstract-methods block) and appending C++-like text to $output.
$output = "";
foreach $my_fic (@listeFic)
{
    # NOTE(review): open() result is unchecked; an unreadable file is
    # silently treated as empty.
    open(my $in, $my_fic);
    $declTypeDef="";
    $inClass = 0;
    $inAbstractMethodBlock = 0;
    $listeProperties = 0;
    $listeEnumeration = 0;
    $methodAttribute = "";
    # NOTE(review): $listeEvents is *not* reset here, unlike the other
    # state flags -- its value can leak from a previous file; verify.
    while (<$in>)
    {
        # "%>" doc comments become Doxygen "///" comments, keeping indent.
        if (/(^\s*)(%>)(.*)/)
        {
            $output=$output."$1///$3";
        }
        # "end" closes an open properties block.
        if (($listeProperties == 1) && (/(^\s*end\s*)/))
        {
            $listeProperties = 0;
        }
        # "end" closes an open abstract-methods block.
        if (($inAbstractMethodBlock == 1) && (/(^\s*end\s*)/))
        {
            $inAbstractMethodBlock = 0;
        }
        # Property line: "name [= default] [%> comment]".
        if (($listeProperties == 1) && (/^\s*([\w\d]*)\s*(=\s*[\w\d{}'',\s\[\]\.]*)?.*(%>.*)?/))
        {
            $propertyName = $1;
            $propertyValue = $2;
            $propertyComment = $3;
            if (!($propertyName =~ /^$/))
            {
                # Constant properties keep their initializer in the output.
                if ($typeProperties =~ /Constant/)
                {
                    $properties = $propertyName."$propertyValue;$propertyComment";
                }
                else
                {
                    $properties = $propertyName.";$propertyComment";
                }
                # MATLAB comment markers -> C++ comment markers.
                $properties =~ s/%>/\/\/\//g;
                $properties =~ s/%/\/\//g;
                $output=$output.$typeProperties."Property ".$properties;
            }
        }
        # "end" closes an enumeration / events block -> emit "};".
        if (($listeEnumeration == 1) && (/(^\s*end\s*)/))
        {
            $listeEnumeration = 0;
            $output=$output."};";
        }
        if (($listeEvents == 1) && (/(^\s*end\s*)/))
        {
            $listeEvents = 0;
            $output=$output."};";
        }
        # Event name -> enum member.
        if (($listeEvents == 1) && (/^\s*([\w\d]*)\s*/))
        {
            $name_event = $1;
            if (!($name_event =~ /^$/))
            {
                $event = $name_event.",";
                $event =~ s/%>/\/\/\//g;
                $event =~ s/%/\/\//g;
                $output=$output.$event;
            }
        }
        # Enumeration member: "name" or "name(args)" -> enum member
        # (with "=args" when a value is given).
        if (($listeEnumeration == 1) && (/^\s*([\w\d]*)\s*(\(.*\))?(%>.*)?/))
        {
            $name_enum = $1;
            $val_enum = $2;
            if (!($name_enum =~ /^$/))
            {
                if (!($val_enum =~ /^$/))
                {
                    $enum = "$name_enum=$val_enum,";
                    $enum =~ s/%>/\/\/\//g;
                    $enum =~ s/%/\/\//g;
                    $output=$output.$enum;
                }
                else
                {
                    $enum = "$name_enum,";
                    $enum =~ s/%>/\/\/\//g;
                    $enum =~ s/%/\/\//g;
                    $output=$output.$enum;
                }
            }
        }
        # "function [out] = name(args)" -> C++-like prototype; arguments
        # get a pseudo "in" type, "~" placeholders become "ignoredArg".
        if (/(^\s*function)\s*([\] \w\d,_\[]+=)?\s*([.\w\d_-]*)\s*\(?([\w\d\s,~]*)\)?(%?.*)/)
        {
            $functionKeyWord = $1;
            $functionName = $3;
            $arguments = $4;
            if ($inClass == 0)
            {
                $output = $declTypeDef.$output;
                $declTypeDef = "";
            }
            $arguments =~ s/,/,in /g;
            $arguments =~ s/~/ignoredArg/g;
            $arguments = "in $arguments";
            if ($arguments =~ /^in $/)
            {
                $arguments = "";
            }
            $ligne = "$methodAttribute $functionKeyWord $functionName($arguments);";
            $output=$output.$ligne;
        }
        # Signature of functions in abstract methods
        # NOTE(review): '&' here is the bitwise AND operator, not logical
        # '&&' -- both operands are always evaluated; likely intended '&&'.
        # NOTE(review): $functionKeyWord is reused from the last matched
        # "function" line (abstract signatures have no keyword) -- verify.
        elsif ((/^\s*([\] \w\d,_\[]+=)?\s*([.\w\d_-]+)\s*\(?([\w\d\s,~]*)\)?(%?.*)/) & ($inAbstractMethodBlock == 1) )
        {
            $functionName = $2;
            $arguments = $3;
            $arguments =~ s/,/,in /g;
            $arguments =~ s/~/ignoredArg/g;
            $arguments = "in $arguments";
            if ($arguments =~ /^in $/)
            {
                $arguments = "";
            }
            $ligne = "$methodAttribute $functionKeyWord $functionName($arguments);";
            $output=$output.$ligne;
        }
        # inheritance for classes
        # "classdef (attrs) Name < Base1 & Base2" -> "class Name:public ...".
        if (/(^\s*classdef)\s*(\s*\([\{\}\?\w,=\s]+\s*\))?\s*([\w\d_]+)\s*<?\s*([\s\w\d_&]+)?(.*)/)
        {
            $className = $3;
            $classInheritance = $4;
            $classAttributes = $2;
            if (!($classInheritance =~ /^$/))
            {
                $classInheritance =~ s/&/,public /g;
                $classDef = "class ".$className.":public $classInheritance";
            }
            else
            {
                $classDef = "class ".$className;
            }
            $output=$output.$classDef;
            $output=$output."{";
            $output=$output.$declTypeDef;
            $output=$output."public:\n";
            $inClass = 1;
        }
        # "properties (attrs)": map MATLAB Access/Constant attributes to
        # a C++ access specifier kept in $typeProperties.
        if (/(^\s*properties)\s*(\s*\([\w,=\s]+\s*\))?(.*)/)
        {
            $listeProperties = 1;
            $propertiesAttributes = $2;
            $typeProperties = "public:\n";
            if (lc($propertiesAttributes) =~ /(access\s*=\s*private)/)
            {
                $typeProperties = "private:\n"
            }
            elsif (lc($propertiesAttributes) =~ /(access\s*=\s*public)/)
            {
                $typeProperties = "public:\n"
            }
            elsif (lc($propertiesAttributes) =~ /(access\s*=\s*protected)/)
            {
                $typeProperties = "protected:\n"
            }
            if ((lc($propertiesAttributes) =~ /(constant\s*=\s*false)/) || (lc($propertiesAttributes) =~ /(~constant)/))
            {
            }
            elsif (lc($propertiesAttributes) =~ /(constant(\s*=\s*true\s*)?)/)
            {
                $typeProperties = $typeProperties." Constant ";
            }
        }
        # "enumeration" / "events" open the corresponding enum output.
        if (/(^\s*enumeration)\s*(.*)/)
        {
            $listeEnumeration = 1;
            $output=$output."public:\nenum ".$className." {";
        }
        if (/(^\s*events)\s*(.*)/)
        {
            $listeEvents = 1;
            $output=$output."public:\nenum Events {";
        }
        # "methods (attrs)": map Access/Abstract/Static attributes to the
        # C++ method prefix kept in $methodAttribute.
        if (/(^\s*methods)\s*(\s*\([\w,=\s]+\s*\))?(.*)/)
        {
            $methodAttribute = "public:\n";
            $methodsAttributes = $2;
            if (lc($methodsAttributes) =~ /(access\s*=\s*private)/)
            {
                $methodAttribute = "private:\n"
            }
            elsif (lc($methodsAttributes) =~ /(access\s*=\s*protected)/)
            {
                $methodAttribute = "protected:\n"
            }
            elsif (lc($methodsAttributes) =~ /(access\s*=\s*public)/)
            {
                $methodAttribute = "public:\n"
            }
            if (lc($methodsAttributes) =~ /(abstract(\s*=\s*true\s*)?)/)
            {
                $inAbstractMethodBlock = 1;
                $methodAttribute = $methodAttribute." virtual ";
            }
            if ((lc($methodsAttributes) =~ /(static\s*=\s*false)/) || (lc($methodsAttributes) =~ /(~static)/))
            {
            }
            elsif (lc($methodsAttributes) =~ /(static(\s*=\s*true\s*)?)/)
            {
                $methodAttribute = $methodAttribute." static";
            }
        }
        $output=$output."\n";
    }
    close $in;
}
# Close the (single) generated class body and emit everything.
$output=$output."};\n";
print $output;
| navoj/WholeCell | lib/DoxygenMatlab/m2cpp.pl | Perl | mit | 6,871 |
/* Part of Extended libraries for Prolog
Author: Edison Mera Menendez
E-mail: efmera@gmail.com
WWW: https://github.com/edisonm/xlibrary
Copyright (C): 2014, Process Design Center, Breda, The Netherlands.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
:- module(i18n_test, [i18n_test/1]).
:- use_module(library(filesex)).
:- use_module(library(i18n/i18n_expansion)).
:- use_module(library(i18n/i18n_support)).
% Remove any previously asserted language / resource-dir facts so the
% hook clauses defined below are the only ones in effect for this test.
:- retractall(i18n_support:language(_)). % overriding language
:- retractall(i18n_support:i18n_resource_dir(_)). % overriding resource dir
% Hook: the i18n resource directory is the directory containing this
% very source file (derived from this module's file property).
i18n_support:i18n_resource_dir(Dir) :-
    context_module(M),
    module_property(M, file(F)),
    directory_file_path(Dir, _, F).
i18n_support:language(es). % Spanish
% t1: ~(hello) is rewritten by i18n_expansion using the es resources
% configured above; expected to yield the Spanish atom hola.
i18n_test(t1) :-
    A = ~(hello),
    assertion(A == hola).
% t2: same translation checked from the opposite direction.
i18n_test(t2) :-
    A = hola,
    assertion(A == ~(hello)).
| TeamSPoon/logicmoo_workspace | packs_lib/xlibrary/tests/i18n/i18n_test.pl | Perl | mit | 2,214 |
# Net::PMP::Profile::Story - PMP "story" profile document.
# Extends the base profile with story-specific optional text fields.
package Net::PMP::Profile::Story;
use Moose;
extends 'Net::PMP::Profile';

our $VERSION = '0.100';

# All three attributes are optional plain strings (see POD below).
has 'teaser'           => ( is => 'rw', isa => 'Str', );    # brief summary
has 'contentencoded'   => ( is => 'rw', isa => 'Str', );    # full HTML-encoded content
has 'contenttemplated' => ( is => 'rw', isa => 'Str', );    # content with rich-media placeholders

# Canonical URL identifying the PMP story profile.
sub get_profile_url {'https://api.pmp.io/profiles/story'}

__PACKAGE__->meta->make_immutable();

1;
__END__
=head1 NAME
Net::PMP::Profile::Story - Story Profile for PMP CollectionDoc
=head1 SYNOPSIS
use Net::PMP;
use Net::PMP::Profile::Story;
my $story = Net::PMP::Profile::Story->new(
title => 'I am A Title',
published => '2013-12-03T12:34:56.789Z',
valid => {
from => "2013-04-11T13:21:31.598Z",
to => "3013-04-11T13:21:31.598Z",
},
byline => 'By: John Writer and Nancy Author',
description => 'This is a summary of the document.',
tags => [qw( foo bar baz )],
teaser => 'important story to read here!',
contentencoded => $html,
contenttemplated => $templated_html,
);
# instantiate a client
my $client = Net::PMP->client(
host => $host,
id => $client_id,
secret => $client_secret,
);
# save doc
$client->save($story);
=cut
=head1 DESCRIPTION
Net::PMP::Profile::Story implements the CollectionDoc fields for the PMP Story Profile
L<https://github.com/publicmediaplatform/pmpdocs/wiki/Story-Profile>.
=head1 METHODS
This class extends L<Net::PMP::Profile>. Only new or overridden methods are documented here.
=head2 teaser
Optional brief summary.
=head2 contentencoded
Optional full HTML-encoded string.
=head2 contenttemplated
Optional content with placeholders for rich media assets.
=head2 get_profile_url
Returns a string for the PMP profile's URL.
=head1 AUTHOR
Peter Karman, C<< <karman at cpan.org> >>
=head1 BUGS
Please report any bugs or feature requests to C<bug-net-pmp at rt.cpan.org>, or through
the web interface at L<http://rt.cpan.org/NoAuth/ReportBug.html?Queue=Net-PMP-Profile>. I will be notified, and then you'll
automatically be notified of progress on your bug as I make changes.
=head1 SUPPORT
You can find documentation for this module with the perldoc command.
perldoc Net::PMP
You can also look for information at:
=over 4
=item IRC
Join #pmp on L<http://freenode.net>.
=item * RT: CPAN's request tracker (report bugs here)
L<http://rt.cpan.org/NoAuth/Bugs.html?Dist=Net-PMP-Profile>
=item * AnnoCPAN: Annotated CPAN documentation
L<http://annocpan.org/dist/Net-PMP-Profile>
=item * CPAN Ratings
L<http://cpanratings.perl.org/d/Net-PMP-Profile>
=item * Search CPAN
L<http://search.cpan.org/dist/Net-PMP-Profile/>
=back
=head1 ACKNOWLEDGEMENTS
American Public Media and the Public Media Platform sponsored the development of this module.
=head1 LICENSE AND COPYRIGHT
Copyright 2013 American Public Media Group
See the LICENSE file that accompanies this module.
=cut
| APMG/net-pmp-profile-perl | lib/Net/PMP/Profile/Story.pm | Perl | mit | 2,942 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
Bio::EnsEMBL::Feature - Ensembl specific sequence feature.
=head1 SYNOPSIS
my $feat = new Bio::EnsEMBL::Feature(
-start => 100,
-end => 220,
-strand => -1,
-slice => $slice,
-analysis => $analysis
);
my $start = $feat->start();
my $end = $feat->end();
my $strand = $feat->strand();
# Move the feature to the chromosomal coordinate system
$feature = $feature->transform('chromosome');
# Move the feature to a different slice (possibly on another coord
# system)
$feature = $feature->transfer($new_slice);
# Project the feature onto another coordinate system possibly across
# boundaries:
@projection = @{ $feature->project('contig') };
# Change the start, end, and strand of the feature in place
$feature->move( $new_start, $new_end, $new_strand );
=head1 DESCRIPTION
This is the Base feature class from which all Ensembl features inherit.
It provides a bare minimum functionality that all features require. It
basically describes a location on a sequence in an arbitrary coordinate
system.
=head1 METHODS
=cut
package Bio::EnsEMBL::Feature;
use strict;
use warnings;
use Bio::EnsEMBL::Storable;
use Bio::EnsEMBL::Utils::Argument qw(rearrange);
use Bio::EnsEMBL::Utils::Exception qw(throw deprecate warning);
use Bio::EnsEMBL::Utils::Scalar qw(check_ref assert_ref);
use Bio::EnsEMBL::Slice;
use Bio::EnsEMBL::StrainSlice;
use vars qw(@ISA);
use Scalar::Util qw(weaken);
@ISA = qw(Bio::EnsEMBL::Storable);
=head2 new
Arg [-SLICE]: Bio::EnsEMBL::Slice - Represents the sequence that this
feature is on. The coordinates of the created feature are
relative to the start of the slice.
Arg [-START]: The start coordinate of this feature relative to the start
of the slice it is sitting on. Coordinates start at 1 and
are inclusive.
Arg [-END] : The end coordinate of this feature relative to the start of
the slice it is sitting on. Coordinates start at 1 and are
inclusive.
Arg [-STRAND]: The orientation of this feature. Valid values are 1,-1,0.
Arg [-SEQNAME] : A seqname to be used instead of the default name
of the slice. Useful for features that do not have an
attached slice such as protein features.
Arg [-dbID] : (optional) internal database id
Arg [-ADAPTOR]: (optional) Bio::EnsEMBL::DBSQL::BaseAdaptor
Example : $feature = Bio::EnsEMBL::Feature->new(-start => 1,
-end => 100,
-strand => 1,
-slice => $slice,
-analysis => $analysis);
Description: Constructs a new Bio::EnsEMBL::Feature. Generally subclasses
of this method are instantiated, rather than this class itself.
Returntype : Bio::EnsEMBL::Feature
Exceptions : Thrown on invalid -SLICE, -ANALYSIS, -STRAND ,-ADAPTOR arguments
Caller : general, subclass constructors
Status : Stable
=cut
sub new {
  my $caller = shift;
  my $class  = ref($caller) || $caller;

  my ( $start, $end, $strand, $slice, $analysis, $seqname, $dbID, $adaptor ) =
    rearrange( [ 'START', 'END', 'STRAND', 'SLICE', 'ANALYSIS', 'SEQNAME',
                 'DBID',  'ADAPTOR' ],
               @_ );

  # -SLICE must be a Slice (or LRGSlice) when supplied.
  if ($slice) {
    if ( !ref($slice)
         || !(    $slice->isa('Bio::EnsEMBL::Slice')
               or $slice->isa('Bio::EnsEMBL::LRGSlice') ) )
    {
      throw( '-SLICE argument must be a Bio::EnsEMBL::Slice not ' . $slice );
    }
  }

  # -ANALYSIS must be an Analysis object when supplied.
  if ($analysis) {
    if ( !ref($analysis) || !$analysis->isa('Bio::EnsEMBL::Analysis') ) {
      throw( '-ANALYSIS argument must be a Bio::EnsEMBL::Analysis not '
             . $analysis );
    }
  }

  # Strand is restricted to forward (1), reverse (-1) or unknown (0).
  if ( defined($strand) ) {
    if ( !( $strand == 1 ) && !( $strand == -1 ) && !( $strand == 0 ) ) {
      throw('-STRAND argument must be 1, -1, or 0');
    }
  }

  if ( defined($start) && defined($end) ) {
    # NOTE(review): these patterns are unanchored, so any string merely
    # containing a digit passes as an "integer"; tightening to /^-?\d+$/
    # could break existing callers, so it is left unchanged.
    if ( ( $start =~ /\d+/ ) && ( $end =~ /\d+/ ) ) {
      # BUGFIX: is_circular() used to be called unconditionally here, so a
      # feature with start > end+1 and *no* attached slice crashed with
      # "Can't call method on an undefined value" instead of raising the
      # intended exception. Guard on the slice being defined first.
      if ( $end + 1 < $start
           and ( !defined($slice) || !$slice->is_circular() ) )
      {
        throw( sprintf( 'Start (%d) must be less than or equal to end+1 (%d)',
                        $start, ( $end + 1 ) ) );
      }
    }
    else {
      throw('Start and end must be integers');
    }
  }

  # Bless the minimal attribute set; subclasses layer on their own fields.
  my $self = bless( { 'start'    => $start,
                      'end'      => $end,
                      'strand'   => $strand,
                      'slice'    => $slice,
                      'analysis' => $analysis,
                      'seqname'  => $seqname,
                      'dbID'     => $dbID },
                    $class );

  $self->adaptor($adaptor);

  return $self;
}
=head2 start
Arg [1] : (optional) int $start
The start of this feature relative to the start of the slice
that it is on.
Example : $start = $feat->start()
Description: Getter/Setter for the start of this feature relative to the
start of the slice it is on. Note that negative values, or
values exceeding the length of the slice are permitted.
Start must be less than or equal to the end regardless of the
strand. Coordinate values start at 1 and are inclusive.
Returntype : int
Exceptions : none
Caller : general
Status : Stable
=cut
# Accessor/mutator for the feature start, relative to the slice start.
# A defined argument replaces the stored value; the (possibly new)
# value is always returned. Coordinates are 1-based and inclusive.
sub start {
  my $self = shift;
  if ( defined( $_[0] ) ) {
    $self->{'start'} = $_[0];
  }
  return $self->{'start'};
}
=head2 end
Arg [1] : (optional) int $end
Example : $end = $feat->end();
Description: Getter/Setter for the end of this feature relative to the
start of the slice that it is on. Note that negative values,
or values exceeding the length of the slice are permitted. End
must be greater than or equal to start regardless of the strand.
Coordinate values start at 1 and are inclusive.
Returntype : int
Exceptions : none
Caller : general
Status : Stable
=cut
# Accessor/mutator for the feature end, relative to the slice start.
# A defined argument replaces the stored value; the (possibly new)
# value is always returned. Coordinates are 1-based and inclusive.
sub end {
  my $self = shift;
  if ( defined( $_[0] ) ) {
    $self->{'end'} = $_[0];
  }
  return $self->{'end'};
}
=head2 strand
Arg [1] : (optional) int $strand
Example : $feat->strand(-1);
Description: Getter/Setter for the strand of this feature relative to the
slice it is on. 0 is an unknown or non-applicable strand.
-1 is the reverse (negative) strand and 1 is the forward
(positive) strand. No other values are permitted.
Returntype : int
Exceptions : thrown if an invalid strand argument is passed
Caller : general
Status : Stable
=cut
# Accessor/mutator for the strand relative to the attached slice.
# Only 1 (forward), -1 (reverse) and 0 (unknown/not applicable) are
# accepted; anything else raises an exception.
sub strand {
  my $self       = shift;
  my $new_strand = shift;
  if ( defined $new_strand ) {
    unless ( $new_strand == 0 || $new_strand == 1 || $new_strand == -1 ) {
      throw('strand argument must be 0, -1 or 1');
    }
    $self->{'strand'} = $new_strand;
  }
  return $self->{'strand'};
}
=head2 move
Arg [1] : int start
Arg [2] : int end
Arg [3] : (optional) int strand
Description: Sets the start, end and strand in one call rather than in
3 separate calls to the start(), end() and strand() methods.
This is for convenience and for speed when this needs to be
done within a tight loop.
Returntype : none
Exceptions : Thrown if invalid arguments are provided
Caller : general
Status : Stable
=cut
# Set start, end and (optionally) strand in a single call — a convenience
# and a speed win inside tight loops compared with three accessor calls.
# Throws if fewer than two positional arguments are given, if
# start > end, or if the strand is not one of 0, -1, 1.
sub move {
  my $self = shift;

  throw('start and end arguments are required') if @_ < 2;

  my ( $new_start, $new_end, $new_strand ) = @_;

  if ( defined($new_start) && defined($new_end) && $new_end < $new_start ) {
    throw('start must be less than or equal to end');
  }
  if ( defined($new_strand)
       && $new_strand != 0 && $new_strand != -1 && $new_strand != 1 )
  {
    throw('strand must be 0, -1 or 1');
  }

  @{$self}{ 'start', 'end' } = ( $new_start, $new_end );
  $self->{'strand'} = $new_strand if defined $new_strand;
}
=head2 length
Arg [1] : none
Example : $length = $feat->length();
Description: Returns the length of this feature
Returntype : Integer
Exceptions : Throws if end < start and the feature is not on a
circular slice
Caller : general
Status : Stable
=cut
# Length of the feature in base pairs.
# For the normal case (end >= start) this is simply end - start + 1.
# When start > end the feature is assumed to span the origin of a
# circular slice and the length is derived from the region size;
# on a non-circular (or absent) slice this is an error.
sub length {
  my ($self) = @_;

  my ( $fstart, $fend ) = @{$self}{ 'start', 'end' };

  return $fend - $fstart + 1 if $fend >= $fstart;

  # Origin-spanning feature: only meaningful on a circular region.
  if ( $self->slice() && $self->slice()->is_circular() ) {
    return $self->slice()->seq_region_length() - ( $fstart - $fend ) + 1;
  }

  throw(   "Cannot determine length of non-circular feature "
         . "where start > end" );
}
=head2 analysis
Arg [1] : (optional) Bio::EnsEMBL::Analysis $analysis
Example : $feature->analysis(new Bio::EnsEMBL::Analysis(...))
Description: Getter/Setter for the analysis that is associated with
this feature. The analysis describes how this feature
was derived.
Returntype : Bio::EnsEMBL::Analysis
Exceptions : thrown if an invalid argument is passed
Caller : general
Status : Stable
=cut
# Accessor/mutator for the Analysis associated with this feature
# (describes how the feature was derived). Setting undef clears the
# association; any defined value must be a Bio::EnsEMBL::Analysis.
sub analysis {
  my $self = shift;

  if (@_) {
    my $new = shift;
    if ( defined($new)
         && ( !ref($new) || !$new->isa('Bio::EnsEMBL::Analysis') ) )
    {
      throw('analysis argument must be a Bio::EnsEMBL::Analysis');
    }
    $self->{'analysis'} = $new;
  }

  return $self->{'analysis'};
}
=head2 slice
Arg [1] : (optional) Bio::EnsEMBL::Slice $slice
Example : $seqname = $feature->slice()->name();
Description: Getter/Setter for the Slice that is associated with this
feature. The slice represents the underlying sequence that this
feature is on. Note that this method call is analogous to the
old SeqFeature methods contig(), entire_seq(), attach_seq(),
etc.
Returntype : Bio::EnsEMBL::Slice
Exceptions : thrown if an invalid argument is passed
Caller : general
Status : Stable
=cut
# Accessor/mutator for the Slice this feature sits on.
# Passing a defined value stores it (after type-checking it is a
# Slice/LRGSlice); explicitly passing undef deletes the attached slice.
sub slice {
  my $self = shift;

  if (@_) {
    my $new_slice = shift;
    if ( defined($new_slice) ) {
      if (    !check_ref( $new_slice, 'Bio::EnsEMBL::Slice' )
           && !check_ref( $new_slice, 'Bio::EnsEMBL::LRGSlice' ) )
      {
        throw('slice argument must be a Bio::EnsEMBL::Slice');
      }
      $self->{'slice'} = $new_slice;
    }
    else {
      # An explicit undef argument detaches the slice entirely.
      delete( $self->{'slice'} );
    }
  }

  return $self->{'slice'};
}
=head2 equals
Arg [1] : Bio::EnsEMBL::Feature object
Example : if ($featureA->equals($featureB)) { ... }
Description : Compares two features using various criteria. The
test for equality goes through the following list and
terminates at the first true match:
1. If the two features are the same object, they are
equal.
2. If they are of different types (e.g., transcript
and gene), they are *not* equal.
3. If they both have dbIDs: if these are the same,
then they are equal, otherwise not.
4. If they both have slices and analysis objects:
if the analysis dbIDs are the same and the
seq_region_id are the same, along with
seq_region_start and seq_region_end, then they are
equal, otherwise not.
If none of the above is able to determine equality,
undef is returned.
Return type : tri-Boolean (0, 1, undef = "unknown")
Exceptions : Thrown if a non-feature is passed as the argument.
=cut
# Tri-state feature equality (see POD above): returns 1 (equal),
# 0 (not equal) or undef (cannot be determined). The checks are ordered
# and the first decisive one wins: identity, class, dbID, then
# location + analysis.
sub equals {
  my ( $self, $feature ) = @_;

  # If the features are the same object, they are equal.
  if ( !defined($feature) ) { return 0 }
  if ( $self eq $feature ) { return 1 }

  assert_ref( $feature, 'Bio::EnsEMBL::Feature' );

  # If the features have different types, they are *not* equal.
  if ( ref($self) ne ref($feature) ) {
    return 0;
  }

  # If the features has the same dbID, they are equal.
  if ( defined( $self->dbID() ) && defined( $feature->dbID() ) ) {
    if ( $self->dbID() == $feature->dbID() ) { return 1 }
    else                                     { return 0 }
  }

  # We now know that one of the features do not have a dbID.

  # If the features have the same start, end, strand and seq_region_id,
  # and analysis_id, they are equal.
  if (
    ( defined( $self->analysis() ) && defined( $feature->analysis() ) )
    && ( defined( $self->slice() ) && defined( $feature->slice() ) ) )
  {
    if ( ( $self->start() == $feature->start() ) &&
         ( $self->end() == $feature->end() ) &&
         ( $self->strand() == $feature->strand() ) &&
         ( $self->slice()->get_seq_region_id() ==
           $feature->slice()->get_seq_region_id() ) &&
         ( $self->analysis()->dbID() == $feature->analysis()->dbID() ) )
    {
      return 1;
    }
    else { return 0 }
  }

  # We now know that one of the features does not have either analysis
  # or slice.

  # We don't know if the features are equal. This happens if they are
  # not the same object but are of the same type, and one of them lacks
  # dbID, and if there aren't slice and analysis objects attached to
  # them both.
  return undef;
} ## end sub equals
=head2 transform
Arg [1] : string $coord_system
The coord system to transform this feature to.
Arg [2] : string $version (optional)
The version of the coord system to transform this feature to.
Arg [3] : Bio::EnsEMBL::Slice (optional)
Specified when a projection may land on many overlapping slices
and disambiguation is required.
Example : $feature = $feature->transform('contig');
next if(!defined($feature));
Description: Returns a copy of this feature, but converted to a different
coordinate system. The converted feature will be placed on a
slice which spans an entire sequence region of the new
coordinate system. If the requested coordinate system is the
same coordinate system it is simply placed on a slice which
spans the entire seq_region (as opposed to the original slice
which may have only partially covered the seq_region).
If a feature spans a boundary in the new coordinate system,
undef is returned instead.
For example, transforming an exon in contig coordinates to one
in chromosomal coodinates will place the exon on a slice of an
entire chromosome.
Returntype : Bio::EnsEMBL::Feature (or undef)
Exceptions : thrown if an invalid coordinate system is provided
warning if Feature is not attached to a slice
Caller : general, transfer()
Status : Stable
=cut
# Convert this feature to another coordinate system (see POD above).
# Returns a shallow copy placed on a whole-seq_region slice of the target
# system, or undef when the feature cannot be unambiguously projected.
# $to_slice disambiguates when the projection lands on several
# overlapping slices.
sub transform {
  my $self = shift;
  my $cs_name = shift;
  my $cs_version = shift;
  my $to_slice = shift;

  #
  # For backwards compatibility check if the arguments are old style args
  #
  if(!$cs_name || ref($cs_name)) {
    deprecate('Calling transform without a coord system name is deprecated.');
    return $self->_deprecated_transform($cs_name);
  }

  my $slice = $self->{'slice'};
  if(!$slice) {
    warning("Feature cannot be transformed without attached slice.");
    return undef;
  }
  if(!$slice->adaptor()) {
    warning("Feature cannot be transformed without adaptor on" .
            " attached slice.");
    return undef;
  }

  #use db from slice since this feature may not yet be stored in a database
  my $db = $slice->adaptor->db();
  my $cs = $db->get_CoordSystemAdaptor->fetch_by_name($cs_name, $cs_version);
  my $current_cs = $slice->coord_system();

  if(!$current_cs) {
    warning("Feature cannot be transformed without CoordSystem on " .
            "attached slice.");
    return undef;
  }

  if(!$cs) {
    throw("Cannot transform to unknown coordinate system " .
          "[$cs_name $cs_version]\n");
  }

  # if feature is already in the requested coordinate system, we can just
  # return a copy
  if( $cs->equals( $current_cs ) && $slice->start() == 1 &&
      $slice->strand() == 1 ) {
    # Shallow-copy and re-bless so subclass identity is preserved.
    my $new_feature;
    %$new_feature = %$self;
    bless $new_feature, ref $self;
    return $new_feature;
  }

  # Project either onto the explicit target slice or onto the named
  # coordinate system.
  my $projection;
  if(defined($to_slice)){
    $projection = $self->project_to_slice( $to_slice ); }
  else{
    $projection = $self->project( $cs_name, $cs_version );
  }

  # Feature fell entirely into a gap.
  if(@$projection == 0){
    return undef;
  }

  # Ambiguous projection with no disambiguating slice: give up.
  if( @$projection != 1 and !defined($to_slice)) {
#    warn "MORE than one projection and NO slice specified ";
#    warn "from ".$self->slice->name." to $cs_name, $cs_version\n";
    return undef;
  }

  # When a target slice was given, locate the projection segment that
  # lands on that slice's seq_region; warn and bail out if none does.
  my $index = 0;
  if(defined($to_slice)){
    my $found = 0;
    my $i = 0;
    foreach my $proj (@{$projection}) {
      my $slice = $proj->[2];
      if($to_slice->get_seq_region_id eq $slice->get_seq_region_id){
        $found =1;
        $index = $i;
      }
      $i++;
    }
    if(!$found){
      if(@$projection != 1){
        if(@$projection == 0){
          warn "number of mappings is ".@$projection."\n";
          warn "could not project feature ".ref($self)." from ".$self->slice->seq_region_name." to ".$to_slice->seq_region_name."\n";
          warn "In the region of ".$self->slice->start." <-> ".$self->slice->end."\n";
          warn "feat start=".($self->slice->start+$self->start)."\tend=".($self->slice->start+$self->end)."\n";
        }
        else{
          foreach my $proj (@{$projection}) {
            my $slice = $proj->[2];
            warn "available slice ".$slice->seq_region_name."\n";
          }
          warn "MORE than one projection and no to slice specified (".$to_slice->seq_region_name.")\n";
        }
      }
      else {
        foreach my $proj (@{$projection}) {
          warn "Mapping is to ".$proj->[2]->seq_region_name."\n";
        }
        warn "One projection but none to slice specified\n";
      }
      return undef;
    }
  }

  # Place the copy on a slice spanning the entire target seq_region
  # (forward strand), as documented in the POD.
  my $p_slice = $projection->[$index]->[2];
  my $slice_adaptor = $db->get_SliceAdaptor;
  $slice = $slice_adaptor->fetch_by_region($p_slice->coord_system()->name(),
                                           $p_slice->seq_region_name(),
                                           undef, #start
                                           undef, #end
                                           1, #strand
                                           $p_slice->coord_system()->version);

  my $new_feature;
  %$new_feature = %$self;
  bless $new_feature, ref $self;
  $new_feature->{'start'}  = $p_slice->start();
  $new_feature->{'end'}    = $p_slice->end();
  # An unknown strand (0) stays unknown rather than adopting the
  # projected slice's strand.
  $new_feature->{'strand'} =
    ($self->{'strand'} == 0) ? 0 : $p_slice->strand();
  $new_feature->{'slice'} = $slice;
  return $new_feature;
}
=head2 transfer
Arg [1] : Bio::EnsEMBL::Slice $slice
The slice to transfer this feature to
Example : $feature = $feature->transfer($slice);
next if(!defined($feature));
Description: Returns a copy of this feature which has been shifted onto
another slice.
If the new slice is in a different coordinate system the
feature is transformed first and then placed on the slice.
If the feature would be split across a coordinate system
boundary or mapped to a gap undef is returned instead.
If the feature cannot be placed on the provided slice because
it maps to an entirely different location, undef is returned
instead.
Returntype : Bio::EnsEMBL::Feature (or undef)
Exceptions : throw on incorrect argument
throw if feature does not have attached slice
Caller : general, transform()
Status : Stable
=cut
# Return a shallow copy of this feature shifted onto the given slice
# (see POD above). Transforms coordinate systems first if needed;
# returns undef when the feature cannot be placed on the target slice.
sub transfer {
  my $self = shift;
  my $slice = shift;

  if(!$slice || !ref($slice) || (!$slice->isa('Bio::EnsEMBL::Slice') && !$slice->isa('Bio::EnsEMBL::LRGSlice'))) {
    throw('Slice argument is required');
  }

  #make a shallow copy of the feature to be transfered
  my $feature;
  %{$feature} = %{$self};
  bless $feature, ref($self);
  # Keep the copied adaptor reference weak to avoid a strong cycle.
  weaken $feature->{adaptor};

  my $current_slice = $self->{'slice'};

  if(!$current_slice) {
    warning("Feature cannot be transfered without attached slice.");
    return undef;
  }

  my $cur_cs = $current_slice->coord_system();
  my $dest_cs = $slice->coord_system();

  #if we are not in the same coord system a transformation step is needed first
  if(!$dest_cs->equals($cur_cs)) {
    $feature = $feature->transform($dest_cs->name, $dest_cs->version, $slice);
    return undef if(!defined($feature));
    $current_slice = $feature->{'slice'};
  }

  # feature went to entirely different seq_region
  if($current_slice->seq_region_name() ne $slice->seq_region_name()) {
    return undef;
  }

  #if the current feature positions are not relative to the start of the
  #seq region, convert them so they are
  my $cur_slice_start = $current_slice->start();
  my $cur_slice_strand = $current_slice->strand();
  if($cur_slice_start != 1 || $cur_slice_strand != 1) {
    my $fstart = $feature->{'start'};
    my $fend = $feature->{'end'};

    if($cur_slice_strand == 1) {
      $feature->{'start'} = $fstart + $cur_slice_start - 1;
      $feature->{'end'} = $fend + $cur_slice_start - 1;
    } else {
      # Reverse-strand slice: flip coordinates and the feature strand.
      my $cur_slice_end = $current_slice->end();
      $feature->{'start'} = $cur_slice_end - $fend + 1;
      $feature->{'end'} = $cur_slice_end - $fstart + 1;
      $feature->{'strand'} *= -1;
    }
  }

  my $fstart = $feature->{'start'};
  my $fend = $feature->{'end'};

  #convert to destination slice coords
  if($slice->strand == 1) {
    $feature->{'start'} = $fstart - $slice->start() + 1;
    $feature->{'end'} = $fend - $slice->start() + 1;
  } else {
    $feature->{'start'} = $slice->end() - $fend + 1;
    $feature->{'end'} = $slice->end() - $fstart + 1;
    $feature->{'strand'} *= -1;
  }

  $feature->{'slice'} = $slice;

  return $feature;
}
=head2 project_to_slice
Arg [1] : slice to project to
Example :
my $clone_projection = $feature->project_to_slice($slice);
foreach my $seg (@$clone_projection) {
my $clone = $seg->to_Slice();
print "Features current coords ", $seg->from_start, '-',
$seg->from_end, " project onto clone coords " .
$clone->seq_region_name, ':', $clone->start, '-', $clone->end,
$clone->strand, "\n";
}
Description: Returns the results of 'projecting' this feature onto another
slice . This is useful to see where a feature
would lie in a coordinate system in which it
crosses a boundary.
This method returns a reference to a list of
Bio::EnsEMBL::ProjectionSegment objects.
ProjectionSegments are blessed arrays and can also be used as
triplets [from_start,from_end,to_Slice]. The from_start and
from_end are the coordinates relative to the feature start.
For example, if a feature is current 100-200bp on a slice
then the triplets returned might be:
[1,50,$slice1],
[51,101,$slice2]
The to_Slice is a slice spanning the region on the requested
coordinate system that this feature projected to.
If the feature projects entirely into a gap then a reference to
an empty list is returned.
Returntype : listref of Bio::EnsEMBL::ProjectionSegments
which can also be used as [$start,$end,$slice] triplets
Exceptions : slice does not have an adaptor
Caller : general
Status : At Risk
=cut
# Project this feature onto another slice (see POD above). Returns a
# listref of ProjectionSegment triplets, or [] when the feature has no
# attached slice / projects into a gap.
sub project_to_slice {
  my $self = shift;
  my $to_slice = shift;
  my $slice = $self->{'slice'};

  if(!$slice) {
    warning("Feature cannot be projected without attached slice.");
    return [];
  }

  #get an adaptor from the attached slice because this feature may not yet
  #be stored and may not have its own adaptor
  my $slice_adaptor = $slice->adaptor();

  if(!$slice_adaptor) {
    throw("Cannot project feature because associated slice does not have an " .
          " adaptor");
  }

  # Combined orientation of feature and its slice.
  my $strand = $self->strand() * $slice->strand();

  #fetch by feature always gives back forward strand slice:
  $slice = $slice_adaptor->fetch_by_Feature($self);
  # Re-orient when the combined strand is reverse.
  $slice = $slice->invert if($strand == -1);
  return $slice->project_to_slice($to_slice);
}
=head2 project
Arg [1] : string $name
The name of the coordinate system to project this feature onto
Arg [2] : string $version (optional)
The version of the coordinate system (such as 'NCBI34') to
project this feature onto
Example :
my $clone_projection = $feature->project('clone');
foreach my $seg (@$clone_projection) {
my $clone = $seg->to_Slice();
print "Features current coords ", $seg->from_start, '-',
$seg->from_end, " project onto clone coords " .
$clone->seq_region_name, ':', $clone->start, '-', $clone->end,
$clone->strand, "\n";
}
Description: Returns the results of 'projecting' this feature onto another
coordinate system. This is useful to see where a feature
would lie in a coordinate system in which it
crosses a boundary.
This method returns a reference to a list of
Bio::EnsEMBL::ProjectionSegment objects.
ProjectionSegments are blessed arrays and can also be used as
triplets [from_start,from_end,to_Slice]. The from_start and
from_end are the coordinates relative to the feature start.
For example, if a feature is current 100-200bp on a slice
then the triplets returned might be:
[1,50,$slice1],
[51,101,$slice2]
The to_Slice is a slice spanning the region on the requested
coordinate system that this feature projected to.
If the feature projects entirely into a gap then a reference to
an empty list is returned.
Returntype : listref of Bio::EnsEMBL::ProjectionSegments
which can also be used as [$start,$end,$slice] triplets
Exceptions : slice does not have an adaptor
Caller : general
Status : Stable
=cut
# Project this feature onto another coordinate system (see POD above).
# Returns a listref of ProjectionSegment triplets, or [] when the
# feature has no attached slice / projects into a gap.
sub project {
  my $self = shift;
  my $cs_name = shift;
  my $cs_version = shift;

  my $slice = $self->{'slice'};
  if(!$slice) {
    warning("Feature cannot be projected without attached slice.");
    return [];
  }

  #get an adaptor from the attached slice because this feature may not yet
  #be stored and may not have its own adaptor
  my $slice_adaptor = $slice->adaptor();

  if(!$slice_adaptor) {
    throw("Cannot project feature because associated slice does not have an " .
          " adaptor");
  }

  # Combined orientation of feature and its slice.
  my $strand = $self->strand() * $slice->strand();

  #fetch by feature always gives back forward strand slice:
  $slice = $slice_adaptor->fetch_by_Feature($self);
  # Re-orient when the combined strand is reverse.
  $slice = $slice->invert if($strand == -1);
  return $slice->project($cs_name, $cs_version);
}
=head2 seqname
Arg [1] : (optional) $seqname
Example : $seqname = $feat->seqname();
Description: Getter/Setter for the name of the sequence that this feature
is on. Normally you can get away with not setting this value
and it will default to the name of the slice on which this
feature is on. It is useful to set this value on features which
do not ordinarily sit on features such as ProteinFeatures which
sit on peptides.
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
# Accessor/mutator for the sequence name. When no explicit seqname has
# been set, falls back to the name of the attached slice (if any).
# Useful mainly for features that do not sit on a slice, such as
# ProteinFeatures on peptides.
sub seqname {
  my $self = shift;

  $self->{'seqname'} = shift if @_;

  # No explicit name: defer to the attached slice's name.
  if ( !$self->{'seqname'} && $self->slice() ) {
    return $self->slice->name();
  }

  return $self->{'seqname'};
}
=head2 display_id
Arg [1] : none
Example : print $f->display_id();
Description: This method returns a string that is considered to be
the 'display' identifier. It is overridden by subclasses to
return an appropriate value for objects of that particular
class. If no appropriate display id is available an empty
string is returned instead.
Returntype : string
Exceptions : none
Caller : web drawing code
Status : Stable
=cut
# Base-class display identifier: the empty string. Subclasses override
# this to provide something meaningful for drawing code.
sub display_id {
  my ($self) = @_;
  return q{};
}
=head2 version
Arg [1] : none
Example : print $f->version();
Description: This method returns a string that is considered to be
the identifier version. It is overridden by subclasses to
return an appropriate value for objects of that particular
class. If no appropriate version is available an empty
string is returned instead.
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
# Base-class identifier version: the empty string. Subclasses override
# this to provide a real version where one exists.
sub version {
  my ($self) = @_;
  return q{};
}
=head2 feature_Slice
Args : none
Example : $slice = $feature->feature_Slice()
Description: This is a convenience method to return a slice that covers the
area of this feature. The feature start will be at 1 on it, and
it will have the length of this feature.
Returntype : Bio::EnsEMBL::Slice or undef if this feature has no attached
Slice.
Exceptions : warning if Feature does not have attached slice.
Caller : web drawing code
Status : Stable
=cut
# Convenience constructor for a slice exactly covering this feature
# (feature start maps to position 1 on it). Preserves StrainSlice-ness
# of the attached slice. Returns undef (with a warning) when the
# feature has no attached slice.
sub feature_Slice {
  my $self = shift;

  my $slice = $self->slice();

  if(!$slice) {
    warning('Cannot obtain Feature_Slice for feature without attached slice');
    return undef;
  }

  if($slice->isa("Bio::EnsEMBL::StrainSlice")){
    # Strain slices carry an extra strain_name attribute that must be
    # propagated to the new slice.
    return Bio::EnsEMBL::StrainSlice->new
      (-seq_region_name   => $slice->seq_region_name,
       -seq_region_length => $slice->seq_region_length,
       -coord_system      => $slice->coord_system,
       -start             => $self->seq_region_start(),
       -end               => $self->seq_region_end(),
       -strand            => $self->seq_region_strand(),
       -adaptor           => $slice->adaptor(),
       -strain_name       => $slice->strain_name());
  }
  else{
    return Bio::EnsEMBL::Slice->new
      (-seq_region_name   => $slice->seq_region_name,
       -seq_region_length => $slice->seq_region_length,
       -coord_system      => $slice->coord_system,
       -start             => $self->seq_region_start(),
       -end               => $self->seq_region_end(),
       -strand            => $self->seq_region_strand(),
       -adaptor           => $slice->adaptor());
  }
}
=head2 seq_region_name
Arg [1] : none
Example : print $feature->seq_region_name();
Description: Gets the name of the seq_region which this feature is on.
Returns undef if this Feature is not on a slice.
Returntype : string or undef
Exceptions : none
Caller : general
Status : Stable
=cut
# Name of the seq_region this feature sits on, or undef when the
# feature has no attached slice.
sub seq_region_name {
  my ($self) = @_;
  my $slice = $self->{'slice'} or return undef;
  return $slice->seq_region_name();
}
=head2 seq_region_length
Arg [1] : none
Example : print $feature->seq_region_length();
Description: Returns the length of the seq_region which this feature is on
Returns undef if this Feature is not on a slice.
Returntype : int (unsigned) or undef
Exceptions : none
Caller : general
Status : Stable
=cut
# Length of the seq_region this feature sits on, or undef when the
# feature has no attached slice.
sub seq_region_length {
  my ($self) = @_;
  my $slice = $self->{'slice'} or return undef;
  return $slice->seq_region_length();
}
=head2 seq_region_strand
Arg [1] : none
Example : print $feature->seq_region_strand();
Description: Returns the strand of the seq_region which this feature is on
(i.e. feature_strand * slice_strand)
Returns undef if this Feature is not on a slice.
Returntype : 1,0,-1 or undef
Exceptions : none
Caller : general
Status : Stable
=cut
# Strand of the feature on the seq_region: feature strand multiplied by
# slice strand. Returns undef when the feature has no attached slice.
sub seq_region_strand {
  my ($self) = @_;
  my $slice = $self->{'slice'} or return undef;
  return $slice->strand() * $self->{'strand'};
}
=head2 seq_region_start
Arg [1] : none
Example : print $feature->seq_region_start();
Description: Convenience method which returns the absolute start of this
feature on the seq_region, as opposed to the relative (slice)
position.
Returns undef if this feature is not on a slice.
Returntype : int or undef
Exceptions : none
Caller : general
Status : Stable
=cut
# Absolute start of this feature on the seq_region (as opposed to the
# slice-relative start). Returns undef when there is no attached slice.
# Circular slices with a stored feature delegate to the database to
# handle origin-spanning coordinates.
sub seq_region_start {
  my ($self) = @_;

  my $slice = $self->slice();

  if ( defined($slice) ) {
    return $self->_seq_region_boundary_from_db('start')
      if $slice->is_circular() and $self->adaptor();
    my $start;

    if ( $slice->strand() == 1 ) {
      $start = $slice->start() + $self->start() - 1
        if defined $self->start();
    } else {
      # Reverse-strand slice: the feature's end maps to the region start.
      $start = $slice->end() - $self->end() + 1
        if defined $self->end();
    }

    return $start;
  }

  return undef;
} ## end sub seq_region_start
=head2 seq_region_end
Arg [1] : none
Example : print $feature->seq_region_end();
Description: Convenience method which returns the absolute end of this
feature on the seq_region, as opposed to the relative (slice)
position.
Returns undef if this feature is not on a slice.
Returntype : int or undef
Exceptions : none
Caller : general
Status : Stable
=cut
sub seq_region_end {
  my ($self) = @_;

  my $slice = $self->slice();
  return undef if !defined($slice);

  # Circular seq_regions need the stored value from the database.
  return $self->_seq_region_boundary_from_db('end')
    if $slice->is_circular() and $self->adaptor();

  if ( $slice->strand() == 1 ) {
    # Forward-strand slice: local end offset from the slice start.
    return undef unless defined $self->end();
    return $slice->start() + $self->end() - 1;
  }

  # Reverse-strand slice: the feature's local *start* maps to the
  # absolute seq_region end.
  return undef unless defined $self->start();
  return $slice->end() - $self->start() + 1;
} ## end sub seq_region_end
=head2 coord_system_name
Arg [1] : none
Example : print $feature->coord_system_name()
Description: Gets the name of the coord_system which this feature is on.
Returns undef if this Feature is not on a slice.
Returntype : string or undef
Exceptions : none
Caller : general
Status : Stable
=cut
sub coord_system_name {
  my ($self) = @_;

  # The coordinate system is a property of the attached slice; a
  # detached feature has no coordinate system.
  my $attached_slice = $self->{'slice'};
  return undef unless $attached_slice;
  return $attached_slice->coord_system_name();
}
=head2 seq
Args : none
Example : my $dna_sequence = $simple_feature->seq();
Description: Returns the dna sequence from the attached slice and
attached database that overlaps with this feature.
Returns undef if there is no slice or no database.
Returns undef if this feature is unstranded (i.e. strand=0).
Returntype : String or undef
Exceptions : warning if this feature is not stranded
Caller : general
Status : Stable
=cut
sub seq {
  my $self = shift;

  # No slice attached: there is no sequence to fetch.
  if( ! defined $self->{'slice'} ) {
    return undef;
  }

  # An unstranded feature (strand == 0) has no defined orientation, so
  # the sequence to return would be ambiguous.
  if(!$self->strand()) {
    warning("Cannot retrieve sequence for unstranded feature.");
    return undef;
  }

  # Delegate to the slice; the strand argument lets subseq() pick the
  # orientation (presumably reverse-complemented for -1 — see
  # Bio::EnsEMBL::Slice::subseq).
  return $self->{'slice'}->subseq($self->start(), $self->end(),
                                  $self->strand());
}
=head2 get_all_alt_locations
Arg [1] : Boolean override flag to force the method to return all
Features on the reference sequence as well.
Example : @features = @{$feature->get_all_alt_locations()};
foreach $f (@features) {
print $f->slice->seq_region_name,' ',$f->start, $f->end,"\n";
}
Description: Retrieves shallow copies of this feature in its alternate
locations. A feature can be considered to have multiple
locations when it sits on a alternative structural haplotype
or when it is on a Pseudo Autosomal Region. Most features will
just return a reference to an empty list though.
The features returned by this method will be on a slice which
covers the entire alternate region.
Currently this method does not take into account alternate
locations on the alternate locations (e.g. a reference
sequence may have multiple alternate haplotypes. Asking
for alternate locations of a feature on one of the alternate
haplotypes will give you back the reference location, but not
locations on the other alternate haplotypes).
Returntype : listref of features of the same type of this feature.
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_all_alt_locations {
  my $self = shift;
  my $return_all = shift || 0;

  # Without a slice (or a slice adaptor) we cannot look up assembly
  # exceptions, so there are no alternate locations.
  my $slice = $self->{'slice'} or return [];
  my $sa = $slice->adaptor() or return [];

  # get slice of entire region
  $slice = $sa->fetch_by_seq_region_id($slice->get_seq_region_id);

  my $axfa = $sa->db->get_AssemblyExceptionFeatureAdaptor();
  my $axfs = $axfa->fetch_all_by_Slice($slice);

  # Partition exception features: haplotype/patch-style exceptions go
  # into @haps (their *flanking* regions are computed below), PAR-style
  # exceptions go straight into @alt.  "... REF" entries are the
  # reference-side counterparts and are only kept when the caller asked
  # for them via $return_all.
  my (@haps, @alt);
  foreach my $axf (@$axfs) {
    if(uc($axf->type()) eq 'HAP') {
      push @haps, $axf;
    } elsif(uc($axf->type()) =~ 'PAR') {
      # NOTE(review): this is a pattern match, not eq — it accepts any
      # type string containing 'PAR'.
      push @alt, $axf;
    } elsif( $axf->type() eq "PATCH_FIX"){
      push @haps, $axf;
    } elsif( $axf->type() eq "PATCH_FIX REF"){
      push @haps, $axf if $return_all > 0 ;
    } elsif( $axf->type() eq "HAP REF" ) {
      push @haps, $axf if $return_all > 0 ;
      # do nothing when you are on REF
    } elsif( $axf->type() eq "PATCH_NOVEL"){
      push @haps, $axf;
    }elsif( $axf->type() eq "PATCH_NOVEL REF"){
      push @haps, $axf if $return_all > 0 ;
    } else {
      warning("Unknown exception feature type ". $axf->type()."- ignoring.");
    }
  }

  # regions surrounding hap are those of interest, not hap itself
  # convert hap alt. exc. features to regions around haps instead
  foreach my $h (@haps) {
    my $haslice = $h->alternate_slice();
    my $hacs = $haslice->coord_system();

    # Region between the start of the seq_region and the hap start.
    if($h->start() > 1 && $haslice->start() > 1) {
      my $aslice = $sa->fetch_by_region($hacs->name(),
                                        $haslice->seq_region_name(),
                                        1,
                                        $haslice->start()-1,
                                        $haslice->strand(),
                                        $hacs->version());
      push @alt, Bio::EnsEMBL::AssemblyExceptionFeature->new
          (-start           => 1,
           -end             => $h->start()-1,
           -alternate_slice => $aslice);
    }

    # Region between the hap end and the end of the seq_region.
    if($h->end() < $slice->seq_region_length() &&
       $haslice->end < $haslice->seq_region_length()) {
      my $aslice = $sa->fetch_by_region($hacs->name(),
                                        $haslice->seq_region_name(),
                                        $haslice->end()+1,
                                        $haslice->seq_region_length(),
                                        $haslice->strand(),
                                        $hacs->version());
      push @alt, Bio::EnsEMBL::AssemblyExceptionFeature->new
          (-start           => $h->end() + 1,
           -end             => $slice->seq_region_length(),
           -alternate_slice => $aslice);
    }
  }

  # check if exception regions contain our feature
  my @features;
  foreach my $axf (@alt) {
    # ignore other region if feature is not entirely on it
    next if($self->seq_region_start() < $axf->start() ||
            $self->seq_region_end()   > $axf->end());

    # quick shallow copy of the feature
    # (dereferencing the undef $f autovivifies it into a hashref)
    my $f;
    %$f = %$self;
    bless $f, ref($self);

    my $aslice = $axf->alternate_slice();

    # position feature on entire slice of other region
    # Cache seq_region_* to prevent contamination when changing feature coordinates.
    my $seq_region_start = $f->seq_region_start();
    my $seq_region_end   = $f->seq_region_end();
    $f->{'start'}  = $seq_region_start - $axf->start() + $aslice->start();
    $f->{'end'}    = $seq_region_end   - $axf->start() + $aslice->start();
    $f->{'strand'} *= $aslice->strand();

    $f->{'slice'} = $sa->fetch_by_seq_region_id($aslice->get_seq_region_id());

    push @features, $f;
  }

  return \@features;
}
=head2 overlaps
Arg [1] : Bio::EnsEMBL::Feature $f
The other feature you want to check overlap with this feature
for.
Description: This method does a range comparison of this feature's C<seq_region_start> and
C<seq_region_end> and compares it with another feature's C<seq_region_start>
and C<seq_region_end>. It will return true if these ranges overlap
and the features are on the same seq_region.
For local coordinate overlaps tests (those values returned from
start and end) use C<overlaps_local()>.
Returntype : TRUE if features overlap, FALSE if they don't
Exceptions : warning if features are on different seq_regions
Caller : general
Status : Stable
=cut
sub overlaps {
  my ($self, $f) = @_;

  # Features on different seq_regions can never overlap: warn and
  # report no overlap rather than throwing.
  my ($sr1, $sr2) = ($self->seq_region_name, $f->seq_region_name);
  if($sr1 && $sr2 && ($sr1 ne $sr2)) {
    warning("Bio::EnsEMBL::Feature->overlaps(): features are on different seq regions. \$self is on $sr1 and \$feature is on $sr2");
    return 0;
  }

  # Standard closed-interval overlap test in absolute (seq_region)
  # coordinates.
  return ($self->seq_region_end >= $f->seq_region_start and $self->seq_region_start <= $f->seq_region_end) ? 1 : 0;
}
=head2 overlaps_local
Arg [1] : Bio::EnsEMBL::Feature $f
The other feature you want to check overlap with this feature
for.
Description: This method does a range comparison of this feature's start and
end and compares it with another feature's start and end. It
will return true if these ranges overlap and the features are
on the same seq_region.
This method will not attempt to resolve starts and ends with
reference to the feature's backing Slice.
For global coordinate overlaps tests (with reference to the feature's
backing sequence region) use C<overlaps()>.
Returntype : TRUE if features overlap, FALSE if they don't
Exceptions : warning if features are on different seq_regions
Caller : general
Status : Stable
=cut
sub overlaps_local {
  my ($self, $f) = @_;

  my ($sr1, $sr2) = ($self->seq_region_name, $f->seq_region_name);
  if($sr1 && $sr2 && ($sr1 ne $sr2)) {
    warning("Bio::EnsEMBL::Feature->overlaps_local(): features are on different seq regions. \$self is on $sr1 and \$feature is on $sr2");
    # NOTE(review): returns empty list/undef here whereas overlaps()
    # returns 0 in the same situation — kept as-is for compatibility.
    return;
  }

  # Closed-interval overlap test in local (slice-relative) coordinates;
  # does not consult the backing slice at all.
  return ($self->end >= $f->start and $self->start <= $f->end) ? 1 : 0;
}
=head2 get_overlapping_Genes
Arg [1] : Optional Boolean: Stranded match i.e. match strand of Feature and Genes
Arg [2] : Optional Boolean: Get Genes with an overlapping 5' end
Arg [3] : Optional Boolean: Get Genes with an overlapping 3' end
Description: Get all the genes that overlap this feature.
Returntype : list ref of Bio::EnsEMBL::Gene
Caller : general
Status : UnStable
=cut
sub get_overlapping_Genes{
  my ($self, $match_strands, $five_prime, $three_prime) = @_;

  # Requires an attached adaptor so the registry can locate the core
  # Gene adaptor for this feature's species.
  my $ga = Bio::EnsEMBL::Registry->get_adaptor($self->adaptor->db->species,'core','Gene');

  # RANGE => 0 restricts the "nearest" search to genes that actually
  # overlap this feature.
  my $list = $ga->fetch_all_nearest_by_Feature(-FEATURE => $self, -RANGE => 0, -THREE_PRIME => $three_prime, -FIVE_PRIME => $five_prime, -MATCH_STRAND => $match_strands);

  # Each entry is an arrayref whose first element is the gene; the
  # remaining elements (e.g. distance) are discarded.
  return [ map { $_->[0] } @$list ];
}
# query for absolute nearest.
=head2 get_nearest_Gene
Description: Get the nearest genes to the feature
Returntype : Bio::EnsEMBL::Gene
Caller : general
Status : At risk
=cut
sub get_nearest_Gene {
  my ($self) = @_;

  # Look up the core Gene adaptor for this feature's species and ask it
  # for the closest gene.  The result is [gene, distance]; the distance
  # element is not used here.
  my $gene_adaptor =
    Bio::EnsEMBL::Registry->get_adaptor($self->adaptor->db->species, 'core', 'Gene');
  my $nearest = $gene_adaptor->fetch_nearest_by_Feature($self);
  return $nearest->[0];
}
=head2 summary_as_hash
Example : $feature_summary = $feature->summary_as_hash();
Description : Retrieves a textual summary of this Feature.
Should be overridden by subclasses for specific tweaking
Returns : hashref of arrays of descriptive strings
Status : Intended for internal use
=cut
sub summary_as_hash {
  my ($self) = @_;

  # Positional summary common to all Feature subclasses, keyed by the
  # names expected by downstream serialisers.
  my %summary = (
    'id'              => $self->display_id,
    'start'           => $self->seq_region_start,
    'end'             => $self->seq_region_end,
    'strand'          => $self->strand,
    'seq_region_name' => $self->seq_region_name,
  );

  # Optional entries: only included when the underlying data exists.
  my $version = $self->version();
  $summary{'version'} = $version if $version;
  $summary{'assembly_name'} = $self->slice->coord_system->version() if $self->slice();

  return \%summary;
}
=head2 species
Example : $feature->species();
Description : Shortcut to the feature's DBAdaptor and returns its species name
Returntype : String the species name
Exceptions : Thrown if there is no attached adaptor
Caller : Webcode
=cut
sub species {
  my ($self) = @_;

  # The species name lives on the attached database adaptor; without an
  # adaptor there is nothing to delegate to.
  my $adaptor = $self->adaptor()
    or throw "Can only call this method if you have attached an adaptor";
  return $adaptor->db()->species();
}
=head2 contig
Deprecated - Included for backwards compatibility only.
Use slice() instead
=cut
sub contig {
  # Deprecated alias for slice().  Note the forwarding is a plain
  # function call (not a method call), so slice() receives the caller's
  # @_ unchanged, including the invocant.
  deprecate('Use slice() instead');
  slice(@_);
}
=head2 sub_SeqFeature
Deprecated - For genebuild backwards compatibility.
Avoid using it if possible
=cut
sub sub_SeqFeature{
  my ($self) = @_;

  # Return the stored child features.  When none were ever added we
  # deliberately fall through without an explicit return, preserving
  # the original's behaviour of yielding the false condition value.
  my $children = $self->{'_gsf_sub_array'};
  return @{$children} if $children;
}
=head2 add_sub_SeqFeature
Deprecated - only for genebuild backward compatibility.
Avoid using it if possible
=cut
sub add_sub_SeqFeature{
  my ($self, $feat, $expand) = @_;

  # Guard with defined() so omitting the $expand flag (the common case)
  # no longer triggers an "uninitialized value in string eq" warning.
  if ( defined($expand) && $expand eq 'EXPAND' ) {
    # Grow this feature so that it also spans the new child.
    # NOTE(review): a start/end of 0 is treated as "unset" here, as in
    # the original code.
    if ( !$self->start && !$self->end ) {
      # No coordinates yet: adopt the child's location wholesale.
      $self->start($feat->start());
      $self->end($feat->end());
      $self->strand($feat->strand);
    }
    else {
      $self->start($feat->start) if $feat->start < $self->start;
      $self->end($feat->end)     if $feat->end   > $self->end;
    }
  }
  else {
    # Without EXPAND the child must already lie inside the parent.
    if ( $self->start > $feat->start || $self->end < $feat->end ) {
      throw("$feat is not contained within parent feature, " .
            "and expansion is not valid");
    }
  }

  push( @{ $self->{'_gsf_sub_array'} }, $feat );
}
=head2 flush_sub_SeqFeature
Deprecated - Only for genebuild backwards compatibility.
Avoid using it if possible
=cut
sub flush_sub_SeqFeature {
  my ($self) = @_;
  # Discard all recorded child features by installing a fresh empty list.
  $self->{'_gsf_sub_array'} = [];
}
sub _deprecated_transform {
  my $self = shift;
  my $arg = shift;

  # Backwards-compatibility shim for the old transform() calling
  # conventions: no argument, or a Slice object, instead of the modern
  # coordinate system name.
  if(!$arg) {
    warning("Calling transform() with no arguments is deprecated.\n".
          "A coordinate system name argument should be used instead.\n".
          "You probably wanted transform('seqlevel') or transform('contig').");
    return $self->transform('seqlevel');
  }

  if(ref($arg) eq 'Bio::EnsEMBL::Slice') {
    # An "empty" slice used to mean "project to the top level".
    if($arg->{'empty'}) {
      warning("Calling transform with an empty slice is deprecated.\n" .
              "A coordinate system name argument should be used instead.\n".
              "You probably wanted transform('chromosome') or " .
              "transform('toplevel')");
      return $self->transform('toplevel');
    }
    # A real slice means the caller actually wanted transfer().
    warning("Calling transform with a slice is deprecated.\n" .
              "Use the transfer method instead");
    return $self->transfer($arg);
  }

  # Any other argument type was never supported; no-op.
  warning("Calling transform with a [".ref($arg)."] arg is no longer " .
          "(or never was) supported.  Doing nothing instead.");
  return $self;
}
=head2 id
Deprecated - only included for backwards compatibility.
Use display_id, hseqname, dbID or stable_id instead
=cut
sub id {
  my ($self) = @_;
  deprecate("id method is not used - use display_id instead");

  # Fall back through the historical identifier fields in priority
  # order, ending with the internal database id.
  foreach my $field (qw(stable_id hseqname seqname)) {
    return $self->{$field} if $self->{$field};
  }
  return $self->{'dbID'};
}
# Map from feature class name to the core database table that stores
# instances of it.  Used by _seq_region_boundary_from_db() to build the
# boundary-lookup query; classes absent from this map cannot be
# resolved via SQL.
my $feature_tables =
  {
   'Bio::EnsEMBL::AssemblyExceptionFeature' => 'assembly_exception',
   'Bio::EnsEMBL::DensityFeature'           => 'density_feature',
   'Bio::EnsEMBL::Exon'                     => 'exon',
   'Bio::EnsEMBL::PredictionExon'           => 'prediction_exon',
   'Bio::EnsEMBL::Gene'                     => 'gene',
   'Bio::EnsEMBL::IntronSupportingEvidence' => 'intron_supporting_evidence',
   'Bio::EnsEMBL::KaryotypeBand'            => 'karyotype',
   'Bio::EnsEMBL::Map::DitagFeature'        => 'ditag_feature',
   'Bio::EnsEMBL::Map::MarkerFeature'       => 'marker_feature',
   'Bio::EnsEMBL::MiscFeature'              => 'misc_feature',
   'Bio::EnsEMBL::Operon'                   => 'operon',
   'Bio::EnsEMBL::OperonTranscript'         => 'operon_transcript',
   'Bio::EnsEMBL::RepeatFeature'            => 'repeat_feature',
   'Bio::EnsEMBL::SimpleFeature'            => 'simple_feature',
   'Bio::EnsEMBL::Transcript'               => 'transcript',
   'Bio::EnsEMBL::PredictionTranscript'     => 'prediction_transcript'
  };
#
# get seq region boundary (start|end) for a feature
# the method attempts to retrieve the boundary directly from the db
# if feature is not of class in the feature_table hash, it means the
# feature it's not stored in the db or we don't know how to get the
# region boundary from the db.
# Return undef in these cases.
#
#
# get seq region boundary (start|end) for a feature
# the method attempts to retrieve the boundary directly from the db.
# If the feature's class is not in the feature_tables map, the feature
# is either not stored in the db or we don't know how to get the
# region boundary from the db; return undef in those cases.
#
sub _seq_region_boundary_from_db {
  my ($self, $boundary) = @_;

  throw "Undefined boundary"
    unless defined $boundary;
  $boundary eq 'start' or $boundary eq 'end'
    or throw "Wrong boundary: select start|end";

  # Column names in the feature tables are seq_region_start/end.
  $boundary = 'seq_region_' . $boundary;

  my $sql_helper = $self->adaptor->dbc->sql_helper;
  throw "Unable to get SqlHelper instance"
    unless defined $sql_helper;

  my $feature_table = $feature_tables->{ ref $self };
  return undef unless defined $feature_table;

  my $attrib_id = $feature_table . '_id';

  # Bind the dbID as a placeholder instead of interpolating it into the
  # SQL text; the table and column names come from the trusted map
  # above so interpolating those is safe.
  my $query =
    "SELECT ${boundary} from ${feature_table} WHERE ${attrib_id} = ?";

  return $sql_helper->execute_single_result( -SQL    => $query,
                                             -PARAMS => [ $self->dbID ] );
}
1;
| at7/ensembl | modules/Bio/EnsEMBL/Feature.pm | Perl | apache-2.0 | 51,015 |
#!/usr/bin/env perl
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2017] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
use warnings;
use strict;
use Getopt::Long qw(:config no_ignore_case);
use Bio::EnsEMBL::Pipeline::DBSQL::DBAdaptor;
my $host;
my $port=3306;
my $user;
my $pass;
my $dbname;
my $input_id_type;
my $analysis_id;
GetOptions(
'host|dbhost|h=s' => \$host,
'port|dbport|P=s' => \$port,
'user|dbuser|u=s' => \$user,
'pass|dbpass|p=s' => \$pass,
'dbname|db|D=s' => \$dbname,
'input_id_type=s' => \$input_id_type,
'analysis_id=s' => \$analysis_id,
);
$|=1;
my $db = new Bio::EnsEMBL::Pipeline::DBSQL::DBAdaptor(-dbname => $dbname,
-user => $user,
-pass => $pass,
-port => $port,
-host => $host);
my $q= 'select seq_region.name, coord_system.version, gene.seq_region_start, gene.seq_region_end, gene.seq_region_strand from gene, seq_region, coord_system where seq_region.seq_region_id = gene.seq_region_id and seq_region.coord_system_id = coord_system.coord_system_id order by gene.seq_region_id, gene.seq_region_start, gene.seq_region_end, gene.seq_region_strand';
my $sth = $db->prepare($q) || $db->throw("can't prepare: $q");
# Accumulator for the slice currently being merged.  Initialised to
# undef so the very first (non-MT) row opens a fresh slice.  The old
# code tested "$slice_chr == 0", which numifies non-numeric chromosome
# names (X, Y, ...) to 0 and therefore reset the slice on every row of
# those chromosomes, so they never produced any output.
my $slice_start;
my $slice_end;
my $slice_chr;
my $slice_strand;
my $slice_version;

my $res = $sth->execute() || $db->throw("can't execute: $q");

while( my ($chr, $version, $start, $end, $strand) = $sth->fetchrow_array) {
  next if ($chr eq 'MT');

  if (!defined $slice_chr){
    # First row: open the first slice.
    ($slice_chr, $slice_start, $slice_end, $slice_strand, $slice_version) =
        ($chr, $start, $end, $strand, $version);
    next;
  }

  if ($slice_chr eq $chr && $start < $slice_end && $end > $slice_start && $slice_strand == $strand){
    # Overlapping gene on the same chromosome and strand: extend the
    # current slice to cover it.
    $slice_start = $start if $start < $slice_start;
    $slice_end   = $end   if $end   > $slice_end;
  }else{
    # Non-overlapping gene: emit the finished slice (the old code
    # printed the *current row's* coordinates here, dropping the merged
    # slice), then start a new slice from this row.
    print "insert into input_id_analysis values ('toplevel:".$slice_version.":$slice_chr:$slice_start:$slice_end:1','".$input_id_type."',".$analysis_id.", now(), '', '', '0');\n";
    ($slice_chr, $slice_start, $slice_end, $slice_strand, $slice_version) =
        ($chr, $start, $end, $strand, $version);
  }
}

# Flush the last accumulated slice; the original loop silently dropped it.
if (defined $slice_chr) {
  print "insert into input_id_analysis values ('toplevel:".$slice_version.":$slice_chr:$slice_start:$slice_end:1','".$input_id_type."',".$analysis_id.", now(), '', '', '0');\n";
}
| james-monkeyshines/ensembl-analysis | scripts/make_input_id_4_seleno.pl | Perl | apache-2.0 | 3,185 |
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package centreon::common::powershell::exchange::owamailbox;
use strict;
use warnings;
use centreon::plugins::misc;
use centreon::common::powershell::exchange::powershell;
# Build the PowerShell script that tests OWA connectivity for a mailbox.
# Options: mailbox, password, url, no_trust_ssl (optional).
sub get_powershell {
    my (%options) = @_;

    # Unless the caller set no_trust_ssl, pass -TrustAnySSLCertificate so
    # the OWA test also accepts self-signed certificates.
    my $no_trust_ssl = (defined($options{no_trust_ssl})) ? '' : '-TrustAnySSLCertificate';
    my $ps = centreon::common::powershell::exchange::powershell::powershell_init(%options);

    # PowerShell snippet: build a PSCredential for the mailbox and run
    # Test-OwaConnectivity against the given URL.  Each result line is
    # parsed by check() below.
    # NOTE(review): mailbox/password/url are interpolated verbatim into
    # the script; values containing quotes would break or alter it.
    $ps .= '
try {
    $ErrorActionPreference = "Stop"
    $username = "' . $options{mailbox} . '"
    $password = "' . $options{password} . '"
    $secstr = New-Object -TypeName System.Security.SecureString
    $password.ToCharArray() | ForEach-Object {$secstr.AppendChar($_)}
    $cred = new-object -typename System.Management.Automation.PSCredential -argumentlist $username,$secstr
    $results = Test-OwaConnectivity -WarningAction:SilentlyContinue -Url:' . $options{url} . ' -MailboxCredential:$cred ' . $no_trust_ssl . '
} catch {
    Write-Host $Error[0].Exception
    exit 1
}
Foreach ($result in $results) {
    Write-Host "[url=" $result.Url "][scenario=" $result.Scenario "][result=" $result.Result "][latency=" $result.Latency.TotalMilliseconds "][[error=" $Result.Error "]]"
}
exit 0
';

    return $ps;
}
# Parse the PowerShell output produced by get_powershell() and feed the
# plugin output/perfdata objects accordingly.
sub check {
    my ($self, %options) = @_;

    # Following output:
    #[url= http://xxxx/ ][scenario= Options ][result= Ignored ][latency= ][[error=...]]

    # Default status; overridden per-scenario below if thresholds match.
    $self->{output}->output_add(
        severity => 'OK',
        short_msg => "OWA to '" . $options{mailbox} . "' is ok."
    );

    my $checked = 0;
    $self->{output}->output_add(long_msg => $options{stdout});
    # One bracketed record per tested scenario.
    while ($options{stdout} =~ /\[url=(.*?)\]\[scenario=(.*?)\]\[result=(.*?)\]\[latency=(.*?)\]\[\[error=(.*?)\]\]/msg) {
        $self->{data} = {};
        ($self->{data}->{url}, $self->{data}->{scenario}, $self->{data}->{result}, $self->{data}->{latency}, $self->{data}->{error}) =
            ($self->{output}->decode($1), $self->{output}->decode($2), centreon::plugins::misc::trim($3),
             centreon::plugins::misc::trim($4), centreon::plugins::misc::trim($5));
        $checked++;

        # Evaluate the user-supplied warning/critical expressions against
        # the parsed record; critical takes precedence.
        my $status = 'ok';
        if (defined($self->{option_results}->{critical}) && $self->{option_results}->{critical} ne '' &&
            $self->{output}->test_eval(test => $self->{option_results}->{critical}, values => $self->{data})) {
            $status = 'critical';
        } elsif (defined($self->{option_results}->{warning}) && $self->{option_results}->{warning} ne '' &&
            $self->{output}->test_eval(test => $self->{option_results}->{warning}, values => $self->{data})) {
            $status = 'warning';
        }

        if (!$self->{output}->is_status(value => $status, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(
                severity => $status,
                short_msg => sprintf(
                    "OWA scenario '%s' to '%s' is '%s' [url: %s]",
                    $self->{data}->{scenario}, $options{mailbox}, $self->{data}->{result}, $self->{data}->{url}
                )
            );
        }

        # Latency arrives in milliseconds; export perfdata in seconds.
        if ($self->{data}->{latency} =~ /^(\d+)/) {
            $self->{output}->perfdata_add(
                label => $self->{data}->{url} . '_' . $self->{data}->{scenario}, unit => 's',
                value => sprintf("%.3f", $1 / 1000),
                min => 0
            );
        }
    }

    # No record parsed at all: the PowerShell output was not recognised.
    if ($checked == 0) {
        $self->{output}->output_add(
            severity => 'UNKNOWN',
            short_msg => 'Cannot find informations'
        );
    }
}
1;
__END__
=head1 DESCRIPTION
Method to check Exchange owa connection on a specific mailbox.
=cut
| centreon/centreon-plugins | centreon/common/powershell/exchange/owamailbox.pm | Perl | apache-2.0 | 4,423 |
## OpenXPKI::Crypto::Tool::LibSCEP::Command::create_nextca_reply
## Written 2015-2018 by Gideon Knocke and Martin Bartosch for the OpenXPKI project
## (C) Copyright 2015-2018 by The OpenXPKI Project
package OpenXPKI::Crypto::Tool::LibSCEP::Command::create_nextca_reply;
use strict;
use warnings;
use English;
use Class::Std;
use OpenXPKI::FileUtils;
use OpenXPKI::Debug;
use Crypt::LibSCEP;
use MIME::Base64;
my %chain_of :ATTR;
my %engine_of :ATTR;
my %hash_alg_of :ATTR;
my %enc_alg_of :ATTR;
my %fu_of :ATTR;
# Class::Std initialiser: stash the constructor arguments into the
# inside-out attribute hashes, keyed by object identity.
sub START {
    my ($self, $ident, $arg_ref) = @_;

    $fu_of{$ident}       = OpenXPKI::FileUtils->new();
    $engine_of{$ident}   = $arg_ref->{ENGINE};
    $chain_of{$ident}    = $arg_ref->{CHAIN};
    $hash_alg_of{$ident} = $arg_ref->{HASH_ALG};
    $enc_alg_of{$ident}  = $arg_ref->{ENCRYPTION_ALG};
}
# Build the GetNextCACert SCEP reply: sign the configured CA chain with
# the engine's certificate/key via Crypt::LibSCEP and return the raw
# (binary) DER structure.
sub get_result
{
    my $self = shift;
    my $ident = ident $self;

    if (! defined $engine_of{$ident}) {
        OpenXPKI::Exception->throw(
            message => 'I18N_OPENXPKI_CRYPTO_TOOL_LIBSCEP_COMMAND_CREATE_NEXTCA_REPLY_NO_ENGINE',
        );
    }
    ##! 64: 'engine: ' . Dumper($engine_of{$ident})
    # The engine must provide both a key file and a certificate file.
    my $keyfile = $engine_of{$ident}->get_keyfile();
    if (! defined $keyfile || $keyfile eq '') {
        OpenXPKI::Exception->throw(
            message => 'I18N_OPENXPKI_CRYPTO_TOOL_LIBSCEP_COMMAND_CREATE_NEXTCA_REPLY_KEYFILE_MISSING',
        );
    }
    my $certfile = $engine_of{$ident}->get_certfile();
    if (! defined $certfile || $certfile eq '') {
        OpenXPKI::Exception->throw(
            message => 'I18N_OPENXPKI_CRYPTO_TOOL_LIBSCEP_COMMAND_CREATE_NEXTCA_REPLY_CERTFILE_MISSING',
        );
    }
    my $cert = $fu_of{$ident}->read_file($certfile);
    my $key  = $fu_of{$ident}->read_file($keyfile);
    my $sigalg = $hash_alg_of{$ident};
    my $encalg = $enc_alg_of{$ident};
    my $pwd    = $engine_of{$ident}->get_passwd();
    my $chain  = $chain_of{$ident};
    my $nextca_reply;
    # Crypt::LibSCEP may die on bad input; convert that into an
    # OpenXPKI::Exception.
    eval {
        $nextca_reply = Crypt::LibSCEP::create_nextca_reply({passin=>"pass", passwd=>$pwd, sigalg=>$sigalg, encalg=>$encalg}, $chain, $cert, $key);
    };
    if ($EVAL_ERROR) {
        OpenXPKI::Exception->throw(
            message => $EVAL_ERROR,
        );
    }
    # Ensure a trailing newline, then strip the first and last lines of
    # the returned blob — presumably the PEM BEGIN/END markers (TODO
    # confirm against LibSCEP output) — and base64-decode the body to
    # raw DER bytes.
    $nextca_reply =~ s/\n?\z/\n/;
    $nextca_reply =~ s/^(?:.*\n){1,1}//;
    $nextca_reply =~ s/(?:.*\n){1,1}\z//;
    return decode_base64($nextca_reply);
}
sub cleanup {
    my ($self) = @_;
    # Remove any temporary files tracked by the FileUtils helper.
    $fu_of{ ident $self }->cleanup();
}
1;
__END__
| openxpki/openxpki | core/server/OpenXPKI/Crypto/Tool/LibSCEP/Command/create_nextca_reply.pm | Perl | apache-2.0 | 2,502 |
#
# (c) Jan Gehring <jan.gehring@gmail.com>
#
# vim: set ts=2 sw=2 tw=0:
# vim: set expandtab:
package Rex::Repositorio::Repository_Factory;
use common::sense;
use Params::Validate qw(:all);
use English;
use Carp;
our $VERSION = '0.4.1'; # VERSION
# Factory: load and instantiate Rex::Repositorio::Repository::<type>.
# Arguments (validated): type (scalar module suffix), options (hashref
# passed verbatim to the repository class constructor).
sub create {
  my $class  = shift;
  my %option = validate(
    @_,
    {
      type => {
        type => SCALAR
      },
      options => {
        type => HASHREF
      }
    }
  );

  my $type = $option{type};

  # The type is interpolated into a string eval below, so restrict it
  # to a bare module-name shape; anything else would allow arbitrary
  # code injection through the configuration value.
  $type =~ m{ \A \w+ (?: :: \w+ )* \z }xms
    or confess "Invalid repository type name: $type";

  my $repo_mod = "Rex::Repositorio::Repository::$type";

  eval "use $repo_mod;";
  if ($EVAL_ERROR) {
    confess "Error loading repository type: $type. ($EVAL_ERROR)";
  }

  return $repo_mod->new( %{ $option{options} } );
}
1;
| gitpan/Rex-Repositorio | lib/Rex/Repositorio/Repository_Factory.pm | Perl | apache-2.0 | 699 |
#
# Copyright 2015 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package hardware::server::sun::mseries::mode::domains;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
# Map of scfDomainErrorStatus SNMP values to [ message template,
# plugin severity ].  254 ("changed") and 255 ("unknown") are
# transitional/indeterminate states.
my %error_status = (
    1 => ["The domain '%s' status is normal", 'OK'],
    2 => ["The domain '%s' status is degraded", 'WARNING'],
    3 => ["The domain '%s' status is faulted", 'CRITICAL'],
    254 => ["The domain '%s' status has changed", 'WARNING'],
    255 => ["The domain '%s' status is unknown", 'UNKNOWN'],
);
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    # Mode version reported by the plugin framework.
    $self->{version} = '1.0';

    # --skip: ignore domains whose status is 'unknown' (255); see run().
    $options{options}->add_options(arguments =>
                                {
                                  "skip" => { name => 'skip' },
                                });

    return $self;
}
sub check_options {
    my ($self, %options) = @_;
    # No mode-specific option validation; delegate to the base class.
    $self->SUPER::init(%options);
}
sub run {
    my ($self, %options) = @_;
    # $options{snmp} = snmp object
    $self->{snmp} = $options{snmp};

    # Per-domain error status table (vendor enterprise OID);
    # values are keys of %error_status above.
    my $oid_scfDomainErrorStatus = '.1.3.6.1.4.1.211.1.15.3.1.1.5.2.1.15';
    my $oids_domain_status = $self->{snmp}->get_table(oid => $oid_scfDomainErrorStatus, nothing_quit => 1);

    # Default summary; overridden per-domain below on any problem.
    $self->{output}->output_add(severity => 'OK',
                                short_msg => "All domains are ok.");
    foreach ($self->{snmp}->oid_lex_sort(keys %$oids_domain_status)) {
        # The OID suffix after the table root identifies the domain.
        /^${oid_scfDomainErrorStatus}\.(.*)/;
        my $domain_id = $1;

        $self->{output}->output_add(long_msg => sprintf(${$error_status{$oids_domain_status->{$_}}}[0], $domain_id));
        # With --skip, 'unknown' (255) domains do not affect the status.
        if ($oids_domain_status->{$_} == 255 && defined($self->{option_results}->{skip})) {
            next;
        }
        # Anything other than 1 (normal) raises the mapped severity.
        if ($oids_domain_status->{$_} != 1) {
            $self->{output}->output_add(severity => ${$error_status{$oids_domain_status->{$_}}}[1],
                                        short_msg => sprintf(${$error_status{$oids_domain_status->{$_}}}[0], $domain_id));
        }
    }

    $self->{output}->display();
    $self->{output}->exit();
}
1;
__END__
=head1 MODE
Check Mseries domains status.
=over 8
=item B<--skip>
Skip 'unknown' domains.
=back
=cut
| s-duret/centreon-plugins | hardware/server/sun/mseries/mode/domains.pm | Perl | apache-2.0 | 2,950 |
# Request class for the AppStream DisassociateFleet API call.
# Auto-generated-style Paws shape: the two 'has' attributes are the
# request parameters; the class attributes wire up call dispatch.
package Paws::AppStream::DisassociateFleet;
  use Moose;
  has FleetName => (is => 'ro', isa => 'Str', required => 1);
  has StackName => (is => 'ro', isa => 'Str', required => 1);

  use MooseX::ClassAttribute;

  # Internal Paws plumbing: API operation name, result class, and the
  # (absent) key under which the result is nested.
  class_has _api_call => (isa => 'Str', is => 'ro', default => 'DisassociateFleet');
  class_has _returns => (isa => 'Str', is => 'ro', default => 'Paws::AppStream::DisassociateFleetResult');
  class_has _result_key => (isa => 'Str', is => 'ro');
1;
### main pod documentation begin ###
=head1 NAME
Paws::AppStream::DisassociateFleet - Arguments for method DisassociateFleet on Paws::AppStream
=head1 DESCRIPTION
This class represents the parameters used for calling the method DisassociateFleet on the
Amazon AppStream service. Use the attributes of this class
as arguments to method DisassociateFleet.
You shouldn't make instances of this class. Each attribute should be used as a named argument in the call to DisassociateFleet.
As an example:
$service_obj->DisassociateFleet(Att1 => $value1, Att2 => $value2, ...);
Values for attributes that are native types (Int, String, Float, etc) can be passed as-is (scalar values). Values for complex Types (objects) can be passed as a HashRef. The keys and values of the hashref will be used to instance the underlying object.
=head1 ATTRIBUTES
=head2 B<REQUIRED> FleetName => Str
The name of the fleet.
=head2 B<REQUIRED> StackName => Str
The name of the stack.
=head1 SEE ALSO
This class forms part of L<Paws>, documenting arguments for method DisassociateFleet in L<Paws::AppStream>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/AppStream/DisassociateFleet.pm | Perl | apache-2.0 | 1,744 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
Bio::EnsEMBL::Variation::Utils::dbSNP
=head1 SYNOPSIS
use Bio::EnsEMBL::Variation::Utils::dbSNP qw(decode_bitfield);
my $hashref = decode_bitfield('050160000a01050512110101');
print "variant is precious\n" if $hashref->{precious};
=head1 DESCRIPTION
This module provides a subroutine decode_bitfield which decodes
a dbSNP bitfield from their VCF files into a hash reference with values
for each value specified in the field.
The encoding is taken from the following NCBI document:
ftp://ftp.ncbi.nlm.nih.gov/snp/specs/dbSNP_BitField_latest.pdf
An additional subroutine converts allele strings from dbSNP format
to ensembl format, as used multiple times in the import pipeline.
=cut
package Bio::EnsEMBL::Variation::Utils::dbSNP;
use strict;
use warnings;
use base qw(Exporter);
our @EXPORT_OK = qw(decode_bitfield get_alleles_from_pattern);
use constant ENCODING_VERSION => 5;
# an example string, with the fields and offsets
# F0 F1 F2 F3 F4 F5 F6 F7 F8 F9
# 05 0160 000a 01 05 05 12 11 01 01
# 0 2 4 6 8 10 12 14 16 18 20 22
# offsets into the string for each field
# Hex-digit offset of each field within the 24-character bitfield string.
# Every field is decoded from the 2 hex digits (one byte) at its offset.
my %offsets = (
    F0   => 0,
    F1_1 => 2,
    F1_2 => 4,
    F2_1 => 6,
    F2_2 => 8,
    F3   => 10,
    F4   => 12,
    F5   => 14,
    F6   => 16,
    F7   => 18,
    F8   => 20,
    F9   => 22,
);

# a hash mapping the values encoded in each field to the bits used encode them
# if multiple bits are used (e.g. for version) then the values should be a
# listref of all the bits, this will be used to construct a bit mask to pick
# out the necessary information
#
# Bit numbers are 1-based (bit 1 is the least significant bit of the byte).
# Every multi-bit value below occupies the low-order bits of its byte, so
# decode_bitfield() needs no shift after masking.
my %fields= (
    F0 => {
        version => [3,2,1],
    },

    # F1: link-outs to other NCBI/external resources
    F1_1 => {
        trace_archive      => 8,
        assembly_archive   => 7,
        entrez_geo         => 6,
        probe_db           => 5,
        entrez_gene        => 4,
        entrez_sts         => 3,
        has_structure      => 2,
        submitter_link_out => 1,
    },
    F1_2 => {
        clinical         => 7,
        precious         => 6,
        provisional_tpa  => 5,
        pubmed           => 4,
        sra              => 3,
        organism_db_link => 2,
        mgc_clone        => 1,
    },

    # F2: gene-relative location and functional consequence
    F2_1 => {
        utr_3       => 8,
        utr_5       => 7,
        acceptor_ss => 6,
        donor_ss    => 5,
        intron      => 4,
        region_3    => 3,
        region_5    => 2,
        in_gene     => 1,
    },
    F2_2 => {
        stop_loss  => 6,
        frameshift => 5,
        missense   => 4,
        stop_gain  => 3,
        has_ref    => 2,
        has_syn    => 1,
    },

    # F3: mapping quality
    F3 => {
        has_other_snp         => 5,
        has_assembly_conflict => 4,
        is_assembly_specific  => 3,
        weight                => [1,2],
    },

    # F4: validation / allele-frequency evidence
    F4 => {
        is_mutation   => 4,
        is_validated  => 3,
        maf_all_pops  => 2,
        maf_some_pops => 1,
    },

    # F5: genotype availability
    F5 => {
        marker_high_density      => 3,
        in_haplotype_tagging_set => 2,
        genotypes_available      => 1,
    },

    # F6: 1000 Genomes / HapMap membership
    F6 => {
        tgp_2010_production  => 7,
        tgp_validated        => 6,
        tgp_2010_pilot       => 5,
        tgp_2009_pilot       => 4,
        hm_phase_3_genotyped => 3,
        hm_phase_2_genotyped => 2,
        hm_phase_1_genotyped => 1,
    },

    # F7: phenotype / clinical annotation
    F7 => {
        has_mesh          => 8,
        clinical_assay    => 7,
        has_tf            => 6,
        lsdb              => 5,
        dbgap_significant => 4,
        dbgap_lod_score   => 3,
        third_party_annot => 2,
        omim              => 1,
    },

    # F8: variation class (4-bit code, see %var_class below)
    F8 => {
        var_class => [4,3,2,1],
    },

    # F9: quality flags
    F9 => {
        is_suspect                => 7,
        is_somatic                => 6,
        contig_allele_not_present => 5,
        withdrawn                 => 4,
        cluster_no_overlap        => 3,
        strain_specific           => 2,
        genotype_conflict         => 1,
    },
);

# a lookup table for the variation class
my %var_class = (
    0b0001  => 'snp',
    0b0010  => 'dips',
    0b0011  => 'heterozygous',
    0b0100  => 'microsatellite',
    0b0101  => 'named',
    0b0110  => 'no_variation',
    0b0111  => 'mixed',
    0b1000  => 'multi_base',
);
=head2 decode_bitfield
Arg[1] : string $bitfield
Example : my $hashref = decode_bitfield('050160000a01050512110101');
Description : Decodes a dbSNP bitfield string which encodes various attributes of a variation
Returntype : A hash reference with a key for each attribute set in the field, if the field
is boolean (e.g. precious, suspect etc.) then the value should be treated as a
true or false value, otherwise (e.g. var_class, weight) the value is the actual
value of the attribute
=cut
# Decode a dbSNP hex bitfield string into a hash ref keyed by attribute name.
# Only attributes whose decoded value is true appear in the result; boolean
# flags map to a non-zero mask value, while 'weight' and 'var_class' carry a
# meaningful value.  Returns undef (with a warning) on a version mismatch.
sub decode_bitfield {
    my $bitfield = shift;

    my %res;

    for my $field (keys %fields) {
        for my $value (keys %{ $fields{$field} }) {

            my $bits = $fields{$field}->{$value};

            # if bits isn't an array, put the single bit into an array
            $bits = [$bits] unless ref $bits eq 'ARRAY';

            # OR together all the bits to give us our mask (bit numbers are
            # 1-based, so bit N contributes 2**(N-1))
            my $mask;
            for my $bit (@$bits) {
                $mask |= 2**($bit-1);
            }

            # extract the relevant characters from the bitfield string,
            # convert them to an integer, and apply our mask.
            # All multi-bit values in %fields occupy the low-order bits of
            # their byte, so no shift is required after masking.
            $res{$value} = hex(substr($bitfield, $offsets{$field}, 2)) & $mask;

            # check that the version matches what we expect; bail out early
            # (iteration order over the hash keys is arbitrary, so some other
            # values may already have been decoded — irrelevant, since we
            # discard the whole result here)
            if ($value eq 'version' && $res{$value} != ENCODING_VERSION) {
                warn "Version field does not match the expected version (".$res{$value}." vs ".ENCODING_VERSION.")";
                return undef;
            }

            # lookup the class description (undef if the 4-bit code is not
            # in %var_class; such entries are dropped just below)
            $res{$value} = $var_class{$res{$value}} if $value eq 'var_class';

            # get rid of anything set to 0, so the caller can treat mere key
            # existence as "flag is set"
            delete $res{$value} unless $res{$value};
        }
    }

    return \%res;
}
=head2 get_alleles_from_pattern
Arg[1] : string
Example : my $arrayref = get_alleles_from_pattern('(AGAC)25/26/27');
Description : splits an allele string in dbSNP ObservedVariation pattern to return separate alleles
Returntype : An array reference containing all alleles ['(AGAC)25', '(AGAC)26', '(AGAC)27']
=cut
# Split a dbSNP ObservedVariation pattern into individual alleles.
#
# Handles both the plain "A/T" form and the tandem-repeat form
# "(AGAC)25/26/27" (optionally mixed, e.g. "(TG)20/21/A/G"), expanding each
# bare repeat count to "(SEQ)count".  A count of 1 — written either "1" or
# "(SEQ)1" — becomes the bare repeat sequence (e.g. "TTA") so that later
# allele checks see the literal bases.  Returns an array ref of alleles.
sub get_alleles_from_pattern{
    my $pattern = shift;

    # Plain "A/T"-style string: just split on the separators.
    return [ split /\//, $pattern ]
        unless $pattern =~ /^(\(.*\))\d+\/\d+/;

    ## tandem stored (AGAC)25/26/27/28/29/30, (TTA)1/6/7/ or mixed (TG)20/21/A/G
    my $repeat_unit = $1;
    my @expanded;

    for my $allele ( split /\//, $pattern ) {
        if ( $allele eq "1" or $allele eq $repeat_unit . 1 ) {
            # (TTA)1 or /1/ => TTA for the allele check later
            ( my $bare = $repeat_unit ) =~ s/\(|\)//g;
            push @expanded, $bare;
        }
        elsif ( $allele !~ /\D+/ ) {
            # a bare count, e.g. /4/ => (TTA)4
            push @expanded, $repeat_unit . $allele;
        }
        else {
            # already fully spelled out, e.g. (TG)20 or A: keep as is
            push @expanded, $allele;
        }
    }

    return \@expanded;
}
1;
| willmclaren/ensembl-variation | modules/Bio/EnsEMBL/Variation/Utils/dbSNP.pm | Perl | apache-2.0 | 8,230 |
#!%PERL%
# Copyright (c) vhffs project and its contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#3. Neither the name of vhffs nor the names of its contributors
# may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
#"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
#FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
#COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
#INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
#BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
#LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
#CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
use strict;
use utf8;
use Cwd;
use File::Path;
use File::Basename;
use Vhffs::Constants;
use Vhffs::Functions;
use Vhffs::Robots;
use Vhffs::Services::Bazaar;
package Vhffs::Robots::Bazaar;
# Create a bazaar repository on disk for an object in WAITING_FOR_CREATION
# state: make the directory, run "bzr init" in it, fix ownership/permissions,
# apply the public/private policy via modify(), and mail the owner.
# Returns 1 on success, undef on any failure (status is set to
# CREATION_ERROR and the error is logged).
sub create {
    my $bazaar = shift;
    return undef unless defined $bazaar and $bazaar->get_status == Vhffs::Constants::WAITING_FOR_CREATION;
    my $vhffs = $bazaar->get_vhffs;
    my $dir = $bazaar->get_dir;

    # A pre-existing path means a previous run left debris (or a clash):
    # refuse rather than re-initialise over it.
    if( -e $dir ) {
        $bazaar->set_status( Vhffs::Constants::CREATION_ERROR );
        $bazaar->commit;
        Vhffs::Robots::vhffs_log( $vhffs, 'An error occured while creating bazaar repository '.$bazaar->get_reponame.' to the filesystem' );
        return undef;
    }

    # make_path reports per-path failures through the error array instead
    # of dying.
    File::Path::make_path( $dir, { error => \my $errors });
    if(@$errors) {
        $bazaar->set_status( Vhffs::Constants::CREATION_ERROR );
        $bazaar->commit;
        Vhffs::Robots::vhffs_log( $vhffs, 'An error occured while creating bazaar repository '.$bazaar->get_reponame.' to the filesystem: '.join(', ', @$errors) );
        return undef;
    }

    # Run "bzr init" inside the new directory.  List-form pipe-open avoids
    # the shell entirely.
    my $oldcwd = Cwd::getcwd();
    if( chdir($dir) ) {
        my $childpid = open( my $output, '-|', 'bzr', 'init' );
        if($childpid) {
            # read process output then discard
            while(<$output>) {}
            # wait for the child to finish
            waitpid( $childpid, 0 );
            # we don't care whether bzr succedded, we are going to check that ourself
        }
    }
    chdir($oldcwd);

    # Success is judged by the presence of the .bzr control directory,
    # not by bzr's exit status.
    unless( -d $dir.'/.bzr' ) {
        $bazaar->set_status( Vhffs::Constants::CREATION_ERROR );
        $bazaar->commit;
        Vhffs::Robots::vhffs_log( $vhffs, 'An error occured while creating bazaar repository '.$bazaar->get_reponame.' to the filesystem' );
        return undef;
    }

    # Files 0664, directories setgid 02775, owned by the repo's user/group.
    Vhffs::Robots::chmod_recur( $dir, 0664, 02775 );
    Vhffs::Robots::chown_recur( $dir, $bazaar->get_owner_uid, $bazaar->get_owner_gid );

    Vhffs::Robots::vhffs_log( $vhffs, 'Created bazaar repository '.$bazaar->get_reponame );

    # modify() applies the public/private mode and flips status to ACTIVATED.
    return undef unless modify( $bazaar );
    $bazaar->send_created_mail;
    return 1;
}
# Delete a bazaar repository in WAITING_FOR_DELETION state: archive it as a
# tarball, remove the tree from disk, then delete the database object.
# Returns 1 on success, undef on failure (status set to DELETION_ERROR).
sub delete {
    my $bazaar = shift;
    return undef unless defined $bazaar and $bazaar->get_status == Vhffs::Constants::WAITING_FOR_DELETION;
    my $vhffs = $bazaar->get_vhffs;
    my $dir = $bazaar->get_dir;

    # Keep a tar.gz snapshot before destroying anything.
    Vhffs::Robots::archive_targz( $bazaar, $dir );

    File::Path::remove_tree( $dir, { error => \my $errors });

    # Best-effort removal of the parent (per-group) directory; the return
    # value is deliberately ignored — rmdir fails harmlessly when the
    # directory still holds other entries (presumably other repositories;
    # TODO confirm).
    my $groupdir = File::Basename::dirname($dir);
    rmdir($groupdir);

    if(@$errors) {
        $bazaar->set_status( Vhffs::Constants::DELETION_ERROR );
        $bazaar->commit;
        Vhffs::Robots::vhffs_log( $vhffs, 'An error occured while removing bazaar repository '.$bazaar->get_reponame.' from the filesystem: '.join(', ', @$errors) );
        return undef;
    }

    # Filesystem is clean: now drop the object itself ($bazaar->delete is
    # the object's own method, not a recursive call).
    if( $bazaar->delete ) {
        Vhffs::Robots::vhffs_log( $vhffs, 'Deleted bazaar repository '.$bazaar->get_reponame );
    } else {
        $bazaar->set_status( Vhffs::Constants::DELETION_ERROR );
        $bazaar->commit;
        Vhffs::Robots::vhffs_log( $vhffs, 'An error occured while deleting bazaar repository '.$bazaar->get_reponame.' object' );
        return undef;
    }
    return 1;
}
# Apply pending modifications to a repository awaiting modification (or one
# just created): set the directory mode according to the public/private
# flag, log and record the change, then mark the object ACTIVATED.
# Returns 1 on success, undef if the object is missing or in the wrong state.
sub modify {
    my $bazaar = shift;
    return undef unless defined $bazaar and ( $bazaar->get_status == Vhffs::Constants::WAITING_FOR_MODIFICATION or $bazaar->get_status == Vhffs::Constants::WAITING_FOR_CREATION );
    my $vhffs = $bazaar->get_vhffs;
    my $dir = $bazaar->get_dir;

    my $mail_from = $bazaar->get_config->{notify_from};
    if( $bazaar->get_ml_name !~ /^\s*$/ ) {
        # TODO: Bazaar mail on commit
    }

    # Pick mode and label together: world-readable setgid dir when public,
    # group-only otherwise.
    my ( $mode, $visibility ) =
        $bazaar->is_public ? ( 02775, 'public' ) : ( 02770, 'private' );
    chmod $mode, $bazaar->get_dir;
    Vhffs::Robots::vhffs_log( $vhffs,
        'Bazaar repository '.$bazaar->get_reponame." is now $visibility" );
    $bazaar->add_history( "Is now $visibility" );

    $bazaar->set_status( Vhffs::Constants::ACTIVATED );
    $bazaar->commit;
    return 1;
}
1;
| najamelan/vhffs-4.5 | vhffs-api/src/Vhffs/Robots/Bazaar.pm | Perl | bsd-3-clause | 5,343 |
##
## Put me in ~/.irssi/scripts, and then execute the following in irssi:
##
## /load perl
## /script load xaway
##
use strict;
use warnings;
use Irssi;
use vars qw($VERSION %IRSSI);
use IO::Handle;
use X11::Protocol;
$VERSION = "0.01";
%IRSSI = (
    authors     => 'Vladimír Štill',
    contact     => 'xstill@fi.muni.cz',
    name        => 'xaway',
    description => 'TODO',
    license     => 'BSD',
    url         => 'TODO',
);

# Connect to the X server (from $DISPLAY) and scan the direct children of
# the root window for one whose WM_NAME property is exactly "irssi".
my $x = X11::Protocol->new();

my ( $root, $parent, @kids ) = $x->QueryTree( $x->root );
my $irssi;
for my $win ( @kids ) {
    my ( $name ) = $x->GetProperty( $win,
                                    $x->atom( "WM_NAME" ),
                                    $x->atom( "STRING" ), 0, ~0, 0 );
    # NOTE(review): if no such window exists, $irssi stays undef and the
    # check_x timer below will fail at its first X call — confirm intended.
    if ( $name eq "irssi" ) {
        $irssi = $win;
        last;
    }
}

####################

# Script state: window number to return to, and whether the terminal
# showing irssi is currently considered visible.
my $lastwin = 2;
my $inFocus = 1;
# Timer callback: poll the map state of the irssi terminal window and
# switch irssi's active window accordingly.  When the terminal becomes
# visible again, jump back to the remembered window; when it is unmapped
# (hidden/iconified), remember the current window and switch to window 1
# (the status window).
sub check_x {
    my %attrs = $x->GetWindowAttributes( $irssi );
    return unless exists $attrs{map_state};

    my $state = $attrs{map_state};
    if ( $state eq 'Viewable' ) {
        if ( !$inFocus ) {
            Irssi::command( "window goto $lastwin" );
            $inFocus = 1;
        }
    }
    elsif ( $state eq 'Unmapped' ) {
        if ( $inFocus ) {
            my $active = Irssi::active_win();
            $lastwin = $active->{refnum};
            Irssi::command( "window goto 1" );
            $inFocus = 0;
        }
    }
    else {
        # NOTE(review): X also reports 'Unviewable' (mapped, but an ancestor
        # is unmapped); it currently lands here — confirm that is intended.
        print "invalid state";
    }
}
# Poll the X window state every 100 ms for the lifetime of the script;
# the returned tag could be passed to Irssi::timeout_remove() to stop it.
my $timerName = Irssi::timeout_add( 100, \&check_x, '' );
| vlstill/irssiScripts | xaway.pl | Perl | bsd-3-clause | 1,491 |
###########################################################################
#
# This file is partially auto-generated by the DateTime::Locale generator
# tools (v0.10). This code generator comes with the DateTime::Locale
# distribution in the tools/ directory, and is called generate-modules.
#
# This file was generated from the CLDR JSON locale data. See the LICENSE.cldr
# file included in this distribution for license details.
#
# Do not edit this file directly unless you are sure the part you are editing
# is not created by the generator.
#
###########################################################################
=pod
=encoding UTF-8
=head1 NAME
DateTime::Locale::en - Locale data examples for the en locale.
=head1 DESCRIPTION
This pod file contains examples of the locale data available for the
English locale.
=head2 Days
=head3 Wide (format)
Monday
Tuesday
Wednesday
Thursday
Friday
Saturday
Sunday
=head3 Abbreviated (format)
Mon
Tue
Wed
Thu
Fri
Sat
Sun
=head3 Narrow (format)
M
T
W
T
F
S
S
=head3 Wide (stand-alone)
Monday
Tuesday
Wednesday
Thursday
Friday
Saturday
Sunday
=head3 Abbreviated (stand-alone)
Mon
Tue
Wed
Thu
Fri
Sat
Sun
=head3 Narrow (stand-alone)
M
T
W
T
F
S
S
=head2 Months
=head3 Wide (format)
January
February
March
April
May
June
July
August
September
October
November
December
=head3 Abbreviated (format)
Jan
Feb
Mar
Apr
May
Jun
Jul
Aug
Sep
Oct
Nov
Dec
=head3 Narrow (format)
J
F
M
A
M
J
J
A
S
O
N
D
=head3 Wide (stand-alone)
January
February
March
April
May
June
July
August
September
October
November
December
=head3 Abbreviated (stand-alone)
Jan
Feb
Mar
Apr
May
Jun
Jul
Aug
Sep
Oct
Nov
Dec
=head3 Narrow (stand-alone)
J
F
M
A
M
J
J
A
S
O
N
D
=head2 Quarters
=head3 Wide (format)
1st quarter
2nd quarter
3rd quarter
4th quarter
=head3 Abbreviated (format)
Q1
Q2
Q3
Q4
=head3 Narrow (format)
1
2
3
4
=head3 Wide (stand-alone)
1st quarter
2nd quarter
3rd quarter
4th quarter
=head3 Abbreviated (stand-alone)
Q1
Q2
Q3
Q4
=head3 Narrow (stand-alone)
1
2
3
4
=head2 Eras
=head3 Wide (format)
Before Christ
Anno Domini
=head3 Abbreviated (format)
BC
AD
=head3 Narrow (format)
B
A
=head2 Date Formats
=head3 Full
2008-02-05T18:30:30 = Tuesday, February 5, 2008
1995-12-22T09:05:02 = Friday, December 22, 1995
-0010-09-15T04:44:23 = Saturday, September 15, -10
=head3 Long
2008-02-05T18:30:30 = February 5, 2008
1995-12-22T09:05:02 = December 22, 1995
-0010-09-15T04:44:23 = September 15, -10
=head3 Medium
2008-02-05T18:30:30 = Feb 5, 2008
1995-12-22T09:05:02 = Dec 22, 1995
-0010-09-15T04:44:23 = Sep 15, -10
=head3 Short
2008-02-05T18:30:30 = 2/5/08
1995-12-22T09:05:02 = 12/22/95
-0010-09-15T04:44:23 = 9/15/-10
=head2 Time Formats
=head3 Full
2008-02-05T18:30:30 = 6:30:30 PM UTC
1995-12-22T09:05:02 = 9:05:02 AM UTC
-0010-09-15T04:44:23 = 4:44:23 AM UTC
=head3 Long
2008-02-05T18:30:30 = 6:30:30 PM UTC
1995-12-22T09:05:02 = 9:05:02 AM UTC
-0010-09-15T04:44:23 = 4:44:23 AM UTC
=head3 Medium
2008-02-05T18:30:30 = 6:30:30 PM
1995-12-22T09:05:02 = 9:05:02 AM
-0010-09-15T04:44:23 = 4:44:23 AM
=head3 Short
2008-02-05T18:30:30 = 6:30 PM
1995-12-22T09:05:02 = 9:05 AM
-0010-09-15T04:44:23 = 4:44 AM
=head2 Datetime Formats
=head3 Full
2008-02-05T18:30:30 = Tuesday, February 5, 2008 at 6:30:30 PM UTC
1995-12-22T09:05:02 = Friday, December 22, 1995 at 9:05:02 AM UTC
-0010-09-15T04:44:23 = Saturday, September 15, -10 at 4:44:23 AM UTC
=head3 Long
2008-02-05T18:30:30 = February 5, 2008 at 6:30:30 PM UTC
1995-12-22T09:05:02 = December 22, 1995 at 9:05:02 AM UTC
-0010-09-15T04:44:23 = September 15, -10 at 4:44:23 AM UTC
=head3 Medium
2008-02-05T18:30:30 = Feb 5, 2008, 6:30:30 PM
1995-12-22T09:05:02 = Dec 22, 1995, 9:05:02 AM
-0010-09-15T04:44:23 = Sep 15, -10, 4:44:23 AM
=head3 Short
2008-02-05T18:30:30 = 2/5/08, 6:30 PM
1995-12-22T09:05:02 = 12/22/95, 9:05 AM
-0010-09-15T04:44:23 = 9/15/-10, 4:44 AM
=head2 Available Formats
=head3 E (ccc)
2008-02-05T18:30:30 = Tue
1995-12-22T09:05:02 = Fri
-0010-09-15T04:44:23 = Sat
=head3 EHm (E HH:mm)
2008-02-05T18:30:30 = Tue 18:30
1995-12-22T09:05:02 = Fri 09:05
-0010-09-15T04:44:23 = Sat 04:44
=head3 EHms (E HH:mm:ss)
2008-02-05T18:30:30 = Tue 18:30:30
1995-12-22T09:05:02 = Fri 09:05:02
-0010-09-15T04:44:23 = Sat 04:44:23
=head3 Ed (d E)
2008-02-05T18:30:30 = 5 Tue
1995-12-22T09:05:02 = 22 Fri
-0010-09-15T04:44:23 = 15 Sat
=head3 Ehm (E h:mm a)
2008-02-05T18:30:30 = Tue 6:30 PM
1995-12-22T09:05:02 = Fri 9:05 AM
-0010-09-15T04:44:23 = Sat 4:44 AM
=head3 Ehms (E h:mm:ss a)
2008-02-05T18:30:30 = Tue 6:30:30 PM
1995-12-22T09:05:02 = Fri 9:05:02 AM
-0010-09-15T04:44:23 = Sat 4:44:23 AM
=head3 Gy (y G)
2008-02-05T18:30:30 = 2008 AD
1995-12-22T09:05:02 = 1995 AD
-0010-09-15T04:44:23 = -10 BC
=head3 GyMMM (MMM y G)
2008-02-05T18:30:30 = Feb 2008 AD
1995-12-22T09:05:02 = Dec 1995 AD
-0010-09-15T04:44:23 = Sep -10 BC
=head3 GyMMMEd (E, MMM d, y G)
2008-02-05T18:30:30 = Tue, Feb 5, 2008 AD
1995-12-22T09:05:02 = Fri, Dec 22, 1995 AD
-0010-09-15T04:44:23 = Sat, Sep 15, -10 BC
=head3 GyMMMd (MMM d, y G)
2008-02-05T18:30:30 = Feb 5, 2008 AD
1995-12-22T09:05:02 = Dec 22, 1995 AD
-0010-09-15T04:44:23 = Sep 15, -10 BC
=head3 H (HH)
2008-02-05T18:30:30 = 18
1995-12-22T09:05:02 = 09
-0010-09-15T04:44:23 = 04
=head3 Hm (HH:mm)
2008-02-05T18:30:30 = 18:30
1995-12-22T09:05:02 = 09:05
-0010-09-15T04:44:23 = 04:44
=head3 Hms (HH:mm:ss)
2008-02-05T18:30:30 = 18:30:30
1995-12-22T09:05:02 = 09:05:02
-0010-09-15T04:44:23 = 04:44:23
=head3 Hmsv (HH:mm:ss v)
2008-02-05T18:30:30 = 18:30:30 UTC
1995-12-22T09:05:02 = 09:05:02 UTC
-0010-09-15T04:44:23 = 04:44:23 UTC
=head3 Hmv (HH:mm v)
2008-02-05T18:30:30 = 18:30 UTC
1995-12-22T09:05:02 = 09:05 UTC
-0010-09-15T04:44:23 = 04:44 UTC
=head3 M (L)
2008-02-05T18:30:30 = 2
1995-12-22T09:05:02 = 12
-0010-09-15T04:44:23 = 9
=head3 MEd (E, M/d)
2008-02-05T18:30:30 = Tue, 2/5
1995-12-22T09:05:02 = Fri, 12/22
-0010-09-15T04:44:23 = Sat, 9/15
=head3 MMM (LLL)
2008-02-05T18:30:30 = Feb
1995-12-22T09:05:02 = Dec
-0010-09-15T04:44:23 = Sep
=head3 MMMEd (E, MMM d)
2008-02-05T18:30:30 = Tue, Feb 5
1995-12-22T09:05:02 = Fri, Dec 22
-0010-09-15T04:44:23 = Sat, Sep 15
=head3 MMMMd (MMMM d)
2008-02-05T18:30:30 = February 5
1995-12-22T09:05:02 = December 22
-0010-09-15T04:44:23 = September 15
=head3 MMMd (MMM d)
2008-02-05T18:30:30 = Feb 5
1995-12-22T09:05:02 = Dec 22
-0010-09-15T04:44:23 = Sep 15
=head3 Md (M/d)
2008-02-05T18:30:30 = 2/5
1995-12-22T09:05:02 = 12/22
-0010-09-15T04:44:23 = 9/15
=head3 d (d)
2008-02-05T18:30:30 = 5
1995-12-22T09:05:02 = 22
-0010-09-15T04:44:23 = 15
=head3 h (h a)
2008-02-05T18:30:30 = 6 PM
1995-12-22T09:05:02 = 9 AM
-0010-09-15T04:44:23 = 4 AM
=head3 hm (h:mm a)
2008-02-05T18:30:30 = 6:30 PM
1995-12-22T09:05:02 = 9:05 AM
-0010-09-15T04:44:23 = 4:44 AM
=head3 hms (h:mm:ss a)
2008-02-05T18:30:30 = 6:30:30 PM
1995-12-22T09:05:02 = 9:05:02 AM
-0010-09-15T04:44:23 = 4:44:23 AM
=head3 hmsv (h:mm:ss a v)
2008-02-05T18:30:30 = 6:30:30 PM UTC
1995-12-22T09:05:02 = 9:05:02 AM UTC
-0010-09-15T04:44:23 = 4:44:23 AM UTC
=head3 hmv (h:mm a v)
2008-02-05T18:30:30 = 6:30 PM UTC
1995-12-22T09:05:02 = 9:05 AM UTC
-0010-09-15T04:44:23 = 4:44 AM UTC
=head3 ms (mm:ss)
2008-02-05T18:30:30 = 30:30
1995-12-22T09:05:02 = 05:02
-0010-09-15T04:44:23 = 44:23
=head3 y (y)
2008-02-05T18:30:30 = 2008
1995-12-22T09:05:02 = 1995
-0010-09-15T04:44:23 = -10
=head3 yM (M/y)
2008-02-05T18:30:30 = 2/2008
1995-12-22T09:05:02 = 12/1995
-0010-09-15T04:44:23 = 9/-10
=head3 yMEd (E, M/d/y)
2008-02-05T18:30:30 = Tue, 2/5/2008
1995-12-22T09:05:02 = Fri, 12/22/1995
-0010-09-15T04:44:23 = Sat, 9/15/-10
=head3 yMMM (MMM y)
2008-02-05T18:30:30 = Feb 2008
1995-12-22T09:05:02 = Dec 1995
-0010-09-15T04:44:23 = Sep -10
=head3 yMMMEd (E, MMM d, y)
2008-02-05T18:30:30 = Tue, Feb 5, 2008
1995-12-22T09:05:02 = Fri, Dec 22, 1995
-0010-09-15T04:44:23 = Sat, Sep 15, -10
=head3 yMMMM (MMMM y)
2008-02-05T18:30:30 = February 2008
1995-12-22T09:05:02 = December 1995
-0010-09-15T04:44:23 = September -10
=head3 yMMMd (MMM d, y)
2008-02-05T18:30:30 = Feb 5, 2008
1995-12-22T09:05:02 = Dec 22, 1995
-0010-09-15T04:44:23 = Sep 15, -10
=head3 yMd (M/d/y)
2008-02-05T18:30:30 = 2/5/2008
1995-12-22T09:05:02 = 12/22/1995
-0010-09-15T04:44:23 = 9/15/-10
=head3 yQQQ (QQQ y)
2008-02-05T18:30:30 = Q1 2008
1995-12-22T09:05:02 = Q4 1995
-0010-09-15T04:44:23 = Q3 -10
=head3 yQQQQ (QQQQ y)
2008-02-05T18:30:30 = 1st quarter 2008
1995-12-22T09:05:02 = 4th quarter 1995
-0010-09-15T04:44:23 = 3rd quarter -10
=head2 Miscellaneous
=head3 Prefers 24 hour time?
No
=head3 Local first day of the week
1 (Monday)
=head1 SUPPORT
See L<DateTime::Locale>.
=cut
| jkb78/extrajnm | local/lib/perl5/DateTime/Locale/en.pod | Perl | mit | 9,434 |
# Copyright 2018 Jeffrey Kegler
# This file is part of Marpa::R2. Marpa::R2 is free software: you can
# redistribute it and/or modify it under the terms of the GNU Lesser
# General Public License as published by the Free Software Foundation,
# either version 3 of the License, or (at your option) any later version.
#
# Marpa::R2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser
# General Public License along with Marpa::R2. If not, see
# http://www.gnu.org/licenses/.
package Marpa::R2::HTML;
use 5.010001;
use strict;
use warnings;
use vars qw( $VERSION $STRING_VERSION );
$VERSION = '8.000000';
$STRING_VERSION = $VERSION;
## no critic (BuiltinFunctions::ProhibitStringyEval)
$VERSION = eval $VERSION;
## use critic
our @EXPORT_OK;
use base qw(Exporter);
BEGIN { @EXPORT_OK = qw(html); }
package Marpa::R2::HTML::Internal;
# Data::Dumper is used in tracing
use Data::Dumper;
use Marpa::R2::HTML::Internal;
use Marpa::R2::HTML::Config;
use Carp ();
use HTML::Parser 3.69;
use HTML::Entities qw(decode_entities);
# versions below must be coordinated with
# those required in Build.PL
use English qw( -no_match_vars );
use Marpa::R2;
{
    # Sanity check at load time: the HTML layer is versioned in lockstep
    # with the Marpa::R2 core, so refuse to run against a mismatched core.
    my $submodule_version = $Marpa::R2::VERSION;
    die 'Marpa::R2::VERSION not defined' if not defined $submodule_version;
    die
        "Marpa::R2::VERSION ($submodule_version) does not match Marpa::R2::HTML::VERSION ",
        $Marpa::R2::HTML::VERSION
        if $submodule_version != $Marpa::R2::HTML::VERSION;
}
use Marpa::R2::Thin::Trace;
# constants
use constant PHYSICAL_TOKEN      => 42;
use constant RUBY_SLIPPERS_TOKEN => 43;

# Locate the numeric codes of the two libmarpa error conditions this module
# needs to recognise, by scanning the error-name table once at load time.
our @LIBMARPA_ERROR_NAMES = Marpa::R2::Thin::error_names();
our $UNEXPECTED_TOKEN_ID;
our $NO_MARPA_ERROR;
ERROR: for my $error_number ( 0 .. $#LIBMARPA_ERROR_NAMES ) {
    my $error_name = $LIBMARPA_ERROR_NAMES[$error_number];
    if ( $error_name eq 'MARPA_ERR_UNEXPECTED_TOKEN_ID' ) {
        $UNEXPECTED_TOKEN_ID = $error_number;
        next ERROR;
    }
    if ( $error_name eq 'MARPA_ERR_NONE' ) {
        $NO_MARPA_ERROR = $error_number;
        next ERROR;
    }
} ## end ERROR: for my $error_number ( 0 .. $#LIBMARPA_ERROR_NAMES )

use Marpa::R2::HTML::Callback;
{
    # Same lockstep-version guard as above, for the Callback submodule.
    my $submodule_version = $Marpa::R2::HTML::Callback::VERSION;
    die 'Marpa::R2::HTML::Callback::VERSION not defined'
        if not defined $submodule_version;
    die
        "Marpa::R2::HTML::Callback::VERSION ($submodule_version) does not match Marpa::R2::HTML::VERSION ",
        $Marpa::R2::HTML::VERSION
        if $submodule_version != $Marpa::R2::HTML::VERSION;
}
# Map an earleme (parse position) to the (line, column) pair of the HTML
# token that starts there.  Dies if no token maps to the earleme.
sub earleme_to_linecol {
    my ( $self, $earleme ) = @_;
    my $html_parser_tokens = $self->{tokens};
    my $html_token_ix = $self->{earleme_to_html_token_ix}->[$earleme];

    # BUG FIX: the defined-check must precede the +1 offset.  The original
    # computed "... + 1" first, so an undef table entry became 1 (with a
    # warning) and the check could never fire.
    die if not defined $html_token_ix;
    $html_token_ix += 1;    # +1: skip the sentinel entry in the token list

    return @{ $html_parser_tokens->[$html_token_ix] }[
        Marpa::R2::HTML::Internal::Token::LINE,
        Marpa::R2::HTML::Internal::Token::COLUMN,
    ];
} ## end sub earleme_to_linecol
# Map an earleme (parse position) to the end byte-offset of the HTML token
# that starts there.  Dies if no token maps to the earleme.
sub earleme_to_offset {
    my ( $self, $earleme ) = @_;
    my $html_parser_tokens = $self->{tokens};
    my $html_token_ix = $self->{earleme_to_html_token_ix}->[$earleme];

    # BUG FIX: check definedness *before* adding 1 — after the addition the
    # value is always defined, so the original check was dead code.
    die if not defined $html_token_ix;
    $html_token_ix += 1;    # +1: skip the sentinel entry in the token list

    return $html_parser_tokens->[$html_token_ix]
        ->[Marpa::R2::HTML::Internal::Token::END_OFFSET];
} ## end sub earleme_to_offset
# Register one handler from a long-form description hash with keys
# 'element', 'class', 'pseudoclass' and 'action' (a CODE ref).
# A pseudoclass handler is stored by species name; otherwise the handler is
# keyed by "element;class", with '*' standing in for an unspecified part.
# Returns 1; throws via Marpa::R2::exception on malformed input.
sub add_handler {
    my ( $self, $handler_description ) = @_;
    my $ref_type = ref $handler_description || 'not a reference';
    Marpa::R2::exception(
        "Long form handler description should be ref to hash, but it is $ref_type"
    ) if $ref_type ne 'HASH';

    # Remove the known keys so anything left over can be reported as an
    # unknown option.
    my $element     = delete $handler_description->{element};
    my $class       = delete $handler_description->{class};
    my $pseudoclass = delete $handler_description->{pseudoclass};
    my $action      = delete $handler_description->{action};
    Marpa::R2::exception(
        'Unknown option(s) in Long form handler description: ',
        ( join q{ }, keys %{$handler_description} )
    ) if scalar keys %{$handler_description};

    Marpa::R2::exception('Handler action must be CODE ref')
        if ref $action ne 'CODE';

    # Pseudoclass handlers (TOP, COMMENT, ...) live in their own table and
    # ignore element/class.
    if ( defined $pseudoclass ) {
        $self->{handler_by_species}->{$pseudoclass} = $action;
        return 1;
    }

    # Normalize: missing element/class match anything ('*'); element names
    # are case-insensitive in HTML.
    $element = q{*} if not $element;
    $element = lc $element;
    $class //= q{*};
    $self->{handler_by_element_and_class}->{ join q{;}, $element, $class } =
        $action;
    return 1;
} ## end sub add_handler
# Register a list of long-form handler descriptions.
# Arg is a ref to an ARRAY of hash refs, each suitable for add_handler().
# Returns 1; throws on a non-ARRAY argument or malformed entries.
sub add_handlers_from_hashes {
    my ( $self, $handler_specs ) = @_;
    my $ref_type = ref $handler_specs || 'not a reference';
    Marpa::R2::exception(
        "handlers arg must must be ref to ARRAY, it is $ref_type")
        if $ref_type ne 'ARRAY';

    # BUG FIX: the argument is an ARRAY ref (verified just above), but the
    # original iterated "keys %{$handler_specs}" — a hash dereference that
    # dies with "Not a HASH reference" for every valid call.  Iterate the
    # array elements themselves and hand each spec to add_handler().
    for my $handler_spec ( @{$handler_specs} ) {
        add_handler( $self, $handler_spec );
    }
    return 1;
} ## end sub add_handlers_from_hashes
# Register handlers from short-form specifiers: keys of the form
# "element.class", "element:pseudoclass" or "element", mapping to CODE refs.
# Each specifier is parsed and delegated to add_handler().
sub add_handlers {
    my ( $self, $handler_specs ) = @_;
    HANDLER_SPEC: for my $specifier ( keys %{$handler_specs} ) {
        my ( $element, $class, $pseudoclass );
        my $action = $handler_specs->{$specifier};

        # Parse the specifier by trying each form in turn.  A list
        # assignment in boolean context yields the number of elements the
        # RHS produced, so a failed match (empty list) is false and control
        # falls through to the next alternative.
        ( $element, $class ) = ( $specifier =~ /\A ([^.]*) [.] (.*) \z/oxms )
            or ( $element, $pseudoclass ) =
            ( $specifier =~ /\A ([^:]*) [:] (.*) \z/oxms )
            or $element = $specifier;

        # The fixed set of recognised pseudoclasses (built once).
        state $allowed_pseudoclasses =
            { map { ( $_, 1 ) }
                qw(TOP PI DECL COMMENT PROLOG TRAILER WHITESPACE CDATA PCDATA CRUFT)
            };
        if ( $pseudoclass
            and not exists $allowed_pseudoclasses->{$pseudoclass} )
        {
            Marpa::R2::exception(
                qq{pseudoclass "$pseudoclass" is not known:\n},
                "Specifier was $specifier\n" );
        } ## end if ( $pseudoclass and not exists $allowed_pseudoclasses...)

        # A pseudoclass applies globally; combining it with an element
        # (e.g. "div:TOP") is an error.
        if ( $pseudoclass and $element ) {
            Marpa::R2::exception(
                qq{pseudoclass "$pseudoclass" may not have an element specified:\n},
                "Specifier was $specifier\n"
            );
        } ## end if ( $pseudoclass and $element )
        add_handler(
            $self,
            {   element     => $element,
                class       => $class,
                pseudoclass => $pseudoclass,
                action      => $action
            }
        );
    } ## end HANDLER_SPEC: for my $specifier ( keys %{$handler_specs} )
    return 1;
} ## end sub add_handlers
# If we factor this package, this will be the constructor.
## no critic (Subroutines::RequireArgUnpacking)
sub create {
    ## use critic
    # Constructor for the HTML parser object.  Accepts any mix of:
    #   - hash refs of short-form handler specifiers (passed to add_handlers)
    #   - refs to hash refs of options (trace_* flags, dump_*, compile,
    #     and 'handlers' => arrayref of long-form handler descriptions)
    # Returns the new (unblessed) parser hash.
    my $self = {};
    $self->{trace_fh} = \*STDERR;
    ARG: for my $arg (@_) {
        my $ref_type = ref $arg || 'not a reference';
        if ( $ref_type eq 'HASH' ) {
            Marpa::R2::HTML::Internal::add_handlers( $self, $arg );
            next ARG;
        }
        Marpa::R2::exception(
            "Argument must be hash or refs to hash: it is $ref_type")
            if $ref_type ne 'REF';
        my $option_hash = ${$arg};
        $ref_type = ref $option_hash || 'not a reference';
        Marpa::R2::exception(
            "Argument must be hash or refs to hash: it is ref to $ref_type")
            if $ref_type ne 'HASH';
        OPTION: for my $option ( keys %{$option_hash} ) {
            if ( $option eq 'handlers' ) {
                add_handlers_from_hashes( $self, $option_hash->{$option} );
                # BUG FIX: without this, control fell through to the
                # allowed-options check below, which does not list
                # 'handlers', so the option always raised
                # "unknown option: handlers" after being processed.
                next OPTION;
            }
            state $allowed_options = {
                map { ( $_, 1 ) }
                    qw(trace_fh trace_values trace_handlers
                    trace_conflicts
                    trace_terminals trace_cruft
                    dump_AHFA dump_config compile
                    )
            };
            if ( not exists $allowed_options->{$option} ) {
                Marpa::R2::exception("unknown option: $option");
            }
            $self->{$option} = $option_hash->{$option};
        } ## end OPTION: for my $option ( keys %{$option_hash} )
    } ## end ARG: for my $arg (@_)

    # Grammar configuration: either compiled from a source string ref
    # ('compile' option) or the stock configuration.
    my $source_ref = $self->{compile};
    if ( defined $source_ref ) {
        ref $source_ref eq 'SCALAR'
            or Marpa::R2::exception(
            qq{value of "compile" option must be a SCALAR});
        $self->{config} = Marpa::R2::HTML::Config->new_from_compile($source_ref);
    } ## end if ( defined $source_ref )
    else {
        $self->{config} = Marpa::R2::HTML::Config->new();
    }
    return $self;
} ## end sub create
# Find the user handler for a rule.  Resolution order:
#   1. if the rule's action is a species ('SPE_<name>'), look it up in the
#      per-species table;
#   2. if it is an element ('ELE_<name>'), try element;class, *;class,
#      element;*, then *;* in the element/class table;
#   3. otherwise fall back to the literal string 'default_handler'.
sub handler_find {
    my ( $self, $rule_id, $class ) = @_;
    my $trace_handlers = $self->{trace_handlers};
    my $handler;
    $class //= q{*};
    my $action = $self->{action_by_rule_id}->[$rule_id];
    FIND_HANDLER: {
        last FIND_HANDLER if not defined $action;

        # Species handler, e.g. 'SPE_COMMENT' -> handler for 'COMMENT'.
        if ( index( $action, 'SPE_' ) == 0 ) {
            my $species = substr $action, 4;
            $handler = $self->{handler_by_species}->{$species};
            say {*STDERR}
                qq{Rule $rule_id: Found handler by species: "$species"}
                or Carp::croak("Cannot print: $ERRNO")
                if $trace_handlers and defined $handler;
            last FIND_HANDLER;
        } ## end if ( index( $action, 'SPE_' ) == 0 )

        ## At this point action always is defined
        ## and starts with 'ELE_'
        my $element = substr $action, 4;

        # Most specific key first; the grep below picks the first table
        # entry that is actually defined.
        my @handler_keys = (
            ( join q{;}, $element, $class ),
            ( join q{;}, q{*},     $class ),
            ( join q{;}, $element, q{*} ),
            ( join q{;}, q{*},     q{*} ),
        );
        ($handler) =
            grep {defined}
            @{ $self->{handler_by_element_and_class} }{@handler_keys};
        say {*STDERR} qq{Rule $rule_id: Found handler by action and class: "},
            ( grep { defined $self->{handler_by_element_and_class}->{$_} }
                @handler_keys )[0], q{"}
            or Carp::croak("Cannot print: $ERRNO")
            if $trace_handlers and defined $handler;
    } ## end FIND_HANDLER:
    return $handler if defined $handler;
    say {*STDERR} qq{Rule $rule_id: Using default handler for action "},
        ( $action // q{*} ), qq{" and class: "$class"}
        or Carp::croak("Cannot print: $ERRNO")
        if $trace_handlers;

    return 'default_handler';
} ## end sub handler_find
# "Original" value of a token range -- that is, the corresponding
# text of the original document, unchanged.
# Returned as a reference, because it may be very long
sub token_range_to_original {
    my ( $self, $first_token_ix, $last_token_ix ) = @_;

    # No tokens in the range: the original text is the empty string.
    return \q{} if not defined $first_token_ix;

    # Slice the source document between the start offset of the first token
    # and the end offset of the last one; returned by reference because the
    # span can be very long.
    my $tokens = $self->{tokens};
    my $from   = $tokens->[$first_token_ix]
        ->[Marpa::R2::HTML::Internal::Token::START_OFFSET];
    my $to = $tokens->[$last_token_ix]
        ->[Marpa::R2::HTML::Internal::Token::END_OFFSET];
    my $original = substr ${ $self->{document} }, $from, $to - $from;
    return \$original;
} ## end sub token_range_to_original
# "Original" value of token -- that is, the corresponding
# text of the original document, unchanged.
# The empty string if there is no such text.
# Returned as a reference, because it may be very long
# "Original" value of a single tdesc item: for PHYSICAL_TOKEN and
# VALUED_SPAN items, the corresponding text of the original document
# (as a string ref, via token_range_to_original); otherwise q{}.
# NOTE(review): the fallthrough cases return a plain empty string while the
# span cases return a reference — preserved as-is since callers may rely on
# it, but the asymmetry looks accidental; confirm.
sub tdesc_item_to_original {
    my ( $self, $tdesc_item ) = @_;

    # Cleanup: the original declared $text, $document and $tokens here but
    # never used them; removed.
    my $tdesc_item_type = $tdesc_item->[0];
    return q{} if not defined $tdesc_item_type;

    # Both span-bearing item types are handled identically (the original
    # had two byte-identical branches; merged).
    if (   $tdesc_item_type eq 'PHYSICAL_TOKEN'
        or $tdesc_item_type eq 'VALUED_SPAN' )
    {
        return token_range_to_original(
            $self,
            $tdesc_item->[Marpa::R2::HTML::Internal::TDesc::START_TOKEN],
            $tdesc_item->[Marpa::R2::HTML::Internal::TDesc::END_TOKEN],
        );
    } ## end if span-bearing item type
    return q{};
} ## end sub tdesc_item_to_original
# Given a token range and a tdesc list,
# return a reference to the literal value.
# Given a token range and a tdesc list, return a reference to the literal
# value: original document text for spans without a computed value, spliced
# with the computed values where the user supplied them.
sub range_and_values_to_literal {
    my ( $self, $next_token_ix, $final_token_ix, $tdesc_list ) = @_;

    # Pass 1: flatten the tdesc list — drop zero-width and Ruby Slippers
    # items, and inline the contents of VALUES items.
    my @flat_tdesc_list = ();
    TDESC_ITEM: for my $tdesc_item ( @{$tdesc_list} ) {
        my $type = $tdesc_item->[0];
        next TDESC_ITEM if not defined $type;
        next TDESC_ITEM if $type eq 'ZERO_SPAN';
        next TDESC_ITEM if $type eq 'RUBY_SLIPPERS_TOKEN';
        if ( $type eq 'VALUES' ) {
            push @flat_tdesc_list,
                @{ $tdesc_item->[Marpa::R2::HTML::Internal::TDesc::VALUE] };
            next TDESC_ITEM;
        }
        push @flat_tdesc_list, $tdesc_item;
    } ## end TDESC_ITEM: for my $tdesc_item ( @{$tdesc_list} )

    # Pass 2: walk the flattened items, emitting (a) original text for any
    # gap before an item, (b) the item's computed value when it has one,
    # and (c) original text for the item's own span otherwise.
    # $next_token_ix tracks the first token not yet emitted.
    my @literal_pieces = ();
    TDESC_ITEM: for my $tdesc_item (@flat_tdesc_list) {

        my ( $tdesc_item_type, $next_explicit_token_ix,
            $furthest_explicit_token_ix )
            = @{$tdesc_item};

        if ( not defined $next_explicit_token_ix ) {
            ## An element can contain no HTML tokens -- it may contain
            ## only Ruby Slippers tokens.
            ## Treat this as a special case.
            if ( $tdesc_item_type eq 'VALUED_SPAN' ) {
                my $value =
                    $tdesc_item->[Marpa::R2::HTML::Internal::TDesc::VALUE]
                    // q{};
                # q{} . $value forces stringification
                push @literal_pieces, \( q{} . $value );
            } ## end if ( $tdesc_item_type eq 'VALUED_SPAN' )
            next TDESC_ITEM;
        } ## end if ( not defined $next_explicit_token_ix )

        # Gap between the last emitted token and this item's first token.
        push @literal_pieces,
            token_range_to_original( $self, $next_token_ix,
            $next_explicit_token_ix - 1 )
            if $next_token_ix < $next_explicit_token_ix;
        if ( $tdesc_item_type eq 'VALUED_SPAN' ) {
            my $value =
                $tdesc_item->[Marpa::R2::HTML::Internal::TDesc::VALUE];
            if ( defined $value ) {
                push @literal_pieces, \( q{} . $value );
                $next_token_ix = $furthest_explicit_token_ix + 1;
                next TDESC_ITEM;
            }
            ## FALL THROUGH — a VALUED_SPAN without a value is treated
            ## like a physical span
        } ## end if ( $tdesc_item_type eq 'VALUED_SPAN' )
        push @literal_pieces,
            token_range_to_original( $self, $next_explicit_token_ix,
            $furthest_explicit_token_ix )
            if $next_explicit_token_ix <= $furthest_explicit_token_ix;
        $next_token_ix = $furthest_explicit_token_ix + 1;
    } ## end TDESC_ITEM: for my $tdesc_item (@flat_tdesc_list)

    # Join the collected string refs into one literal, returned by ref.
    return \( join q{}, map { ${$_} } @literal_pieces );
} ## end sub range_and_values_to_literal
# Map a rule ID to the list of symbol names in that rule:
# the LHS name first, then the RHS names in order.
# Returns the empty list when the rule ID is unknown.
sub symbol_names_by_rule_id {
    my ( $self, $rule_id ) = @_;
    my $tracer  = $self->{tracer};
    my $grammar = $tracer->grammar();
    my $length  = $grammar->rule_length($rule_id);
    return if not defined $length;
    my @symbol_ids = ( $grammar->rule_lhs($rule_id) );
    for my $rhs_ix ( 0 .. $length - 1 ) {
        push @symbol_ids, $grammar->rule_rhs( $rule_id, $rhs_ix );
    }
    return map { $tracer->symbol_name($_) } @symbol_ids;
} ## end sub symbol_names_by_rule_id
# Parse an HTML document (passed as a ref to a string) and return the
# evaluated result: either the value produced by a user TOP handler,
# or a reference to the literalized document.  May be called only
# once per object.
sub parse {
my ( $self, $document_ref ) = @_;
# NOTE(review): %tags appears to be unused in this sub.
my %tags = ();
Marpa::R2::exception(
"parse() already run on this object\n",
'For a new parse, create a new object'
) if $self->{document};
# Tracing switches, taken from the object at entry.
my $trace_cruft = $self->{trace_cruft};
my $trace_terminals = $self->{trace_terminals} // 0;
my $trace_conflicts = $self->{trace_conflicts};
my $trace_handlers = $self->{trace_handlers};
my $trace_values = $self->{trace_values};
my $trace_fh = $self->{trace_fh};
my $ref_type = ref $document_ref;
Marpa::R2::exception('Arg to parse() must be ref to string')
if not $ref_type
or $ref_type ne 'SCALAR'
or not defined ${$document_ref};
my $document = $self->{document} = $document_ref;
# Configuration data: core grammar rules, per-tag runtime info,
# Ruby Slippers ranks, empty-element tags, and tag groups.
my ($core_rules, $runtime_tag,
$rank_by_name, $is_empty_element,
$primary_group_by_tag
) = $self->{config}->contents();
$self->{is_empty_element} = $is_empty_element;
if ($self->{dump_config}) {
return $self->{config}->as_string();
}
my @action_by_rule_id = ();
$self->{action_by_rule_id} = \@action_by_rule_id;
my $thin_grammar = Marpa::R2::Thin::G->new( { if => 1 } );
my $tracer = Marpa::R2::Thin::Trace->new($thin_grammar);
$self->{tracer} = $tracer;
# Build the core grammar: symbols are created on demand; each rule
# becomes either a sequence rule (when "min" is set) or an ordinary
# BNF rule, and its semantic action is recorded by rule ID.
RULE: for my $rule ( @{$core_rules} ) {
my $lhs = $rule->{lhs};
my $rhs = $rule->{rhs};
my $min = $rule->{min};
my $action = $rule->{action};
my @symbol_ids = ();
for my $symbol_name ( $lhs, @{$rhs} ) {
push @symbol_ids,
$tracer->symbol_by_name($symbol_name)
// $tracer->symbol_new($symbol_name);
}
my ($lhs_id, @rhs_ids) = @symbol_ids;
my $rule_id;
if ( defined $min ) {
$rule_id =
$thin_grammar->sequence_new( $lhs_id, $rhs_ids[0],
{ min => $min } );
}
else {
$rule_id = $thin_grammar->rule_new( $lhs_id, \@rhs_ids );
}
$action_by_rule_id[$rule_id] = $action;
} ## end RULE: for my $rule ( @{$core_rules} )
# Some constants that we will use a lot
my $SYMID_CRUFT = $tracer->symbol_by_name('CRUFT');
my $SYMID_CDATA = $tracer->symbol_by_name('CDATA');
my $SYMID_PCDATA = $tracer->symbol_by_name('PCDATA');
my $SYMID_WHITESPACE = $tracer->symbol_by_name('WHITESPACE');
my $SYMID_PI = $tracer->symbol_by_name('PI');
my $SYMID_C = $tracer->symbol_by_name('C');
my $SYMID_D = $tracer->symbol_by_name('D');
my $SYMID_EOF = $tracer->symbol_by_name('EOF');
# Tokenize with HTML::Parser.  Each handler pushes an array ref onto
# @raw_tokens; the first element is either a tag name (start/end
# tags) or an already-known symbol ID (text, comment, declaration,
# processing instruction).  Text tokens start as WHITESPACE and may
# be upgraded to PCDATA below.  Only start tags capture "attr".
my @raw_tokens = ();
my $p = HTML::Parser->new(
api_version => 3,
start_h => [
\@raw_tokens, q{tagname,'S',line,column,offset,offset_end,is_cdata,attr}
],
end_h =>
[ \@raw_tokens, q{tagname,'E',line,column,offset,offset_end,is_cdata} ],
text_h => [
\@raw_tokens,
qq{'$SYMID_WHITESPACE','T',line,column,offset,offset_end,is_cdata}
],
comment_h =>
[ \@raw_tokens, qq{'$SYMID_C','C',line,column,offset,offset_end,is_cdata} ],
declaration_h =>
[ \@raw_tokens, qq{'$SYMID_D','D',line,column,offset,offset_end,is_cdata} ],
process_h =>
[ \@raw_tokens, qq{'$SYMID_PI','PI',line,column,offset,offset_end,is_cdata} ],
unbroken_text => 1
);
$p->parse( ${$document} );
$p->eof;
# Classify the raw tokens into Marpa terminal symbol IDs.  Start and
# end tags for tags not in the core grammar get rules created for
# them on the fly.
my @html_parser_tokens = ();
HTML_PARSER_TOKEN:
for my $raw_token (@raw_tokens) {
my ( undef, $token_type, $line, $column, $offset, $offset_end, $is_cdata, $attr ) =
@{$raw_token};
PROCESS_TOKEN_TYPE: {
if ($is_cdata) {
$raw_token->[Marpa::R2::HTML::Internal::Token::TOKEN_ID] =
$SYMID_CDATA;
last PROCESS_TOKEN_TYPE;
}
if ( $token_type eq 'T' ) {
# White space as defined in HTML 4.01
# space (x20); ASCII tab (x09); ASCII form feed (x0C;); Zero-width space (x200B)
# and the two characters which appear in line breaks:
# carriage return (x0D) and line feed (x0A)
# I avoid the Perl character codes because I do NOT want
# localization
$raw_token->[Marpa::R2::HTML::Internal::Token::TOKEN_ID] =
$SYMID_PCDATA if
substr(
${$document}, $offset, ( $offset_end - $offset )
) =~ / [^\x09\x0A\x0C\x0D\x20\x{200B}] /oxms;
last PROCESS_TOKEN_TYPE;
} ## end if ( $token_type eq 'T' )
if ( $token_type eq 'E' or $token_type eq 'S' ) {
# If it's a virtual token from HTML::Parser,
# pretend it never existed.
# HTML::Parser supplies missing
# end tags for title elements, but for no
# others.
# This is not helpful and we need to special-case
# these zero-length tags and throw them away.
next HTML_PARSER_TOKEN if $offset_end <= $offset;
my $tag_name = $raw_token
->[Marpa::R2::HTML::Internal::Token::TAG_NAME];
my $terminal = $token_type . q{_} . $tag_name;
my $terminal_id = $tracer->symbol_by_name($terminal);
if ( not defined $terminal_id ) {
# Unknown tag: create symbols and two rules for it --
# "group ::= element" and
# "element ::= start-tag contents end-tag".
my $group_symbol = $primary_group_by_tag->{$tag_name}
// 'GRP_anywhere';
my $contents = $runtime_tag->{$tag_name} // 'FLO_mixed';
my @symbol_names = (
$group_symbol,
'ELE_' . $tag_name,
'S_' . $tag_name,
$contents, 'E_' . $tag_name
);
my @symbol_ids = ();
SYMBOL: for my $symbol_name (@symbol_names) {
my $symbol_id = $tracer->symbol_by_name($symbol_name);
if ( not defined $symbol_id ) {
$symbol_id = $tracer->symbol_new($symbol_name);
}
push @symbol_ids, $symbol_id;
} ## end SYMBOL: for my $symbol_name (@symbol_names)
my ( $top_id, $lhs_id, @rhs_ids ) = @symbol_ids;
$thin_grammar->rule_new( $top_id, [$lhs_id] );
my $element_rule_id =
$thin_grammar->rule_new( $lhs_id, \@rhs_ids );
$action_by_rule_id[$element_rule_id] = 'ELE_' . $tag_name;
$terminal_id = $tracer->symbol_by_name($terminal);
} ## end if ( not defined $terminal_id )
$raw_token->[Marpa::R2::HTML::Internal::Token::TOKEN_ID] =
$terminal_id;
last PROCESS_TOKEN_TYPE;
} ## end if ( $token_type eq 'E' or $token_type eq 'S' )
} ## end PROCESS_TOKEN_TYPE:
push @html_parser_tokens, $raw_token;
} ## end HTML_PARSER_TOKEN: for my $raw_token (@raw_tokens)
# Add a synthetic EOF token.
# Points AFTER the last HTML
# Parser token.
# The other logic needs to be ready for this.
{
my $document_length = length ${$document};
my $last_token = $html_parser_tokens[-1];
push @html_parser_tokens,
[
$SYMID_EOF, 'EOF',
@{$last_token}[
Marpa::R2::HTML::Internal::Token::LINE,
Marpa::R2::HTML::Internal::Token::COLUMN
],
$document_length,
$document_length
];
}
# conserve memory
$p = undef;
@raw_tokens = ();
$thin_grammar->start_symbol_set( $tracer->symbol_by_name('document') );
$thin_grammar->precompute();
if ($self->{dump_AHFA}) {
return \$tracer->show_AHFA();
}
# Memoize these -- we use highest symbol a lot
my $highest_symbol_id = $thin_grammar->highest_symbol_id();
my $highest_rule_id = $thin_grammar->highest_rule_id();
# For the Ruby Slippers engine
# We need to know quickly if a symbol is a start tag;
my @is_start_tag = ();
# Find Ruby slippers ranks, by symbol ID
# @ruby_rank_by_id maps a rejected terminal ID to a vector of
# candidate ranks (indexed by candidate symbol ID).
my @ruby_rank_by_id = ();
{
# Collect the end tags that may be supplied virtually anywhere
# (all except </body> and </html>, which are final).
my @non_final_end_tag_ids = ();
SYMBOL:
for my $symbol_id ( 0 .. $highest_symbol_id ) {
my $symbol_name = $tracer->symbol_name($symbol_id);
next SYMBOL if not 0 == index $symbol_name, 'E_';
next SYMBOL
if $symbol_name eq 'E_body'
or $symbol_name eq 'E_html';
push @non_final_end_tag_ids, $symbol_id;
} ## end SYMBOL: for my $symbol_id ( 0 .. $highest_symbol_id )
# Convert the configured rank-by-name tables into rank vectors
# indexed by symbol ID.
my %ruby_vectors = ();
for my $rejected_symbol_name ( keys %{$rank_by_name} ) {
my @ruby_vector_by_id = ( (0) x ( $highest_symbol_id + 1 ) );
my $rank_by_candidate_name =
$rank_by_name->{$rejected_symbol_name};
CANDIDATE:
for my $candidate_name ( keys %{$rank_by_candidate_name} ) {
my $rank = $rank_by_candidate_name->{$candidate_name};
if ( $candidate_name eq '</*>' ) {
$ruby_vector_by_id[$_] = $rank for @non_final_end_tag_ids;
next CANDIDATE;
}
my $candidate_id = $tracer->symbol_by_name($candidate_name);
die "Unknown ruby slippers candidate name: $candidate_name"
if not defined $candidate_id;
# NOTE(review): this "for" repeats the same assignment once
# per non-final end tag ID -- redundant but harmless; a
# single assignment would have the same effect.
$ruby_vector_by_id[$candidate_id] = $rank
for @non_final_end_tag_ids;
} ## end CANDIDATE: for my $candidate_name ( keys %{...})
$ruby_vectors{$rejected_symbol_name} = \@ruby_vector_by_id;
} ## end for my $rejected_symbol_name ( keys %{$rank_by_name} )
# Assign each terminal its rank vector, trying the most specific
# match first: exact name, then tag-group wildcard, then the
# catch-all wildcard, then no Ruby Slippers at all.
my @no_ruby_slippers_vector = ( (0) x ( $highest_symbol_id + 1 ) );
SYMBOL: for my $rejected_symbol_id ( 0 .. $highest_symbol_id ) {
if ( not $thin_grammar->symbol_is_terminal($rejected_symbol_id) )
{
$ruby_rank_by_id[$rejected_symbol_id] =
\@no_ruby_slippers_vector;
next SYMBOL;
} ## end if ( not $thin_grammar->symbol_is_terminal(...))
my $rejected_symbol_name =
$tracer->symbol_name($rejected_symbol_id);
my $placement;
FIND_PLACEMENT: {
my $prefix = substr $rejected_symbol_name, 0, 2;
if ( $prefix eq 'S_' ) {
$placement = '';
$is_start_tag[$rejected_symbol_id] = 1;
last FIND_PLACEMENT;
}
if ( $prefix eq 'E_' ) {
$placement = '/';
}
} ## end FIND_PLACEMENT:
my $ruby_vector = $ruby_vectors{$rejected_symbol_name};
if ( defined $ruby_vector ) {
$ruby_rank_by_id[$rejected_symbol_id] = $ruby_vector;
next SYMBOL;
}
if ( not defined $placement ) {
# Not a tag symbol at all.
if ( $rejected_symbol_name eq 'CRUFT' ) {
$ruby_rank_by_id[$rejected_symbol_id] =
\@no_ruby_slippers_vector;
next SYMBOL;
}
$ruby_rank_by_id[$rejected_symbol_id] =
$ruby_vectors{'!non_element'}
// \@no_ruby_slippers_vector;
next SYMBOL;
} ## end if ( not defined $placement )
my $tag = substr $rejected_symbol_name, 2;
my $primary_group = $primary_group_by_tag->{$tag};
my $element_type = defined $primary_group ? (substr $primary_group, 4) : 'anywhere';
# Group wildcard, e.g. "<%inline>" or "</%block>".
$ruby_vector =
$ruby_vectors{ q{<} . $placement . q{%} . $element_type . q{>} };
if ( defined $ruby_vector ) {
$ruby_rank_by_id[$rejected_symbol_id] = $ruby_vector;
next SYMBOL;
}
# Catch-all wildcard, "<*>" or "</*>".
$ruby_vector = $ruby_vectors{ q{<} . $placement . q{*>} };
if ( defined $ruby_vector ) {
$ruby_rank_by_id[$rejected_symbol_id] = $ruby_vector;
next SYMBOL;
}
$ruby_rank_by_id[$rejected_symbol_id] = \@no_ruby_slippers_vector;
} ## end SYMBOL: for my $rejected_symbol_id ( 0 .. $highest_symbol_id )
}
# For each empty element's start tag ID, the end tag ID to supply
# virtually right after it.
my @empty_element_end_tag = ();
{
TAG: for my $tag (keys %{$is_empty_element}) {
my $start_tag_id = $tracer->symbol_by_name('S_' . $tag);
next TAG if not defined $start_tag_id;
my $end_tag_id = $tracer->symbol_by_name('E_' . $tag);
$empty_element_end_tag[$start_tag_id] = $end_tag_id;
}
}
my $recce = Marpa::R2::Thin::R->new($thin_grammar);
$recce->start_input();
$self->{recce} = $recce;
$self->{tokens} = \@html_parser_tokens;
$self->{earleme_to_html_token_ix} = [-1];
# These variables track virtual start tokens as
# a protection against infinite loops.
my %start_virtuals_used = ();
my $earleme_of_last_start_virtual = -1;
# first token is a dummy, so that ix is never 0
# this is done because 0 has a special meaning as a Libmarpa
# token value
my $latest_html_token = -1;
my $token_number = 0;
my $token_count = scalar @html_parser_tokens;
# this array track the last token number (location) at which
# the symbol with this number was last read. It's used
# to prevent the same Ruby Slippers token being added
# at the same location more than once.
# If allowed, this could cause an infinite loop.
# Note that only start tags are tracked -- the rest of the
# array stays at -1.
my @terminal_last_seen = ( (-1) x ( $highest_symbol_id + 1 ) );
$thin_grammar->throw_set(0);
# NOTE: the scalar $empty_element_end_tag and the array
# @empty_element_end_tag above are distinct variables; the scalar
# holds a pending virtual end tag, if any.
my $empty_element_end_tag;
# Main read loop.  For each physical token: first flush any pending
# virtual end tag for an empty element; then try the token itself;
# on rejection try a Ruby Slippers token; as a last resort re-read
# the physical token as CRUFT.
RECCE_RESPONSE: while ( $token_number < $token_count ) {
if ( defined $empty_element_end_tag ) {
my $read_result =
$recce->alternative( $empty_element_end_tag, RUBY_SLIPPERS_TOKEN,
1 );
if ( $read_result != $NO_MARPA_ERROR ) {
die $thin_grammar->error();
}
if ($trace_terminals) {
say {$trace_fh} 'Virtual end tag accepted: ',
$tracer->symbol_name($empty_element_end_tag)
or Carp::croak("Cannot print: $ERRNO");
}
if ( $recce->earleme_complete() < 0 ) {
die $thin_grammar->error();
}
my $current_earleme = $recce->current_earleme();
die $thin_grammar->error() if not defined $current_earleme;
$self->{earleme_to_html_token_ix}->[$current_earleme] =
$latest_html_token;
$empty_element_end_tag = undef;
next RECCE_RESPONSE;
} ## end if ( defined $empty_element_end_tag )
my $token = $html_parser_tokens[$token_number];
my $attempted_symbol_id = $token
->[Marpa::R2::HTML::Internal::Token::TOKEN_ID];
my $read_result =
$recce->alternative( $attempted_symbol_id, PHYSICAL_TOKEN, 1 );
if ( $read_result != $UNEXPECTED_TOKEN_ID ) {
# Token was accepted (any other non-zero status is fatal).
if ( $read_result != $NO_MARPA_ERROR ) {
die $thin_grammar->error();
}
if ($trace_terminals) {
say {$trace_fh} 'Token accepted: ',
$tracer->symbol_name($attempted_symbol_id)
or Carp::croak("Cannot print: $ERRNO");
}
if ( $recce->earleme_complete() < 0 ) {
die $thin_grammar->error();
}
my $last_html_token_of_marpa_token = $token_number;
$token_number++;
if ( defined $last_html_token_of_marpa_token ) {
$latest_html_token = $last_html_token_of_marpa_token;
}
my $current_earleme = $recce->current_earleme();
die $thin_grammar->error() if not defined $current_earleme;
$self->{earleme_to_html_token_ix}->[$current_earleme] =
$latest_html_token;
# If this was an empty element's start tag, schedule its
# virtual end tag for the next iteration.
$empty_element_end_tag = $empty_element_end_tag[$attempted_symbol_id];
next RECCE_RESPONSE;
} ## end if ( $read_result != $UNEXPECTED_TOKEN_ID )
if ($trace_terminals) {
say {$trace_fh} 'Literal Token not accepted: ',
$tracer->symbol_name($attempted_symbol_id)
or Carp::croak("Cannot print: $ERRNO");
}
# Ruby Slippers: among the terminals the recognizer currently
# expects, pick the highest-ranked candidate for this rejected
# token, skipping start tags already supplied at this location.
my $highest_candidate_rank = 0;
my $virtual_terminal_to_add;
my $ruby_vector = $ruby_rank_by_id[$attempted_symbol_id];
my @terminals_expected = $recce->terminals_expected();
die $thin_grammar->error() if not defined $terminals_expected[0];
CANDIDATE: for my $candidate_id (@terminals_expected) {
my $this_candidate_rank = $ruby_vector->[$candidate_id];
if ($trace_terminals) {
say {$trace_fh} 'Considering candidate: ',
$tracer->symbol_name($candidate_id),
"; rank is $this_candidate_rank; highest rank so far is $highest_candidate_rank"
or Carp::croak("Cannot print: $ERRNO");
} ## end if ($trace_terminals)
if ( $this_candidate_rank > $highest_candidate_rank ) {
if ($trace_terminals) {
say {$trace_fh} 'Considering candidate: ',
$tracer->symbol_name($candidate_id),
'; last seen at ', $terminal_last_seen[$candidate_id],
"; current token number is $token_number"
or Carp::croak("Cannot print: $ERRNO");
} ## end if ($trace_terminals)
next CANDIDATE
if $terminal_last_seen[$candidate_id] == $token_number;
if ($trace_terminals) {
say {$trace_fh} 'Current best candidate: ',
$tracer->symbol_name($candidate_id),
or Carp::croak("Cannot print: $ERRNO");
}
$highest_candidate_rank = $this_candidate_rank;
$virtual_terminal_to_add = $candidate_id;
} ## end if ( $this_candidate_rank > $highest_candidate_rank )
} ## end CANDIDATE: for my $candidate_id (@terminals_expected)
if ( defined $virtual_terminal_to_add ) {
if ($trace_terminals) {
say {$trace_fh} 'Adding Ruby Slippers token: ',
$tracer->symbol_name($virtual_terminal_to_add),
or Carp::croak("Cannot print: $ERRNO");
}
my $ruby_slippers_result =
$recce->alternative( $virtual_terminal_to_add,
RUBY_SLIPPERS_TOKEN, 1 );
if ( $ruby_slippers_result != $NO_MARPA_ERROR ) {
die $thin_grammar->error();
}
if ( $recce->earleme_complete() < 0 ) {
die $thin_grammar->error();
}
# Only keep track of start tags. We need to be able to add end
# tags repeatedly.
# Adding end tags cannot cause an infinite loop, because each
# one ends an element and only a finite number of elements
# can have been started.
$terminal_last_seen[$virtual_terminal_to_add] = $token_number
if $is_start_tag[$virtual_terminal_to_add];
my $current_earleme = $recce->current_earleme();
die $thin_grammar->error() if not defined $current_earleme;
$self->{earleme_to_html_token_ix}->[$current_earleme] =
$latest_html_token;
$empty_element_end_tag = $empty_element_end_tag[$virtual_terminal_to_add];
next RECCE_RESPONSE;
} ## end if ( defined $virtual_terminal_to_add )
# If we didn't find a token to add, add the
# current physical token as CRUFT.
if ($trace_terminals) {
say {$trace_fh} 'Adding rejected token as cruft: ',
$tracer->symbol_name($attempted_symbol_id)
or Carp::croak("Cannot print: $ERRNO");
}
# A CRUFT token that is itself rejected would loop forever, so
# that case is a fatal internal error.
my $fatal_cruft_error = $token->[Marpa::R2::HTML::Internal::Token::TOKEN_ID]
== $SYMID_CRUFT ? 1 : 0;
if ( $trace_cruft or $fatal_cruft_error ) {
my $current_earleme = $recce->current_earleme();
die $thin_grammar->error() if not defined $current_earleme;
my ( $line, $col ) =
earleme_to_linecol( $self, $current_earleme );
# HTML::Parser uses one-based line numbers,
# but zero-based column numbers
# The convention (in vi and cut) is that
# columns are also one-based.
$col++;
say {$trace_fh} qq{Cruft at line $line, column $col: "},
${
token_range_to_original(
$self, $token_number, $token_number
)
},
q{"}
or Carp::croak("Cannot print: $ERRNO");
die 'Internal error: cruft token was rejected'
if $fatal_cruft_error;
} ## end if ( $trace_cruft or $fatal_cruft_error )
# Cruft tokens are not virtual.
# They are the real things, hacked up.
# Retag the token and retry it on the next loop iteration.
$token->[Marpa::R2::HTML::Internal::Token::TOKEN_ID] = $SYMID_CRUFT;
} ## end RECCE_RESPONSE: while ( $token_number < $token_count )
$thin_grammar->throw_set(1);
if ($trace_terminals) {
say {$trace_fh} 'at end of tokens'
or Carp::croak("Cannot print: $ERRNO");
}
# Evaluation phase: build bocage/order/tree/valuator over the final
# Earley set and walk the value steps, building tdesc items on a
# stack.
$Marpa::R2::HTML::INSTANCE = $self;
local $Marpa::R2::HTML::Internal::PARSE_INSTANCE = $self;
my $latest_earley_set_ID = $recce->latest_earley_set();
my $bocage = Marpa::R2::Thin::B->new( $recce, $latest_earley_set_ID );
my $order = Marpa::R2::Thin::O->new($bocage);
my $tree = Marpa::R2::Thin::T->new($order);
$tree->next();
my @stack = ();
local $Marpa::R2::HTML::Internal::STACK = \@stack;
my %memoized_handlers = ();
my $valuator = Marpa::R2::Thin::V->new($tree);
local $Marpa::R2::HTML::Internal::RECCE = $recce;
local $Marpa::R2::HTML::Internal::VALUATOR = $valuator;
for my $rule_id ( grep { $thin_grammar->rule_length($_); }
0 .. $thin_grammar->highest_rule_id() )
{
$valuator->rule_is_valued_set( $rule_id, 1 );
}
STEP: while (1) {
my ( $type, @step_data ) = $valuator->step();
last STEP if not defined $type;
if ( $type eq 'MARPA_STEP_TOKEN' ) {
say {*STDERR} join q{ }, $type, @step_data,
$tracer->symbol_name( $step_data[0] )
or Carp::croak("Cannot print: $ERRNO")
if $trace_values;
my ( undef, $token_value, $arg_n ) = @step_data;
if ( $token_value eq RUBY_SLIPPERS_TOKEN ) {
$stack[$arg_n] = ['RUBY_SLIPPERS_TOKEN'];
say {*STDERR} "Stack:\n", Data::Dumper::Dumper( \@stack )
or Carp::croak("Cannot print: $ERRNO")
if $trace_values;
next STEP;
} ## end if ( $token_value eq RUBY_SLIPPERS_TOKEN )
# Physical token: record the HTML token range it covers.
my ( $start_earley_set_id, $end_earley_set_id ) =
$valuator->location();
my $start_earleme = $recce->earleme($start_earley_set_id);
my $start_html_token_ix =
$self->{earleme_to_html_token_ix}->[$start_earleme];
my $end_earleme = $recce->earleme($end_earley_set_id);
my $end_html_token_ix =
$self->{earleme_to_html_token_ix}->[$end_earleme];
$stack[$arg_n] = [
'PHYSICAL_TOKEN' => $start_html_token_ix + 1,
$end_html_token_ix,
];
say {*STDERR} "Stack:\n", Data::Dumper::Dumper( \@stack )
or Carp::croak("Cannot print: $ERRNO")
if $trace_values;
next STEP;
} ## end if ( $type eq 'MARPA_STEP_TOKEN' )
if ( $type eq 'MARPA_STEP_RULE' ) {
say {*STDERR} join q{ }, ( $type, @step_data )
or Carp::croak("Cannot print: $ERRNO")
if $trace_values;
my ( $rule_id, $arg_0, $arg_n ) = @step_data;
my $attributes = undef;
my $class = undef;
my $action = $action_by_rule_id[$rule_id];
local $Marpa::R2::HTML::Internal::START_TAG_IX = undef;
local $Marpa::R2::HTML::Internal::END_TAG_IX_REF = undef;
local $Marpa::R2::HTML::Internal::ELEMENT = undef;
local $Marpa::R2::HTML::Internal::SPECIES = q{};
# ELE_* actions are element rules: find the start tag token
# (if physical) so handlers can see its attributes.
if ( defined $action and ( index $action, 'ELE_' ) == 0 ) {
$Marpa::R2::HTML::Internal::SPECIES =
$Marpa::R2::HTML::Internal::ELEMENT = substr $action, 4;
my $start_tag_marpa_token = $stack[$arg_0];
my $start_tag_type = $start_tag_marpa_token
->[Marpa::R2::HTML::Internal::TDesc::TYPE];
if ( defined $start_tag_type
and $start_tag_type eq 'PHYSICAL_TOKEN' )
{
my $start_tag_ix = $start_tag_marpa_token->[1];
my $start_tag_token = $html_parser_tokens[$start_tag_ix];
if ( $start_tag_token
->[Marpa::R2::HTML::Internal::Token::TYPE] eq 'S' )
{
$Marpa::R2::HTML::Internal::START_TAG_IX =
$start_tag_ix;
$attributes = $start_tag_token
->[Marpa::R2::HTML::Internal::Token::ATTR];
} ## end if ( $start_tag_token->[...])
} ## end if ( defined $start_tag_type and $start_tag_type eq ...)
} ## end if ( defined $action and ( index $action, 'ELE_' ) ==...)
if ( defined $action and ( index $action, 'SPE_' ) == 0 ) {
$Marpa::R2::HTML::Internal::SPECIES = q{:} . substr $action,
4;
}
local $Marpa::R2::HTML::Internal::ATTRIBUTES = $attributes;
$class = $attributes->{class} // q{*};
local $Marpa::R2::HTML::Internal::CLASS = $class;
local $Marpa::R2::HTML::Internal::ARG_0 = $arg_0;
local $Marpa::R2::HTML::Internal::ARG_N = $arg_n;
my ( $start_earley_set_id, $end_earley_set_id ) =
$valuator->location();
my $start_earleme = $recce->earleme($start_earley_set_id);
my $start_html_token_ix =
$self->{earleme_to_html_token_ix}->[$start_earleme] + 1;
my $end_earleme = $recce->earleme($end_earley_set_id);
my $end_html_token_ix =
$self->{earleme_to_html_token_ix}->[$end_earleme];
# An empty token range is represented as undef/undef.
if ( $start_html_token_ix > $end_html_token_ix ) {
$start_html_token_ix = $end_html_token_ix = undef;
}
local $Marpa::R2::HTML::Internal::START_HTML_TOKEN_IX =
$start_html_token_ix;
local $Marpa::R2::HTML::Internal::END_HTML_TOKEN_IX =
$end_html_token_ix;
# Handlers are memoized by rule ID and class attribute.
my $handler_key =
$rule_id . q{;} . $Marpa::R2::HTML::Internal::CLASS;
my $handler = $memoized_handlers{$handler_key};
$trace_handlers
and $handler
and say {*STDERR}
qq{Found memoized handler for rule $rule_id, class "},
( $class // q{*} ), q{"};
if ( not defined $handler ) {
$handler = $memoized_handlers{$handler_key} =
handler_find( $self, $rule_id, $class );
}
COMPUTE_VALUE: {
# A code-ref handler produces a VALUED_SPAN from its
# return value.
if ( ref $handler ) {
$stack[$arg_0] = [
VALUED_SPAN => $start_html_token_ix,
$end_html_token_ix,
( scalar $handler->() ),
$rule_id
];
last COMPUTE_VALUE;
} ## end if ( ref $handler )
# No handler: flatten the child tdesc items and pass them
# up, collapsing a single child into a VALUED_SPAN.
my @flat_tdesc_list = ();
STACK_IX:
for my $stack_ix ( $Marpa::R2::HTML::Internal::ARG_0 ..
$Marpa::R2::HTML::Internal::ARG_N )
{
my $tdesc_item =
$Marpa::R2::HTML::Internal::STACK->[$stack_ix];
my $tdesc_type = $tdesc_item->[0];
next STACK_IX if not defined $tdesc_type;
if ( $tdesc_type eq 'VALUES' ) {
push @flat_tdesc_list,
@{ $tdesc_item
->[Marpa::R2::HTML::Internal::TDesc::VALUE] };
next STACK_IX;
} ## end if ( $tdesc_type eq 'VALUES' )
next STACK_IX if $tdesc_type ne 'VALUED_SPAN';
push @flat_tdesc_list, $tdesc_item;
} ## end STACK_IX: for my $stack_ix ( $Marpa::R2::HTML::Internal::ARG_0...)
if ( scalar @flat_tdesc_list <= 1 ) {
$stack[$arg_0] = [
VALUED_SPAN => $start_html_token_ix,
$end_html_token_ix,
$flat_tdesc_list[0]
->[Marpa::R2::HTML::Internal::TDesc::VALUE],
$rule_id
];
last COMPUTE_VALUE;
} ## end if ( scalar @flat_tdesc_list <= 1 )
$stack[$arg_0] = [
VALUES => $start_html_token_ix,
$end_html_token_ix,
\@flat_tdesc_list,
$rule_id
];
} ## end COMPUTE_VALUE:
if ($trace_values) {
say {*STDERR} "rule $rule_id: ", join q{ },
symbol_names_by_rule_id( $self, $rule_id )
or Carp::croak("Cannot print: $ERRNO");
say {*STDERR} "Stack:\n", Data::Dumper::Dumper( \@stack )
or Carp::croak("Cannot print: $ERRNO");
} ## end if ($trace_values)
next STEP;
} ## end if ( $type eq 'MARPA_STEP_RULE' )
if ( $type eq 'MARPA_STEP_NULLING_SYMBOL' ) {
my ( $symbol_id, $arg_n ) = @step_data;
$stack[$arg_n] = ['ZERO_SPAN'];
if ($trace_values) {
say {*STDERR} join q{ }, $type, @step_data,
$tracer->symbol_name($symbol_id)
or Carp::croak("Cannot print: $ERRNO");
say {*STDERR} "Stack:\n", Data::Dumper::Dumper( \@stack )
or Carp::croak("Cannot print: $ERRNO");
} ## end if ($trace_values)
next STEP;
} ## end if ( $type eq 'MARPA_STEP_NULLING_SYMBOL' )
die "Unexpected step type: $type";
} ## end STEP: while (1)
my $result = $stack[0];
Marpa::R2::exception('No parse: evaler returned undef')
if not defined $result;
if ( ref $self->{handler_by_species}->{TOP} ) {
## This is a user-defined handler. We assume it returns
## a VALUED_SPAN.
$result = $result->[Marpa::R2::HTML::Internal::TDesc::VALUE];
}
else {
## The TOP handler was the default handler.
## We now want to "literalize" its result.
FIND_LITERALIZEABLE: {
my $type = $result->[Marpa::R2::HTML::Internal::TDesc::TYPE];
if ( $type eq 'VALUES' ) {
$result = $result->[Marpa::R2::HTML::Internal::TDesc::VALUE];
last FIND_LITERALIZEABLE;
}
if ( $type eq 'VALUED_SPAN' ) {
$result = [$result];
last FIND_LITERALIZEABLE;
}
die 'Internal: TOP result is not literalize-able';
} ## end FIND_LITERALIZEABLE:
$result = range_and_values_to_literal( $self, 0, $#html_parser_tokens,
$result );
} ## end else [ if ( ref $self->{handler_by_species}->{TOP} ) ]
return $result;
} ## end sub parse
# Public entry point: create a parse object from the configuration
# arguments and run it over the document (passed by reference).
sub Marpa::R2::HTML::html {
    my $document_ref = shift;
    my $self = Marpa::R2::HTML::Internal::create(@_);
    return Marpa::R2::HTML::Internal::parse( $self, $document_ref );
}
1;
# vim: set expandtab shiftwidth=4:
| jddurand/c-marpaESLIF | 3rdparty/github/marpaWrapper/3rdparty/github/Marpa--R2/cpan/html/lib/Marpa/R2/HTML.pm | Perl | mit | 48,547 |
#!/usr/bin/perl
use strict;
use warnings;
use Data::Dumper;
use File::Glob ':bsd_glob';
use Getopt::Std;
$Data::Dumper::Indent = 1;
$Data::Dumper::Sortkeys = 1;
$Data::Dumper::Terse = 1;
# Print a DPDK-style CPU core mask covering every core on this host.
# A core mask is a hex bitmask with one bit set per core, so n cores
# yield (1 << n) - 1, e.g. 4 cores -> "0x0000000f".
# Bug fix: the original printed the raw core *count* with a decimal
# conversion ("0x%08d"), which is not a valid core mask (4 cores
# printed "0x00000004", i.e. only core 2 selected).
sub find_cpu_core_mask {
    my $core_count = qx(nproc --all);
    chomp $core_count;
    # Guard against nproc being missing or producing garbage.
    die "could not determine core count via nproc\n"
        if $core_count !~ /\A\d+\z/ or $core_count < 1;
    my $core_mask = ( 1 << $core_count ) - 1;
    printf( "0x%08x\n", $core_mask );
}
# Print one "<iface> <pci_id>" line per ethernet interface found in
# sysfs, e.g. "eth0 00:03.0".  The 4-digit PCI domain prefix
# ("0000:") is stripped from the ID.
sub find_ethernet_pci_ids {
    # Device paths look like:
    #   /sys/devices/pci0000:00/0000:00:03.0/virtio0
    #   /sys/devices/pci0000:00/0000:00:03.0/net/eth0
    # Robustness fix: the original ignored chdir failure, which would
    # silently glob the wrong directory and print nothing.
    chdir("/sys/devices") or die "cannot chdir to /sys/devices: $!\n";
    foreach my $ethernet_path (<pci*/*/net/eth*>) {
        # Relative path, e.g.: pci0000:00/0000:00:08.0/net/eth0
        my ($pci_bus, $pci_id, $junk, $ethernet_id) = split("/", $ethernet_path, 4);
        # goal: "eth0 00:03.0" -- drop the PCI domain, e.g.
        # "0000:00:03.0" -> "00:03.0".
        $pci_id =~ s/^\d{4}://;
        print "$ethernet_id $pci_id\n";
    }
}
# Command-line dispatch: -c prints the CPU core mask, -p prints the
# ethernet PCI IDs.  -c wins if both are given; neither is fatal.
my $options = {};
getopts("cp", $options) or die "invalid options provided: usage: $0 [-c] [-p]";
if ($options->{'c'}) {
find_cpu_core_mask();
}
elsif ($options->{'p'}) {
find_ethernet_pci_ids();
}
else {
die "no options provided: usage: $0 [-c] [-p]";
}
exit(0);
| TidyHuang/sdn_sensor | scripts/find-dpdk-settings.pl | Perl | mit | 1,071 |
# Complex-type class for the AdWords API v201409 "Address" element.
# NOTE: this file is generated by SOAP::WSDL (see the POD below);
# regenerate rather than hand-edit where possible.
package Google::Ads::AdWords::v201409::Address;
use strict;
use warnings;
# Child elements are namespace-qualified in the SOAP serialization.
__PACKAGE__->_set_element_form_qualified(1);
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201409' };
# This type carries no XML attributes, so the attribute class is undef.
our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;
sub __get_attr_class {
return $XML_ATTRIBUTE_CLASS;
}
# Inside-out object storage (Class::Std::Fast): one lexical hash per
# property, keyed by object ID, with read accessors generated here.
use Class::Std::Fast::Storable constructor => 'none';
use base qw(Google::Ads::SOAP::Typelib::ComplexType);
{ # BLOCK to scope variables
my %streetAddress_of :ATTR(:get<streetAddress>);
my %streetAddress2_of :ATTR(:get<streetAddress2>);
my %cityName_of :ATTR(:get<cityName>);
my %provinceCode_of :ATTR(:get<provinceCode>);
my %provinceName_of :ATTR(:get<provinceName>);
my %postalCode_of :ATTR(:get<postalCode>);
my %countryCode_of :ATTR(:get<countryCode>);
# _factory wires up, in order: the element names, their backing
# hashes, their XSD types (all xsd:string here), and their XML
# element names.
__PACKAGE__->_factory(
[ qw( streetAddress
streetAddress2
cityName
provinceCode
provinceName
postalCode
countryCode
) ],
{
'streetAddress' => \%streetAddress_of,
'streetAddress2' => \%streetAddress2_of,
'cityName' => \%cityName_of,
'provinceCode' => \%provinceCode_of,
'provinceName' => \%provinceName_of,
'postalCode' => \%postalCode_of,
'countryCode' => \%countryCode_of,
},
{
'streetAddress' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
'streetAddress2' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
'cityName' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
'provinceCode' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
'provinceName' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
'postalCode' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
'countryCode' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
},
{
'streetAddress' => 'streetAddress',
'streetAddress2' => 'streetAddress2',
'cityName' => 'cityName',
'provinceCode' => 'provinceCode',
'provinceName' => 'provinceName',
'postalCode' => 'postalCode',
'countryCode' => 'countryCode',
}
);
} # end BLOCK
1;
=pod
=head1 NAME
Google::Ads::AdWords::v201409::Address
=head1 DESCRIPTION
Perl data type class for the XML Schema defined complexType
Address from the namespace https://adwords.google.com/api/adwords/cm/v201409.
Structure to specify an address location.
=head2 PROPERTIES
The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:
=over
=item * streetAddress
=item * streetAddress2
=item * cityName
=item * provinceCode
=item * provinceName
=item * postalCode
=item * countryCode
=back
=head1 METHODS
=head2 new
Constructor. A hash with the properties listed above may be passed to new().
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| gitpan/GOOGLE-ADWORDS-PERL-CLIENT | lib/Google/Ads/AdWords/v201409/Address.pm | Perl | apache-2.0 | 2,798 |
#!/usr/bin/perl
# Run this script first.
# Create a list of entities which need CPF files created. We don't have all the AnF data, but we know from
# cpfRelations in data we do have that other entities exist.
# Creates an output file frozen_data.dat which is a hash of hashes of all the cpfRelations, including those we
# have files for an those we don't know about.
# Use the extra file anf-20150617-entityTypesForSNAC.csv with entity types from AnF query or script (and not
# from a corporateBody keyword guess).
# Run this before cpf_norm.pl, which now reads frozen_data.dat to get the extra entityType info for files we
# don't have.
# Error messages are easier (possible) to find when stderr is in a separate file.
# ./parse_anf.pl > pa.log 2> err.log &
# > grep 'has_file => 1' pa.log| wc -l
# 1832
# > ls working_files/ | wc -l
# 2350
# > less pa.log
# > grep 'fileinfo:' pa.log | wc -l
# 2350
# > grep 'uid ' ra.log| wc -l
# 1699
# > ls snac_files/ | wc -l
# 4049
# http://search.cpan.org/~abw/Template-Toolkit-2.26/lib/Template.pm
# http://search.cpan.org/~abw/Template-Toolkit-2.26/lib/Template/Tutorial/Datafile.pod
use strict;
use Storable;
use session_lib qw(:all);
use XML::LibXML;
use Template;
use DateTime;
use Data::Dumper;
require 'anf.pm';
use av; # attribute value controlled vocabulary, directly from av.xsl
my %av = %av::av;
# support utf8 strings
use utf8;
# Added apr 28 2015 due to Wide character in print at ./parse_whitman.pl line 54. Seems to have fixed the
# problem. Note that when using libxml to serialize XML which is already utf8, this would double encode the
# output.
# apr 30 2015. When using normal stdout and normal strings (even strings from libxml), the output is identical
# regardless of stdout being utf8 or not. So, it might as well stay commented out.
# use open qw(:std :utf8);
my $dbh; # simplest to just make the database handle global
my $db_name = "catalog.db"; # global db name so we can have sql logging, if we want it.
$| = 1; # unbuffer stdout
# Run the whole script body, then exit explicitly (no argument => status 0).
main();
exit();
# Top-level driver: parse every EAC-CPF XML file under working_files/, build
# %fileinfo (per-file identity data, keyed by file basename) and %data (every
# cpfRelation target, keyed by its xlink:href ark id), resolve each target's
# entityType, override entity types from the AnF-supplied CSV, then freeze
# %data to frozen_data.dat via Storable and dump both hashes to stdout for
# the log. Downstream cpf_norm.pl reads frozen_data.dat.
sub main
{
# $dbh = sqlite_db_handle($db_name);
my $parser = XML::LibXML->new();
my $xpc = XML::LibXML::XPathContext->new(); # No argument here!
reg_namespaces($xpc);
my @files = `find working_files/ -type f`;
chomp(@files);
my $xx = 0;
# %flookup is the set of file basenames we actually have on disk, so each
# cpfRelation target can be flagged has_file below.
my %flookup;
foreach my $file (@files)
{
print "Adding " . basename($file) . "\n";
$flookup{basename($file)} = 1;
}
my %data; # put all the cpf relevant data into this hash, and later write to the db, or save to disk.
my %fileinfo; # hash of all info about all files, necessary to fill in gaps for some ark id values
foreach my $file (@files)
{
my $doc = $parser->parse_file($file);
# <cpfRelation xmlns:xlink="http://www.w3.org/1999/xlink"
# cpfRelationType="associative"
# xlink:href="FRAN_NP_010938"
# xlink:type="simple">
# <relationEntry>Pinel, Jean-Jacques</relationEntry>
# <dateRange>
# <fromDate standardDate="1998-09-02">2 septembre 1998</fromDate>
# </dateRange>
# </cpfRelation>
# We need the orig entityType because unknown cpfRelations can sometimes be determined based
# on the original entityType and the cpfRelationType. This includes: hierarchical-child,
# hierarchical-parent, temporal-later, temporal-earlier.
my %relh;
$relh{entityType}
= trim(
$xpc->findvalue(
'/eac:eac-cpf/eac:cpfDescription/eac:identity/eac:entityType',
$doc));
$relh{file} = basename($file);
# existDates could have a single date, which uses a different xpath.
$relh{fromDate}
= trim(
$xpc->findvalue(
'/eac:eac-cpf/eac:cpfDescription/eac:description/eac:existDates/eac:dateRange/eac:fromDate/@standardDate',
$doc));
$relh{toDate}
= trim(
$xpc->findvalue(
'/eac:eac-cpf/eac:cpfDescription/eac:description/eac:existDates/eac:dateRange/eac:toDate/@standardDate',
$doc));
$relh{part}
= trim(
$xpc->findvalue(
'/eac:eac-cpf/eac:cpfDescription/eac:identity/eac:nameEntry[1]/eac:part',
$doc));
# Record this file's identity info once; the first occurrence wins.
if (! exists($fileinfo{$relh{file}}))
{
my %fi_copy = %relh;
$fileinfo{$relh{file}} = \%fi_copy;
}
my $href = '';
# One %fch (file-cpfRelation hash) per cpfRelation element; %data is keyed
# by the target's xlink:href.
foreach my $node ($xpc->findnodes('/eac:eac-cpf/eac:cpfDescription/eac:relations/eac:cpfRelation', $doc))
{
$relh{cpfRelationType} = $node->findvalue('@cpfRelationType');
my %fch;
$fch{relationEntry} = $xpc->findvalue('eac:relationEntry', $node);
$fch{fromDate} = $xpc->findvalue('eac:dateRange/eac:fromDate/@standardDate', $node);
$fch{toDate} = $xpc->findvalue('eac:dateRange/eac:toDate/@standardDate', $node);
$href = $node->findvalue('@xlink:href');
$fch{xhref} = $href; # sort of a duplicate key, but probably simplifies access later.
$fch{has_file} = 0;
if (exists($flookup{$href}))
{
$fch{has_file} = 1;
}
$fch{using_algo_entityType} = 'yes'; # just a reminder to anyone looking at the log files
$fch{entityType} = '';
# normalize-space on everything.
foreach my $key (keys(%fch))
{
# Don't trim list of hash. Interestingly, trimming the list seemed to have done no harm.
if ($key ne 'fc_identity')
{
$fch{$key} = trim($fch{$key});
}
}
if (! exists($data{$href}))
{
# push(@{$fch{cpfRelationType}}, $node->findvalue('@cpfRelationType'));
# Copy the hash so we have separate hash references for each iteration otherwise future
# iterations overwrite previous iterations keys in the hash reference.
my %hcopy = %relh;
push(@{$fch{orig}}, \%hcopy);
$data{$href} = \%fch;
}
else
{
# Target already seen from another file: only append the referring
# file's identity info to the {orig} list.
# push(@{$fch{cpfRelationType}}, $node->findvalue('@cpfRelationType'));
# Copy the hash so we have separate hash references for each iteration otherwise future
# iterations overwrite previous iterations keys in the hash reference.
my %hcopy = %relh;
push(@{$data{$href}{orig}}, \%hcopy);
print "Additional $href in file $file\n";
}
# print stuff out to a log file.
# print "$href\n" . Dumper(\%{$data{$href}});
}
# # Also a trim on the names (and other values) in the fc_identity list of hashes.
# foreach my $fci (@{$data{fc_identity}})
# {
# foreach my $key (keys(%{$fci}))
# {
# $fci->{$key} = trim($fci->{$key});
# }
# }
# saving data does lots of things, but all SQL related
# sql_save_data(\%data);
$xx++;
}
print "Parsed $xx files\n";
# To find the known corp type, we have to wait until all files have been examined. We set the known corp
# types here if possible, and if not, we simply set the entityType to the best guess from
# find_entity_type(); Both functions are in anf.pm.
foreach my $fkey (keys(%data))
{
# $fkey is both the key and the href/ark. "href" being a link, not a hashref. Non-obvious key and
# variable name.
my $href = $fkey;
my $is_known_type = known_entity_type($href, \%fileinfo);
print "fi: $fkey is_known_type: $is_known_type\n";
if ($is_known_type)
{
$data{$fkey}{entityType} = $is_known_type;
$data{$fkey}{using_algo_entityType} = 'is_known_type';
}
else
{
# The first check for entityType failed, so check being a corporation via relation.
foreach my $hr (@{$data{$fkey}{orig}})
{
if ($hr->{entityType} eq 'corporateBody' &&
is_corp_rel($hr->{cpfRelationType}))
{
$is_known_type = 'corporateBody';
$data{$fkey}{entityType} = 'corporateBody';
$data{$fkey}{using_algo_entityType} = 'corp_by_relation';
last;
}
}
}
# If we still don't have a known type, then call find_entity_type();
if( ! $is_known_type)
{
$data{$fkey}{entityType} = find_entity_type($href, $data{$fkey}{relationEntry});
}
}
# Fix entries in %data, which has all the cpfRelation entities. Find entities for which there is an id
# value in anf-20150617-entityTypesForSNAC.csv, and update the {entityType} and
# {using_algo_entityType}. The hash %data is used by all downstream code to look up and create CPF files
# and fix cpfRelation elements.
my $extra_info_fn = "anf-20150617-entityTypesForSNAC.csv";
my $extra;
if (! open($extra, "<", $extra_info_fn))
{
print "Error: can't open $extra_info_fn for reading\n";
exit(1);
}
# The CSV is really tab-separated: file_id<TAB>entityType, one header line.
my $discard_header = <$extra>;
while(my $temp = <$extra>)
{
chomp($temp);
$temp =~ s/\s+$//smg; # Remove any stuff like trailing ^M. Bummer that chomp() doesn't do this.
my ($file_id, $en_type) = split("\t", $temp);
if (exists($data{$file_id}))
{
my $tmp_en = $data{$file_id}->{entityType};
$data{$file_id}->{entityType} = $en_type;
$data{$file_id}->{using_algo_entityType} = 'anf-csv-file';
if ($tmp_en ne $en_type)
{
# NOTE(review): the second length() call below repeats $tmp_en; it was
# probably meant to be length($en_type). Log-message only.
print "setting: updated: $file_id name: $data{$file_id}->{relationEntry} entityType: old: $tmp_en (" . length($tmp_en) .")new: $en_type (" . length($tmp_en) . ")\n";
}
else
{
print "setting: unchanged: $file_id entityType: old: $tmp_en new: $en_type\n";
}
}
else
{
print "warning: ndata: no key $file_id in hash \%data\n";
# $data{$file_id}->{entityType} = $en_type;
# $data{$file_id}->{using_algo_entityType} = 'anf-csv-file';
# print "Adding: data added key: $file_id entityType: $en_type\n";
}
}
# Freeze %data with Storable, then read it straight back, both as a sanity
# check and to drive the log dump below.
my $out_file = 'frozen_data.dat';
# my $frozen = freeze(\%data);
# if (! open(OUT, ">", $out_file))
# {
# print STDERR ("runt_compile.pl compile(): $@\ncan't open $out_file for output\n");
# exit(1);
# }
# print OUT $frozen;
# close(OUT);
# my %data_new = %{thaw(read_file($out_file))};
store \%data, $out_file;
print "Done storing\n";
my $data_ref = retrieve($out_file);
my %data_new = %{$data_ref};
print "Done retrieving\n";
$Data::Dumper::Terse = 1;
# Bare block scopes the utf8 stdout layer to the dump only.
{
use open qw(:std :utf8);
foreach my $key (keys(%data_new))
{
print "\nkey: $key\n";
my $href = $data_new{$key};
foreach my $hkey (keys(%{$href}))
{
if ($hkey eq 'orig')
{
# do nothing now, but output {orig} later.
}
# elsif ($hkey eq 'cpfRelationType')
# {
# print " $hkey => " . join(', ', @{$href->{$hkey}}) . "\n";
# }
else
{
print " $hkey => $href->{$hkey}\n";
}
}
# orig is a list of hash
foreach my $hr (@{$href->{orig}})
{
foreach my $key (keys(%{$hr}))
{
print " orig $hr->{file} => $key => $hr->{$key}\n";
}
}
}
foreach my $key (keys(%fileinfo))
{
print "fileinfo: $key\n";
foreach my $ikey (keys(%{$fileinfo{$key}}))
{
print " $ikey => $fileinfo{$key}{$ikey}\n";
}
}
}
# my $single = $data_new{'FRAN_NP_050368'};
# foreach my $key (keys(%{$single}))
# {
# print "$key: $single->{$key}\n";
# }
} # end main
# Extract the unique identifier from an XML file path: the last path
# component with its trailing ".xml" removed. A string that does not match
# the ".../name.xml" shape is returned unchanged. The argument itself is
# never modified (we substitute on a copy).
sub basename
{
    my ($path) = @_;
    (my $id = $path) =~ s/^.*\/(.*?)\.xml$/$1/;
    return $id;
}
# Register the XML namespace prefixes used by the EAC-CPF xpath expressions
# on the supplied XML::LibXML::XPathContext object: eac (EAC-CPF), xlink,
# xsi, and wwa (Whitman Archive).
sub reg_namespaces
{
    my ($xpc) = @_;
    $xpc->registerNs('eac',   'urn:isbn:1-931666-33-4');
    $xpc->registerNs('xlink', 'http://www.w3.org/1999/xlink');
    $xpc->registerNs('xsi',   'http://www.w3.org/2001/XMLSchema-instance');
    $xpc->registerNs('wwa',   'http://www.whitmanarchive.org/namespace');
}
# Build a lower-cased, punctuation-stripped key for fuzzy name matching.
# utf8 letters are preserved; only the punctuation characters observed in the
# data set ( - , ? . ' [ ] & ) are converted to spaces, after which trim()
# collapses/strips whitespace. Character survey that produced the list:
# perl -ne 'm/name_key = (.*)$/; print "$1";' unique_names.txt | grep -Po '[^a-zA-Z]' | sort -u
sub normal_for_match
{
    my ($name) = @_;
    my $key = lc($name);
    $key =~ s/[,.&\-\[\]?']/ /g;
    return trim($key);
}
# Whitespace-normalize a string and return it: strip leading/trailing
# whitespace and collapse internal whitespace runs to a single space, then
# replace Unicode em dash (U+2014) and en dash (U+2013) with a plain hyphen.
# Note: the argument is aliased via @_, so the caller's variable is modified
# in place as well (existing behavior, relied on throughout this script).
#
# Bug fix: the dash substitution previously operated on the global $_ instead
# of the argument $_[0], so it never affected the returned string and
# clobbered whatever $_ held at the call site.
sub trim
{
$_[0] =~ s/^\s+//smg;
$_[0] =~ s/\s+$//smg;
$_[0] =~ s/\s+/ /smg;
# Replace unicode hex 2014 em dash, and hex 2013 en dash with a hyphen.
$_[0] =~ s/\x{2014}|\x{2013}/-/smg;
return $_[0];
}
| snac-cooperative/snac_eac_cpf_utils | anf/parse_anf.pl | Perl | apache-2.0 | 13,988 |
# Result object for the IAM GetContextKeysForPolicy API call (auto-generated
# Paws response class). All attributes are read-only and populated from the
# service response.
package Paws::IAM::GetContextKeysForPolicyResponse;
use Moose;
# Context key names referenced in the input policies; entries may be undef,
# hence ArrayRef[Str|Undef].
has ContextKeyNames => (is => 'ro', isa => 'ArrayRef[Str|Undef]');
# AWS request id for this response; leading underscore marks it as internal.
has _request_id => (is => 'ro', isa => 'Str');
1;
### main pod documentation begin ###
=head1 NAME
Paws::IAM::GetContextKeysForPolicyResponse
=head1 ATTRIBUTES
=head2 ContextKeyNames => ArrayRef[Str|Undef]
The list of context keys that are referenced in the input policies.
=head2 _request_id => Str
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/IAM/GetContextKeysForPolicyResponse.pm | Perl | apache-2.0 | 455 |
# NOTE(review): the "package" keyword is split across two lines, presumably
# so the PAUSE/CPAN indexer does not index this auto-generated module —
# confirm against the Date::Manip build tooling.
package #
Date::Manip::TZ::eudubl00;
# Copyright (c) 2008-2014 Sullivan Beck. All rights reserved.
# This program is free software; you can redistribute it and/or modify it
# under the same terms as Perl itself.
# This file was automatically generated. Any changes to this file will
# be lost the next time 'tzdata' is run.
# Generated on: Fri Nov 21 10:41:42 EST 2014
# Data version: tzdata2014j
# Code version: tzcode2014j
# This module contains data from the zoneinfo time zone database. The original
# data was obtained from the URL:
# ftp://ftp.iana.org/tz
use strict;
use warnings;
require 5.010000;
our (%Dates,%LastRule);
# Release the (large) generated data tables at interpreter shutdown.
END {
undef %Dates;
undef %LastRule;
}
our ($VERSION);
$VERSION='6.48';
END { undef $VERSION; }
%Dates = (
1 =>
[
[ [1,1,2,0,0,0],[1,1,1,23,35,0],'-00:25:00',[0,-25,0],
'LMT',0,[1880,8,2,0,24,59],[1880,8,1,23,59,59],
'0001010200:00:00','0001010123:35:00','1880080200:24:59','1880080123:59:59' ],
],
1880 =>
[
[ [1880,8,2,0,25,0],[1880,8,1,23,59,39],'-00:25:21',[0,-25,-21],
'DMT',0,[1916,5,21,2,25,20],[1916,5,21,1,59,59],
'1880080200:25:00','1880080123:59:39','1916052102:25:20','1916052101:59:59' ],
],
1916 =>
[
[ [1916,5,21,2,25,21],[1916,5,21,3,0,0],'+00:34:39',[0,34,39],
'IST',1,[1916,10,1,2,25,20],[1916,10,1,2,59,59],
'1916052102:25:21','1916052103:00:00','1916100102:25:20','1916100102:59:59' ],
[ [1916,10,1,2,25,21],[1916,10,1,2,25,21],'+00:00:00',[0,0,0],
'GMT',0,[1917,4,8,1,59,59],[1917,4,8,1,59,59],
'1916100102:25:21','1916100102:25:21','1917040801:59:59','1917040801:59:59' ],
],
1917 =>
[
[ [1917,4,8,2,0,0],[1917,4,8,3,0,0],'+01:00:00',[1,0,0],
'BST',1,[1917,9,17,1,59,59],[1917,9,17,2,59,59],
'1917040802:00:00','1917040803:00:00','1917091701:59:59','1917091702:59:59' ],
[ [1917,9,17,2,0,0],[1917,9,17,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1918,3,24,1,59,59],[1918,3,24,1,59,59],
'1917091702:00:00','1917091702:00:00','1918032401:59:59','1918032401:59:59' ],
],
1918 =>
[
[ [1918,3,24,2,0,0],[1918,3,24,3,0,0],'+01:00:00',[1,0,0],
'BST',1,[1918,9,30,1,59,59],[1918,9,30,2,59,59],
'1918032402:00:00','1918032403:00:00','1918093001:59:59','1918093002:59:59' ],
[ [1918,9,30,2,0,0],[1918,9,30,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1919,3,30,1,59,59],[1919,3,30,1,59,59],
'1918093002:00:00','1918093002:00:00','1919033001:59:59','1919033001:59:59' ],
],
1919 =>
[
[ [1919,3,30,2,0,0],[1919,3,30,3,0,0],'+01:00:00',[1,0,0],
'BST',1,[1919,9,29,1,59,59],[1919,9,29,2,59,59],
'1919033002:00:00','1919033003:00:00','1919092901:59:59','1919092902:59:59' ],
[ [1919,9,29,2,0,0],[1919,9,29,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1920,3,28,1,59,59],[1920,3,28,1,59,59],
'1919092902:00:00','1919092902:00:00','1920032801:59:59','1920032801:59:59' ],
],
1920 =>
[
[ [1920,3,28,2,0,0],[1920,3,28,3,0,0],'+01:00:00',[1,0,0],
'BST',1,[1920,10,25,1,59,59],[1920,10,25,2,59,59],
'1920032802:00:00','1920032803:00:00','1920102501:59:59','1920102502:59:59' ],
[ [1920,10,25,2,0,0],[1920,10,25,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1921,4,3,1,59,59],[1921,4,3,1,59,59],
'1920102502:00:00','1920102502:00:00','1921040301:59:59','1921040301:59:59' ],
],
1921 =>
[
[ [1921,4,3,2,0,0],[1921,4,3,3,0,0],'+01:00:00',[1,0,0],
'BST',1,[1921,10,3,1,59,59],[1921,10,3,2,59,59],
'1921040302:00:00','1921040303:00:00','1921100301:59:59','1921100302:59:59' ],
[ [1921,10,3,2,0,0],[1921,10,3,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1922,3,26,1,59,59],[1922,3,26,1,59,59],
'1921100302:00:00','1921100302:00:00','1922032601:59:59','1922032601:59:59' ],
],
1922 =>
[
[ [1922,3,26,2,0,0],[1922,3,26,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1922,10,8,1,59,59],[1922,10,8,2,59,59],
'1922032602:00:00','1922032603:00:00','1922100801:59:59','1922100802:59:59' ],
[ [1922,10,8,2,0,0],[1922,10,8,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1923,4,22,1,59,59],[1923,4,22,1,59,59],
'1922100802:00:00','1922100802:00:00','1923042201:59:59','1923042201:59:59' ],
],
1923 =>
[
[ [1923,4,22,2,0,0],[1923,4,22,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1923,9,16,1,59,59],[1923,9,16,2,59,59],
'1923042202:00:00','1923042203:00:00','1923091601:59:59','1923091602:59:59' ],
[ [1923,9,16,2,0,0],[1923,9,16,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1924,4,13,1,59,59],[1924,4,13,1,59,59],
'1923091602:00:00','1923091602:00:00','1924041301:59:59','1924041301:59:59' ],
],
1924 =>
[
[ [1924,4,13,2,0,0],[1924,4,13,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1924,9,21,1,59,59],[1924,9,21,2,59,59],
'1924041302:00:00','1924041303:00:00','1924092101:59:59','1924092102:59:59' ],
[ [1924,9,21,2,0,0],[1924,9,21,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1925,4,19,1,59,59],[1925,4,19,1,59,59],
'1924092102:00:00','1924092102:00:00','1925041901:59:59','1925041901:59:59' ],
],
1925 =>
[
[ [1925,4,19,2,0,0],[1925,4,19,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1925,10,4,1,59,59],[1925,10,4,2,59,59],
'1925041902:00:00','1925041903:00:00','1925100401:59:59','1925100402:59:59' ],
[ [1925,10,4,2,0,0],[1925,10,4,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1926,4,18,1,59,59],[1926,4,18,1,59,59],
'1925100402:00:00','1925100402:00:00','1926041801:59:59','1926041801:59:59' ],
],
1926 =>
[
[ [1926,4,18,2,0,0],[1926,4,18,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1926,10,3,1,59,59],[1926,10,3,2,59,59],
'1926041802:00:00','1926041803:00:00','1926100301:59:59','1926100302:59:59' ],
[ [1926,10,3,2,0,0],[1926,10,3,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1927,4,10,1,59,59],[1927,4,10,1,59,59],
'1926100302:00:00','1926100302:00:00','1927041001:59:59','1927041001:59:59' ],
],
1927 =>
[
[ [1927,4,10,2,0,0],[1927,4,10,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1927,10,2,1,59,59],[1927,10,2,2,59,59],
'1927041002:00:00','1927041003:00:00','1927100201:59:59','1927100202:59:59' ],
[ [1927,10,2,2,0,0],[1927,10,2,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1928,4,22,1,59,59],[1928,4,22,1,59,59],
'1927100202:00:00','1927100202:00:00','1928042201:59:59','1928042201:59:59' ],
],
1928 =>
[
[ [1928,4,22,2,0,0],[1928,4,22,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1928,10,7,1,59,59],[1928,10,7,2,59,59],
'1928042202:00:00','1928042203:00:00','1928100701:59:59','1928100702:59:59' ],
[ [1928,10,7,2,0,0],[1928,10,7,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1929,4,21,1,59,59],[1929,4,21,1,59,59],
'1928100702:00:00','1928100702:00:00','1929042101:59:59','1929042101:59:59' ],
],
1929 =>
[
[ [1929,4,21,2,0,0],[1929,4,21,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1929,10,6,1,59,59],[1929,10,6,2,59,59],
'1929042102:00:00','1929042103:00:00','1929100601:59:59','1929100602:59:59' ],
[ [1929,10,6,2,0,0],[1929,10,6,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1930,4,13,1,59,59],[1930,4,13,1,59,59],
'1929100602:00:00','1929100602:00:00','1930041301:59:59','1930041301:59:59' ],
],
1930 =>
[
[ [1930,4,13,2,0,0],[1930,4,13,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1930,10,5,1,59,59],[1930,10,5,2,59,59],
'1930041302:00:00','1930041303:00:00','1930100501:59:59','1930100502:59:59' ],
[ [1930,10,5,2,0,0],[1930,10,5,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1931,4,19,1,59,59],[1931,4,19,1,59,59],
'1930100502:00:00','1930100502:00:00','1931041901:59:59','1931041901:59:59' ],
],
1931 =>
[
[ [1931,4,19,2,0,0],[1931,4,19,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1931,10,4,1,59,59],[1931,10,4,2,59,59],
'1931041902:00:00','1931041903:00:00','1931100401:59:59','1931100402:59:59' ],
[ [1931,10,4,2,0,0],[1931,10,4,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1932,4,17,1,59,59],[1932,4,17,1,59,59],
'1931100402:00:00','1931100402:00:00','1932041701:59:59','1932041701:59:59' ],
],
1932 =>
[
[ [1932,4,17,2,0,0],[1932,4,17,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1932,10,2,1,59,59],[1932,10,2,2,59,59],
'1932041702:00:00','1932041703:00:00','1932100201:59:59','1932100202:59:59' ],
[ [1932,10,2,2,0,0],[1932,10,2,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1933,4,9,1,59,59],[1933,4,9,1,59,59],
'1932100202:00:00','1932100202:00:00','1933040901:59:59','1933040901:59:59' ],
],
1933 =>
[
[ [1933,4,9,2,0,0],[1933,4,9,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1933,10,8,1,59,59],[1933,10,8,2,59,59],
'1933040902:00:00','1933040903:00:00','1933100801:59:59','1933100802:59:59' ],
[ [1933,10,8,2,0,0],[1933,10,8,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1934,4,22,1,59,59],[1934,4,22,1,59,59],
'1933100802:00:00','1933100802:00:00','1934042201:59:59','1934042201:59:59' ],
],
1934 =>
[
[ [1934,4,22,2,0,0],[1934,4,22,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1934,10,7,1,59,59],[1934,10,7,2,59,59],
'1934042202:00:00','1934042203:00:00','1934100701:59:59','1934100702:59:59' ],
[ [1934,10,7,2,0,0],[1934,10,7,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1935,4,14,1,59,59],[1935,4,14,1,59,59],
'1934100702:00:00','1934100702:00:00','1935041401:59:59','1935041401:59:59' ],
],
1935 =>
[
[ [1935,4,14,2,0,0],[1935,4,14,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1935,10,6,1,59,59],[1935,10,6,2,59,59],
'1935041402:00:00','1935041403:00:00','1935100601:59:59','1935100602:59:59' ],
[ [1935,10,6,2,0,0],[1935,10,6,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1936,4,19,1,59,59],[1936,4,19,1,59,59],
'1935100602:00:00','1935100602:00:00','1936041901:59:59','1936041901:59:59' ],
],
1936 =>
[
[ [1936,4,19,2,0,0],[1936,4,19,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1936,10,4,1,59,59],[1936,10,4,2,59,59],
'1936041902:00:00','1936041903:00:00','1936100401:59:59','1936100402:59:59' ],
[ [1936,10,4,2,0,0],[1936,10,4,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1937,4,18,1,59,59],[1937,4,18,1,59,59],
'1936100402:00:00','1936100402:00:00','1937041801:59:59','1937041801:59:59' ],
],
1937 =>
[
[ [1937,4,18,2,0,0],[1937,4,18,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1937,10,3,1,59,59],[1937,10,3,2,59,59],
'1937041802:00:00','1937041803:00:00','1937100301:59:59','1937100302:59:59' ],
[ [1937,10,3,2,0,0],[1937,10,3,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1938,4,10,1,59,59],[1938,4,10,1,59,59],
'1937100302:00:00','1937100302:00:00','1938041001:59:59','1938041001:59:59' ],
],
1938 =>
[
[ [1938,4,10,2,0,0],[1938,4,10,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1938,10,2,1,59,59],[1938,10,2,2,59,59],
'1938041002:00:00','1938041003:00:00','1938100201:59:59','1938100202:59:59' ],
[ [1938,10,2,2,0,0],[1938,10,2,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1939,4,16,1,59,59],[1939,4,16,1,59,59],
'1938100202:00:00','1938100202:00:00','1939041601:59:59','1939041601:59:59' ],
],
1939 =>
[
[ [1939,4,16,2,0,0],[1939,4,16,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1939,11,19,1,59,59],[1939,11,19,2,59,59],
'1939041602:00:00','1939041603:00:00','1939111901:59:59','1939111902:59:59' ],
[ [1939,11,19,2,0,0],[1939,11,19,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1940,2,25,1,59,59],[1940,2,25,1,59,59],
'1939111902:00:00','1939111902:00:00','1940022501:59:59','1940022501:59:59' ],
],
1940 =>
[
[ [1940,2,25,2,0,0],[1940,2,25,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1946,10,6,0,59,59],[1946,10,6,1,59,59],
'1940022502:00:00','1940022503:00:00','1946100600:59:59','1946100601:59:59' ],
],
1946 =>
[
[ [1946,10,6,1,0,0],[1946,10,6,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1947,3,16,1,59,59],[1947,3,16,1,59,59],
'1946100601:00:00','1946100601:00:00','1947031601:59:59','1947031601:59:59' ],
],
1947 =>
[
[ [1947,3,16,2,0,0],[1947,3,16,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1947,11,2,0,59,59],[1947,11,2,1,59,59],
'1947031602:00:00','1947031603:00:00','1947110200:59:59','1947110201:59:59' ],
[ [1947,11,2,1,0,0],[1947,11,2,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1948,4,18,1,59,59],[1948,4,18,1,59,59],
'1947110201:00:00','1947110201:00:00','1948041801:59:59','1948041801:59:59' ],
],
1948 =>
[
[ [1948,4,18,2,0,0],[1948,4,18,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1948,10,31,1,59,59],[1948,10,31,2,59,59],
'1948041802:00:00','1948041803:00:00','1948103101:59:59','1948103102:59:59' ],
[ [1948,10,31,2,0,0],[1948,10,31,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1949,4,3,1,59,59],[1949,4,3,1,59,59],
'1948103102:00:00','1948103102:00:00','1949040301:59:59','1949040301:59:59' ],
],
1949 =>
[
[ [1949,4,3,2,0,0],[1949,4,3,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1949,10,30,1,59,59],[1949,10,30,2,59,59],
'1949040302:00:00','1949040303:00:00','1949103001:59:59','1949103002:59:59' ],
[ [1949,10,30,2,0,0],[1949,10,30,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1950,4,16,1,59,59],[1950,4,16,1,59,59],
'1949103002:00:00','1949103002:00:00','1950041601:59:59','1950041601:59:59' ],
],
1950 =>
[
[ [1950,4,16,2,0,0],[1950,4,16,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1950,10,22,1,59,59],[1950,10,22,2,59,59],
'1950041602:00:00','1950041603:00:00','1950102201:59:59','1950102202:59:59' ],
[ [1950,10,22,2,0,0],[1950,10,22,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1951,4,15,1,59,59],[1951,4,15,1,59,59],
'1950102202:00:00','1950102202:00:00','1951041501:59:59','1951041501:59:59' ],
],
1951 =>
[
[ [1951,4,15,2,0,0],[1951,4,15,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1951,10,21,1,59,59],[1951,10,21,2,59,59],
'1951041502:00:00','1951041503:00:00','1951102101:59:59','1951102102:59:59' ],
[ [1951,10,21,2,0,0],[1951,10,21,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1952,4,20,1,59,59],[1952,4,20,1,59,59],
'1951102102:00:00','1951102102:00:00','1952042001:59:59','1952042001:59:59' ],
],
1952 =>
[
[ [1952,4,20,2,0,0],[1952,4,20,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1952,10,26,1,59,59],[1952,10,26,2,59,59],
'1952042002:00:00','1952042003:00:00','1952102601:59:59','1952102602:59:59' ],
[ [1952,10,26,2,0,0],[1952,10,26,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1953,4,19,1,59,59],[1953,4,19,1,59,59],
'1952102602:00:00','1952102602:00:00','1953041901:59:59','1953041901:59:59' ],
],
1953 =>
[
[ [1953,4,19,2,0,0],[1953,4,19,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1953,10,4,1,59,59],[1953,10,4,2,59,59],
'1953041902:00:00','1953041903:00:00','1953100401:59:59','1953100402:59:59' ],
[ [1953,10,4,2,0,0],[1953,10,4,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1954,4,11,1,59,59],[1954,4,11,1,59,59],
'1953100402:00:00','1953100402:00:00','1954041101:59:59','1954041101:59:59' ],
],
1954 =>
[
[ [1954,4,11,2,0,0],[1954,4,11,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1954,10,3,1,59,59],[1954,10,3,2,59,59],
'1954041102:00:00','1954041103:00:00','1954100301:59:59','1954100302:59:59' ],
[ [1954,10,3,2,0,0],[1954,10,3,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1955,4,17,1,59,59],[1955,4,17,1,59,59],
'1954100302:00:00','1954100302:00:00','1955041701:59:59','1955041701:59:59' ],
],
1955 =>
[
[ [1955,4,17,2,0,0],[1955,4,17,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1955,10,2,1,59,59],[1955,10,2,2,59,59],
'1955041702:00:00','1955041703:00:00','1955100201:59:59','1955100202:59:59' ],
[ [1955,10,2,2,0,0],[1955,10,2,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1956,4,22,1,59,59],[1956,4,22,1,59,59],
'1955100202:00:00','1955100202:00:00','1956042201:59:59','1956042201:59:59' ],
],
1956 =>
[
[ [1956,4,22,2,0,0],[1956,4,22,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1956,10,7,1,59,59],[1956,10,7,2,59,59],
'1956042202:00:00','1956042203:00:00','1956100701:59:59','1956100702:59:59' ],
[ [1956,10,7,2,0,0],[1956,10,7,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1957,4,14,1,59,59],[1957,4,14,1,59,59],
'1956100702:00:00','1956100702:00:00','1957041401:59:59','1957041401:59:59' ],
],
1957 =>
[
[ [1957,4,14,2,0,0],[1957,4,14,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1957,10,6,1,59,59],[1957,10,6,2,59,59],
'1957041402:00:00','1957041403:00:00','1957100601:59:59','1957100602:59:59' ],
[ [1957,10,6,2,0,0],[1957,10,6,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1958,4,20,1,59,59],[1958,4,20,1,59,59],
'1957100602:00:00','1957100602:00:00','1958042001:59:59','1958042001:59:59' ],
],
1958 =>
[
[ [1958,4,20,2,0,0],[1958,4,20,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1958,10,5,1,59,59],[1958,10,5,2,59,59],
'1958042002:00:00','1958042003:00:00','1958100501:59:59','1958100502:59:59' ],
[ [1958,10,5,2,0,0],[1958,10,5,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1959,4,19,1,59,59],[1959,4,19,1,59,59],
'1958100502:00:00','1958100502:00:00','1959041901:59:59','1959041901:59:59' ],
],
1959 =>
[
[ [1959,4,19,2,0,0],[1959,4,19,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1959,10,4,1,59,59],[1959,10,4,2,59,59],
'1959041902:00:00','1959041903:00:00','1959100401:59:59','1959100402:59:59' ],
[ [1959,10,4,2,0,0],[1959,10,4,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1960,4,10,1,59,59],[1960,4,10,1,59,59],
'1959100402:00:00','1959100402:00:00','1960041001:59:59','1960041001:59:59' ],
],
1960 =>
[
[ [1960,4,10,2,0,0],[1960,4,10,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1960,10,2,1,59,59],[1960,10,2,2,59,59],
'1960041002:00:00','1960041003:00:00','1960100201:59:59','1960100202:59:59' ],
[ [1960,10,2,2,0,0],[1960,10,2,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1961,3,26,1,59,59],[1961,3,26,1,59,59],
'1960100202:00:00','1960100202:00:00','1961032601:59:59','1961032601:59:59' ],
],
1961 =>
[
[ [1961,3,26,2,0,0],[1961,3,26,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1961,10,29,1,59,59],[1961,10,29,2,59,59],
'1961032602:00:00','1961032603:00:00','1961102901:59:59','1961102902:59:59' ],
[ [1961,10,29,2,0,0],[1961,10,29,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1962,3,25,1,59,59],[1962,3,25,1,59,59],
'1961102902:00:00','1961102902:00:00','1962032501:59:59','1962032501:59:59' ],
],
1962 =>
[
[ [1962,3,25,2,0,0],[1962,3,25,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1962,10,28,1,59,59],[1962,10,28,2,59,59],
'1962032502:00:00','1962032503:00:00','1962102801:59:59','1962102802:59:59' ],
[ [1962,10,28,2,0,0],[1962,10,28,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1963,3,31,1,59,59],[1963,3,31,1,59,59],
'1962102802:00:00','1962102802:00:00','1963033101:59:59','1963033101:59:59' ],
],
1963 =>
[
[ [1963,3,31,2,0,0],[1963,3,31,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1963,10,27,1,59,59],[1963,10,27,2,59,59],
'1963033102:00:00','1963033103:00:00','1963102701:59:59','1963102702:59:59' ],
[ [1963,10,27,2,0,0],[1963,10,27,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1964,3,22,1,59,59],[1964,3,22,1,59,59],
'1963102702:00:00','1963102702:00:00','1964032201:59:59','1964032201:59:59' ],
],
1964 =>
[
[ [1964,3,22,2,0,0],[1964,3,22,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1964,10,25,1,59,59],[1964,10,25,2,59,59],
'1964032202:00:00','1964032203:00:00','1964102501:59:59','1964102502:59:59' ],
[ [1964,10,25,2,0,0],[1964,10,25,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1965,3,21,1,59,59],[1965,3,21,1,59,59],
'1964102502:00:00','1964102502:00:00','1965032101:59:59','1965032101:59:59' ],
],
1965 =>
[
[ [1965,3,21,2,0,0],[1965,3,21,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1965,10,24,1,59,59],[1965,10,24,2,59,59],
'1965032102:00:00','1965032103:00:00','1965102401:59:59','1965102402:59:59' ],
[ [1965,10,24,2,0,0],[1965,10,24,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1966,3,20,1,59,59],[1966,3,20,1,59,59],
'1965102402:00:00','1965102402:00:00','1966032001:59:59','1966032001:59:59' ],
],
1966 =>
[
[ [1966,3,20,2,0,0],[1966,3,20,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1966,10,23,1,59,59],[1966,10,23,2,59,59],
'1966032002:00:00','1966032003:00:00','1966102301:59:59','1966102302:59:59' ],
[ [1966,10,23,2,0,0],[1966,10,23,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1967,3,19,1,59,59],[1967,3,19,1,59,59],
'1966102302:00:00','1966102302:00:00','1967031901:59:59','1967031901:59:59' ],
],
1967 =>
[
[ [1967,3,19,2,0,0],[1967,3,19,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1967,10,29,1,59,59],[1967,10,29,2,59,59],
'1967031902:00:00','1967031903:00:00','1967102901:59:59','1967102902:59:59' ],
[ [1967,10,29,2,0,0],[1967,10,29,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1968,2,18,1,59,59],[1968,2,18,1,59,59],
'1967102902:00:00','1967102902:00:00','1968021801:59:59','1968021801:59:59' ],
],
1968 =>
[
[ [1968,2,18,2,0,0],[1968,2,18,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1968,10,26,22,59,59],[1968,10,26,23,59,59],
'1968021802:00:00','1968021803:00:00','1968102622:59:59','1968102623:59:59' ],
[ [1968,10,26,23,0,0],[1968,10,27,0,0,0],'+01:00:00',[1,0,0],
'IST',0,[1971,10,31,1,59,59],[1971,10,31,2,59,59],
'1968102623:00:00','1968102700:00:00','1971103101:59:59','1971103102:59:59' ],
],
1971 =>
[
[ [1971,10,31,2,0,0],[1971,10,31,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1972,3,19,1,59,59],[1972,3,19,1,59,59],
'1971103102:00:00','1971103102:00:00','1972031901:59:59','1972031901:59:59' ],
],
1972 =>
[
[ [1972,3,19,2,0,0],[1972,3,19,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1972,10,29,1,59,59],[1972,10,29,2,59,59],
'1972031902:00:00','1972031903:00:00','1972102901:59:59','1972102902:59:59' ],
[ [1972,10,29,2,0,0],[1972,10,29,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1973,3,18,1,59,59],[1973,3,18,1,59,59],
'1972102902:00:00','1972102902:00:00','1973031801:59:59','1973031801:59:59' ],
],
1973 =>
[
[ [1973,3,18,2,0,0],[1973,3,18,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1973,10,28,1,59,59],[1973,10,28,2,59,59],
'1973031802:00:00','1973031803:00:00','1973102801:59:59','1973102802:59:59' ],
[ [1973,10,28,2,0,0],[1973,10,28,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1974,3,17,1,59,59],[1974,3,17,1,59,59],
'1973102802:00:00','1973102802:00:00','1974031701:59:59','1974031701:59:59' ],
],
1974 =>
[
[ [1974,3,17,2,0,0],[1974,3,17,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1974,10,27,1,59,59],[1974,10,27,2,59,59],
'1974031702:00:00','1974031703:00:00','1974102701:59:59','1974102702:59:59' ],
[ [1974,10,27,2,0,0],[1974,10,27,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1975,3,16,1,59,59],[1975,3,16,1,59,59],
'1974102702:00:00','1974102702:00:00','1975031601:59:59','1975031601:59:59' ],
],
1975 =>
[
[ [1975,3,16,2,0,0],[1975,3,16,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1975,10,26,1,59,59],[1975,10,26,2,59,59],
'1975031602:00:00','1975031603:00:00','1975102601:59:59','1975102602:59:59' ],
[ [1975,10,26,2,0,0],[1975,10,26,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1976,3,21,1,59,59],[1976,3,21,1,59,59],
'1975102602:00:00','1975102602:00:00','1976032101:59:59','1976032101:59:59' ],
],
1976 =>
[
[ [1976,3,21,2,0,0],[1976,3,21,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1976,10,24,1,59,59],[1976,10,24,2,59,59],
'1976032102:00:00','1976032103:00:00','1976102401:59:59','1976102402:59:59' ],
[ [1976,10,24,2,0,0],[1976,10,24,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1977,3,20,1,59,59],[1977,3,20,1,59,59],
'1976102402:00:00','1976102402:00:00','1977032001:59:59','1977032001:59:59' ],
],
1977 =>
[
[ [1977,3,20,2,0,0],[1977,3,20,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1977,10,23,1,59,59],[1977,10,23,2,59,59],
'1977032002:00:00','1977032003:00:00','1977102301:59:59','1977102302:59:59' ],
[ [1977,10,23,2,0,0],[1977,10,23,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1978,3,19,1,59,59],[1978,3,19,1,59,59],
'1977102302:00:00','1977102302:00:00','1978031901:59:59','1978031901:59:59' ],
],
1978 =>
[
[ [1978,3,19,2,0,0],[1978,3,19,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1978,10,29,1,59,59],[1978,10,29,2,59,59],
'1978031902:00:00','1978031903:00:00','1978102901:59:59','1978102902:59:59' ],
[ [1978,10,29,2,0,0],[1978,10,29,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1979,3,18,1,59,59],[1979,3,18,1,59,59],
'1978102902:00:00','1978102902:00:00','1979031801:59:59','1979031801:59:59' ],
],
1979 =>
[
[ [1979,3,18,2,0,0],[1979,3,18,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1979,10,28,1,59,59],[1979,10,28,2,59,59],
'1979031802:00:00','1979031803:00:00','1979102801:59:59','1979102802:59:59' ],
[ [1979,10,28,2,0,0],[1979,10,28,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1980,3,16,1,59,59],[1980,3,16,1,59,59],
'1979102802:00:00','1979102802:00:00','1980031601:59:59','1980031601:59:59' ],
],
1980 =>
[
[ [1980,3,16,2,0,0],[1980,3,16,3,0,0],'+01:00:00',[1,0,0],
'IST',1,[1980,10,26,1,59,59],[1980,10,26,2,59,59],
'1980031602:00:00','1980031603:00:00','1980102601:59:59','1980102602:59:59' ],
[ [1980,10,26,2,0,0],[1980,10,26,2,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1981,3,29,0,59,59],[1981,3,29,0,59,59],
'1980102602:00:00','1980102602:00:00','1981032900:59:59','1981032900:59:59' ],
],
1981 =>
[
[ [1981,3,29,1,0,0],[1981,3,29,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[1981,10,25,0,59,59],[1981,10,25,1,59,59],
'1981032901:00:00','1981032902:00:00','1981102500:59:59','1981102501:59:59' ],
[ [1981,10,25,1,0,0],[1981,10,25,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1982,3,28,0,59,59],[1982,3,28,0,59,59],
'1981102501:00:00','1981102501:00:00','1982032800:59:59','1982032800:59:59' ],
],
1982 =>
[
[ [1982,3,28,1,0,0],[1982,3,28,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[1982,10,24,0,59,59],[1982,10,24,1,59,59],
'1982032801:00:00','1982032802:00:00','1982102400:59:59','1982102401:59:59' ],
[ [1982,10,24,1,0,0],[1982,10,24,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1983,3,27,0,59,59],[1983,3,27,0,59,59],
'1982102401:00:00','1982102401:00:00','1983032700:59:59','1983032700:59:59' ],
],
1983 =>
[
[ [1983,3,27,1,0,0],[1983,3,27,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[1983,10,23,0,59,59],[1983,10,23,1,59,59],
'1983032701:00:00','1983032702:00:00','1983102300:59:59','1983102301:59:59' ],
[ [1983,10,23,1,0,0],[1983,10,23,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1984,3,25,0,59,59],[1984,3,25,0,59,59],
'1983102301:00:00','1983102301:00:00','1984032500:59:59','1984032500:59:59' ],
],
1984 =>
[
[ [1984,3,25,1,0,0],[1984,3,25,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[1984,10,28,0,59,59],[1984,10,28,1,59,59],
'1984032501:00:00','1984032502:00:00','1984102800:59:59','1984102801:59:59' ],
[ [1984,10,28,1,0,0],[1984,10,28,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1985,3,31,0,59,59],[1985,3,31,0,59,59],
'1984102801:00:00','1984102801:00:00','1985033100:59:59','1985033100:59:59' ],
],
1985 =>
[
[ [1985,3,31,1,0,0],[1985,3,31,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[1985,10,27,0,59,59],[1985,10,27,1,59,59],
'1985033101:00:00','1985033102:00:00','1985102700:59:59','1985102701:59:59' ],
[ [1985,10,27,1,0,0],[1985,10,27,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1986,3,30,0,59,59],[1986,3,30,0,59,59],
'1985102701:00:00','1985102701:00:00','1986033000:59:59','1986033000:59:59' ],
],
1986 =>
[
[ [1986,3,30,1,0,0],[1986,3,30,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[1986,10,26,0,59,59],[1986,10,26,1,59,59],
'1986033001:00:00','1986033002:00:00','1986102600:59:59','1986102601:59:59' ],
[ [1986,10,26,1,0,0],[1986,10,26,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1987,3,29,0,59,59],[1987,3,29,0,59,59],
'1986102601:00:00','1986102601:00:00','1987032900:59:59','1987032900:59:59' ],
],
1987 =>
[
[ [1987,3,29,1,0,0],[1987,3,29,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[1987,10,25,0,59,59],[1987,10,25,1,59,59],
'1987032901:00:00','1987032902:00:00','1987102500:59:59','1987102501:59:59' ],
[ [1987,10,25,1,0,0],[1987,10,25,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1988,3,27,0,59,59],[1988,3,27,0,59,59],
'1987102501:00:00','1987102501:00:00','1988032700:59:59','1988032700:59:59' ],
],
1988 =>
[
[ [1988,3,27,1,0,0],[1988,3,27,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[1988,10,23,0,59,59],[1988,10,23,1,59,59],
'1988032701:00:00','1988032702:00:00','1988102300:59:59','1988102301:59:59' ],
[ [1988,10,23,1,0,0],[1988,10,23,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1989,3,26,0,59,59],[1989,3,26,0,59,59],
'1988102301:00:00','1988102301:00:00','1989032600:59:59','1989032600:59:59' ],
],
1989 =>
[
[ [1989,3,26,1,0,0],[1989,3,26,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[1989,10,29,0,59,59],[1989,10,29,1,59,59],
'1989032601:00:00','1989032602:00:00','1989102900:59:59','1989102901:59:59' ],
[ [1989,10,29,1,0,0],[1989,10,29,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1990,3,25,0,59,59],[1990,3,25,0,59,59],
'1989102901:00:00','1989102901:00:00','1990032500:59:59','1990032500:59:59' ],
],
1990 =>
[
[ [1990,3,25,1,0,0],[1990,3,25,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[1990,10,28,0,59,59],[1990,10,28,1,59,59],
'1990032501:00:00','1990032502:00:00','1990102800:59:59','1990102801:59:59' ],
[ [1990,10,28,1,0,0],[1990,10,28,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1991,3,31,0,59,59],[1991,3,31,0,59,59],
'1990102801:00:00','1990102801:00:00','1991033100:59:59','1991033100:59:59' ],
],
1991 =>
[
[ [1991,3,31,1,0,0],[1991,3,31,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[1991,10,27,0,59,59],[1991,10,27,1,59,59],
'1991033101:00:00','1991033102:00:00','1991102700:59:59','1991102701:59:59' ],
[ [1991,10,27,1,0,0],[1991,10,27,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1992,3,29,0,59,59],[1992,3,29,0,59,59],
'1991102701:00:00','1991102701:00:00','1992032900:59:59','1992032900:59:59' ],
],
1992 =>
[
[ [1992,3,29,1,0,0],[1992,3,29,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[1992,10,25,0,59,59],[1992,10,25,1,59,59],
'1992032901:00:00','1992032902:00:00','1992102500:59:59','1992102501:59:59' ],
[ [1992,10,25,1,0,0],[1992,10,25,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1993,3,28,0,59,59],[1993,3,28,0,59,59],
'1992102501:00:00','1992102501:00:00','1993032800:59:59','1993032800:59:59' ],
],
1993 =>
[
[ [1993,3,28,1,0,0],[1993,3,28,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[1993,10,24,0,59,59],[1993,10,24,1,59,59],
'1993032801:00:00','1993032802:00:00','1993102400:59:59','1993102401:59:59' ],
[ [1993,10,24,1,0,0],[1993,10,24,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1994,3,27,0,59,59],[1994,3,27,0,59,59],
'1993102401:00:00','1993102401:00:00','1994032700:59:59','1994032700:59:59' ],
],
1994 =>
[
[ [1994,3,27,1,0,0],[1994,3,27,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[1994,10,23,0,59,59],[1994,10,23,1,59,59],
'1994032701:00:00','1994032702:00:00','1994102300:59:59','1994102301:59:59' ],
[ [1994,10,23,1,0,0],[1994,10,23,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1995,3,26,0,59,59],[1995,3,26,0,59,59],
'1994102301:00:00','1994102301:00:00','1995032600:59:59','1995032600:59:59' ],
],
1995 =>
[
[ [1995,3,26,1,0,0],[1995,3,26,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[1995,10,22,0,59,59],[1995,10,22,1,59,59],
'1995032601:00:00','1995032602:00:00','1995102200:59:59','1995102201:59:59' ],
[ [1995,10,22,1,0,0],[1995,10,22,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1996,3,31,0,59,59],[1996,3,31,0,59,59],
'1995102201:00:00','1995102201:00:00','1996033100:59:59','1996033100:59:59' ],
],
1996 =>
[
[ [1996,3,31,1,0,0],[1996,3,31,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[1996,10,27,0,59,59],[1996,10,27,1,59,59],
'1996033101:00:00','1996033102:00:00','1996102700:59:59','1996102701:59:59' ],
[ [1996,10,27,1,0,0],[1996,10,27,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1997,3,30,0,59,59],[1997,3,30,0,59,59],
'1996102701:00:00','1996102701:00:00','1997033000:59:59','1997033000:59:59' ],
],
1997 =>
[
[ [1997,3,30,1,0,0],[1997,3,30,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[1997,10,26,0,59,59],[1997,10,26,1,59,59],
'1997033001:00:00','1997033002:00:00','1997102600:59:59','1997102601:59:59' ],
[ [1997,10,26,1,0,0],[1997,10,26,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1998,3,29,0,59,59],[1998,3,29,0,59,59],
'1997102601:00:00','1997102601:00:00','1998032900:59:59','1998032900:59:59' ],
],
1998 =>
[
[ [1998,3,29,1,0,0],[1998,3,29,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[1998,10,25,0,59,59],[1998,10,25,1,59,59],
'1998032901:00:00','1998032902:00:00','1998102500:59:59','1998102501:59:59' ],
[ [1998,10,25,1,0,0],[1998,10,25,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[1999,3,28,0,59,59],[1999,3,28,0,59,59],
'1998102501:00:00','1998102501:00:00','1999032800:59:59','1999032800:59:59' ],
],
1999 =>
[
[ [1999,3,28,1,0,0],[1999,3,28,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[1999,10,31,0,59,59],[1999,10,31,1,59,59],
'1999032801:00:00','1999032802:00:00','1999103100:59:59','1999103101:59:59' ],
[ [1999,10,31,1,0,0],[1999,10,31,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2000,3,26,0,59,59],[2000,3,26,0,59,59],
'1999103101:00:00','1999103101:00:00','2000032600:59:59','2000032600:59:59' ],
],
2000 =>
[
[ [2000,3,26,1,0,0],[2000,3,26,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2000,10,29,0,59,59],[2000,10,29,1,59,59],
'2000032601:00:00','2000032602:00:00','2000102900:59:59','2000102901:59:59' ],
[ [2000,10,29,1,0,0],[2000,10,29,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2001,3,25,0,59,59],[2001,3,25,0,59,59],
'2000102901:00:00','2000102901:00:00','2001032500:59:59','2001032500:59:59' ],
],
2001 =>
[
[ [2001,3,25,1,0,0],[2001,3,25,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2001,10,28,0,59,59],[2001,10,28,1,59,59],
'2001032501:00:00','2001032502:00:00','2001102800:59:59','2001102801:59:59' ],
[ [2001,10,28,1,0,0],[2001,10,28,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2002,3,31,0,59,59],[2002,3,31,0,59,59],
'2001102801:00:00','2001102801:00:00','2002033100:59:59','2002033100:59:59' ],
],
2002 =>
[
[ [2002,3,31,1,0,0],[2002,3,31,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2002,10,27,0,59,59],[2002,10,27,1,59,59],
'2002033101:00:00','2002033102:00:00','2002102700:59:59','2002102701:59:59' ],
[ [2002,10,27,1,0,0],[2002,10,27,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2003,3,30,0,59,59],[2003,3,30,0,59,59],
'2002102701:00:00','2002102701:00:00','2003033000:59:59','2003033000:59:59' ],
],
2003 =>
[
[ [2003,3,30,1,0,0],[2003,3,30,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2003,10,26,0,59,59],[2003,10,26,1,59,59],
'2003033001:00:00','2003033002:00:00','2003102600:59:59','2003102601:59:59' ],
[ [2003,10,26,1,0,0],[2003,10,26,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2004,3,28,0,59,59],[2004,3,28,0,59,59],
'2003102601:00:00','2003102601:00:00','2004032800:59:59','2004032800:59:59' ],
],
2004 =>
[
[ [2004,3,28,1,0,0],[2004,3,28,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2004,10,31,0,59,59],[2004,10,31,1,59,59],
'2004032801:00:00','2004032802:00:00','2004103100:59:59','2004103101:59:59' ],
[ [2004,10,31,1,0,0],[2004,10,31,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2005,3,27,0,59,59],[2005,3,27,0,59,59],
'2004103101:00:00','2004103101:00:00','2005032700:59:59','2005032700:59:59' ],
],
2005 =>
[
[ [2005,3,27,1,0,0],[2005,3,27,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2005,10,30,0,59,59],[2005,10,30,1,59,59],
'2005032701:00:00','2005032702:00:00','2005103000:59:59','2005103001:59:59' ],
[ [2005,10,30,1,0,0],[2005,10,30,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2006,3,26,0,59,59],[2006,3,26,0,59,59],
'2005103001:00:00','2005103001:00:00','2006032600:59:59','2006032600:59:59' ],
],
2006 =>
[
[ [2006,3,26,1,0,0],[2006,3,26,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2006,10,29,0,59,59],[2006,10,29,1,59,59],
'2006032601:00:00','2006032602:00:00','2006102900:59:59','2006102901:59:59' ],
[ [2006,10,29,1,0,0],[2006,10,29,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2007,3,25,0,59,59],[2007,3,25,0,59,59],
'2006102901:00:00','2006102901:00:00','2007032500:59:59','2007032500:59:59' ],
],
2007 =>
[
[ [2007,3,25,1,0,0],[2007,3,25,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2007,10,28,0,59,59],[2007,10,28,1,59,59],
'2007032501:00:00','2007032502:00:00','2007102800:59:59','2007102801:59:59' ],
[ [2007,10,28,1,0,0],[2007,10,28,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2008,3,30,0,59,59],[2008,3,30,0,59,59],
'2007102801:00:00','2007102801:00:00','2008033000:59:59','2008033000:59:59' ],
],
2008 =>
[
[ [2008,3,30,1,0,0],[2008,3,30,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2008,10,26,0,59,59],[2008,10,26,1,59,59],
'2008033001:00:00','2008033002:00:00','2008102600:59:59','2008102601:59:59' ],
[ [2008,10,26,1,0,0],[2008,10,26,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2009,3,29,0,59,59],[2009,3,29,0,59,59],
'2008102601:00:00','2008102601:00:00','2009032900:59:59','2009032900:59:59' ],
],
2009 =>
[
[ [2009,3,29,1,0,0],[2009,3,29,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2009,10,25,0,59,59],[2009,10,25,1,59,59],
'2009032901:00:00','2009032902:00:00','2009102500:59:59','2009102501:59:59' ],
[ [2009,10,25,1,0,0],[2009,10,25,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2010,3,28,0,59,59],[2010,3,28,0,59,59],
'2009102501:00:00','2009102501:00:00','2010032800:59:59','2010032800:59:59' ],
],
2010 =>
[
[ [2010,3,28,1,0,0],[2010,3,28,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2010,10,31,0,59,59],[2010,10,31,1,59,59],
'2010032801:00:00','2010032802:00:00','2010103100:59:59','2010103101:59:59' ],
[ [2010,10,31,1,0,0],[2010,10,31,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2011,3,27,0,59,59],[2011,3,27,0,59,59],
'2010103101:00:00','2010103101:00:00','2011032700:59:59','2011032700:59:59' ],
],
2011 =>
[
[ [2011,3,27,1,0,0],[2011,3,27,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2011,10,30,0,59,59],[2011,10,30,1,59,59],
'2011032701:00:00','2011032702:00:00','2011103000:59:59','2011103001:59:59' ],
[ [2011,10,30,1,0,0],[2011,10,30,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2012,3,25,0,59,59],[2012,3,25,0,59,59],
'2011103001:00:00','2011103001:00:00','2012032500:59:59','2012032500:59:59' ],
],
2012 =>
[
[ [2012,3,25,1,0,0],[2012,3,25,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2012,10,28,0,59,59],[2012,10,28,1,59,59],
'2012032501:00:00','2012032502:00:00','2012102800:59:59','2012102801:59:59' ],
[ [2012,10,28,1,0,0],[2012,10,28,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2013,3,31,0,59,59],[2013,3,31,0,59,59],
'2012102801:00:00','2012102801:00:00','2013033100:59:59','2013033100:59:59' ],
],
2013 =>
[
[ [2013,3,31,1,0,0],[2013,3,31,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2013,10,27,0,59,59],[2013,10,27,1,59,59],
'2013033101:00:00','2013033102:00:00','2013102700:59:59','2013102701:59:59' ],
[ [2013,10,27,1,0,0],[2013,10,27,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2014,3,30,0,59,59],[2014,3,30,0,59,59],
'2013102701:00:00','2013102701:00:00','2014033000:59:59','2014033000:59:59' ],
],
2014 =>
[
[ [2014,3,30,1,0,0],[2014,3,30,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2014,10,26,0,59,59],[2014,10,26,1,59,59],
'2014033001:00:00','2014033002:00:00','2014102600:59:59','2014102601:59:59' ],
[ [2014,10,26,1,0,0],[2014,10,26,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2015,3,29,0,59,59],[2015,3,29,0,59,59],
'2014102601:00:00','2014102601:00:00','2015032900:59:59','2015032900:59:59' ],
],
2015 =>
[
[ [2015,3,29,1,0,0],[2015,3,29,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2015,10,25,0,59,59],[2015,10,25,1,59,59],
'2015032901:00:00','2015032902:00:00','2015102500:59:59','2015102501:59:59' ],
[ [2015,10,25,1,0,0],[2015,10,25,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2016,3,27,0,59,59],[2016,3,27,0,59,59],
'2015102501:00:00','2015102501:00:00','2016032700:59:59','2016032700:59:59' ],
],
2016 =>
[
[ [2016,3,27,1,0,0],[2016,3,27,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2016,10,30,0,59,59],[2016,10,30,1,59,59],
'2016032701:00:00','2016032702:00:00','2016103000:59:59','2016103001:59:59' ],
[ [2016,10,30,1,0,0],[2016,10,30,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2017,3,26,0,59,59],[2017,3,26,0,59,59],
'2016103001:00:00','2016103001:00:00','2017032600:59:59','2017032600:59:59' ],
],
2017 =>
[
[ [2017,3,26,1,0,0],[2017,3,26,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2017,10,29,0,59,59],[2017,10,29,1,59,59],
'2017032601:00:00','2017032602:00:00','2017102900:59:59','2017102901:59:59' ],
[ [2017,10,29,1,0,0],[2017,10,29,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2018,3,25,0,59,59],[2018,3,25,0,59,59],
'2017102901:00:00','2017102901:00:00','2018032500:59:59','2018032500:59:59' ],
],
2018 =>
[
[ [2018,3,25,1,0,0],[2018,3,25,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2018,10,28,0,59,59],[2018,10,28,1,59,59],
'2018032501:00:00','2018032502:00:00','2018102800:59:59','2018102801:59:59' ],
[ [2018,10,28,1,0,0],[2018,10,28,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2019,3,31,0,59,59],[2019,3,31,0,59,59],
'2018102801:00:00','2018102801:00:00','2019033100:59:59','2019033100:59:59' ],
],
2019 =>
[
[ [2019,3,31,1,0,0],[2019,3,31,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2019,10,27,0,59,59],[2019,10,27,1,59,59],
'2019033101:00:00','2019033102:00:00','2019102700:59:59','2019102701:59:59' ],
[ [2019,10,27,1,0,0],[2019,10,27,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2020,3,29,0,59,59],[2020,3,29,0,59,59],
'2019102701:00:00','2019102701:00:00','2020032900:59:59','2020032900:59:59' ],
],
2020 =>
[
[ [2020,3,29,1,0,0],[2020,3,29,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2020,10,25,0,59,59],[2020,10,25,1,59,59],
'2020032901:00:00','2020032902:00:00','2020102500:59:59','2020102501:59:59' ],
[ [2020,10,25,1,0,0],[2020,10,25,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2021,3,28,0,59,59],[2021,3,28,0,59,59],
'2020102501:00:00','2020102501:00:00','2021032800:59:59','2021032800:59:59' ],
],
2021 =>
[
[ [2021,3,28,1,0,0],[2021,3,28,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2021,10,31,0,59,59],[2021,10,31,1,59,59],
'2021032801:00:00','2021032802:00:00','2021103100:59:59','2021103101:59:59' ],
[ [2021,10,31,1,0,0],[2021,10,31,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2022,3,27,0,59,59],[2022,3,27,0,59,59],
'2021103101:00:00','2021103101:00:00','2022032700:59:59','2022032700:59:59' ],
],
2022 =>
[
[ [2022,3,27,1,0,0],[2022,3,27,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2022,10,30,0,59,59],[2022,10,30,1,59,59],
'2022032701:00:00','2022032702:00:00','2022103000:59:59','2022103001:59:59' ],
[ [2022,10,30,1,0,0],[2022,10,30,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2023,3,26,0,59,59],[2023,3,26,0,59,59],
'2022103001:00:00','2022103001:00:00','2023032600:59:59','2023032600:59:59' ],
],
2023 =>
[
[ [2023,3,26,1,0,0],[2023,3,26,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2023,10,29,0,59,59],[2023,10,29,1,59,59],
'2023032601:00:00','2023032602:00:00','2023102900:59:59','2023102901:59:59' ],
[ [2023,10,29,1,0,0],[2023,10,29,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2024,3,31,0,59,59],[2024,3,31,0,59,59],
'2023102901:00:00','2023102901:00:00','2024033100:59:59','2024033100:59:59' ],
],
2024 =>
[
[ [2024,3,31,1,0,0],[2024,3,31,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2024,10,27,0,59,59],[2024,10,27,1,59,59],
'2024033101:00:00','2024033102:00:00','2024102700:59:59','2024102701:59:59' ],
[ [2024,10,27,1,0,0],[2024,10,27,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2025,3,30,0,59,59],[2025,3,30,0,59,59],
'2024102701:00:00','2024102701:00:00','2025033000:59:59','2025033000:59:59' ],
],
2025 =>
[
[ [2025,3,30,1,0,0],[2025,3,30,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2025,10,26,0,59,59],[2025,10,26,1,59,59],
'2025033001:00:00','2025033002:00:00','2025102600:59:59','2025102601:59:59' ],
[ [2025,10,26,1,0,0],[2025,10,26,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2026,3,29,0,59,59],[2026,3,29,0,59,59],
'2025102601:00:00','2025102601:00:00','2026032900:59:59','2026032900:59:59' ],
],
2026 =>
[
[ [2026,3,29,1,0,0],[2026,3,29,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2026,10,25,0,59,59],[2026,10,25,1,59,59],
'2026032901:00:00','2026032902:00:00','2026102500:59:59','2026102501:59:59' ],
[ [2026,10,25,1,0,0],[2026,10,25,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2027,3,28,0,59,59],[2027,3,28,0,59,59],
'2026102501:00:00','2026102501:00:00','2027032800:59:59','2027032800:59:59' ],
],
2027 =>
[
[ [2027,3,28,1,0,0],[2027,3,28,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2027,10,31,0,59,59],[2027,10,31,1,59,59],
'2027032801:00:00','2027032802:00:00','2027103100:59:59','2027103101:59:59' ],
[ [2027,10,31,1,0,0],[2027,10,31,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2028,3,26,0,59,59],[2028,3,26,0,59,59],
'2027103101:00:00','2027103101:00:00','2028032600:59:59','2028032600:59:59' ],
],
2028 =>
[
[ [2028,3,26,1,0,0],[2028,3,26,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2028,10,29,0,59,59],[2028,10,29,1,59,59],
'2028032601:00:00','2028032602:00:00','2028102900:59:59','2028102901:59:59' ],
[ [2028,10,29,1,0,0],[2028,10,29,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2029,3,25,0,59,59],[2029,3,25,0,59,59],
'2028102901:00:00','2028102901:00:00','2029032500:59:59','2029032500:59:59' ],
],
2029 =>
[
[ [2029,3,25,1,0,0],[2029,3,25,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2029,10,28,0,59,59],[2029,10,28,1,59,59],
'2029032501:00:00','2029032502:00:00','2029102800:59:59','2029102801:59:59' ],
[ [2029,10,28,1,0,0],[2029,10,28,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2030,3,31,0,59,59],[2030,3,31,0,59,59],
'2029102801:00:00','2029102801:00:00','2030033100:59:59','2030033100:59:59' ],
],
2030 =>
[
[ [2030,3,31,1,0,0],[2030,3,31,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2030,10,27,0,59,59],[2030,10,27,1,59,59],
'2030033101:00:00','2030033102:00:00','2030102700:59:59','2030102701:59:59' ],
[ [2030,10,27,1,0,0],[2030,10,27,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2031,3,30,0,59,59],[2031,3,30,0,59,59],
'2030102701:00:00','2030102701:00:00','2031033000:59:59','2031033000:59:59' ],
],
2031 =>
[
[ [2031,3,30,1,0,0],[2031,3,30,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2031,10,26,0,59,59],[2031,10,26,1,59,59],
'2031033001:00:00','2031033002:00:00','2031102600:59:59','2031102601:59:59' ],
[ [2031,10,26,1,0,0],[2031,10,26,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2032,3,28,0,59,59],[2032,3,28,0,59,59],
'2031102601:00:00','2031102601:00:00','2032032800:59:59','2032032800:59:59' ],
],
2032 =>
[
[ [2032,3,28,1,0,0],[2032,3,28,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2032,10,31,0,59,59],[2032,10,31,1,59,59],
'2032032801:00:00','2032032802:00:00','2032103100:59:59','2032103101:59:59' ],
[ [2032,10,31,1,0,0],[2032,10,31,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2033,3,27,0,59,59],[2033,3,27,0,59,59],
'2032103101:00:00','2032103101:00:00','2033032700:59:59','2033032700:59:59' ],
],
2033 =>
[
[ [2033,3,27,1,0,0],[2033,3,27,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2033,10,30,0,59,59],[2033,10,30,1,59,59],
'2033032701:00:00','2033032702:00:00','2033103000:59:59','2033103001:59:59' ],
[ [2033,10,30,1,0,0],[2033,10,30,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2034,3,26,0,59,59],[2034,3,26,0,59,59],
'2033103001:00:00','2033103001:00:00','2034032600:59:59','2034032600:59:59' ],
],
2034 =>
[
[ [2034,3,26,1,0,0],[2034,3,26,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2034,10,29,0,59,59],[2034,10,29,1,59,59],
'2034032601:00:00','2034032602:00:00','2034102900:59:59','2034102901:59:59' ],
[ [2034,10,29,1,0,0],[2034,10,29,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2035,3,25,0,59,59],[2035,3,25,0,59,59],
'2034102901:00:00','2034102901:00:00','2035032500:59:59','2035032500:59:59' ],
],
2035 =>
[
[ [2035,3,25,1,0,0],[2035,3,25,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2035,10,28,0,59,59],[2035,10,28,1,59,59],
'2035032501:00:00','2035032502:00:00','2035102800:59:59','2035102801:59:59' ],
[ [2035,10,28,1,0,0],[2035,10,28,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2036,3,30,0,59,59],[2036,3,30,0,59,59],
'2035102801:00:00','2035102801:00:00','2036033000:59:59','2036033000:59:59' ],
],
2036 =>
[
[ [2036,3,30,1,0,0],[2036,3,30,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2036,10,26,0,59,59],[2036,10,26,1,59,59],
'2036033001:00:00','2036033002:00:00','2036102600:59:59','2036102601:59:59' ],
[ [2036,10,26,1,0,0],[2036,10,26,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2037,3,29,0,59,59],[2037,3,29,0,59,59],
'2036102601:00:00','2036102601:00:00','2037032900:59:59','2037032900:59:59' ],
],
2037 =>
[
[ [2037,3,29,1,0,0],[2037,3,29,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2037,10,25,0,59,59],[2037,10,25,1,59,59],
'2037032901:00:00','2037032902:00:00','2037102500:59:59','2037102501:59:59' ],
[ [2037,10,25,1,0,0],[2037,10,25,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2038,3,28,0,59,59],[2038,3,28,0,59,59],
'2037102501:00:00','2037102501:00:00','2038032800:59:59','2038032800:59:59' ],
],
2038 =>
[
[ [2038,3,28,1,0,0],[2038,3,28,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2038,10,31,0,59,59],[2038,10,31,1,59,59],
'2038032801:00:00','2038032802:00:00','2038103100:59:59','2038103101:59:59' ],
[ [2038,10,31,1,0,0],[2038,10,31,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2039,3,27,0,59,59],[2039,3,27,0,59,59],
'2038103101:00:00','2038103101:00:00','2039032700:59:59','2039032700:59:59' ],
],
2039 =>
[
[ [2039,3,27,1,0,0],[2039,3,27,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2039,10,30,0,59,59],[2039,10,30,1,59,59],
'2039032701:00:00','2039032702:00:00','2039103000:59:59','2039103001:59:59' ],
[ [2039,10,30,1,0,0],[2039,10,30,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2040,3,25,0,59,59],[2040,3,25,0,59,59],
'2039103001:00:00','2039103001:00:00','2040032500:59:59','2040032500:59:59' ],
],
2040 =>
[
[ [2040,3,25,1,0,0],[2040,3,25,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2040,10,28,0,59,59],[2040,10,28,1,59,59],
'2040032501:00:00','2040032502:00:00','2040102800:59:59','2040102801:59:59' ],
[ [2040,10,28,1,0,0],[2040,10,28,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2041,3,31,0,59,59],[2041,3,31,0,59,59],
'2040102801:00:00','2040102801:00:00','2041033100:59:59','2041033100:59:59' ],
],
2041 =>
[
[ [2041,3,31,1,0,0],[2041,3,31,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2041,10,27,0,59,59],[2041,10,27,1,59,59],
'2041033101:00:00','2041033102:00:00','2041102700:59:59','2041102701:59:59' ],
[ [2041,10,27,1,0,0],[2041,10,27,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2042,3,30,0,59,59],[2042,3,30,0,59,59],
'2041102701:00:00','2041102701:00:00','2042033000:59:59','2042033000:59:59' ],
],
2042 =>
[
[ [2042,3,30,1,0,0],[2042,3,30,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2042,10,26,0,59,59],[2042,10,26,1,59,59],
'2042033001:00:00','2042033002:00:00','2042102600:59:59','2042102601:59:59' ],
[ [2042,10,26,1,0,0],[2042,10,26,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2043,3,29,0,59,59],[2043,3,29,0,59,59],
'2042102601:00:00','2042102601:00:00','2043032900:59:59','2043032900:59:59' ],
],
2043 =>
[
[ [2043,3,29,1,0,0],[2043,3,29,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2043,10,25,0,59,59],[2043,10,25,1,59,59],
'2043032901:00:00','2043032902:00:00','2043102500:59:59','2043102501:59:59' ],
[ [2043,10,25,1,0,0],[2043,10,25,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2044,3,27,0,59,59],[2044,3,27,0,59,59],
'2043102501:00:00','2043102501:00:00','2044032700:59:59','2044032700:59:59' ],
],
2044 =>
[
[ [2044,3,27,1,0,0],[2044,3,27,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2044,10,30,0,59,59],[2044,10,30,1,59,59],
'2044032701:00:00','2044032702:00:00','2044103000:59:59','2044103001:59:59' ],
[ [2044,10,30,1,0,0],[2044,10,30,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2045,3,26,0,59,59],[2045,3,26,0,59,59],
'2044103001:00:00','2044103001:00:00','2045032600:59:59','2045032600:59:59' ],
],
2045 =>
[
[ [2045,3,26,1,0,0],[2045,3,26,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2045,10,29,0,59,59],[2045,10,29,1,59,59],
'2045032601:00:00','2045032602:00:00','2045102900:59:59','2045102901:59:59' ],
[ [2045,10,29,1,0,0],[2045,10,29,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2046,3,25,0,59,59],[2046,3,25,0,59,59],
'2045102901:00:00','2045102901:00:00','2046032500:59:59','2046032500:59:59' ],
],
2046 =>
[
[ [2046,3,25,1,0,0],[2046,3,25,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2046,10,28,0,59,59],[2046,10,28,1,59,59],
'2046032501:00:00','2046032502:00:00','2046102800:59:59','2046102801:59:59' ],
[ [2046,10,28,1,0,0],[2046,10,28,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2047,3,31,0,59,59],[2047,3,31,0,59,59],
'2046102801:00:00','2046102801:00:00','2047033100:59:59','2047033100:59:59' ],
],
2047 =>
[
[ [2047,3,31,1,0,0],[2047,3,31,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2047,10,27,0,59,59],[2047,10,27,1,59,59],
'2047033101:00:00','2047033102:00:00','2047102700:59:59','2047102701:59:59' ],
[ [2047,10,27,1,0,0],[2047,10,27,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2048,3,29,0,59,59],[2048,3,29,0,59,59],
'2047102701:00:00','2047102701:00:00','2048032900:59:59','2048032900:59:59' ],
],
2048 =>
[
[ [2048,3,29,1,0,0],[2048,3,29,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2048,10,25,0,59,59],[2048,10,25,1,59,59],
'2048032901:00:00','2048032902:00:00','2048102500:59:59','2048102501:59:59' ],
[ [2048,10,25,1,0,0],[2048,10,25,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2049,3,28,0,59,59],[2049,3,28,0,59,59],
'2048102501:00:00','2048102501:00:00','2049032800:59:59','2049032800:59:59' ],
],
2049 =>
[
[ [2049,3,28,1,0,0],[2049,3,28,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2049,10,31,0,59,59],[2049,10,31,1,59,59],
'2049032801:00:00','2049032802:00:00','2049103100:59:59','2049103101:59:59' ],
[ [2049,10,31,1,0,0],[2049,10,31,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2050,3,27,0,59,59],[2050,3,27,0,59,59],
'2049103101:00:00','2049103101:00:00','2050032700:59:59','2050032700:59:59' ],
],
2050 =>
[
[ [2050,3,27,1,0,0],[2050,3,27,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2050,10,30,0,59,59],[2050,10,30,1,59,59],
'2050032701:00:00','2050032702:00:00','2050103000:59:59','2050103001:59:59' ],
[ [2050,10,30,1,0,0],[2050,10,30,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2051,3,26,0,59,59],[2051,3,26,0,59,59],
'2050103001:00:00','2050103001:00:00','2051032600:59:59','2051032600:59:59' ],
],
2051 =>
[
[ [2051,3,26,1,0,0],[2051,3,26,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2051,10,29,0,59,59],[2051,10,29,1,59,59],
'2051032601:00:00','2051032602:00:00','2051102900:59:59','2051102901:59:59' ],
[ [2051,10,29,1,0,0],[2051,10,29,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2052,3,31,0,59,59],[2052,3,31,0,59,59],
'2051102901:00:00','2051102901:00:00','2052033100:59:59','2052033100:59:59' ],
],
2052 =>
[
[ [2052,3,31,1,0,0],[2052,3,31,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2052,10,27,0,59,59],[2052,10,27,1,59,59],
'2052033101:00:00','2052033102:00:00','2052102700:59:59','2052102701:59:59' ],
[ [2052,10,27,1,0,0],[2052,10,27,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2053,3,30,0,59,59],[2053,3,30,0,59,59],
'2052102701:00:00','2052102701:00:00','2053033000:59:59','2053033000:59:59' ],
],
2053 =>
[
[ [2053,3,30,1,0,0],[2053,3,30,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2053,10,26,0,59,59],[2053,10,26,1,59,59],
'2053033001:00:00','2053033002:00:00','2053102600:59:59','2053102601:59:59' ],
[ [2053,10,26,1,0,0],[2053,10,26,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2054,3,29,0,59,59],[2054,3,29,0,59,59],
'2053102601:00:00','2053102601:00:00','2054032900:59:59','2054032900:59:59' ],
],
2054 =>
[
[ [2054,3,29,1,0,0],[2054,3,29,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2054,10,25,0,59,59],[2054,10,25,1,59,59],
'2054032901:00:00','2054032902:00:00','2054102500:59:59','2054102501:59:59' ],
[ [2054,10,25,1,0,0],[2054,10,25,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2055,3,28,0,59,59],[2055,3,28,0,59,59],
'2054102501:00:00','2054102501:00:00','2055032800:59:59','2055032800:59:59' ],
],
2055 =>
[
[ [2055,3,28,1,0,0],[2055,3,28,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2055,10,31,0,59,59],[2055,10,31,1,59,59],
'2055032801:00:00','2055032802:00:00','2055103100:59:59','2055103101:59:59' ],
[ [2055,10,31,1,0,0],[2055,10,31,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2056,3,26,0,59,59],[2056,3,26,0,59,59],
'2055103101:00:00','2055103101:00:00','2056032600:59:59','2056032600:59:59' ],
],
2056 =>
[
[ [2056,3,26,1,0,0],[2056,3,26,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2056,10,29,0,59,59],[2056,10,29,1,59,59],
'2056032601:00:00','2056032602:00:00','2056102900:59:59','2056102901:59:59' ],
[ [2056,10,29,1,0,0],[2056,10,29,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2057,3,25,0,59,59],[2057,3,25,0,59,59],
'2056102901:00:00','2056102901:00:00','2057032500:59:59','2057032500:59:59' ],
],
2057 =>
[
[ [2057,3,25,1,0,0],[2057,3,25,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2057,10,28,0,59,59],[2057,10,28,1,59,59],
'2057032501:00:00','2057032502:00:00','2057102800:59:59','2057102801:59:59' ],
[ [2057,10,28,1,0,0],[2057,10,28,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2058,3,31,0,59,59],[2058,3,31,0,59,59],
'2057102801:00:00','2057102801:00:00','2058033100:59:59','2058033100:59:59' ],
],
2058 =>
[
[ [2058,3,31,1,0,0],[2058,3,31,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2058,10,27,0,59,59],[2058,10,27,1,59,59],
'2058033101:00:00','2058033102:00:00','2058102700:59:59','2058102701:59:59' ],
[ [2058,10,27,1,0,0],[2058,10,27,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2059,3,30,0,59,59],[2059,3,30,0,59,59],
'2058102701:00:00','2058102701:00:00','2059033000:59:59','2059033000:59:59' ],
],
2059 =>
[
[ [2059,3,30,1,0,0],[2059,3,30,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2059,10,26,0,59,59],[2059,10,26,1,59,59],
'2059033001:00:00','2059033002:00:00','2059102600:59:59','2059102601:59:59' ],
[ [2059,10,26,1,0,0],[2059,10,26,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2060,3,28,0,59,59],[2060,3,28,0,59,59],
'2059102601:00:00','2059102601:00:00','2060032800:59:59','2060032800:59:59' ],
],
2060 =>
[
[ [2060,3,28,1,0,0],[2060,3,28,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2060,10,31,0,59,59],[2060,10,31,1,59,59],
'2060032801:00:00','2060032802:00:00','2060103100:59:59','2060103101:59:59' ],
[ [2060,10,31,1,0,0],[2060,10,31,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2061,3,27,0,59,59],[2061,3,27,0,59,59],
'2060103101:00:00','2060103101:00:00','2061032700:59:59','2061032700:59:59' ],
],
2061 =>
[
[ [2061,3,27,1,0,0],[2061,3,27,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2061,10,30,0,59,59],[2061,10,30,1,59,59],
'2061032701:00:00','2061032702:00:00','2061103000:59:59','2061103001:59:59' ],
[ [2061,10,30,1,0,0],[2061,10,30,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2062,3,26,0,59,59],[2062,3,26,0,59,59],
'2061103001:00:00','2061103001:00:00','2062032600:59:59','2062032600:59:59' ],
],
2062 =>
[
[ [2062,3,26,1,0,0],[2062,3,26,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2062,10,29,0,59,59],[2062,10,29,1,59,59],
'2062032601:00:00','2062032602:00:00','2062102900:59:59','2062102901:59:59' ],
[ [2062,10,29,1,0,0],[2062,10,29,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2063,3,25,0,59,59],[2063,3,25,0,59,59],
'2062102901:00:00','2062102901:00:00','2063032500:59:59','2063032500:59:59' ],
],
2063 =>
[
[ [2063,3,25,1,0,0],[2063,3,25,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2063,10,28,0,59,59],[2063,10,28,1,59,59],
'2063032501:00:00','2063032502:00:00','2063102800:59:59','2063102801:59:59' ],
[ [2063,10,28,1,0,0],[2063,10,28,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2064,3,30,0,59,59],[2064,3,30,0,59,59],
'2063102801:00:00','2063102801:00:00','2064033000:59:59','2064033000:59:59' ],
],
2064 =>
[
[ [2064,3,30,1,0,0],[2064,3,30,2,0,0],'+01:00:00',[1,0,0],
'IST',1,[2064,10,26,0,59,59],[2064,10,26,1,59,59],
'2064033001:00:00','2064033002:00:00','2064102600:59:59','2064102601:59:59' ],
[ [2064,10,26,1,0,0],[2064,10,26,1,0,0],'+00:00:00',[0,0,0],
'GMT',0,[2065,3,29,0,59,59],[2065,3,29,0,59,59],
'2064102601:00:00','2064102601:00:00','2065032900:59:59','2065032900:59:59' ],
],
);
# Ongoing (post-table) DST rules for this zone, used by DateTime::TimeZone
# to extend the precomputed span table above on demand: switch to DST
# (IST, +1:00) on the last Sunday of March and back to standard time
# (GMT, +0:00) on the last Sunday of October, both at 01:00 UTC
# ('type' => 'u' marks the change time as UTC-based).
%LastRule = (
  'zone' => {
    'dstoff' => '+01:00:00',
    'stdoff' => '+00:00:00',
  },
  'rules' => {
    '03' => {
      'flag' => 'last',
      'dow' => '7',
      'num' => '0',
      'type' => 'u',
      'time' => '01:00:00',
      'isdst' => '1',
      'abb' => 'IST',
    },
    '10' => {
      'flag' => 'last',
      'dow' => '7',
      'num' => '0',
      'type' => 'u',
      'time' => '01:00:00',
      'isdst' => '0',
      'abb' => 'GMT',
    },
  },
);

1;
| nriley/Pester | Source/Manip/TZ/eudubl00.pm | Perl | bsd-2-clause | 66,707 |
:- module(sortalCheck,[sortalCheckDrs/2]).
:- use_module(knowledge(ontology),[isa/2,isnota/2]).
:- use_module(knowledge(title),[title/3]).
:- use_module(library(lists),[member/2,select/3]).
/*========================================================================
   Sortal Check (main)
========================================================================*/

%% sortalCheckDrs(+DRS, +Var)
%
%  Entry point. Walks DRS collecting, for the discourse referent Var,
%  the sortal predicates asserted of it (one ref(Concepts) collector per
%  introduction of Var), then checks that every collected concept set is
%  consistent with the ontology (isa/2, isnota/2).
sortalCheckDrs(B,Var):-
   sortalCheckDrs(B,Var,[],P),
   allconsistent(P).
/*========================================================================
   Sortal Check (DRSs)
========================================================================*/

% sortalCheckDrs(+DRS, +Var, +PropsIn, -PropsOut)
% Threads an accumulator of ref(Concepts) terms through the DRS.

% Empty domain: only the conditions remain to be checked.
sortalCheckDrs(_:drs([],C),Var,P1,P2):- !,
   sortalCheckConds(C,Var,P1,P2).
% The referent being introduced is Var itself: open a fresh, empty
% concept collector ref([]) for this occurrence.
sortalCheckDrs(K:drs([_:_:X|D],C),Var,P1,P2):-
   X == Var, !,
   sortalCheckDrs(K:drs(D,C),Var,[ref([])|P1],P2).
% Any other referent in the domain is skipped.
% NOTE(review): this clause matches an unlabelled drs/2 term, whereas the
% clauses above expect Label:drs/2 -- confirm both forms occur upstream.
sortalCheckDrs(drs([_|D],C),Var,P1,P2):- !,
   sortalCheckDrs(drs(D,C),Var,P1,P2).
% Merge and alpha (presupposition/anaphora) structures: check both
% sub-DRSs in order, threading the accumulator.
sortalCheckDrs(merge(B1,B2),Var,P1,P3):- !,
   sortalCheckDrs(B1,Var,P1,P2),
   sortalCheckDrs(B2,Var,P2,P3).
sortalCheckDrs(alfa(_,B1,B2),Var,P1,P3):- !,
   sortalCheckDrs(B1,Var,P1,P2),
   sortalCheckDrs(B2,Var,P2,P3).
/*========================================================================
   Sortal Check (DRS-Conditions)
========================================================================*/

% sortalCheckConds(+Conds, +Var, +PropsIn, -PropsOut)
% Folds over a list of DRS-conditions: recurses into embedded DRSs and
% records predicate symbols applying to Var in the accumulated
% ref(Concepts) collectors.

sortalCheckConds([],_,P,P):- !.
% Strip the index/label from an indexed condition and retry.
sortalCheckConds([_:Cond|C],Var,P1,P2):- !,
   sortalCheckConds([Cond|C],Var,P1,P2).
% A condition of the form not(x = x) is trivially inconsistent: fail.
sortalCheckConds([not(drs([],[_:eq(X,Y)]))|_],_,_,_):-
   X==Y, !, fail.
% Unary complex conditions: recurse into the embedded DRS.
sortalCheckConds([not(B)|C],Var,P1,P3):- !,
   sortalCheckDrs(B,Var,P1,P2),
   sortalCheckConds(C,Var,P2,P3).
sortalCheckConds([nec(B)|C],Var,P1,P3):- !,
   sortalCheckDrs(B,Var,P1,P2),
   sortalCheckConds(C,Var,P2,P3).
sortalCheckConds([pos(B)|C],Var,P1,P3):- !,
   sortalCheckDrs(B,Var,P1,P2),
   sortalCheckConds(C,Var,P2,P3).
sortalCheckConds([prop(_,B)|C],Var,P1,P3):- !,
   sortalCheckDrs(B,Var,P1,P2),
   sortalCheckConds(C,Var,P2,P3).
% Binary complex conditions: recurse into both embedded DRSs in order.
sortalCheckConds([imp(B1,B2)|C],Var,P1,P4):- !,
   sortalCheckDrs(B1,Var,P1,P2),
   sortalCheckDrs(B2,Var,P2,P3),
   sortalCheckConds(C,Var,P3,P4).
sortalCheckConds([duplex(_,B1,_,B2)|C],Var,P1,P4):- !,
   sortalCheckDrs(B1,Var,P1,P2),
   sortalCheckDrs(B2,Var,P2,P3),
   sortalCheckConds(C,Var,P3,P4).
sortalCheckConds([or(B1,B2)|C],Var,P1,P4):- !,
   sortalCheckDrs(B1,Var,P1,P2),
   sortalCheckDrs(B2,Var,P2,P3),
   sortalCheckConds(C,Var,P3,P4).
% A predicate applied to Var: record its symbol in the most recently
% opened collector (the head of the accumulator). The commented-out
% select/3 line shows an earlier alternative that searched all
% collectors -- presumably replaced on purpose; confirm before reviving.
sortalCheckConds([pred(X,Sym,_,_)|C],Var,[ref(Ps)|P1],P2):-
   X == Var, !,
%   select(ref(Ps),P1,P2), !,
   sortalCheckConds(C,Var,[ref([Sym|Ps])|P1],P2).
% Predicates over other referents carry no information about Var.
sortalCheckConds([pred(_,_,_,_)|C],Var,P1,P2):-
   sortalCheckConds(C,Var,P1,P2).
% A title (type 'ttl') naming Var: map the title to a concept symbol via
% title/3 and add it to whichever collector select/3 finds.
sortalCheckConds([named(X,Title,_,ttl)|C],Var,P1,P3):-
   X == Var,
   title(_,Title,Sym),
   select(ref(Ps),P1,P2), !,
   sortalCheckConds(C,Var,[ref([Sym|Ps])|P2],P3).
% All remaining condition types carry no sortal information for Var.
sortalCheckConds([named(_,_,_,_)|C],Var,P1,P2):-
   sortalCheckConds(C,Var,P1,P2).
sortalCheckConds([rel(_,_,_,_)|C],Var,P1,P2):-
   sortalCheckConds(C,Var,P1,P2).
sortalCheckConds([role(_,_,_,_)|C],Var,P1,P2):-
   sortalCheckConds(C,Var,P1,P2).
sortalCheckConds([card(_,_,_)|C],Var,P1,P2):-
   sortalCheckConds(C,Var,P1,P2).
sortalCheckConds([timex(_,_)|C],Var,P1,P2):-
   sortalCheckConds(C,Var,P1,P2).
sortalCheckConds([eq(_,_)|C],Var,P1,P2):-
   sortalCheckConds(C,Var,P1,P2).
/*========================================================================
   Consistency Check (all referents)
========================================================================*/

% Every collected concept set must be internally consistent.
allconsistent([]):- !.
allconsistent([ref(Concepts)|L]):-
   consistent(Concepts),
   allconsistent(L).

/*========================================================================
   Consistency Check
========================================================================*/

% A set of zero or one concept is trivially consistent; otherwise close
% the set under the isa/2 hierarchy and check for isnota/2 conflicts.
consistent([]):- !.
consistent([_]):- !.
consistent(L1):-
   addSupConcepts(L1,L2),
   \+ conflict(L2).
/*========================================================================
   Add super concepts (by iteration until fixed point is reached)
========================================================================*/

% addSupConcepts(+Concepts, -Closed): repeatedly add direct
% super-concepts until a pass adds nothing new (fixed point).
addSupConcepts(C1,C3):- addSuper(C1,[],C2,Add), addSupConcepts(Add,C2,C3).
addSupConcepts([],C1,C2):- !, C2=C1.    % nothing added: fixed point reached
addSupConcepts(_,C1,C2):- addSupConcepts(C1,C2).

/*========================================================================
   Add super concepts (one cycle)
========================================================================*/

% addSuper(+Concepts, +Accu, -All, -Added): one pass over Concepts,
% collecting for each concept a direct isa/2 parent not yet present in
% either the remaining input or the accumulator.
addSuper([],L,L,[]).
addSuper([X|L1],Accu,L2,[Y|Added]):-
   isa(X,Y),
   \+ member(Y,L1), \+ member(Y,Accu), !, % if not yet added
   addSuper(L1,[X,Y|Accu],L2,Added).      % then add concept
addSuper([X|L1],Accu,L2,Added):-
   addSuper(L1,[X|Accu],L2,Added).
/*========================================================================
   Check for a conflict
========================================================================*/

% A concept set conflicts iff it contains two concepts declared
% disjoint by the ontology (isnota/2).
conflict(L):-
   member(X,L),
   isnota(X,Y),
   member(Y,L), !.
| TeamSPoon/logicmoo_workspace | packs_sys/logicmoo_nlu/ext/candc/src/prolog/boxer/sortalCheck.pl | Perl | mit | 4,974 |
# Transform the PGTS browser list (uas_useragent.csv, one record per line)
# into a YAML file of test cases for the ua-parser test suite.
use strict;
use warnings;
use utf8;
use open qw(:std :utf8);
binmode(STDOUT, ":utf8");

open(my $fh, '<', 'uas_useragent.csv')
    or die "Can't open uas_useragent.csv: $!";

print "# From http://www.pgts.com.au/download/data/browser_list.txt
# via http://www.texsoft.it/index.php?m=sw.php.useragent
test_cases:\n";

# Stream the file line by line instead of slurping it all into memory.
while (my $line = <$fh>) {
    # Each record looks like: "Family";"Version";...;"User agent string".
    # BUG FIX: skip rows that don't match. Previously an unmatched row
    # silently reused the stale $1/$2/$3 captures from the preceding
    # successful match, emitting a duplicate test case.
    next unless $line =~ /"(.*)";"(.*)";.*;"(.*)"/;
    my $family = '"' . $1 . '"';
    my ($v1, $v2, $v3) = split /\./, $2;
    # A version with fewer than three dotted parts leaves trailing
    # fields undef; normalise to '' (matches the eq '' checks below).
    for ($v1, $v2, $v3) { $_ = '' unless defined $_; }
    # Some UAs have double quotes in them :-/
    my $ua = $3;
    $ua =~ s/"/\\"/g;
    $ua = '"' . $ua . '"';
    # Where the version field is something like "Camino 0.8", pull the
    # real family name and major version out of it (single-quoted).
    my @special = qw(AOL Camino Chimera Epiphany Firebird K-Meleon MultiZilla Phoenix);
    foreach my $name (@special) {
        if ($v1 =~ /\Q$name\E (\d+)/) {
            $family = "'$name'";
            $v1     = $1;
        }
    }
    # Unversioned Firefox
    if ($v1 =~ /Firefox ?/) {
        $family = '"Firefox"';
        $v1     = '';
    }
    # Mismarked Firefox version
    $v1 = '1' if ($family =~ /Firefox/ && $line =~ /Firefox\/1\.0.*$/);
    if ($v1 =~ /MultiZilla/) {
        $family = '"MultiZilla"';
        $v1     = '';
    }
    $family = '"IE"' if ($family eq '"Internet Explorer"');
    # Families the parser deliberately does not version.
    if (   $family eq '"Xenu Link Sleuth"'
        || $family eq '"Zyborg"'
        || $family eq '"Thunderbird"')
    {
        $family = '"Other"';
        $v1 = $v2 = $v3 = '';
    }
    print "    - user_agent_string: $ua
      family: $family
      major: " . ($family eq '"Other"' ? '' : ($v1 eq '' ? "'0'" : "'$v1'")) . "
      minor: " . ($v2 eq '' ? '' : "'$v2'") . "
      patch: " . ($v3 eq '' ? '' : "'$v3'") . "\n";
}

close $fh or die "Error closing uas_useragent.csv: $!";
| sean-hill/map-around | node_modules/ua-parser/test_resources/transform-uas_useragent.pl | Perl | mit | 1,509 |
# This file is auto-generated by the Perl DateTime Suite time zone
# code generator (0.07) This code generator comes with the
# DateTime::TimeZone module distribution in the tools/ directory
#
# Generated from /tmp/ympzZnp0Uq/northamerica. Olson data version 2012c
#
# Do not edit this file directly.
#
package DateTime::TimeZone::America::North_Dakota::Beulah;
{
$DateTime::TimeZone::America::North_Dakota::Beulah::VERSION = '1.46';
}
use strict;
use Class::Singleton 1.03;
use DateTime::TimeZone;
use DateTime::TimeZone::OlsonDB;
@DateTime::TimeZone::America::North_Dakota::Beulah::ISA = ( 'Class::Singleton', 'DateTime::TimeZone' );
my $spans =
[
[
DateTime::TimeZone::NEG_INFINITY,
59418039600,
DateTime::TimeZone::NEG_INFINITY,
59418015173,
-24427,
0,
'LMT'
],
[
59418039600,
60502410000,
59418014400,
60502384800,
-25200,
0,
'MST'
],
[
60502410000,
60520550400,
60502388400,
60520528800,
-21600,
1,
'MDT'
],
[
60520550400,
60533859600,
60520525200,
60533834400,
-25200,
0,
'MST'
],
[
60533859600,
60552000000,
60533838000,
60551978400,
-21600,
1,
'MDT'
],
[
60552000000,
61255472400,
60551974800,
61255447200,
-25200,
0,
'MST'
],
[
61255472400,
61366287600,
61255450800,
61366266000,
-21600,
1,
'MWT'
],
[
61366287600,
61370294400,
61366266000,
61370272800,
-21600,
1,
'MPT'
],
[
61370294400,
62051302800,
61370269200,
62051277600,
-25200,
0,
'MST'
],
[
62051302800,
62067024000,
62051281200,
62067002400,
-21600,
1,
'MDT'
],
[
62067024000,
62082752400,
62066998800,
62082727200,
-25200,
0,
'MST'
],
[
62082752400,
62098473600,
62082730800,
62098452000,
-21600,
1,
'MDT'
],
[
62098473600,
62114202000,
62098448400,
62114176800,
-25200,
0,
'MST'
],
[
62114202000,
62129923200,
62114180400,
62129901600,
-21600,
1,
'MDT'
],
[
62129923200,
62145651600,
62129898000,
62145626400,
-25200,
0,
'MST'
],
[
62145651600,
62161372800,
62145630000,
62161351200,
-21600,
1,
'MDT'
],
[
62161372800,
62177101200,
62161347600,
62177076000,
-25200,
0,
'MST'
],
[
62177101200,
62193427200,
62177079600,
62193405600,
-21600,
1,
'MDT'
],
[
62193427200,
62209155600,
62193402000,
62209130400,
-25200,
0,
'MST'
],
[
62209155600,
62224876800,
62209134000,
62224855200,
-21600,
1,
'MDT'
],
[
62224876800,
62240605200,
62224851600,
62240580000,
-25200,
0,
'MST'
],
[
62240605200,
62256326400,
62240583600,
62256304800,
-21600,
1,
'MDT'
],
[
62256326400,
62262378000,
62256301200,
62262352800,
-25200,
0,
'MST'
],
[
62262378000,
62287776000,
62262356400,
62287754400,
-21600,
1,
'MDT'
],
[
62287776000,
62298061200,
62287750800,
62298036000,
-25200,
0,
'MST'
],
[
62298061200,
62319225600,
62298039600,
62319204000,
-21600,
1,
'MDT'
],
[
62319225600,
62334954000,
62319200400,
62334928800,
-25200,
0,
'MST'
],
[
62334954000,
62351280000,
62334932400,
62351258400,
-21600,
1,
'MDT'
],
[
62351280000,
62366403600,
62351254800,
62366378400,
-25200,
0,
'MST'
],
[
62366403600,
62382729600,
62366382000,
62382708000,
-21600,
1,
'MDT'
],
[
62382729600,
62398458000,
62382704400,
62398432800,
-25200,
0,
'MST'
],
[
62398458000,
62414179200,
62398436400,
62414157600,
-21600,
1,
'MDT'
],
[
62414179200,
62429907600,
62414154000,
62429882400,
-25200,
0,
'MST'
],
[
62429907600,
62445628800,
62429886000,
62445607200,
-21600,
1,
'MDT'
],
[
62445628800,
62461357200,
62445603600,
62461332000,
-25200,
0,
'MST'
],
[
62461357200,
62477078400,
62461335600,
62477056800,
-21600,
1,
'MDT'
],
[
62477078400,
62492806800,
62477053200,
62492781600,
-25200,
0,
'MST'
],
[
62492806800,
62508528000,
62492785200,
62508506400,
-21600,
1,
'MDT'
],
[
62508528000,
62524256400,
62508502800,
62524231200,
-25200,
0,
'MST'
],
[
62524256400,
62540582400,
62524234800,
62540560800,
-21600,
1,
'MDT'
],
[
62540582400,
62555706000,
62540557200,
62555680800,
-25200,
0,
'MST'
],
[
62555706000,
62572032000,
62555684400,
62572010400,
-21600,
1,
'MDT'
],
[
62572032000,
62587760400,
62572006800,
62587735200,
-25200,
0,
'MST'
],
[
62587760400,
62603481600,
62587738800,
62603460000,
-21600,
1,
'MDT'
],
[
62603481600,
62619210000,
62603456400,
62619184800,
-25200,
0,
'MST'
],
[
62619210000,
62634931200,
62619188400,
62634909600,
-21600,
1,
'MDT'
],
[
62634931200,
62650659600,
62634906000,
62650634400,
-25200,
0,
'MST'
],
[
62650659600,
62666380800,
62650638000,
62666359200,
-21600,
1,
'MDT'
],
[
62666380800,
62680294800,
62666355600,
62680269600,
-25200,
0,
'MST'
],
[
62680294800,
62697830400,
62680273200,
62697808800,
-21600,
1,
'MDT'
],
[
62697830400,
62711744400,
62697805200,
62711719200,
-25200,
0,
'MST'
],
[
62711744400,
62729884800,
62711722800,
62729863200,
-21600,
1,
'MDT'
],
[
62729884800,
62743194000,
62729859600,
62743168800,
-25200,
0,
'MST'
],
[
62743194000,
62761334400,
62743172400,
62761312800,
-21600,
1,
'MDT'
],
[
62761334400,
62774643600,
62761309200,
62774618400,
-25200,
0,
'MST'
],
[
62774643600,
62792784000,
62774622000,
62792762400,
-21600,
1,
'MDT'
],
[
62792784000,
62806698000,
62792758800,
62806672800,
-25200,
0,
'MST'
],
[
62806698000,
62824233600,
62806676400,
62824212000,
-21600,
1,
'MDT'
],
[
62824233600,
62838147600,
62824208400,
62838122400,
-25200,
0,
'MST'
],
[
62838147600,
62855683200,
62838126000,
62855661600,
-21600,
1,
'MDT'
],
[
62855683200,
62869597200,
62855658000,
62869572000,
-25200,
0,
'MST'
],
[
62869597200,
62887737600,
62869575600,
62887716000,
-21600,
1,
'MDT'
],
[
62887737600,
62901046800,
62887712400,
62901021600,
-25200,
0,
'MST'
],
[
62901046800,
62919187200,
62901025200,
62919165600,
-21600,
1,
'MDT'
],
[
62919187200,
62932496400,
62919162000,
62932471200,
-25200,
0,
'MST'
],
[
62932496400,
62950636800,
62932474800,
62950615200,
-21600,
1,
'MDT'
],
[
62950636800,
62964550800,
62950611600,
62964525600,
-25200,
0,
'MST'
],
[
62964550800,
62982086400,
62964529200,
62982064800,
-21600,
1,
'MDT'
],
[
62982086400,
62996000400,
62982061200,
62995975200,
-25200,
0,
'MST'
],
[
62996000400,
63013536000,
62995978800,
63013514400,
-21600,
1,
'MDT'
],
[
63013536000,
63027450000,
63013510800,
63027424800,
-25200,
0,
'MST'
],
[
63027450000,
63044985600,
63027428400,
63044964000,
-21600,
1,
'MDT'
],
[
63044985600,
63058899600,
63044960400,
63058874400,
-25200,
0,
'MST'
],
[
63058899600,
63077040000,
63058878000,
63077018400,
-21600,
1,
'MDT'
],
[
63077040000,
63090349200,
63077014800,
63090324000,
-25200,
0,
'MST'
],
[
63090349200,
63108489600,
63090327600,
63108468000,
-21600,
1,
'MDT'
],
[
63108489600,
63121798800,
63108464400,
63121773600,
-25200,
0,
'MST'
],
[
63121798800,
63139939200,
63121777200,
63139917600,
-21600,
1,
'MDT'
],
[
63139939200,
63153853200,
63139914000,
63153828000,
-25200,
0,
'MST'
],
[
63153853200,
63171388800,
63153831600,
63171367200,
-21600,
1,
'MDT'
],
[
63171388800,
63185302800,
63171363600,
63185277600,
-25200,
0,
'MST'
],
[
63185302800,
63202838400,
63185281200,
63202816800,
-21600,
1,
'MDT'
],
[
63202838400,
63216752400,
63202813200,
63216727200,
-25200,
0,
'MST'
],
[
63216752400,
63234892800,
63216730800,
63234871200,
-21600,
1,
'MDT'
],
[
63234892800,
63248202000,
63234867600,
63248176800,
-25200,
0,
'MST'
],
[
63248202000,
63266342400,
63248180400,
63266320800,
-21600,
1,
'MDT'
],
[
63266342400,
63279651600,
63266317200,
63279626400,
-25200,
0,
'MST'
],
[
63279651600,
63297792000,
63279630000,
63297770400,
-21600,
1,
'MDT'
],
[
63297792000,
63309286800,
63297766800,
63309261600,
-25200,
0,
'MST'
],
[
63309286800,
63329846400,
63309265200,
63329824800,
-21600,
1,
'MDT'
],
[
63329846400,
63340736400,
63329821200,
63340711200,
-25200,
0,
'MST'
],
[
63340736400,
63361296000,
63340714800,
63361274400,
-21600,
1,
'MDT'
],
[
63361296000,
63372186000,
63361270800,
63372160800,
-25200,
0,
'MST'
],
[
63372186000,
63392745600,
63372164400,
63392724000,
-21600,
1,
'MDT'
],
[
63392745600,
63404240400,
63392720400,
63404215200,
-25200,
0,
'MST'
],
[
63404240400,
63424800000,
63404218800,
63424778400,
-21600,
1,
'MDT'
],
[
63424800000,
63435686400,
63424778400,
63435664800,
-21600,
0,
'CST'
],
[
63435686400,
63456246000,
63435668400,
63456228000,
-18000,
1,
'CDT'
],
[
63456246000,
63467136000,
63456224400,
63467114400,
-21600,
0,
'CST'
],
[
63467136000,
63487695600,
63467118000,
63487677600,
-18000,
1,
'CDT'
],
[
63487695600,
63498585600,
63487674000,
63498564000,
-21600,
0,
'CST'
],
[
63498585600,
63519145200,
63498567600,
63519127200,
-18000,
1,
'CDT'
],
[
63519145200,
63530035200,
63519123600,
63530013600,
-21600,
0,
'CST'
],
[
63530035200,
63550594800,
63530017200,
63550576800,
-18000,
1,
'CDT'
],
[
63550594800,
63561484800,
63550573200,
63561463200,
-21600,
0,
'CST'
],
[
63561484800,
63582044400,
63561466800,
63582026400,
-18000,
1,
'CDT'
],
[
63582044400,
63593539200,
63582022800,
63593517600,
-21600,
0,
'CST'
],
[
63593539200,
63614098800,
63593521200,
63614080800,
-18000,
1,
'CDT'
],
[
63614098800,
63624988800,
63614077200,
63624967200,
-21600,
0,
'CST'
],
[
63624988800,
63645548400,
63624970800,
63645530400,
-18000,
1,
'CDT'
],
[
63645548400,
63656438400,
63645526800,
63656416800,
-21600,
0,
'CST'
],
[
63656438400,
63676998000,
63656420400,
63676980000,
-18000,
1,
'CDT'
],
[
63676998000,
63687888000,
63676976400,
63687866400,
-21600,
0,
'CST'
],
[
63687888000,
63708447600,
63687870000,
63708429600,
-18000,
1,
'CDT'
],
[
63708447600,
63719337600,
63708426000,
63719316000,
-21600,
0,
'CST'
],
[
63719337600,
63739897200,
63719319600,
63739879200,
-18000,
1,
'CDT'
],
[
63739897200,
63751392000,
63739875600,
63751370400,
-21600,
0,
'CST'
],
[
63751392000,
63771951600,
63751374000,
63771933600,
-18000,
1,
'CDT'
],
[
63771951600,
63782841600,
63771930000,
63782820000,
-21600,
0,
'CST'
],
[
63782841600,
63803401200,
63782823600,
63803383200,
-18000,
1,
'CDT'
],
[
63803401200,
63814291200,
63803379600,
63814269600,
-21600,
0,
'CST'
],
[
63814291200,
63834850800,
63814273200,
63834832800,
-18000,
1,
'CDT'
],
];
# --- Metadata accessors consumed by DateTime::TimeZone ------------------

sub olson_version { '2012c' }    # Olson (tz) database release this data was built from

sub has_dst_changes { 61 }       # number of DST transitions in the span table above

sub _max_year { 2022 }           # last year precomputed into the span table

# Construct the singleton instance, handing the precomputed span table
# to the generic DateTime::TimeZone initialiser.
sub _new_instance
{
    return shift->_init( @_, spans => $spans );
}

sub _last_offset { -21600 }      # UTC offset (seconds) after the final span: -6:00 (CST)
# Observance in effect after the precomputed spans run out: US Central
# time ('C%sT' with standard offset -6:00), starting in 2011. Used
# together with $rules below to generate spans beyond _max_year on demand.
my $last_observance = bless( {
  'format' => 'C%sT',
  'gmtoff' => '-6:00',
  'local_start_datetime' => bless( {
                 'formatter' => undef,
                 'local_rd_days' => 734083,
                 'local_rd_secs' => 7200,
                 'offset_modifier' => 0,
                 'rd_nanosecs' => 0,
                 'tz' => bless( {
                          'name' => 'floating',
                          'offset' => 0
                        }, 'DateTime::TimeZone::Floating' ),
                 'utc_rd_days' => 734083,
                 'utc_rd_secs' => 7200,
                 'utc_year' => 2011
               }, 'DateTime' ),
  'offset_from_std' => 0,
  'offset_from_utc' => -21600,
  'until' => [],
  'utc_start_datetime' => bless( {
                 'formatter' => undef,
                 'local_rd_days' => 734083,
                 'local_rd_secs' => 28800,
                 'offset_modifier' => 0,
                 'rd_nanosecs' => 0,
                 'tz' => bless( {
                          'name' => 'floating',
                          'offset' => 0
                        }, 'DateTime::TimeZone::Floating' ),
                 'utc_rd_days' => 734083,
                 'utc_rd_secs' => 28800,
                 'utc_year' => 2011
               }, 'DateTime' )
}, 'DateTime::TimeZone::OlsonDB::Observance' )
;
sub _last_observance { $last_observance }
# Ongoing US DST rules (2007 onward): fall back to standard time on the
# first Sunday of November at 2:00 and spring forward on the second
# Sunday of March at 2:00. Combined with $last_observance to compute
# spans beyond _max_year.
my $rules = [
  bless( {
    'at' => '2:00',
    'from' => '2007',
    'in' => 'Nov',
    'letter' => 'S',
    'name' => 'US',
    'offset_from_std' => 0,
    'on' => 'Sun>=1',
    'save' => '0',
    'to' => 'max',
    'type' => undef
  }, 'DateTime::TimeZone::OlsonDB::Rule' ),
  bless( {
    'at' => '2:00',
    'from' => '2007',
    'in' => 'Mar',
    'letter' => 'D',
    'name' => 'US',
    'offset_from_std' => 3600,
    'on' => 'Sun>=8',
    'save' => '1:00',
    'to' => 'max',
    'type' => undef
  }, 'DateTime::TimeZone::OlsonDB::Rule' )
]
;
sub _rules { $rules }

1;
| leighpauls/k2cro4 | third_party/perl/perl/vendor/lib/DateTime/TimeZone/America/North_Dakota/Beulah.pm | Perl | bsd-3-clause | 12,479 |
package DDG::Goodie::Frequency;
# ABSTRACT: Displays frequency of alphabet character (a-z)

use strict;
use DDG::Goodie;

# Fire on queries that start with "frequency" or "freq".
triggers start => 'frequency', 'freq';

# Remainder syntax: "of <what> in <text>", where <what> is a single
# letter a-z or an "all letters"/"characters"/"chars" form, e.g.
#   "freq of e in peter piper" or "frequency of all letters in hello".
handle remainder => sub {
  # $1 = what to count; $2 = the text to analyse. The match is
  # case-insensitive and both parts are lowercased below.
  if ($_ =~ /^of ([a-z]|(?:all ?|)(?:letters|characters|chars|)) in (.+)/i)
  {
    my $collect = lc $1;
    my $target_str = lc $2;
    # warn qq($collect\t$target_str\n);
    my $count = 0;   # total a-z letters seen: the denominator of every frequency
    my %freq;        # letter => number of occurrences (only for collected letters)
    my @chars = split //, $target_str;
    foreach (@chars)
    {
      # Only ASCII letters participate; digits/punctuation/whitespace
      # neither get counted nor inflate the total.
      if ($_ =~ /[a-z]/)
      {
        # When counting everything, tally each letter; otherwise tally
        # only the single requested letter.
        if ($collect =~ /all|letters|characters|chars/) { ++$freq{$_}; }
        else { ++$freq{$_} if $_ eq $collect; }
        ++$count;
      };
    };
    my @out;
    foreach my $key (keys %freq)
    {
      # Format each entry as "letter:occurrences/total".
      push @out, join ":", $key, $freq{$key} . "/" . $count;
    };
    # Entries sorted alphabetically by letter; no answer if nothing matched.
    return "Frequency: " . join ' ',sort(@out) if @out;
  };
  return;
};

# Output depends only on the query string, so the answer is cacheable.
zci is_cached => 1;

1;
| aleksandar-todorovic/zeroclickinfo-goodies | lib/DDG/Goodie/Frequency.pm | Perl | apache-2.0 | 818 |
=head1 This Week on perl5-porters (18-24 August 2003)
The Perl 5 porters are rather focused on perl 5.8.1 tuning and
adjustments. Smokes, valgrind checks, modules updates, and regression bug
hunting were among the main events of the week.
=head2 DESTROYing blessed code refs
Last year, Simon Cozens found that blessed code references weren't
DESTROYed properly (bug #10030). Dave Mitchell looked at it, and
diagnosed that it's caused by the fact that the blessed anonymous
subroutine is not a closure (usually) -- and indeed, turning it into a
closure fixes the behaviour of the destructor. Dave thinks that this bug
would be inefficient to fix, and suggests to document it as a misfeature.
Later in the thread, the proper meaning of the I<closure> word is discussed.
http://www.xray.mpe.mpg.de/mailing-lists/perl5-porters/2003-08/msg01300.html
=head2 More valgrinding
Last week, Jarkko Hietaniemi added to the test harness the possibility to
run the test suite under valgrind. This week, helped by Marcus
Holland-Moritz and other members of the crowd, he slightly tweaked
valgrind's setup to produce more useful and accurate reports. Some
interesting bugs are shaken out.
http://www.xray.mpe.mpg.de/mailing-lists/perl5-porters/2003-08/msg01455.html
=head2 Localization of C<$|>
Stas Bekman found a new bug in the localization of the C<$|> variable.
This bug wasn't present in perl 5.8.0, and causes significant problems
with mod_perl 2. Dave Mitchell identified himself as the guilty patcher,
explained the problem, and provided a better version of his original
patch.
http://www.xray.mpe.mpg.de/mailing-lists/perl5-porters/2003-08/msg01438.html
=head2 In Brief
Jari Aalto wishes that here-documents could be declared with the quote
operators syntax, as in : C<< <<q(FOO) >>, C<< <<qq(BAR) >>, etc. (bug
#23348). This doesn't seem to be unreasonable.
Tassilo von Parseval added to Devel::PPPort, the backward compatibility
wrapper for XS code, some adapters for the C<grok_*> functions, that
convert strings to numbers. (See a recent F<perlapi> manpage for the
description of those functions.)
Enache Adrian fixed the handling of utf-8 strings in error messages.
Richard Dawe provided a portability patch for perl 5.6.2 on DJGPP. Philip
Newton is testing perl 5.8.1 on DJGPP as well.
Michael G Schwern cleaned up a bit Pod::Html.
=head2 Smoke news
Alain (alian) Barbet was busy on the portability front. He found out that
some pack() tests fails on NetBSD/sparc, compiled with gcc 2.x. This is
apparently due (after investigating with Nicholas Clark) to a bug in
C<**>. (bug #23463).
Alain also released a new version of his Test-Smoke-Database distribution
on CPAN, software that presents and summarizes smoke results via a web
interface.
Abigail started to produce smoke results for perl 5.6.2. One of the tests
for int() fails when perl 5.6.2 is compiled with 64 bit arithmetic, due to
a transitional known bug in his version of glibc. (A workaround for
this bug was put in perl 5.8.0.)
=head2 Module news
Dan Kogai released Encode 1.98. Michael Schwern released
ExtUtils::MakeMaker 6.16. And I see on CPAN that CGI.pm 3.00 is out.
Moreover, Ken Williams proposed to backport Cwd to CPAN, with the recent
tainting fixes in it.
http://www.xray.mpe.mpg.de/mailing-lists/perl5-porters/2003-08/msg01422.html
=head2 About this summary
This week's summary was written by Rafael Garcia-Suarez. Summaries are
published weekly on F<http://use.perl.org/> and on a mailing list, which
subscription address is F<perl5-summary-subscribe@perl.org>. Comments and
corrections are welcome.
| rjbs/perlweb | docs/dev/perl5/list-summaries/2003/p5p-200308-3.pod | Perl | apache-2.0 | 3,623 |
#Time-stamp: "2001-02-23 20:09:47 MST" -*-Text-*-
# This document contains text in Perl "POD" format.
# Use a POD viewer like perldoc or perlman to render it.
=head1 NAME
HTML::Tree::AboutTrees -- article on tree-shaped data structures in Perl
=head1 SYNOPSIS
# This an article, not a module.
=head1 DESCRIPTION
The following article by Sean M. Burke first appeared in I<The Perl
Journal> #18 and is copyright 2000 The Perl Journal. It appears
courtesy of Jon Orwant and The Perl Journal. This document may be
distributed under the same terms as Perl itself.
=head1 Trees
-- Sean M. Burke
=over
"AaaAAAaauugh! Watch out for that tree!"
-- I<George of the Jungle theme>
=back
Perl's facility with references, combined with its automatic management of
memory allocation, makes it straightforward to write programs that store data
in structures of arbitrary form and complexity.
But I've noticed that many programmers, especially those who started out
with more restrictive languages, seem at home with complex but uniform
data structures -- N-dimensional arrays, or more struct-like things like
hashes-of-arrays(-of-hashes(-of-hashes), etc.) -- but they're often uneasy
with buliding more freeform, less tabular structures, like
tree-shaped data structures.
But trees are easy to build and manage in Perl, as I'll demonstrate
by showing off how the HTML::Element class manages elements in an HTML
document tree, and by walking you through a from-scratch implementation
of game trees. But first we need to nail down what we mean by a "tree".
=head2 Socratic Dialogues: "What is a Tree?"
My first brush with tree-shaped structures was in linguistics classes,
where tree diagrams are used to describe the syntax underlying natural
language sentences. After learning my way around I<those> trees, I
started to wonder -- are what I'm used to calling "trees" the same as what
programmers call "trees"? So I asked lots of helpful and patient
programmers how they would define a tree. Many replied with a
answer in jargon that they could not really explain (understandable,
since explaining things, especially defining things, is harder
than people think):
=over
-- So what I<is> a "tree", a tree-shaped data structure?
-- A tree is a special case of an acyclic directed graph!
-- What's a "graph"?
-- Um... lines... and... you draw it... with... arcs! nodes! um...
=back
The most helpful were folks who couldn't explain directly, but with
whom I could get into a rather Socratic dialog (where I<I> asked the
half-dim half-earnest questions), often with much doodling of
illustrations...
Question: so what's a tree?
Answer: A tree is a collection of nodes that are linked together in a,
well, tree-like way! Like this I<[drawing on a napkin]:>
A
/ \
B C
/ | \
D E F
Q: So what do these letters represent?
A: Each is a different node, a bunch of data. Maybe C is a
bunch of data that stores a number, maybe a hash table, maybe nothing
at all besides the fact that it links to D, E, and F (which are other
nodes).
Q: So what're the lines between the nodes?
A: Links. Also called "arcs". They just symbolize the fact that each
node holds a list of nodes it links to.
Q: So what if I draw nodes and links, like this...
B -- E
/ \ / \
A C
\ /
E
Is that still a tree?
A: No, not at all. There's a lot of un-treelike things about that.
First off, E has a link coming off of it going into nowhere. You can't have
a link to nothing -- you can only link to another node. Second off, I
don't know what that sideways link between B and E means...
Q: Okay, let's work our way up from something simpler. Is this a tree...?
A
A: Yes, I suppose. It's a tree of just one node.
Q: And how about...
A
B
A: No, you can't just have nodes floating there, unattached.
Q: Okay, I'll link A and B. How's this?
A
|
B
A: Yup, that's a tree. There's a node A, and a node B, and they're linked.
Q: How is that tree any different from this one...?
B
|
A
A: Well, in both cases A and B are linked. But it's in a different
direction.
Q: Direction? What does the direction mean?
A: Well, it depends what the tree represents. If it represents a
categorization, like this:
citrus
/ | \
orange lemon kumquat ...
then you mean to say that oranges, lemons, kumquats, etc., are a kind of
citrus. But if you drew it upside down, you'd be saying, falsely, that
citrus is a kind of kumquat, a kind of lemon, and a kind of orange.
If the tree represented cause-and-effect (or at least what situations
could follow others), or represented what's a part of what, you
wouldn't want to get those backwards, either. So with the nodes you
draw together on paper, one has to be over the other, so you can tell which
way the relationship in the tree works.
Q: So are these two trees the same?
A A
/ \ / \
B C B \
C
A: Yes, although by convention we often try to line up things in the
same generation, like it is in the diagram on the left.
Q: "generation"? This is a family tree?
A: No, not unless it's a family tree for just yeast cells or something
else that reproduces asexually.
But for sake of having lots of terms to use, we just pretend that links
in the tree represent the "is a child of" relationship, instead of "is a
kind of" or "is a part of", or "could result from", or whatever the real
relationship is. So we get to borrow a lot of kinship words for
describing trees -- B and C are "children" (or "daughters") of A; A is
the "parent" (or "mother") of B and C. Node C is a "sibling" (or
"sister") of node C; and so on, with terms like "descedants" (a node's
children, children's children, etc.), and "generation" (all the
nodes at the same "level" in the tree, i.e., are either all
grandchildren of the top node, or all great-grand-children, etc.), and
"lineage" or "ancestors" (parents, and parent's parents, etc., all the
way to the topmost node).
So then we get to express rules in terms like "B<A node cannot have more
than one parent>", which means that this is not a valid tree:
A
/ \
B C
\ /
E
And: "B<A node can't be its own parent>", which excludes this looped-up
connection:
/\
A |
\/
Or, put more generally: "B<A node can't be its own ancestor>", which
excludes the above loop, as well as the one here:
/\
Z |
/ |
A |
/ \ |
B C |
\/
That tree is excluded because A is a child of Z, and Z is a child of C,
and C is a child of A, which means A is its own great-grandparent. So
this whole network can't be a tree, because it breaks the sort of
meta-rule: B<once any node in the supposed tree breaks the rules for
trees, you don't have a tree anymore.>
Q: Okay, now, are these two trees the same?
A A
/ | \ / | \
B C D D C B
A: It depends whether you're basing your concept of trees on each node
having a set (unordered list) of children, or an (ordered) list of
children. It's a question of whether ordering is important for what
you're doing. With my diagram of citrus types, ordering isn't
important, so these tree diagrams express the same thing:
citrus
/ | \
orange lemon kumquat
citrus
/ | \
kumquat orange lemon
because it doesn't make sense to say that oranges are "before" or
"after" kumquats in the whole botanical scheme of things. (Unless, of
course, you I<are> using ordering to mean something, like a degree of
genetic similarity.)
But consider a tree that's a diagram of what steps are comprised in an
activity, to some degree of specificity:
make tea
/ | \
pour infuse serve
hot water / \
in cup/pot / \
add let
tea sit
leaves
This means that making tea consists of putting hot water in a cup or
pot, infusing it (which itself consists of adding tea leaves and letting
it sit), then serving it -- I<in that order>. If you serve an empty
dry pot (sipping from empty cups, etc.), let it sit, add tea leaves,
and pour in hot water, then what you're doing is performance art, not
tea preparation:
    performance
art
/ | \
serve infuse pour
/ \ hot water
/ \ in cup/pot
let add
sit tea
leaves
Except for my having renamed the root, this tree is the same as
the making-tea tree as far as what's under what, but it differs
in order, and what the tree means makes the order important.
Q: Wait -- "root"? What's a root?
A: Besides kinship terms like "mother" and "daughter", the jargon for
tree parts also has terms from real-life tree parts: the part that
everything else grows from is called the root; and nodes that don't
have nodes attached to them (i.e., childless nodes) are called
"leaves".
Q: But you've been drawing all your trees with the root at the top and
leaves at the bottom.
A: Yes, but for some reason, that's the way everyone seems to think of
trees. They can draw trees as above; or they can draw them sort of
sideways with indenting representing what nodes are children of what:
* make tea
* pour hot water in cup/pot
* infuse
* add tea leaves
* let sit
* serve
...but folks almost never seem to draw trees with the root at the
bottom. So imagine it's based on a spider plant in a hanging pot.
Unfortunately, spider plants I<aren't> botanically trees, they're
plants; but "spider plant diagram" is rather a mouthful, so let's just
call them trees.
=head2 Trees Defined Formally
In time, I digested all these assorted facts about programmers' ideas of
trees (which turned out to be just a more general case of linguistic
ideas of trees) into a single rule:
* A node is an item that contains ("is over", "is parent of", etc.)
zero or more other nodes.
From this you can build up formal definitions for useful terms, like so:
* A node's B<descendants> are defined as all its children, and all
their children, and so on. Or, stated recursively: a node's
descendants are all its children, and all its children's descendants.
(And if it has no children, it has no descendants.)
* A node's B<ancestors> consist of its parent, and its parent's
parent, etc, up to the root. Or, recursively: a node's ancestors
consist of its parent and its parent's ancestors. (If it has no parent,
it has no ancestors.)
* A B<tree> is a root node and all the root's descendants.
And you can add a proviso or two to clarify exactly what I impute to the
word "other" in "other nodes":
* A node cannot contain itself, or contain any node that contains it,
etc. Looking at it the other way: a node cannot be its own parent or
ancestor.
* A node can be root (i.e., no other node contains it) or can be
contained by only one parent; no node can be the child of two or more
parents.
Add to this the idea that children are sometimes ordered, and sometimes
not, and that's about all you need to know about defining what a tree
is. From there it's a matter of using them.
=head2 Markup Language Trees: HTML-Tree
While not I<all> markup languages are inherently tree-like, the
best-known family of markup languages, HTML, SGML, and XML, are about
as tree-like as you can get. In these languages, a document consists
of elements and character data in a tree structure where
there is one root element, and elements can contain either other
elements, or character data.
=over
Footnote:
For sake of simplicity, I'm glossing over
comments (<!-- ... -->), processing instructions (<?xml
version='1.0'>), and declarations (<!ELEMENT ...>, <!DOCTYPE ...>).
And I'm not bothering to distinguish entity references
(&lt;, &#64;) or CDATA sections (<![CDATA[ ...]]>) from normal text.
=back
For example, consider this HTML document:
<html lang="en-US">
<head>
<title>
Blank Document!
</title>
</head>
<body bgcolor="#d010ff">
I've got
<em>
something to saaaaay
</em>
!
</body>
</html>
I've indented this to point out what nodes (elements or text items) are
children of what, with each node on a line of its own.
The HTML::TreeBuilder module (in the CPAN distribution HTML-Tree)
does the work of taking HTML source and
building in memory the tree that the document source represents.
=over
Footnote: it requires the HTML::Parser module, which tokenizes the
source -- i.e., identifies each tag, bit of text, comment, etc.
=back
The trees structures that it builds represent bits of text with
normal Perl scalar string values; but elements are represented with
objects -- that is, chunks of data that belong to a
class (in this case, HTML::Element), a class that provides methods
(routines) for accessing the pieces of data in each element, and
otherwise doing things with elements. (See my article in TPJ#17 for a
quick explanation of objects, the POD document C<perltoot> for a longer
explanation, or Damian Conway's excellent book I<Object-Oriented Perl>
for the full story.)
Each HTML::Element object contains a number of pieces of data:
* its element name ("html", "h1", etc., accessed as $element->tag)
* a list of elements (or text segments) that it contains, if any
(accessed as $element->content_list or $element->content, depending on
whether you want a list, or an arrayref)
* what element, if any, contains it (accessed as $element->parent)
* and any SGML attributes that the element has,
such as C<lang="en-US">, C<align="center">, etc. (accessed as
$element->attr('lang'), $element->attr('center'), etc.)
So, for example, when HTML::TreeBuilder builds the tree for the above
HTML document source, the object for the "body" element has these pieces of
data:
* element name: "body"
* nodes it contains:
the string "I've got "
the object for the "em" element
the string "!"
* its parent:
the object for the "html" element
* bgcolor: "#d010ff"
Now, once you have this tree of objects, almost anything you'd want to
do with it starts with searching the tree for some bit of information
in some element.
Accessing a piece of information in, say, a hash of hashes of hashes,
is straightforward:
$password{'sean'}{'sburke1'}{'hpux'}
because you know that all data points in that structure are accessible
with that syntax, but with just different keys. Now, the "em" element
in the above HTML tree does happen to be accessible
as the root's child #1's child #1:
$root->content->[1]->content->[1]
But with trees, you typically don't know the exact location (via
indexes) of the data you're looking for. Instead, finding what you want
will typically involve searching through the tree, seeing if every node is
the kind you want. Searching the whole tree is simple enough -- look at
a given node, and if it's not what you want, look at its children, and
so on. HTML-Tree provides several methods that do this for you, such as
C<find_by_tag_name>, which returns the elements (or the first element, if
called in scalar context) under a given node (typically the root) whose
tag name is whatever you specify.
For example, that "em" node can be found as:
my $that_em = $root->find_by_tag_name('em');
or as:
@ems = $root->find_by_tag_name('em');
# will only have one element for this particular tree
Now, given an HTML document of whatever structure and complexity, if you
wanted to do something like change every
=over
E<lt>emE<gt>I<stuff>E<lt>/emE<gt>
=back
to
=over
E<lt>em class="funky"E<gt>
B<E<lt>bE<gt>[-E<lt>/bE<gt>>
I<stuff>
B<E<lt>bE<gt>-]E<lt>/bE<gt>>
E<lt>/emE<gt>
=back
the first step is to frame this operation in terms of what you're doing
to the tree. You're changing this:
em
|
...
to this:
em
/ | \
b ... b
| |
"[-" "-]"
In other words, you're finding all elements whose tag name is "em",
setting its class attribute to "funky", and adding one child to the start
of its content list -- a new "b" element
whose content is the text string "[-" -- and one to the end of its
content list -- a new "b" element whose content is the text string "-]".
Once you've got it in these terms, it's just a matter of running to the
HTML::Element documentation, and coding this up with calls to the
appropriate methods, like so:
use HTML::Element 1.53;
use HTML::TreeBuilder 2.96;
# Build the tree by parsing the document
my $root = HTML::TreeBuilder->new;
$root->parse_file('whatever.html'); # source file
# Now make new nodes where needed
foreach my $em ($root->find_by_tag_name('em')) {
$em->attr('class', 'funky'); # Set that attribute
# Make the two new B nodes
my $new1 = HTML::Element->new('b');
my $new2 = HTML::Element->new('b');
# Give them content (they have none at first)
$new1->push_content('[-');
$new2->push_content('-]');
# And put 'em in place!
$em->unshift_content($new1);
$em->push_content($new2);
}
print
"<!-- Looky see what I did! -->\n",
$root->as_HTML(), "\n";
The class HTML::Element provides just about every method I can image you
needing, for manipulating trees made of HTML::Element objects. (And
what it doesn't directly provide, it will give you the components to build
it with.)
=head2 Building Your Own Trees
Theoretically, any tree is pretty much like any other tree, so you could
use HTML::Element for anything you'd ever want to do with tree-arranged
objects. However, as its name implies, HTML::Element is basically
I<for> HTML elements; it has lots of features that make sense only for
HTML elements (like the idea that every element must have a tag-name).
And it lacks some features that might be useful for general applications
-- such as any sort of checking to make sure that you're not trying to
arrange objects in a non-treelike way. For a general-purpose tree class
that does have such features, you can use Tree::DAG_Node, also available
from CPAN.
However, if your task is simple enough, you might find it overkill to
bother using Tree::DAG_Node. And, in any case, I find that the best
way to learn how something works is to implement it (or something like
it, but simpler) yourself. So I'll here discuss how you'd implement a tree
structure, I<without> using any of the existing classes for tree nodes.
=head2 Implementation: Game Trees for Alak
Suppose that the task at hand is to write a program that can play
against a human opponent at a strategic board game (as opposed to a
board game where there's an element of chance). For most such games, a
"game tree" is an essential part of the program (as I will argue,
below), and this will be our test case for implementing a tree
structure from scratch.
For sake of simplicity, our game is not chess or backgammon, but instead
a much simpler game called Alak. Alak was invented by the mathematician
A. K. Dewdney, and described in his 1984 book I<Planiverse>. The rules
of Alak are simple:
=over
Footnote: Actually, I'm describing only my
interpretation of the rules Dewdney describes in I<Planiverse>. Many
other interpretations are possible.
=back
* Alak is a two-player game played on a one-dimensional board with
eleven slots on it. Each slot can hold at most one piece at a time.
There's two kinds of pieces, which I represent here as "x" and "o" --
x's belong to one player (called X), o's to the other (called O).
* The initial configuration of the board is:
xxxx___oooo
For sake of the article, the slots are numbered from 1 (on the left) to
11 (on the right), and X always has the first move.
* The players take turns moving. At each turn, each player can move
only one piece, once. (This is unlike checkers, where you move one piece
per move but get to keep moving it if you jump your opponent's
piece.) A player cannot pass up on his turn. A player can move any one
of his pieces to the next unoccupied slot to its right or left, which
may involve jumping over occupied slots. A player cannot move a piece
off the side of the board.
* If a move creates a pattern where the opponent's pieces are
surrounded, on both sides, by two pieces of the mover's color (with no
intervening unoccupied blank slot), then those surrounded pieces are
removed from the board.
* The goal of the game is to remove all of your opponent's pieces, at
which point the game ends. Removing all-but-one ends the game as
well, since the opponent can't surround you with one piece, and so will
always lose within a few moves anyway.
Consider, then, this rather short game where X starts:
xxxx___oooo
^ Move 1: X moves from 3 (shown with caret) to 5
(Note that any of X's pieces could move, but
that the only place they could move to is 5.)
xx_xx__oooo
^ Move 2: O moves from 9 to 7.
xx_xx_oo_oo
^ Move 3: X moves from 4 to 6.
xx__xxoo_oo
^ Move 4: O (stupidly) moves from 10 to 9.
xx__xxooo_o
^ Move 5: X moves from 5 to 10, making the board
"xx___xoooxo". The three o's that X just
surrounded are removed.
xx___x___xo
O has only one piece, so has lost.
Now, move 4 could have gone quite the other way:
xx__xxoo_oo
Move 4: O moves from 8 to 4, making the board
"xx_oxxo__oo". The surrounded x's are removed.
xx_o__o__oo
^ Move 5: X moves from 1 to 2.
_xxo__o__oo
^ Move 6: O moves from 7 to 6.
_xxo_o___oo
^ Move 7: X moves from 2 to 5, removing the o at 4.
__x_xo___oo
...and so on.
To teach a computer program to play Alak (as player X, say), it needs to
be able to look at the configuration of the board, figure out what moves
it can make, and weigh the benefit or costs, immediate or eventual, of
those moves.
So consider the board from just before move 3, and figure all the possible
moves X could make. X has pieces in slots 1, 2, 4, and 5. The leftmost
two x's (at 1 and 2) are up against the end of the board, so they
can move only right. The other two x's (at 4 and 5) can move either
right or left:
Starting board: xx_xx_oo_oo
moving 1 to 3 gives _xxxx_oo_oo
moving 2 to 3 gives x_xxx_oo_oo
moving 4 to 3 gives xxx_x_oo_oo
moving 5 to 3 gives xxxx__oo_oo
moving 4 to 6 gives xx__xxoo_oo
moving 5 to 6 gives xx_x_xoo_oo
For the computer to decide which of these is the best move to make, it
needs to quantify the benefit of these moves as a number -- call that
the "payoff". The payoff of a move can be figured as just the number
of x pieces removed by the most recent move, minus the number of o
pieces removed by the most recent move. (It so happens that the rules
of the game mean that no move can delete both o's and x's, but the
formula still applies.) Since none of these moves removed any pieces,
all these moves have the same immediate payoff: 0.
Now, we could race ahead and write an Alak-playing program that could
use the immediate payoff to decide which is the best move to make.
And when there's more than one best move (as here, where all the moves
are equally good), it could choose randomly between the good
alternatives. This strategy is simple to implement; but it makes for a
very dumb program. Consider what O's response to each of the potential
moves (above) could be. Nothing immediately suggests itself for the
first four possibilities (X having moved something to position 3), but
either of the last two (illustrated below) are pretty perilous,
because in either case O has the obvious option (which he would be
foolish to pass up) of removing x's from the board:
xx_xx_oo_oo
^ X moves 4 to 6.
xx__xxoo_oo
^ O moves 8 to 4, giving "xx_oxxo__oo". The two
surrounded x's are removed.
xx_o__o__oo
or
xx_xx_oo_oo
^ X moves 5 to 6.
xx_x_xoo_oo
^ O moves 8 to 5, giving "xx_xoxo__oo". The one
surrounded x is removed.
xx_xo_o__oo
Both contingencies are quite bad for X -- but this is not captured
by the fact that they start out with X thinking his move will be
harmless, having a payoff of zero.
So what's needed is for X to think I<more> than one step ahead -- to
consider not merely what it can do in this move, and what the payoff
is, but to consider what O might do in response, and the
payoff of those potential moves, and so on with X's possible responses
to those cases could be. All these possibilities form a game tree -- a
tree where each node is a board, and its children are successors of
that node -- i.e., the boards that could result from every move
possible, given the parent's board.
But how to represent the tree, and how to represent the nodes?
Well, consider that a node holds several pieces of data:
1) the configuration of the board, which, being nice and simple and
one-dimensional, can be stored as just a string, like "xx_xx_oo_oo".
2) whose turn it is, X or O. (Or: who moved last, from which we can
figure whose turn it is).
3) the successors (child nodes).
4) the immediate payoff of having moved to this board position from its
predecessor (parent node).
5) and what move gets us from our predecessor node to here. (Granted,
knowing the board configuration before and after the move, it's easy to
figure out the move; but it's easier still to store it as one is
figuring out a node's successors.)
6) whatever else we might want to add later.
These could be stored equally well in an array or in a hash, but it's my
experience that hashes are best for cases where you have more than just
two or three bits of data, or especially when you might need to add new
bits of data. Moreover, hash key names are mnemonic --
$node->{'last_move_payoff'} is plain as day, whereas it's not so easy having to
remember with an array that $node->[3] is where you decided to keep the
payoff.
=over
Footnote:
Of course, there are ways around that problem: just swear you'll never
use a real numeric index to access data in the array, and instead use
constants with mnemonic names:
use strict;
use constant idx_PAYOFF => 3;
...
$n->[idx_PAYOFF]
Or use a pseudohash. But I prefer to keep it simple, and use a hash.
These are, incidentally, the same arguments that
people weigh when trying to decide whether their object-oriented
modules should be based on blessed hashes, blessed arrays, or what.
Essentially the only difference here is that we're not blessing our
nodes or talking in terms of classes and methods.
[end footnote]
=back
So, we might as well represent nodes like so:
$node = { # hashref
'board' => ...board string, e.g., "xx_x_xoo_oo"
'last_move_payoff' => ...payoff of the move
that got us here.
'last_move_from' => ...the start...
'last_move_to' => ...and end point of the move
that got us here. E.g., 5 and 6,
representing a move from 5 to 6.
'whose_turn' => ...whose move it then becomes.
just an 'x' or 'o'.
'successors' => ...the successors
};
Note that we could have a field called something like 'last_move_who' to
denote who last moved, but since turns in Alak always alternate (and
no-one can pass), storing whose move it is now I<and> who last moved is
redundant -- if X last moved, it's O's turn now, and vice versa.
I chose to have a 'whose_turn' field instead of a 'last_move_who', but
it doesn't really matter. Either way, we'll end up inferring one from
the other at several points in the program.
When we want to store the successors of a node, should we use an array
or a hash? On the one hand, the successors to $node aren't essentially
ordered, so there's no reason to use an array per se; on the other hand,
if we used a hash, with successor nodes as values, we don't have
anything particularly meaningful to use as keys. (And we can't use the
successors themselves as keys, since the nodes are referred to by
hash references, and you can't use a reference as a hash key.) Given no
particularly compelling reason to do otherwise, I choose to just use an
array to store all a node's successors, although the order is never
actually used for anything:
$node = {
...
'successors' => [ ...nodes... ],
...
};
In any case, now that we've settled on what should be in a node,
let's make a little sample tree out of a few nodes and see what we can
do with it:
# Board just before move 3 in above game
my $n0 = {
'board' => 'xx_xx_oo_oo',
'last_move_payoff' => 0,
'last_move_from' => 9,
'last_move_to' => 7,
'whose_turn' => 'x',
'successors' => [],
};
# And, for now, just two of the successors:
# X moves 4 to 6, giving xx__xxoo_oo
my $n1 = {
'board' => 'xx__xxoo_oo',
'last_move_payoff' => 0,
'last_move_from' => 4,
'last_move_to' => 6,
'whose_turn' => 'o',
'successors' => [],
};
# or X moves 5 to 6, giving xx_x_xoo_oo
my $n2 = {
'board' => 'xx_x_xoo_oo',
'last_move_payoff' => 0,
'last_move_from' => 5,
'last_move_to' => 6,
'whose_turn' => 'o',
'successors' => [],
};
# Now connect them...
push @{$n0->{'successors'}}, $n1, $n2;
=head2 Digression: Links to Parents
In comparing what we store in an Alak game tree node to what
HTML::Element stores in HTML element nodes, you'll note one big
difference: every HTML::Element node contains a link to its parent,
whereas we don't have our Alak nodes keeping a link to theirs.
The reason this can be an important difference is because it can affect
how Perl knows when you're not using pieces of memory anymore.
Consider the tree we just built, above:
node 0
/ \
node 1 node 2
There's two ways Perl knows you're using a piece of memory:
1) it's memory that belongs directly to a variable (i.e., is necessary
to hold that variable's value, or valueI<s> in the case of a hash or
array), or 2) it's a piece of memory that something holds a reference
to. In the above code, Perl knows that the hash for node 0 (for board
"xx_xx_oo_oo") is in use because something (namely, the variable
C<$n0>) holds a reference to it. Now, even if you followed the above
code with this:
$n1 = $n2 = 'whatever';
to make your variables C<$n1> and C<$n2> stop holding references to
the hashes for the two successors of node 0, Perl would still know that
those hashes are still in use, because node 0's successors array holds
a reference to those hashes. And Perl knows that node 0 is still in
use because something still holds a reference to it. Now, if you
added:
my $root = $n0;
This would change nothing -- there'd just be I<two> things holding a
reference to the node 0 hash, which in turn holds a reference to the
node 1 and node 2 hashes. And if you then added:
$n0 = 'stuff';
still nothing would change, because something (C<$root>) still holds a
reference to the node 0 hash. But once I<nothing> holds a reference to
the node 0 hash, Perl will know it can destroy that hash (and reclaim
the memory for later use, say), and once it does that, nothing will hold
a reference to the node 1 or the node 2 hashes, and those will be
destroyed too.
But consider if the node 1 and node 2 hashes each had an attribute
"parent" (or "predecessor") that held a reference to node 0. If your
program stopped holding a reference to the node 0 hash, Perl could
I<not> then say that I<nothing> holds a reference to node 0 -- because
node 1 and node 2 still do. So, the memory for nodes 0, 1, and 2 would
never get reclaimed (until your program ended, at which point Perl
destroys I<everything>). If your program grew and discarded lots of
nodes in the game tree, but didn't let Perl know it could reclaim their
memory, your program could grow to use immense amounts of memory --
never a nice thing to have happen. There's three ways around this:
1) When you're finished with a node, delete the reference each of its
children have to it (in this case, deleting $n1->{'parent'}, say).
When you're finished with a whole tree, just go through the whole tree
erasing links that children have to their parents.
2) Reconsider whether you really need to have each node hold a reference
to its parent. Just not having those links will avoid the whole
problem.
3) use the WeakRef module with Perl 5.6 or later. This allows you to
"weaken" some references (like the references that node 1 and 2 could
hold to their parent) so that they don't count when Perl goes asking
whether anything holds a reference to a given piece of memory. This
wonderful new module eliminates the headaches that can often crop up
with either of the two previous methods.
It so happens that our Alak program is simple enough that we don't need
for our nodes to have links to their parents, so the second solution is
fine. But in a more advanced program, the first or third solutions
might be unavoidable.
=head2 Recursively Printing the Tree
I don't like working blind -- if I have any kind of a complex data
structure in memory for a program I'm working on, the first thing I do
is write something that can dump that structure to the screen so I can
make sure that what I I<think> is in memory really I<is> what's in
memory. Now, I could just use the "x" pretty-printer command in Perl's
interactive debugger, or I could have the program use the
C<Data::Dumper> module. But in this case, I think the output from those
is rather too verbose. Once we have trees with dozens of nodes in them,
we'll really want a dump of the tree to be as concise as possible,
hopefully just one line per node. What I'd like is something that can
print C<$n0> and its successors (see above) as something like:
xx_xx_oo_oo (O moved 9 to 7, 0 payoff)
xx__xxoo_oo (X moved 4 to 6, 0 payoff)
xx_x_xoo_oo (X moved 5 to 6, 0 payoff)
A subroutine to print a line for a given node, and then do that again for
each successor, would look something like:
sub dump_tree {
my $n = $_[0]; # "n" is for node
print
...something expressing $n's content...
foreach my $s (@{$n->{'successors'}}) {
# "s for successor
dump_tree($s);
}
}
And we could just start that out with a call to C<dump_tree($n0)>.
Since this routine...
=over
Footnote:
I first wrote this routine starting out with "sub dump {". But when
I tried actually calling C<dump($n0)>, Perl would dump core! Imagine
my shock when I discovered that this is absolutely to be expected --
Perl provides a built-in function called C<dump>, the purpose of which
is to, yes, make Perl dump core. Calling our routine "dump_tree"
instead of "dump" neatly avoids that problem.
=back
...does its work (dumping the subtree at and under the
given node) by calling itself, it's B<recursive>. However, there's a
special term for this kind of recursion across a tree: traversal. To
B<traverse> a tree means to do something to a node, and to traverse its
children. There's two prototypical ways to do this, depending on what
happens when:
traversing X in pre-order:
* do something to X
* then traverse X's children
traversing X in post-order:
* traverse X's children
* then do something to X
Dumping the tree to the screen the way we want it happens to be a matter
of pre-order traversal, since the thing we do (print a description of
the node) happens before we recurse into the successors.
When we try writing the C<print> statement for our above C<dump_tree>,
we can get something like:
sub dump_tree {
my $n = $_[0];
# "xx_xx_oo_oo (O moved 9 to 7, 0 payoff)"
print
$n->{'board'}, " (",
($n->{'whose_turn'} eq 'o' ? 'X' : 'O'),
# Infer who last moved from whose turn it is now.
" moved ", $n->{'last_move_from'},
" to ", $n->{'last_move_to'},
", ", $n->{'last_move_payoff'},
" payoff)\n",
;
foreach my $s (@{$n->{'successors'}}) {
dump_tree($s);
}
}
If we run this on $n0 from above, we get this:
xx_xx_oo_oo (O moved 9 to 7, 0 payoff)
xx__xxoo_oo (X moved 4 to 6, 0 payoff)
xx_x_xoo_oo (X moved 5 to 6, 0 payoff)
Each line on its own is fine, but we forgot to allow for indenting, and
without that we can't tell what's a child of what. (Imagine if the
first successor had successors of its own -- you wouldn't be able to
tell if it were a child, or a sibling.) To get indenting, we'll need
to have the instances of the C<dump_tree> routine know how far down in
the tree they're being called, by passing a depth parameter between
them:
sub dump_tree {
my $n = $_[0];
my $depth = $_[1];
$depth = 0 unless defined $depth;
print
" " x $depth,
...stuff...
foreach my $s (@{$n->{'successors'}}) {
dump_tree($s, $depth + 1);
}
}
When we call C<dump_tree($n0)>, C<$depth> (from C<$_[1]>) is undefined, so
gets set to 0, which translates into an indenting of no spaces. But when
C<dump_tree> invokes itself on C<$n0>'s children, those instances see
C<$depth> + 1 as their C<$_[1]>, giving appropriate indenting.
=over
Footnote:
Passing values around between different invocations of a recursive
routine, as shown, is a decent way to share the data. Another way
to share the data is by keeping it in a global variable, like C<$Depth>,
initially set to 0. Each time C<dump_tree> is about to recurse, it must
C<++$Depth>, and when it's back, it must C<--$Depth>.
Or, if the reader is familiar with closures, consider this approach:
sub dump_tree {
# A wrapper around calls to a recursive closure:
my $start_node = $_[0];
my $depth = 0;
# to be shared across calls to $recursor.
my $recursor;
$recursor = sub {
my $n = $_[0];
print " " x $depth,
...stuff...
++$depth;
foreach my $s (@{$n->{'successors'}}) {
$recursor->($s);
}
--$depth;
}
$recursor->($start_node); # start recursing
undef $recursor;
}
The reader with an advanced understanding of Perl's reference-count-based
garbage collection is invited to consider why it is currently necessary
to undef $recursor (or otherwise change its value) after all recursion
is done.
The reader whose mind is perverse in other ways is invited to consider
how (or when!) passing a depth parameter around is unnecessary because
of information that Perl's C<caller(N)> function reports!
[end footnote]
=back
=head2 Growing the Tree
Our C<dump_tree> routine works fine for the sample tree we've got, so
now we should get the program working on making its own trees, starting
from a given board.
In C<Games::Alak> (the CPAN-released version of Alak that uses
essentially the same code that we're currently discussing the
tree-related parts of), there is a routine called C<figure_successors>
that, given one childless node, will figure out all its possible
successors. That is, it looks at the current board, looks at every piece
belonging to the player whose turn it is, and considers the effect of
moving each piece every possible way -- notably, it figures out the
immediate payoff, and if that move would end the game, it notes that by
setting an "endgame" entry in that node's hash. (That way, we know that
that's a node that I<can't> have successors.)
In the code for C<Games::Alak>, C<figure_successors> does all these things,
in a rather straightforward way. I won't walk you through the details
of the C<figure_successors> code I've written, since the code has
nothing much to do with trees, and is all just implementation of the Alak
rules for what can move where, with what result. Especially interested
readers can puzzle over that part of code in the source listing in the
archive from CPAN, but others can just assume that it works as described
above.
But consider that C<figure_successors>, regardless of its inner
workings, does not grow the I<tree>; it only makes one set of successors
for one node at a time. It has to be up to a different routine to call
C<figure_successors>, and to keep applying it as needed, in order to
make a nice big tree that our game-playing program can base its
decisions on.
Now, we could do this by just starting from one node, applying
C<figure_successors> to it, then applying C<figure_successors> on all
the resulting children, and so on:
sub grow { # Just a first attempt at this!
my $n = $_[0];
figure_successors($n)
unless
@{$n->{'successors'}}
# already has successors.
or $n->{'endgame'}
# can't have successors.
;
foreach my $s (@{$n->{'successors'}}) {
grow($s); # recurse
}
}
If you have a game tree for tic-tac-toe, and you grow it without
limitation (as above), you will soon enough have a fully "solved" tree,
where every node that I<can> have successors I<does>, and all the leaves
of the tree are I<all> the possible endgames (where, in each case, the
board is filled). But a game of Alak is different from tic-tac-toe,
because it can, in theory, go on forever. For example, the following
sequence of moves is quite possible:
xxxx___oooo
xxx_x__oooo
xxx_x_o_ooo
xxxx__o_ooo (x moved back)
xxxx___oooo (o moved back)
...repeat forever...
So if you tried using our above attempt at a C<grow> routine, Perl would
happily start trying to construct an infinitely deep tree, containing
an infinite number of nodes, consuming an infinite amount of memory, and
requiring an infinite amount of time. As the old saying goes: "You
can't have everything -- where would you put it?" So we have to place
limits on how much we'll grow the tree.
There's more than one way to do this:
1. We could grow the tree until we hit some limit on the number of
nodes we'll allow in the tree.
2. We could grow the tree until we hit some limit on the amount of time
we're willing to spend.
3. Or we could grow the tree until it is fully fleshed out to a certain
depth.
Since we already know to track depth (as we did in writing C<dump_tree>),
we'll do it that way, the third way. The implementation for that third
approach is also pretty straightforward:
$Max_depth = 3;
sub grow {
my $n = $_[0];
my $depth = $_[1] || 0;
figure_successors($n)
unless
$depth >= $Max_depth
or @{$n->{'successors'}}
or $n->{'endgame'}
;
foreach my $s (@{$n->{'successors'}}) {
grow($s, $depth + 1);
}
# If we're at $Max_depth, then figure_successors
# didn't get called, so there's no successors
# to recurse under -- that's what stops recursion.
}
If we start from a single node (whether it's a node for the starting board
"xxxx___oooo", or for whatever board the computer is faced with), set
C<$Max_depth> to 4, and apply C<grow> to it, it will grow the tree to
include several hundred nodes.
=over
Footnote:
If at each move there are four pieces that can move, and they can each
move right or left, the "branching factor" of the tree is eight, giving
a tree with 1 (depth 0) + 8 (depth 1) + 8 ** 2 + 8 ** 3 + 8 ** 4 =
4681 nodes in it. But, in practice, not all pieces can move in both
directions (none of the x pieces in "xxxx___oooo" can move left, for
example), and there may be fewer than four pieces, if some were lost.
For example, there are 801 nodes in a tree of depth four starting
from "xxxx___oooo", suggesting an average branching factor of about
five (801 ** (1/4) is about 5.3), not eight.
=back
What we need to derive from that tree is the information about what
are the best moves for X. The simplest way to consider the payoff of
different successors is to just average them -- but what we average
isn't always their immediate payoffs (because that'd leave us using
only one generation of information), but the average payoff of I<their>
successors, if any. We can formalize this as:
To figure a node's average payoff:
If the node has successors:
Figure each successor's average payoff.
My average payoff is the average of theirs.
Otherwise:
My average payoff is my immediate payoff.
Since this involves recursing into the successors I<before> doing
anything with the current node, this will traverse the tree
I<in post-order>.
We could work that up as a routine of its own, and apply that to the
tree after we've applied C<grow> to it. But since we'd never
grow the tree without also figuring the average benefit, we might as well
make that figuring part of the C<grow> routine itself:
$Max_depth = 3;
sub grow {
my $n = $_[0];
my $depth = $_[1] || 0;
figure_successors($n)
unless
$depth >= $Max_depth
or @{$n->{'successors'}}
or $n->{'endgame'}
;
if(@{$n->{'successors'}}) {
my $a_payoff_sum = 0;
foreach my $s (@{$n->{'successors'}}) {
grow($s, $depth + 1); # RECURSE
$a_payoff_sum += $s->{'average_payoff'};
}
$n->{'average_payoff'}
= $a_payoff_sum / @{$n->{'successors'}};
} else {
$n->{'average_payoff'}
= $n->{'last_move_payoff'};
}
}
So, by the time C<grow> has been applied to a node (wherever in the tree it is),
it will have figured successors if possible (which, in turn, sets
C<last_move_payoff> for each node it creates), and will have set
C<average_payoff>.
Beyond this, all that's needed is to start the board out with a root
node of "xxxx___oooo", and have the computer (X) take turns with the
user (O) until someone wins. Whenever it's O's turn, C<Games::Alak>
presents a prompt to the user, letting him know the state of the current
board, and asking what move he selects. When it's X's turn, the
computer grows the game tree as necessary (using just the C<grow>
routine from above), then selects the move with the highest average
payoff (or one of the highest, in case of a tie).
In either case, "selecting" a move means just setting that move's node
as the new root of the program's game tree. Its sibling nodes and their
descendants (the boards that I<didn't> get selected) and its parent node
will be erased from memory, since they will no longer be in use (as Perl
can tell by the fact that nothing holds references to them anymore).
The interface code in C<Games::Alak> (the code that prompts the user for
his move) actually supports quite a few options besides just moving --
including dumping the game tree to a specified depth (using a slightly
fancier version of C<dump_tree>, above), resetting the game, changing
C<$Max_depth> in the middle of the game, and quitting the game. Like
C<figure_successors>, it's a bit too long to print here, but interested
users are welcome to peruse (and freely modify) the code, as well as to
enjoy just playing the game.
Now, in practice, there's more to game trees than this: for games with a
larger branching factor than Alak has (which is most!), game trees of
depth four or larger would contain too many nodes to be manageable, most
of those nodes being strategically quite uninteresting for either
player; dealing with game trees specifically is therefore a matter of
recognizing uninteresting contingencies and not bothering to grow the
tree under them.
=over
Footnote:
For example, to choose a straightforward case: if O has a choice between
moves that put him in immediate danger of X winning and moves that
don't, then O won't ever choose the dangerous moves (and if he does, the
computer will know enough to end the game), so there's no point in
growing the tree any further beneath those nodes.
=back
But this sample implementation should illustrate the basics of
how to build and manipulate a simple tree structure in memory.
And once you've understood the basics of tree storage here, you should
be ready to better understand the complexities and peculiarities of
other systems for creating, accessing, and changing trees, including
Tree::DAG_Node, HTML::Element, XML::DOM, or related formalisms
like XPath and XSL.
B<[end body of article]>
=head2 [Author Credit]
Sean M. Burke (C<sburke@cpan.org>) is a tree-dwelling hominid.
=head2 References
Dewdney, A[lexander] K[eewatin]. 1984. I<Planiverse: Computer Contact
with a Two-Dimensional World.> Poseidon Press, New York.
Knuth, Donald Ervin. 1997. I<Art of Computer Programming, Volume 1,
Third Edition: Fundamental Algorithms>. Addison-Wesley, Reading, MA.
Wirth, Niklaus. 1976. I<Algorithms + Data Structures = Programs>
Prentice-Hall, Englewood Cliffs, NJ.
Worth, Stan and Allman Sheldon. Circa 1967. I<George of the Jungle>
theme. [music by Jay Ward.]
Wirth's classic, currently and lamentably out of print, has a good
section on trees. I find it clearer than Knuth's (if not quite as
encyclopedic), probably because Wirth's example code is in a
block-structured high-level language (basically Pascal), instead
of in assembler (MIX). I believe the book was re-issued in the
1980s under the titles I<Algorithms and Data Structures> and, in a
German edition, I<Algorithmen und Datenstrukturen>. Cheap copies
of these editions should be available through used book services
such as C<abebooks.com>.
Worth's classic, however, is available on the
soundtrack to the 1997 I<George of the Jungle> movie, as
performed by The Presidents of the United States of America.
=head1 BACK
Return to the L<HTML::Tree|HTML::Tree> docs.
=cut
| carlgao/lenga | images/lenny64-peon/usr/share/perl5/HTML/Tree/AboutTrees.pod | Perl | mit | 49,595 |
#!/usr/bin/perl -w
use strict;
# tiling to gap
# Converts a MUMmer show-coords file (<name>.coords) into a report of
# reference gaps ("GAP" lines) and query insertions ("INS" lines).
my $skipWrongSupercontig = 1; # only compare supercontigs with matching numbers
my $exportTelomers = 0;       # also report unaligned sequence ends
my $minLength = 20;           # ignore gaps/inserts not longer than this (bp)
my $file = $ARGV[0];
if ( ! $ARGV[0] ) { die "NO ARGUMENT PASSED. PLEASE SPECIFY TILING FILE" };
if ( ! -f $file ) { die "TILING FILE $file DOESNT EXISTS" };
# BUGFIX: the original test was "! $file =~ /\.coords/"; "!" binds tighter
# than "=~", so it matched the pattern against "!$file" (the empty string)
# and the extension check could never fail. "!~" is the intended negation.
if ( $file !~ /\.coords/ ) { die "$file ISNT A TILING FILE" };
my $filename = $file;
$filename =~ s/\.coords/\.gap/;
#print "$0 $file > $filename\n";
# %hash : refName -> qryName -> list of alignments [refStart, refEnd, qryStart, qryEnd]
my %hash;
# %sizes : sequence name -> total sequence length, as reported in the coords file
my %sizes;
open FILE, "<$file" or die "COULD NOT OPEN FILE $file";
while (my $line = <FILE>)
{
chomp $line;
$line =~ s/^\s+//;
#/home/saulo/Desktop/Genome/progs/MUMmer3.22/seqs/c_neo_r265.fasta /home/saulo/Desktop/Genome/progs/MUMmer3.22/seqs/CBS7750_supercontigs.fasta
#NUCMER
#
# [S1] [E1] | [S2] [E2] | [LEN 1] [LEN 2] | [% IDY] | [LEN R] [LEN Q] | [COV R] [COV Q] | [TAGS]
#===============================================================================================================================
# 1 18865 | 1 18865 | 18865 18865 | 100.00 | 1510064 1447040 | 1.25 1.30 | supercontig_1.01_of_Cryptococcus_neoformans_Serotype_B_R265 supercontig_1.01_cryptococcus_gattii_CBS7750
# Parse one show-coords data row (13 pipe/whitespace separated fields);
# header and comment lines simply fail the match and are ignored below.
if ( $line =~ /^(\d+)\s+(\d+)\s+\|\s+(\d+)\s+(\d+)\s+\|\s+(\d+)\s+(\d+)\s+\|\s+(\S+)\s+\|\s+(\d+)\s+(\d+)\s+\|\s+(\S+)\s+(\S+)\s+\|\s+(\S+)\s+(\S+)/ )
# s1 e1 | s2 e2 | len1 len2 | id% | lenr lenq | covr covq | refname queryname
# 1 2 3 4 5 6 7 8 9 10 11 12 13
{
my $refStart = $1;
my $refEnd = $2;
my $qryStart = $3;
my $qryEnd = $4;
my $refLen = $5;
my $qryLen = $6;
my $ident = $7;
my $refTLen = $8;
my $qryTLen = $9;
my $refCov = $10;
my $qryCov = $11;
my $refName = $12;
my $qryName = $13;
#print $line, "\n", $refStart, "\t", $refEnd, "\t", $qryStart, "\t", $qryEnd, "\n\n";
# Remember total lengths of both sequences for telomere reporting.
$sizes{$refName} = $refTLen;
$sizes{$qryName} = $qryTLen;
# Optionally skip alignments between supercontigs with different numbers
# (e.g. supercontig_1.01 vs supercontig_1.03).
if ( $skipWrongSupercontig )
{
my $refContig;
my $qryContig;
if ($refName =~ /supercontig_1\.(\d+)/) { $refContig = $1; };
if ($qryName =~ /supercontig_1\.(\d+)/) { $qryContig = $1; };
if ((defined $refContig) && (defined $qryContig) && ($refContig != $qryContig))
{
#print "SKIPPING PAIR $refContig $qryContig\n";
next;
}
else
{
#print "$refContig $qryContig ALLOWED\n";
}
}
# Append this alignment to the list kept for the ref/qry pair.
my $id = 0;
if ((exists $hash{$refName}) && (exists ${$hash{$refName}}{$qryName}))
{
$id = @{$hash{$refName}{$qryName}};
}
$hash{$refName}{$qryName}[$id][0] = $refStart;
$hash{$refName}{$qryName}[$id][1] = $refEnd;
$hash{$refName}{$qryName}[$id][2] = $qryStart;
$hash{$refName}{$qryName}[$id][3] = $qryEnd;
#print "REFNAME $refName QUERYNAME $qryName REFSTART $refStart REFEND $refEnd QRYSTART $qryStart QRYEND $qryEnd REFTLEN $refTLen QRYTLEN $qryTLen\n";
} else {
#print "ELSE :: $line\n";
}
}
close FILE;
# Walk each ref/qry alignment list in input order and record:
#   %gaps    - unaligned stretches on the reference between consecutive hits
#   %inserts - unaligned stretches on the query between consecutive hits
# (assumes the coords file lists alignments in ascending coordinate order
#  -- TODO confirm; show-coords output order depends on its options)
my %gaps;
my %inserts;
foreach my $refName (sort keys %hash)
{
my $refSize = $sizes{$refName};
foreach my $qryName (sort keys %{$hash{$refName}})
{
my $qrySize = $sizes{$qryName};
my $positions = $hash{$refName}{$qryName};
my $totalPositions = scalar @$positions;
# Coordinates of the previous alignment; 0 before the first one.
my $refLastStart = 0;
my $refLastEnd = 0;
my $qryLastStart = 0;
my $qryLastEnd = 0;
for (my $p = 0; $p < $totalPositions; $p++)
{
my $id = $positions->[$p];
my $refStart = $id->[0];
my $refEnd = $id->[1];
my $qryStart = $id->[2];
my $qryEnd = $id->[3];
# [S1] [E1] | [S2] [E2] | [LEN 1] [LEN 2] | [% IDY] | [LEN R] [LEN Q] | [COV R] [COV Q] | [TAGS]
#===============================================================================================================================
# 1 18865 | 1 18865 | 18865 18865 | 100.00 | 1447040 1510064 | 1.30 1.25 | supercontig_1.01_cryptococcus_gattii_CBS7750 supercontig_1.01_of_Cryptococcus_neoformans_Serotype_B_R265
# 18866 20898 | 18966 20998 | 2033 2033 | 100.00 | 1447040 1510064 | 0.14 0.13 | supercontig_1.01_cryptococcus_gattii_CBS7750 supercontig_1.01_of_Cryptococcus_neoformans_Serotype_B_R265
# 20899 23891 | 21114 24106 | 2993 2993 | 100.00 | 1447040 1510064 | 0.21 0.20 | supercontig_1.01_cryptococcus_gattii_CBS7750 supercontig_1.01_of_Cryptococcus_neoformans_Serotype_B_R265
#REFERENCE GAPS
#GAP
#print "P #$p REFSTART $refStart REFEND $refEnd QRYSTART $qryStart QRYEND $qryEnd\n";
# A hole on the reference between the previous hit and this one.
if ($refStart > ($refLastEnd+1))
{
my $gapSize = ($refStart-1) - ($refLastEnd+1);
if ((defined $minLength) && ($gapSize > $minLength))
{
push(@{$gaps{$refName}{$qryName}}, [$refLastEnd+1, $refStart-1]);
#printf "REF GAP :: INS :: REFNAME %s QRYNAME %s LASTEND %07d START %07d END %07d GAPSIZE %07d\n", $refName, $qryName, $refLastEnd, $refStart, $refEnd, $gapSize;
}
# NOTE(review): this telomere branch is nested inside the gap test above,
# so it only runs when the last alignment also opened a gap -- confirm
# that is intended rather than an indentation slip.
if (($p == ($totalPositions-1)) && (($refSize-1) > $refEnd) && $exportTelomers)
{
my $gapSize = ($refSize-1) - ($refLastEnd+1);
push(@{$gaps{$refName}{$qryName}}, [$refLastEnd+1, $refSize-1]);
#printf "REF GAP :: INS [LAST] :: REFNAME %s QRYNAME %s END %07d REFSIZE %07d LASTEND %07d GAPSIZE %07d\n", $refName, $qryName, $refEnd, $refSize, $refLastEnd, $gapSize;
}
}
#QUERY GAPS
#INS
# A hole on the query between the previous hit and this one.
if ($qryStart > ($qryLastEnd+1))
{
my $gapSize = ($qryStart-1) - ($qryLastEnd+1);
if ((defined $minLength) && ($gapSize > $minLength))
{
push(@{$inserts{$refName}{$qryName}}, [$qryLastEnd+1, $qryStart-1]);
#printf "QRY GAP :: GAP :: REFNAME %s QRYNAME %s LASTEND %07d START %07d END %07d GAPSIZE %07d\n", $refName, $qryName, $qryLastEnd, $qryStart, $qryEnd, $gapSize;
}
if (($p == ($totalPositions-1)) && (($qrySize-1) > $qryEnd) && $exportTelomers)
{
my $gapSize = ($qrySize-1) - ($qryEnd+1);
# NOTE(review): $refSize is used here although this is the query-side
# telomere; $qrySize-1 looks intended (copy/paste from the ref branch)
# -- confirm before relying on INS end coordinates with $exportTelomers.
push(@{$inserts{$refName}{$qryName}}, [$qryEnd+1, $refSize-1]);
#printf "QRY GAP :: GAP [LAST] :: REFNAME %s QRYNAME %s END %07d QRYSIZE %07d LASTEND %07d GAPSIZE %07d\n", $refName, $qryName, $qryEnd, $qrySize, $qryLastEnd, $gapSize;
}
}
$refLastStart = $refStart;
$refLastEnd = $refEnd;
$qryLastStart = $qryStart;
$qryLastEnd = $qryEnd;
}
}
}
# Emit one "GAP" report line per recorded reference gap, sorted by
# reference name then query name.
# Columns: GAP, ref name, qry name, start, end, length (all zero-padded).
foreach my $ref (sort keys %gaps)
{
foreach my $qry (sort keys %{$gaps{$ref}})
{
foreach my $span (@{$gaps{$ref}{$qry}})
{
my ($from, $to) = @$span;
printf "GAP\t%s\t%s\t%07d\t%07d\t%07d\n", $ref, $qry, $from, $to, ($to - $from);
}
}
}
# Emit one "INS" report line per recorded query insertion, sorted by
# reference name then query name. Note: the query name is printed before
# the reference name in this report.
foreach my $ref (sort keys %inserts)
{
foreach my $qry (sort keys %{$inserts{$ref}})
{
foreach my $span (@{$inserts{$ref}{$qry}})
{
my ($from, $to) = @$span;
printf "INS\t%s\t%s\t%07d\t%07d\t%07d\n", $qry, $ref, $from, $to, ($to - $from);
}
}
}
1;
| sauloal/perlscripts | Bio/progs_parsers/mummer/coords_to_gap.pl | Perl | mit | 7,686 |
%%%%%%%%%%%%%%% Last Week Task %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
/* Yet To-Do:
1-fix the rplaced variables [X] to be X
2-Dealing with morphological differences: "cat" and "cats" should be considered a shared item.
/*
[NOTE]: We no longer turning trees into lists using the
=.. operator because now we now that a tree is a list of a head and a
tail,the head is in the form (Word:Tag). The tail is a list of
daughters which are other sub-trees of the form {Role,List}.
*/
%%%%%%%%%%%%%%% Transformation (1)%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%--------------------------------------------------------------------
%Step 1: Get open-class words for each tree separately.
%--------------------------------------------------------------------
% getOpenClass(+Tree, -OPEN)
% Collect the open-class items (Word:Tag pairs whose Tag is one of
% [noun, verb, name]) appearing anywhere in a parse tree.
getOpenClass(T, OPEN) :-
getOpenClass(T, [], OPEN).
% getOpenClass(+Term, +Acc, -OPEN): accumulator version.
% Variables and atomic terms contribute nothing.
getOpenClass(X, OPEN, OPEN) :-
(var(X); atomic(X)),
!.
% Lists: fold the accumulator over head and tail.
getOpenClass([H | T], OPEN0, OPEN2) :-
!,
getOpenClass(H, OPEN0, OPEN1),
getOpenClass(T, OPEN1, OPEN2).
%% I added the TAG (name) to the list of tags as it represents proper noun
%% The TAG could be a veriable because not all words has a certain proper tag (for now), thus, having a variable as a tag is going to be ignored.
% Leaf Word:Tag: keep it only if the tag is bound, open-class, and the
% pair is not already in the accumulator (set semantics).
getOpenClass((WORD:TAG), OPEN0, OPEN1) :-
((var(TAG); \+ member(TAG, [noun, verb, name])) ->
OPEN1 = OPEN0;
member(WORD:TAG, OPEN0) ->
OPEN1 = OPEN0;
OPEN1 = [(WORD:TAG) | OPEN0]).
% Subtree {Role, Daughters}: ignore the role, recurse into the daughters.
getOpenClass({_ROLE, L}, OPEN0, OPEN1) :-
!,
getOpenClass(L, OPEN0, OPEN1).
%-------------------------------------------------------------------------
%Step 2: For two lists of open-class words, find & return the shared ones
%-------------------------------------------------------------------------
% getSharedOpenClassItems(+TreeX, +TreeY, -SHARED)
% Collect the open-class items of both trees and return those present in
% both, using the sorted-merge implementation (shared2/3).
getSharedOpenClassItems(X, Y, SHARED) :-
getOpenClass(X, OPENX),
getOpenClass(Y, OPENY),
shared2(OPENX, OPENY, SHARED).
% shared0(+L0, +L1, -Shared): naive O(N*M) intersection via member/2.
shared0([], _L, []).
shared0([H | T], Y, [H | SHARED]) :-
member(H, Y),
!,
shared0(T, Y, SHARED).
shared0([_ | T], Y, SHARED) :-
shared0(T, Y, SHARED).
% shared1(+L0, +L1, -Shared): alternative intersection via findall/3.
shared1(L0, L1, SHARED) :-
findall(X, (member(X, L0), member(X, L1)), SHARED).
% shared2(+L0, +L1, -Shared): sort both lists (qsort/2 defined elsewhere
% in this file) and merge-scan them. Each shared item is emitted as
% Item=_ , i.e. paired with a fresh variable -- presumably filled in by
% later pattern-matching code; TODO confirm against the callers.
shared2(L0, L1, SHARED) :-
qsort(L0, LS0),
qsort(L1, LS1),
shared2q(LS0, LS1, SHARED).
% shared2q(+SortedA, +SortedB, -Shared): linear merge of two sorted lists,
% advancing whichever side has the standard-order-smaller head.
shared2q([], _, []).
shared2q(_, [], []).
shared2q([H0 | T0], [H1 | T1], SHARED2) :-
(H0 = H1 ->
(shared2q(T0, T1, SHARED1),
SHARED2 = [H0=_ | SHARED1]);
H0 @< H1 ->
shared2q(T0, [H1 | T1], SHARED2);
shared2q([H0 | T0], T1, SHARED2)).
| AllanRamsay/dgParser | openclasspatterns.pl | Perl | mit | 2,467 |
# CGI page listing the tomato genetic maps and marker resources on SGN.
# Auto-converted from static HTML ('html2pl converter'); the body is one
# literal heredoc, so "\%" escapes keep percent signs out of interpolation.
use strict;
use CXGN::Page;
my $page=CXGN::Page->new('index.html','html2pl converter');
$page->header('Maps');
print<<END_HEREDOC;
<br />
<center>
<table summary="" class="boxbgcolor2" width="100\%">
<tr>
<td width="25\%"> </td>
<td width="50\%" class="left">
<div class="boxcontent">
<div class="subheading">
<u>Tomato maps</u>
</div>
<div class="boxsubcontent">
<p><em>L. esculentum x L. pennellii</em> maps:</p>
<ul style="list-style-type:none">
<li><p><a href="/maps/tomato_arabidopsis/index.pl">Tomato - Arabidopsis synteny map</a></p></li>
<li><p><a href="/maps/pennellii_il/index.pl">Isogenic Line (IL) map</a></p></li>
</ul>
<p><em>L. pimpinellifolium</em> inbred backcross lines map:</p>
<ul style="list-style-type:none">
<li><p><a href="lpimp_ibl/index.pl">IBL map</a></p></li>
</ul>
</div>
<div class="subheading">
<u>Markers</u>
</div>
<div class="boxsubcontent">
<p><a href="/markers/cos_markers.pl">COS-markers</a></p>
<p><a href="/markers/microsats.pl">Microsatellites (SSRs)</a></p>
</div>
</div>
</td>
<td width="25\%"> </td>
</tr>
</table>
</center>
END_HEREDOC
$page->footer(); | solgenomics/sgn | cgi-bin/maps/index.pl | Perl | mit | 1,303 |
#!/usr/bin/env perl
use strict;
use warnings;
use File::Basename;
# Use current directory to find modules
use FindBin;
use lib $FindBin::Bin . "/perl/";
use lib $FindBin::Bin . '/../libs/bioinf-perl/lib';
use McCortexScripts;
use UsefulModule;
# Print each supplied error message, then the usage text, to STDERR and
# exit with a failure status. Never returns.
sub print_usage
{
print STDERR "Error: $_\n" for @_;
print STDERR "Usage: $0 <kmer> <genome_size> <in.ctx>
Graph sequence coverage and read length from cortex header. Gives same result
with raw and cleaned graph.
Example: $0 31 3G sam.clean.ctx > sam.clean.kmercov
\n";
exit(-1);
}
# Expect exactly three arguments: kmer size, genome size, graph file.
if(@ARGV != 3) { print_usage(); }
my $maxk = shift(@ARGV);
my $genome_size = shift(@ARGV);
my $graph_file = shift(@ARGV);
# The kmer size must be a positive odd integer; mccortex_maxk() (from
# McCortexScripts) maps it to the compiled MAXK bucket.
if($maxk !~ /^\d+$/ || !($maxk & 1)) { die("Invalid maxk value: $maxk"); }
$maxk = mccortex_maxk($maxk);
# Genome size may contain commas and a suffix (e.g. "3,000,000" or "3G");
# str2num() (bioinf-perl UsefulModule) converts it to a plain number.
$genome_size =~ s/,//g;
$genome_size = str2num($genome_size);
# Locate the matching mccortex binary relative to this script and build
# the "view" command used to print the graph header.
my $cmd = dirname(__FILE__)."/../bin/mccortex$maxk";
my $cmdline = "$cmd view -q -i $graph_file";
if(!(-e $cmd)) {
die("executable bin/mccortex$maxk doesn't exist -- did you compile for MAXK=$maxk?\n");
}
elsif(!(-x $cmd)) {
die("bin/mccortex$maxk doesn't appear to be executable\n");
}
# Run "mccortex view" on the graph and scrape three header fields from its
# output: the kmer size, the mean read (contig) length, and the total
# sequence loaded. Each may appear at most once.
my ($ksize, $readlen, $total_seq);
# grep these lines:
#
# kmer size: (\d+)
#
# mean input contig length:\s*([0-9,]+)
# total sequence loaded:\s*([0-9,]+)
#
my $in;
open($in, '-|', $cmdline) or die $!;
while(defined(my $line = <$in>)) {
chomp($line);
if($line =~ /kmer size:\s*(\d+)/i) {
if(defined($ksize)) { die("Duplicate kmer size line: $line"); }
$ksize = $1;
}
if($line =~ /mean input contig length:\s*([0-9,]+)/i) {
if(defined($readlen)) { die("Duplicate read length line: $line"); }
$readlen = $1;
# strip thousands separators
$readlen =~ s/,//g;
}
if($line =~ /total sequence loaded:\s*([0-9,]+)/i) {
if(defined($total_seq)) { die("Duplicate total seq. loaded line: $line"); }
$total_seq = $1;
$total_seq =~ s/,//g;
}
}
# Derive the kmer coverage:
#   (number of reads) * (kmers per read) / genome size
# Guard against a malformed or truncated header first: without these checks
# the arithmetic below would warn on uninitialized values or divide by zero.
if(!defined($ksize) || !defined($readlen) || !defined($total_seq)) {
  die("Missing kmer size / read length / total sequence in graph header\n");
}
if($readlen <= 0 || $ksize > $readlen) {
  die("Invalid read length ($readlen) for kmer size ($ksize)\n");
}
# Number of reads * kmers per read
my $nreads = ($total_seq / $readlen);
my $kmers_per_read = ($readlen-$ksize+1);
my $nkmers_read = $nreads * $kmers_per_read;
my $kmercov = sprintf("%.2f", $nkmers_read / $genome_size);
print STDERR "[$0] ksize: $ksize\n";
print STDERR "[$0] total_seq: $total_seq\n";
print STDERR "[$0] readlen: $readlen\n";
print STDERR "[$0] nreads: $nreads\n";
print STDERR "[$0] kmers_per_read: $kmers_per_read\n";
print STDERR "[$0] nkmers_read: $nkmers_read\n";
print STDERR "[$0] kmercov: $kmercov\n";
# Emit the coverage rounded to the nearest integer on stdout.
print int($kmercov+0.5)."\n";
exit(0);
| mcveanlab/mccortex | scripts/mccortex-kcovg.pl | Perl | mit | 2,471 |
#!/usr/bin/perl -w
#
# flamegraph.pl flame stack grapher.
#
# This takes stack samples and renders a call graph, allowing hot functions
# and codepaths to be quickly identified. Stack samples can be generated using
# tools such as DTrace, perf, SystemTap, and Instruments.
#
# USAGE: ./flamegraph.pl [options] input.txt > graph.svg
#
# grep funcA input.txt | ./flamegraph.pl [options] > graph.svg
#
# Options are listed in the usage message (--help).
#
# The input is stack frames and sample counts formatted as single lines. Each
# frame in the stack is semicolon separated, with a space and count at the end
# of the line. These can be generated using DTrace with stackcollapse.pl,
# and other tools using the stackcollapse variants.
#
# The output graph shows relative presence of functions in stack samples. The
# ordering on the x-axis has no meaning; since the data is samples, time order
# of events is not known. The order used sorts function names alphabetically.
#
# While intended to process stack samples, this can also process stack traces.
# For example, tracing stacks for memory allocation, or resource usage. You
# can use --title to set the title to reflect the content, and --countname
# to change "samples" to "bytes" etc.
#
# HISTORY
#
# This was inspired by Neelakanth Nadgir's excellent function_call_graph.rb
# program, which visualized function entry and return trace events. As Neel
# wrote: "The output displayed is inspired by Roch's CallStackAnalyzer which
# was in turn inspired by the work on vftrace by Jan Boerhout". See:
# https://blogs.oracle.com/realneel/entry/visualizing_callstacks_via_dtrace_and
#
# Copyright 2011 Joyent, Inc. All rights reserved.
# Copyright 2011 Brendan Gregg. All rights reserved.
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at docs/cddl1.txt or
# http://opensource.org/licenses/CDDL-1.0.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at docs/cddl1.txt.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
# 15-Aug-2014 terrencehan Support parallel processes
# 17-Mar-2013 Tim Bunce Added options and more tunables.
# 15-Dec-2011 Dave Pacheco Support for frames with whitespace.
# 10-Sep-2011 Brendan Gregg Created this.
use strict;
use POSIX;
use Data::Dumper qw/Dumper/;
use Getopt::Long;
# tunables
my $fonttype = "Menlo";
my $imagewidth = 1360; # max width, pixels
my $frameheight = 16; # max height is dynamic
my $fontsize = 10; # base text size
my $fontwidth = 0.59; # avg width relative to fontsize
my $minwidth = 0.1; # min function width, pixels
my $titletext = "Flame Graph"; # centered heading
my $nametype = "Function:"; # what are the names in the data?
my $countname = "samples"; # what are the counts in the data?
my $colors = "hot"; # color theme
my $bgcolor1 = "#eeeeee"; # background color gradient start
my $bgcolor2 = "#eeeeb0"; # background color gradient stop
my $nameattrfile; # file holding function attributes
my $timemax; # (override the) sum of the counts
my $factor = 1; # factor to scale counts by
my $resolution = 100; # us
my $filter = ''; # regex marking layers as not_blank when matched
# Command-line options override the tunables above; dies with usage on error.
GetOptions(
'fonttype=s' => \$fonttype,
'width=i' => \$imagewidth,
'height=i' => \$frameheight,
'fontsize=f' => \$fontsize,
'fontwidth=f' => \$fontwidth,
'minwidth=f' => \$minwidth,
'title=s' => \$titletext,
'nametype=s' => \$nametype,
'countname=s' => \$countname,
'nameattr=s' => \$nameattrfile,
'total=s' => \$timemax,
'factor=f' => \$factor,
'colors=s' => \$colors,
'filter=s' => \$filter,
'res=i' => \$resolution,
) or die <<USAGE_END;
USAGE: $0 [options] infile > outfile.svg\n
--title # change title text
--width # width of image (default 1200)
--height # height of each frame (default 16)
--minwidth # omit smaller functions (default 0.1 pixels)
--fonttype # font type (default "Verdana")
--fontsize # font size (default 12)
--countname # count type label (default "samples")
--nametype # name type label (default "Function:")
--colors # "hot" or "mem" palette (default "hot")
eg,
$0 --title="Flame Graph: malloc()" trace.txt > graph.svg
USAGE_END
$imagewidth = ceil($imagewidth);
# internals
my $ypad1 = $fontsize * 4; # pad top, include title
my $ypad2 = $fontsize * 2 + 10; # pad bottom, include labels
my $xpad = 10; # pad left and right
my $depthmax = 0; # total stacked depth across all layers (sets image height)
my %Events;
my %nameattr;
if ($nameattrfile) {
# The name-attribute file format is a function name followed by a tab then
# a sequence of tab separated name=value pairs.
open my $attrfh, $nameattrfile or die "Can't read $nameattrfile: $!\n";
while (<$attrfh>) {
chomp;
my ($funcname, $attrstr) = split /\t/, $_, 2;
die "Invalid format in $nameattrfile" unless defined $attrstr;
$nameattr{$funcname} = { map { split /=/, $_, 2 } split /\t/, $attrstr };
}
}
# The "mem" palette pairs with a blue-tinged background gradient.
if ($colors eq "mem") {
$bgcolor1 = "#eeeeee";
$bgcolor2 = "#e0e0ff";
}
# SVG functions
# SVG functions
# Minimal inline SVG builder: each method appends markup to $self->{svg};
# svg() returns the finished document text.
{ package SVG;
# Construct an empty builder.
sub new {
my $class = shift;
my $self = {};
bless ($self, $class);
return $self;
}
# Emit the XML prolog and the opening <svg> element ($w x $h pixels).
sub header {
my ($self, $w, $h) = @_;
$self->{svg} .= <<SVG;
<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" "http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg version="1.1" width="$w" height="$h" onload="init(evt)" viewBox="0 0 $w $h" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink">
SVG
}
# Append raw markup verbatim (used for the <defs>/<style>/<script> block).
sub include {
my ($self, $content) = @_;
$self->{svg} .= $content;
}
# Return an "rgb(r,g,b)" color string (kept for GD-API compatibility).
sub colorAllocate {
my ($self, $r, $g, $b) = @_;
return "rgb($r,$g,$b)";
}
# Open a <g> container, optionally wrapping its content in an <a> link.
# Recognized attrs: class, style, onmouseover, onmouseout, g_extra, title,
# href, target, a_extra.
sub group_start {
my ($self, $attr) = @_;
my @g_attr = map {
exists $attr->{$_} ? sprintf(qq/$_="%s"/, $attr->{$_}) : ()
} qw(class style onmouseover onmouseout);
push @g_attr, $attr->{g_extra} if $attr->{g_extra};
$self->{svg} .= sprintf qq/<g %s>\n/, join(' ', @g_attr);
$self->{svg} .= sprintf qq/<title>%s<\/title>/, $attr->{title}
if $attr->{title}; # should be first element within g container
if ($attr->{href}) {
my @a_attr;
push @a_attr, sprintf qq/xlink:href="%s"/, $attr->{href} if $attr->{href};
# default target=_top else links will open within SVG <object>
push @a_attr, sprintf qq/target="%s"/, $attr->{target} || "_top";
push @a_attr, $attr->{a_extra} if $attr->{a_extra};
$self->{svg} .= sprintf qq/<a %s>/, join(' ', @a_attr);
}
}
# Close a container opened by group_start (and its <a>, if one was opened).
sub group_end {
my ($self, $attr) = @_;
$self->{svg} .= qq/<\/a>\n/ if $attr->{href};
$self->{svg} .= qq/<\/g>\n/;
}
# Draw a filled <rect> spanning ($x1,$y1)-($x2,$y2); $extra is raw attributes.
sub filledRectangle {
my ($self, $x1, $y1, $x2, $y2, $fill, $extra) = @_;
$x1 = sprintf "%0.1f", $x1;
$x2 = sprintf "%0.1f", $x2;
my $w = sprintf "%0.1f", $x2 - $x1;
my $h = sprintf "%0.1f", $y2 - $y1;
$extra = defined $extra ? $extra : "";
$self->{svg} .= qq/<rect x="$x1" y="$y1" width="$w" height="$h" fill="$fill" $extra \/>\n/;
}
# Draw a <text> element; $loc is the text-anchor (default "left"),
# $extra is raw attributes. ($angle is accepted but unused here.)
sub stringTTF {
my ($self, $color, $font, $size, $angle, $x, $y, $str, $loc, $extra) = @_;
$loc = defined $loc ? $loc : "left";
$extra = defined $extra ? $extra : "";
$self->{svg} .= qq/<text text-anchor="$loc" x="$x" y="$y" font-size="$size" font-family="$font" fill="$color" $extra >$str<\/text>\n/;
}
# Close the <svg> element and return the complete document.
sub svg {
my $self = shift;
return "$self->{svg}</svg>\n";
}
1;
}
# color($scheme) -> "rgb(r,g,b)"
# Pick a fill color for a frame. "hot" and "mem" return a random color from
# their palette; "blocked" and "blank" are fixed; anything else (including
# an undefined scheme) falls back to black.
sub color {
	my $scheme = shift;
	return "rgb(0,0,0)" unless defined $scheme;
	if ($scheme eq "hot") {
		# warm reds/oranges
		my $red   = 205 + int(rand(50));
		my $green = 0 + int(rand(230));
		my $blue  = 0 + int(rand(55));
		return "rgb($red,$green,$blue)";
	}
	elsif ($scheme eq "mem") {
		# greens (rand(0) behaves like rand(1), so red is always 0;
		# the call is kept to preserve the random-number stream)
		my $red   = 0 + int(rand(0));
		my $green = 190 + int(rand(50));
		my $blue  = 0 + int(rand(230));
		return "rgb($red,$green,$blue)";
	}
	elsif ($scheme eq "blocked") {
		# fixed steel blue
		return "rgb(88,155,211)";
	}
	elsif ($scheme eq "blank") {
		# white (invisible against most backgrounds)
		return "rgb(255,255,255)";
	}
	return "rgb(0,0,0)";
}
# flow($layer, $last, $this, $v, $timestamp) -> $this
# Merge one stack sample into a layer's in-progress frame records.
# $last and $this are arrayrefs of function names (previous and current
# stacks); $v is the running sample count used as the x-axis position;
# $timestamp is the sample's absolute time.
# Frames present in $last but not $this have ended: they move from
# $layer->{tmp} into $layer->{node}, keyed "func;depth;end-x". Frames new
# in $this are opened in $layer->{tmp} with their start-x and timestamp.
# Returns $this so the caller can thread it into the next sample.
sub flow {
my ($layer, $last, $this, $v, $timestamp) = @_;
my $len_a = @$last - 1;
my $len_b = @$this - 1;
my $i = 0;
my $len_same;
# Find the length of the common prefix of the two stacks.
for (; $i <= $len_a; $i++) {
last if $i > $len_b;
last if $last->[$i] ne $this->[$i];
}
$len_same = $i;
# Close frames that are no longer on the stack, deepest first.
for ($i = $len_a; $i >= $len_same; $i--) {
my $k = "$last->[$i];$i";
# a unique ID is constructed from "func;depth;etime";
# func-depth isn't unique, it may be repeated later.
$layer->{node}->{"$k;$v"}->{stime} = delete $layer->{tmp}->{$k}->{stime};
$layer->{node}->{"$k;$v"}->{abtt} = delete $layer->{tmp}->{$k}->{abtt};
delete $layer->{tmp}->{$k};
}
# Open frames that just appeared on the stack.
for ($i = $len_same; $i <= $len_b; $i++) {
my $k = "$this->[$i];$i";
$layer->{tmp}->{$k}->{stime} = $v;
$layer->{tmp}->{$k}->{abtt} = $timestamp;
}
return $this;
}
# Parse input
# Each line is "stack count timestamp" (count and timestamp optional);
# "resolution=N" lines update the sample resolution. Samples are grouped
# into layers keyed by the first stack frame (e.g. the process/pid).
my $time = 0;
my %layers = ();
my $stimestamp = 0; # start timestamp
my $etimestamp = 0; # end timestamp
#group by pid
while (<>) {
chomp;
if(/^resolution=(\d+)\s+.*$/) {
$resolution=$1;
next;
}
my ($stack, $samples, $timestamp) = (/^([^\s]+)\s*(\d+(?:\.\d*)?)?\s*(\d+)?$/);
$samples = 1 unless (defined $samples);
my @stack_items = split ";", $stack;
my $layer_key = $stack_items[0];
$layers{$layer_key}{stack} //= [];
$layers{$layer_key}{time} //= 0;
$layers{$layer_key}{stimestamp} //= $timestamp;
$layers{$layer_key}{not_blank} ||= $filter ne '' && $stack =~ /$filter/ ? 1 : 0;
# BUGFIX: "push $layers{$layer_key}{stack}, $_" relied on the experimental
# push-on-reference syntax, which is a fatal error on Perl 5.24+;
# dereference the arrayref explicitly instead.
push @{ $layers{$layer_key}{stack} }, $_;
$layers{$layer_key}{time} += $samples;
# Track the overall absolute time span and each layer's earliest sample.
my $timestamp_res = $timestamp + $resolution * $samples;
$stimestamp = $stimestamp == 0 ? $timestamp : $stimestamp < $timestamp ? $stimestamp : $timestamp;
$etimestamp = $etimestamp > $timestamp_res ? $etimestamp : $timestamp_res;
$layers{$layer_key}->{stimestamp}= $layers{$layer_key}->{stimestamp} < $timestamp ? $layers{$layer_key}->{stimestamp}: $timestamp;
}
# Find the largest per-layer sample total; it sets the shared x-axis scale.
my $max_group_time = 0;
for my $layer_key (keys %layers) {
	my $layer_time = $layers{$layer_key}{time};
	$max_group_time = $layer_time if $layer_time > $max_group_time;
}
# Replay each layer's collected samples through flow() to build its frame
# nodes; the sample count restarts at 0 per layer so every layer begins at
# the left edge (layers are shifted onto the absolute axis later).
for my $key (keys %layers) {
$time = 0;
my $last = [];
my $layer = $layers{$key};
for (@{$layers{$key}->{stack}}) {
chomp;
my ($stack, $samples, $timestamp) = /^([^\s]+)\s*(\d+(?:\.\d*)?)?\s*(\d+)?$/;
$samples = 1 unless (defined $samples);
# angle brackets would break the SVG/XML output
$stack =~ tr/<>/()/;
# leading '' adds the synthetic root frame at depth 0
$last = flow($layer, $last, [ '', split ";", $stack ], $time, $timestamp);
$time += $samples;
}
# Final call with an empty stack closes all still-open frames.
flow($layer, $last, [], $time, 1);
}
die "ERROR: No stack counts found\n" unless $time;
# Ignore a --total override that is smaller than the largest observed layer.
if ($timemax and $timemax < $max_group_time) {
warn "Specified --total $timemax is less than actual total $time, so ignored\n"
if $timemax/$max_group_time > 0.02; # only warn is significant (e.g., not rounding etc)
undef $timemax;
}
$timemax ||= $max_group_time;
# Horizontal scale: pixels per sample, and the sample width below which a
# frame is too narrow to draw (used for pruning).
my $widthpertime = ($imagewidth - 2 * $xpad) / $timemax;
my $minwidth_time = $minwidth / $widthpertime;
my %Node; # retained: may be referenced by code later in this file
# For each layer: drop frames too narrow to draw, find the layer's maximum
# stack depth, and accumulate the total image depth (+1 per layer for the
# synthetic root row).
for my $key (keys %layers) {
my $layer = $layers{$key};
$layer->{depthmax} = 0;
# prune blocks that are too narrow and determine max depth
while (my ($id, $node) = each %{$layer->{node}}) {
my ($func, $depth, $etime) = split ";", $id;
my $stime = $node->{stime};
die "missing start for $id" if not defined $stime;
if (($etime-$stime) < $minwidth_time) {
# BUGFIX: the narrow frame lives in this layer's node hash; deleting
# from the (empty) global %Node was a no-op, so narrow frames were
# never actually pruned. Deleting the key most recently returned by
# each() is explicitly safe in Perl.
delete $layer->{node}{$id};
next;
}
$layer->{depthmax} = $depth if $depth > $layer->{depthmax};
}
#$depthmax += ($depthmax == 0 ? 1 : 0) + $layer->{depthmax};
$depthmax += 1 + $layer->{depthmax};
}
my $time_segment = $etimestamp - $stimestamp; # overall absolute time span
# Draw canvas
my $imageheight = ($depthmax * $frameheight) + $ypad1 + $ypad2;
my $im = SVG->new();
$im->header($imagewidth + 300, $imageheight);
# Embedded CSS/ECMAScript: hover highlighting plus the s()/c() handlers
# that update the "details" status line at the bottom of the image.
my $inc = <<INC;
<defs >
	<linearGradient id="background" y1="0" y2="1" x1="0" x2="0" >
		<stop stop-color="$bgcolor1" offset="5%" />
		<stop stop-color="$bgcolor2" offset="95%" />
	</linearGradient>
</defs>
<style type="text/css">
	.func_g:hover { stroke:#333; stroke-width:0.3; }
</style>
<script type="text/ecmascript">
<![CDATA[
	var details;
	function init(evt) { details = document.getElementById("details").firstChild; }
	function s(info) { details.nodeValue = "$nametype " + info; }
	function c() { details.nodeValue = ' '; }
]]>
</script>
INC
$im->include($inc);
$im->filledRectangle(0, 0, $imagewidth, $imageheight, 'url(#background)');
my ($white, $black, $vvdgrey, $vdgrey) = (
$im->colorAllocate(255, 255, 255),
$im->colorAllocate(0, 0, 0),
$im->colorAllocate(40, 40, 40),
$im->colorAllocate(160, 160, 160),
);
# Title (with the absolute time span appended) and the mouseover detail line.
$im->stringTTF($black, $fonttype, $fontsize + 5, 0.0, int($imagewidth / 2), $fontsize * 2, "$titletext($time_segment)", "middle");
$im->stringTTF($black, $fonttype, $fontsize, 0.0, $xpad, $imageheight - ($ypad2 / 2), " ", "", 'id="details"');
for my $key (keys %layers) {
my $layer = $layers{$key};
my $offset_time = int(($layer->{stimestamp} - $stimestamp) / $time_segment * $timemax);
while (my ($id, $node) = each %{$layer->{node}}) {
my ($func, $depth, $etime) = split ";", $id;
my $stime = $node->{stime};
my $abtt = $node->{abtt};
my $new_etime = $etime + $offset_time;
my $new_stime = $stime + $offset_time;
$layer->{new_node}->{"$func;$depth;$new_etime"}->{stime} = $new_stime;
$layer->{new_node}->{"$func;$depth;$new_etime"}->{abtt} = $abtt;
}
delete $layer->{node};
$layer->{node} = $layer->{new_node};
}
# Draw frames
my $acc = 0;
for my $key (sort { $layers{$b}->{time} <=> $layers{$a}->{time} } keys %layers) {
my $layer = $layers{$key};
my $group_base_heigth = $imageheight - $acc;
$acc += $layer->{depthmax} * $frameheight + $frameheight + 1;
while (my ($id, $node) = each %{$layer->{node}}) {
my ($func, $depth, $etime) = split ";", $id;
my $stime = $node->{stime};
my $abtt = $node->{abtt};
next if($func eq "");
$etime = $timemax if $func eq "" and $depth == 0;
my $x1 = $xpad + $stime * $widthpertime;
my $x2 = $xpad + $etime * $widthpertime;
my $y1 = $group_base_heigth - $ypad2 - ($depth + 1) * $frameheight + 1;
my $y2 = $group_base_heigth - $ypad2 - $depth * $frameheight;
my $samples = sprintf "%.0f", ($etime - $stime) * $factor;
(my $samples_txt = $samples) # add commas per perlfaq5
=~ s/(^[-+]?\d+?(?=(?>(?:\d{3})+)(?!\d))|\G\d{3}(?=\d))/$1,/g;
my $info;
if ($func eq "" and $depth == 0) {
$info = "all ($samples_txt $countname, 100%)";
} else {
my $pct = sprintf "%.2f", ((100 * $samples) / ($timemax * $factor));
my $escaped_func = $func;
$escaped_func =~ s/&/&/g;
$escaped_func =~ s/</</g;
$escaped_func =~ s/>/>/g;
$info = "$escaped_func ($samples_txt $countname, $pct%)";
}
my $nameattr = { %{ $nameattr{$func}||{} } }; # shallow clone
$nameattr->{class} ||= "func_g";
$nameattr->{onmouseover} ||= "s('".$info.$abtt."')";
$nameattr->{onmouseout} ||= "c()";
$nameattr->{title} ||= $info.$abtt;
$im->group_start($nameattr);
my $color;
if($filter ne '') {
if($layer->{not_blank}) {
$color = color($func eq "sleep" ? "blocked" : $colors),
}
else {
$color = color("blank"),
}
}
else {
$color = color($func eq "sleep" ? "blocked" : $colors),
}
$im->filledRectangle($x1, $y1, $x2, $y2, $color, 'rx="2" ry="2"');
my $chars = int( ($x2 - $x1) / ($fontsize * $fontwidth));
if ($chars >= 3) { # room for one char plus two dots
my $text = substr $func, 0, $chars;
substr($text, -2, 2) = ".." if $chars < length $func;
$text =~ s/&/&/g;
$text =~ s/</</g;
$text =~ s/>/>/g;
$im->stringTTF($black, $fonttype, $fontsize, 0.0, $x1 + 3, 3 + ($y1 + $y2) / 2, $text, "");
}
$im->group_end($nameattr);
}
}
#print Dumper \%layers;
print $im->svg;
| terrencehan/eprobe | script/flamegraph.pl | Perl | mit | 17,600 |
#!/usr/bin/perl
use strict;
use warnings;
use feature 'say';

# Advent of Code 2017, day 6, part 1: count redistribution cycles until a
# memory-bank configuration repeats. Reads one tab-separated line of bank
# sizes from the file named on the command line and prints the cycle count.
#
# (Rewritten to use only core Perl; the previous version pulled in
# List::MoreUtils::firstidx and List::Util::max just to find the first
# maximum, which a single pass does directly.)

# Given the initial bank contents, return the number of redistribution
# cycles performed before a previously-seen configuration recurs.
sub redistribution_cycles {
    my @banks = @_;
    # Configurations are keyed by their space-joined string form.
    my %seen = ("@banks" => 1);
    my $cycles = 0;
    while (1) {
        # Find the first bank holding the maximum number of blocks
        # (strict ">" keeps the earliest index on ties).
        my $idx = 0;
        for my $i (1 .. $#banks) {
            $idx = $i if $banks[$i] > $banks[$idx];
        }
        # Empty it and deal its blocks out round-robin, starting with
        # the following bank.
        my $blocks = $banks[$idx];
        $banks[$idx] = 0;
        my $cur = ($idx + 1) % @banks;
        while ($blocks--) {
            $banks[$cur]++;
            $cur = ($cur + 1) % @banks;
        }
        $cycles++;
        last if $seen{"@banks"};
        $seen{"@banks"} = 1;
    }
    return $cycles;
}

# Script entry point; guarded so the sub above can also be exercised
# without a file argument.
if (defined(my $fname = shift @ARGV)) {
    open my $fh, "<", $fname
        or die "Can't open $fname: $!";
    my $line = <$fh>;
    close $fh;
    chomp $line;
    say redistribution_cycles(split /\t/, $line);
}
| bewuethr/advent_of_code | 2017/day06/day06a.pl | Perl | mit | 645 |
package CXGN::Trial::TrialDesign::Plugin::Lattice;
use Moose::Role;
use List::Util qw| max |;
sub create_design {
    # Generate a SIMPLE (2-rep) or TRIPLE (3-rep) lattice design by calling
    # R's agricolae::design.lattice() through R::YapRI, and return a hashref
    # keyed by plot number. Each value holds stock/seedlot, block, rep,
    # control flag and (optionally) row/col field-map coordinates.
    #
    # Requires: a stock list whose size is a perfect square, and 2 or 3 reps.
    my $self = shift;
    my %lattice_design;
    my $rbase = R::YapRI::Base->new();
    my @stock_list;
    my $number_of_blocks;
    my $number_of_reps;
    my $stock_data_matrix;
    my $r_block;
    my $result_matrix;
    my @plot_numbers;
    my @stock_names;
    my @block_numbers;
    my @rep_numbers;
    my @converted_plot_numbers;
    my @control_list_crbd;
    my %control_names_lookup;
    my $fieldmap_row_number;
    my @fieldmap_row_numbers;
    my $fieldmap_col_number;
    # Default to '' (falsy) so the string comparisons below never emit
    # "uninitialized value" warnings when no layout format was supplied.
    my $plot_layout_format = '';
    my @col_number_fieldmaps;

    if ($self->has_stock_list()) {
        @stock_list = @{$self->get_stock_list()};
    } else {
        die "No stock list specified\n";
    }

    # A lattice needs a perfect-square entry count; its square root is the
    # number of blocks per rep.
    my $block_number_calculated = sqrt(scalar(@stock_list));
    if ($block_number_calculated =~ /^\d+$/ ){
        $number_of_blocks = $block_number_calculated;
    } else {
        die "Square root of Number of stocks (".scalar(@stock_list).") for lattice design should give a whole number.\n";
    }

    if ($self->has_control_list_crbd()) {
        @control_list_crbd = @{$self->get_control_list_crbd()};
        %control_names_lookup = map { $_ => 1 } @control_list_crbd;
        $self->_check_controls_and_accessions_lists;
    }

    if ($self->has_number_of_reps()) {
        $number_of_reps = $self->get_number_of_reps();
        if ($number_of_reps == 2 || $number_of_reps == 3){
        } else {
            die "Number of reps should be 2 for SIMPLE and 3 for TRIPLE lattice design.\n";
        }
    } else {
        die "Number of reps not specified\n";
    }

    if ($self->has_fieldmap_col_number()) {
        $fieldmap_col_number = $self->get_fieldmap_col_number();
    }
    if ($self->has_fieldmap_row_number()) {
        $fieldmap_row_number = $self->get_fieldmap_row_number();
        # Derive (and validate) the column count from total plots / rows.
        my $colNumber = ((scalar(@stock_list) * $number_of_reps)/$fieldmap_row_number);
        $fieldmap_col_number = CXGN::Trial::TrialDesign::validate_field_colNumber($colNumber);
    }
    if ($self->has_plot_layout_format()) {
        $plot_layout_format = $self->get_plot_layout_format();
    }

    # Ship the stock names to R and run design.lattice() there.
    $stock_data_matrix = R::YapRI::Data::Matrix->new(
        {
            name => 'stock_data_matrix',
            rown => 1,
            coln => scalar(@stock_list),
            data => \@stock_list,
        }
    );
    $r_block = $rbase->create_block('r_block');
    $stock_data_matrix->send_rbase($rbase, 'r_block');
    $r_block->add_command('library(agricolae)');
    $r_block->add_command('trt <- stock_data_matrix[1,]');
    #$r_block->add_command('block_size <- '.$block_size);
    $r_block->add_command('number_of_reps <- '.$number_of_reps);
    #$r_block->add_command('randomization_method <- "'.$self->get_randomization_method().'"');
    if ($self->has_randomization_seed()){
        $r_block->add_command('randomization_seed <- '.$self->get_randomization_seed());
        $r_block->add_command('lattice<-design.lattice(trt,r=number_of_reps,serie=3,kinds="Super-Duper", seed=randomization_seed)');
    }
    else {
        $r_block->add_command('lattice<-design.lattice(trt,r=number_of_reps,serie=3,kinds="Super-Duper")');
    }
    $r_block->add_command('lattice_book<-lattice$book');
    $r_block->add_command('lattice_book<-as.matrix(lattice_book)');
    my @commands = $r_block->read_commands();
    print STDERR join "\n", @commands;
    print STDERR "\n";
    $r_block->run_block();

    # Pull the generated field book back out of R, column by column.
    $result_matrix = R::YapRI::Data::Matrix->read_rbase( $rbase,'r_block','lattice_book');
    @plot_numbers = $result_matrix->get_column("plots");
    #print STDERR Dumper(@plot_numbers);
    @block_numbers = $result_matrix->get_column("block");
    @rep_numbers = $result_matrix->get_column("r");
    @stock_names = $result_matrix->get_column("trt");
    @converted_plot_numbers=@{$self->_convert_plot_numbers(\@plot_numbers, \@rep_numbers, $number_of_reps)};

    # Build the column sequence for the requested physical layout.
    if ($plot_layout_format eq "zigzag") {
        if (!$fieldmap_col_number){
            @col_number_fieldmaps = ((1..$number_of_blocks) x ($number_of_blocks * $number_of_reps));
            #print STDERR Dumper(\@col_number_fieldmaps);
        } else {
            @col_number_fieldmaps = ((1..$fieldmap_col_number) x $fieldmap_row_number);
        }
    }
    elsif ($plot_layout_format eq "serpentine") {
        # Alternate direction on every other row.
        if (!$fieldmap_row_number) {
            for my $rep (1 .. ($number_of_blocks * $number_of_reps)){
                if ($rep % 2){
                    push @col_number_fieldmaps, (1..$number_of_blocks);
                } else {
                    push @col_number_fieldmaps, (reverse 1..$number_of_blocks);
                }
            }
        } else {
            for my $rep (1 .. $fieldmap_row_number){
                if ($rep % 2){
                    push @col_number_fieldmaps, (1..$fieldmap_col_number);
                } else {
                    push @col_number_fieldmaps, (reverse 1..$fieldmap_col_number);
                }
            }
        }
    }

    if ($plot_layout_format && !$fieldmap_col_number && !$fieldmap_row_number){
        @fieldmap_row_numbers = (@block_numbers);
    }
    elsif ($plot_layout_format && $fieldmap_row_number){
        @fieldmap_row_numbers = ((1..$fieldmap_row_number) x $fieldmap_col_number);
        @fieldmap_row_numbers = sort {$a <=> $b} @fieldmap_row_numbers;
    }

    my %seedlot_hash;
    if($self->get_seedlot_hash){
        %seedlot_hash = %{$self->get_seedlot_hash};
    }

    # Assemble the per-plot records, keyed by plot number.
    for (my $i = 0; $i < scalar(@converted_plot_numbers); $i++) {
        my %plot_info;
        $plot_info{'stock_name'} = $stock_names[$i];
        $plot_info{'seedlot_name'} = $seedlot_hash{$stock_names[$i]}->[0];
        if ($plot_info{'seedlot_name'}){
            $plot_info{'num_seed_per_plot'} = $self->get_num_seed_per_plot;
        }
        $plot_info{'block_number'} = $block_numbers[$i];
        $plot_info{'plot_name'} = $converted_plot_numbers[$i];
        $plot_info{'rep_number'} = $rep_numbers[$i];
        $plot_info{'is_a_control'} = exists($control_names_lookup{$stock_names[$i]});
        $plot_info{'plot_number'} = $converted_plot_numbers[$i];
        $plot_info{'plot_num_per_block'} = $converted_plot_numbers[$i];
        if ($fieldmap_row_numbers[$i]){
            $plot_info{'row_number'} = $fieldmap_row_numbers[$i];
            $plot_info{'col_number'} = $col_number_fieldmaps[$i];
        }
        $lattice_design{$converted_plot_numbers[$i]} = \%plot_info;
    }
    %lattice_design = %{$self->_build_plot_names(\%lattice_design)};
    return \%lattice_design;
}
1;
| solgenomics/sgn | lib/CXGN/Trial/TrialDesign/Plugin/Lattice.pm | Perl | mit | 6,169 |
#!/usr/bin/perl
# Demo driver: heap-sort a fixed list of integers in place, then print the
# sorted result as a space-separated line.
my @array = (4,3,5,22,25,5,-48,0,4,-3);
heap_sort(\@array);
print "@array\n";
exit;
sub heap_sort
{
    # In-place heapsort: build a max-heap over the whole array, then
    # repeatedly swap the root (current maximum) to the shrinking tail
    # and restore the heap property on the remaining prefix.
    my ($list) = @_;
    my $size = scalar @$list;
    heap($size, $list);
    for (my $end = $size - 1; $end > 0; $end--)
    {
        @$list[0, $end] = @$list[$end, 0];
        sift(0, $end - 1, $list);
    }
}
sub heap
{
    # Build a max-heap in place by sifting down every internal node,
    # starting from the parent of the last element and working backwards.
    #
    # The parent of the last index ($count - 1) is int(($count - 2) / 2).
    # The original code omitted int() and relied on Perl truncating a
    # fractional value at array-index time; making the integer division
    # explicit gives the same node sequence without that implicit cast.
    my ($count, $list) = @_;
    my $node = int(($count - 2) / 2);
    while ($node >= 0)
    {
        sift($node, $count - 1, $list);
        $node--;
    }
}
sub sift
{
    # Restore the max-heap property for the subtree rooted at $root,
    # confined to the window [$root, $last] of the array in $list.
    my ($root, $last, $list) = @_;
    while (1)
    {
        my $kid = 2 * $root + 1;    # left child
        last if $kid > $last;       # leaf: nothing below to compare
        # Prefer the larger of the two children when both exist.
        $kid++ if $kid < $last && $list->[$kid] < $list->[$kid + 1];
        # Heap property already holds here: done.
        last if $list->[$root] >= $list->[$kid];
        @$list[$root, $kid] = @$list[$kid, $root];
        $root = $kid;
    }
}
| dimir2/hse12pi2-scripts | MartirosyanA/pyromidal.pl | Perl | mit | 1,088 |
use strict;
use Data::Dumper;
use Carp;
use gjoseqlib;
#
# This is a SAS Component
#
use SeedEnv;
# Shared Sapling server handle, used by the main code and tuples() below.
my $sapO = SAPserver->new();
=head1 svr_blast
Run blast locally
------
Example: svr_blast -p pegs 83333.1 [ blast PEGs identified in file against genome 83333.1 ]
svr_blast -d pegs 83333.1 [ use blastn, not blastp ]
svr_blast -s pegs 83333.1 [ blast PEGs in fasta file against genome 83333.1 ]
svr_blast -p pegs [ blast PEGs identified in file against themselves ]
svr_blast 83333.1 [ sequences of PEGs from the last column of STDIN input against genome]
svr_blast [ sequences of PEGs from the last column of STDIN input against themselves ]
svr_blast -c 1 [ sequences of PEGs from the first column of STDIN input against themselves ]
svr_blast -c 1 -parms='-m8' [ sequences of PEGs from the first column of STDIN input against themselves - -m8 format ]
The output is exactly the unfiltered blast output
------
This svr command may be thought of as implementing two types of requests:
1. "Blast a set of PEGs against the genes (or protein products) in a set of genomes"
2. "Blast a set of PEGs against itself".
When we say "set of pegs" or "pegs in genome" we mean either the DNA or the protein sequences
corresponding to the pegs. Which is determined by the -d flag or its absence (think of
protein by default, -d for DNA is that is what you want).
A set of PEGs can be read from a file. If the file contains just IDs, use "-p IDfile".
If the file contains actual sequence in FASTA format use "-s fasta.file".
If you are blasting PEGs against genomes, the genomes are given as one or more
arguments of the form xxx.yyy (where xxx.yyy is the genome ID; for example, E.coli is 83333.1).
You can read the PEG ids from standard input, much like most of the
other SVR scripts (this is done only if -s File and -p File were
omitted). IDs are from the last column in the STDIN file, or from
another column specified using the -c argument. The standard input
should be a tab-separated table (i.e., each line is a tab-separated
set of fields). Normally, the last field in each line would contain
the PEG for which aliases are being requested. If some other column
contains the PEGs, use
-c N
where N is the column (from 1) that contains the PEG in each case.
NOTE: the PEG sequences are formed as the union of the sequences derived
from
1. the IDs from STDIN (only if -p and -s are omitted)
2. the ids from the -p file
3. the sequences from the -s file
This is a pipe command. The input is taken from the standard input, and the
output is to the standard output.
The parameters of the BLAST run are the defaults, unless you use
-parms='parameters passed to blast'
=head2 Command-Line Options
=over 4
=item -c Column
This is used only if the column containing PEGs is not the last.
=back
=head2 Output Format
The output is just the BLAST output.
=cut
my $usage = "usage: svr_blast [-c column] [-s fasta.file] [-p IDfile] [-d] [G1 G2...]";

my $column;
my $pFile;
my $sFile;
my $d = 0;
my $parms = "";
# Consume leading -flag arguments; whatever remains is a list of genome ids.
# Flag values may be attached ("-c=2") or given as the next argument.
while ($ARGV[0] && ($ARGV[0] =~ /^-/))
{
    $_ = shift @ARGV;
    if ($_ =~ s/^-c=*//) { $column = ($_ || shift @ARGV) }
    elsif ($_ =~ s/^-d//) { $d = 1 }
    elsif ($_ =~ s/^-s=*//) { $sFile = ($_ || shift @ARGV) }
    elsif ($_ =~ s/^-parms=*//) { $parms = ($_ || shift @ARGV) }
    elsif ($_ =~ s/^-p=*//) { $pFile = ($_ || shift @ARGV) }
    else { die "Bad Flag: $_" }
}
my @genomes = @ARGV;

# PEG ids come from STDIN only when neither -s nor -p was given.
my @fid_ids = ();
if ((! $sFile) && (! $pFile))
{
    ScriptThing::AdjustStdin();
    my @lines = map { chomp; [split(/\t/,$_)] } <STDIN>;
    # Default to the last column of the tab-separated input.
    if (! $column) { $column = @{$lines[0]} }
    @fid_ids = map { $_->[$column-1] } @lines;
}

my @fids_seq = ();
# NOTE(review): @fids_id is declared but never used below.
my @fids_id = ();
if ($sFile)
{
    @fids_seq = &gjoseqlib::read_fasta($sFile);
}
my %seen = map { ($_->[0] => 1 ) } @fids_seq; ### [ID,Comment,Sequence]

# Ids from the -p file are added only when their sequence wasn't already
# supplied via -s.
if ($pFile)
{
    open(PF,"<",$pFile) || die "could not open $pFile";
    while (defined($_ = <PF>))
    {
        if ($_ =~ /^(fig\|\d+\.\d+\.peg\.\d+)/)
        {
            push(@fid_ids,$1);
        }
    }
    close(PF);
}
my %ids_to_get = map { $_ => 1 } grep { ! $seen{$_} } @fid_ids;
my @extra_ids = keys(%ids_to_get);
# Fetch the missing sequences from the Sapling server (DNA when -d).
push(@fids_seq,&tuples(\@extra_ids,$d));

# When genomes are given, the database is the union of their pegs;
# otherwise the query set is blasted against itself.
my @genome_pegs = ();
if (@genomes > 0)
{
    my $genomeH = $sapO->all_features( -ids => \@genomes, -type => ['peg'] );
    foreach my $genome (keys(%$genomeH))
    {
        my $pegs = $genomeH->{$genome};
        push(@genome_pegs,&tuples($pegs,$d));
    }
}

# Write query and database FASTA files ($$ keeps them run-unique),
# format the database and stream the raw blastall output through.
my $qF = "query.$$.fasta";
my $dbF = "db.$$.fasta";
&gjoseqlib::print_alignment_as_fasta($qF,\@fids_seq);
if (@genomes > 0)
{
    &gjoseqlib::print_alignment_as_fasta($dbF,\@genome_pegs);
}
else
{
    &gjoseqlib::print_alignment_as_fasta($dbF,\@fids_seq);
}
# formatdb -p T for protein, F for nucleotide.
my $pflag = $d ? 'F' : 'T';
system "formatdb -i $dbF -p $pflag";
my $cmd = $d ? 'blastn' : 'blastp';
open(BLAST,"blastall $parms -p $cmd -i $qF -d $dbF |")
    || die "could not make blast run, sorry";
while (defined($_ = <BLAST>))
{
    print $_;
}
close(BLAST);
unlink($dbF,$qF);
sub tuples {
    # Fetch sequences for a list of feature ids from the Sapling server
    # and return them as [id, comment, sequence] triples with an empty
    # comment field. DNA is returned when $dna is true, protein otherwise.
    my ($ids, $dna) = @_;
    my $seqH = $sapO->ids_to_sequences( -ids     => $ids,
                                        -protein => ($dna ? 0 : 1),
                                        -fasta   => 0 );
    return map { [ $_, '', $seqH->{$_} ] } keys %$seqH;
}
| kbase/kb_seed | service-scripts/svr_blast.pl | Perl | mit | 5,471 |
% ZenoTravel planning problem 19: load the initial state, then declare the
% goal as a temporal-logic formula.
:- consult(initial1).
% Goal, as encoded by the or/not nesting: (1) eventually every person is at
% city3, and (2) for all p and y, person(p) and city(y) and at(p, y) imply
% eventually(always(at(p, y))) -- i.e. arrival eventually becomes permanent.
goal(and(eventually(all(p, or(not(person(p)), at(p, city3)))), all(p, all(y, or(not(person(p)), or(not(city(y)), or(not(at(p, y)), eventually(always(at(p, y)))))))))).
| TeamSPoon/logicmoo_workspace | packs_sys/logicmoo_ec/test/pddl_tests/ZenoTravel/p19.pl | Perl | mit | 191 |
% P18 (99 Prolog problems): slice(Xs, I, K, L) holds when L is the list of
% elements of Xs from position I to position K inclusive (1-based).
slice(X, I, K, L) :- slice(X, I, 1, K, L).
% Helper slice(Xs, I, J, K, L): J is the 1-based index of the head of Xs.
% Before the range starts (J < I): skip the head.
slice([_|T], I, J, K, L) :- J < I, JJ is (J + 1), slice(T, I, JJ, K, L).
% Past the end of the range (K < J): take nothing more.
slice(_, _, J, K, []) :- K < J.
% Inside the range: keep the head and recurse on the tail.
slice([H|T], I, J, K, L) :- JJ is (J + 1), slice(T, I, JJ, K, LL), append([H], LL, L).
| dvberkel/99-prolog-problems | Prolog-Lists/18.pl | Perl | mit | 236 |
#!/usr/bin/perl
use strict;
use warnings;

# Project Euler, problem 2: sum of the even-valued Fibonacci terms
# (sequence 1, 2, 3, 5, 8, ...) that do not exceed four million.
#
# The previous loop tested the limit only *after* a freshly computed term
# had already been considered for the sum, so the first term past the
# limit would have been counted had it been even. The limit is now
# checked before a term is added.

# Return the sum of all even Fibonacci terms <= $limit.
sub even_fib_sum {
    my ($limit) = @_;
    my ($prev, $cur) = (1, 2);
    my $total = 0;
    while ($cur <= $limit) {
        $total += $cur if $cur % 2 == 0;
        ($prev, $cur) = ($cur, $prev + $cur);
    }
    return $total;
}

print even_fib_sum(4_000_000), "\n";
| erinspice/euler | perl/002.pl | Perl | mit | 575 |
package Model::R::ToPay::SettingGroup;
# Rose::DB::Object model for the "setting_group" table: a named group of
# settings with an optional description, linked one-to-many to
# Model::R::ToPay::Setting rows via setting.group_id.
use strict;
use base qw(Model::R::ToPay);

__PACKAGE__->meta->setup(
    table => 'setting_group',

    columns => [
        # Primary key, populated from the shared "gate_sequence" sequence.
        id          => { type => 'integer', not_null => 1, sequence => 'gate_sequence' },
        name        => { type => 'varchar', length => 128, not_null => 1, remarks => 'A group name' },
        description => { type => 'varchar', length => 512 },
    ],

    primary_key_columns => [ 'id' ],

    relationships => [
        setting => {
            class      => 'Model::R::ToPay::Setting',
            column_map => { id => 'group_id' },
            type       => 'one to many',
        },
    ],
);

# Generate the query-manager class for this model.
__PACKAGE__->meta->make_manager_class('setting_group');
1;
| ant-i/db-crud | dbs/Model/R/ToPay/SettingGroup.pm | Perl | apache-2.0 | 728 |
package Paws::ELB::Policies;
# Thin Moose data class mirroring the ELB "Policies" API shape; the POD in
# this file documents each attribute.
  use Moose;
  has AppCookieStickinessPolicies => (is => 'ro', isa => 'ArrayRef[Paws::ELB::AppCookieStickinessPolicy]');
  has LBCookieStickinessPolicies => (is => 'ro', isa => 'ArrayRef[Paws::ELB::LBCookieStickinessPolicy]');
  has OtherPolicies => (is => 'ro', isa => 'ArrayRef[Str|Undef]');
1;
### main pod documentation begin ###
=head1 NAME
Paws::ELB::Policies
=head1 USAGE
This class represents one of two things:
=head3 Arguments in a call to a service
Use the attributes of this class as arguments to methods. You shouldn't make instances of this class.
Each attribute should be used as a named argument in the calls that expect this type of object.
As an example, if Att1 is expected to be a Paws::ELB::Policies object:
$service_obj->Method(Att1 => { AppCookieStickinessPolicies => $value, ..., OtherPolicies => $value });
=head3 Results returned from an API call
Use accessors for each attribute. If Att1 is expected to be an Paws::ELB::Policies object:
$result = $service_obj->Method(...);
$result->Att1->AppCookieStickinessPolicies
=head1 DESCRIPTION
The policies for a load balancer.
=head1 ATTRIBUTES
=head2 AppCookieStickinessPolicies => ArrayRef[L<Paws::ELB::AppCookieStickinessPolicy>]
The stickiness policies created using CreateAppCookieStickinessPolicy.
=head2 LBCookieStickinessPolicies => ArrayRef[L<Paws::ELB::LBCookieStickinessPolicy>]
The stickiness policies created using CreateLBCookieStickinessPolicy.
=head2 OtherPolicies => ArrayRef[Str|Undef]
The policies other than the stickiness policies.
=head1 SEE ALSO
This class forms part of L<Paws>, describing an object used in L<Paws::ELB>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/ELB/Policies.pm | Perl | apache-2.0 | 1,863 |
package API::Region;
#
# Copyright 2015 Comcast Cable Communications Management, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
use UI::Utils;
use Mojo::Base 'Mojolicious::Controller';
use Data::Dumper;
use JSON;
use MojoPlugins::Response;
my $finfo = __FILE__ . ":";
sub index {
    # GET handler: list every region as { id, name } pairs, ordered by the
    # "orderby" query parameter (default: "name").
    my $self = shift;

    my $orderby = $self->param('orderby') || "name";
    my $rs = $self->db->resultset("Region")
        ->search( undef, { prefetch => ['division'], order_by => 'me.' . $orderby } );

    my @data;
    while ( my $region = $rs->next ) {
        push @data,
            {
                "id"   => $region->id,
                "name" => $region->name,
            };
    }
    $self->success( \@data );
}
sub create {
    # POST handler: create a region inside the division named in the route.
    # Expects a JSON body with a "name" key; requires OPER/ADMIN privileges.
    my $self = shift;
    my $division_name = $self->param('division_name');

    my $params = $self->req->json;
    if (!defined($params)) {
        return $self->alert("parameters must be in JSON format, please check!");
    }
    if ( !&is_oper($self) ) {
        return $self->alert("You must be an ADMIN or OPER to perform this operation!");
    }
    # Reject duplicate region names.
    my $existing_region = $self->db->resultset('Region')->search( { name => $params->{name} } )->get_column('name')->single();
    if (defined($existing_region)) {
        return $self->alert("region[". $params->{name} . "] already exists.");
    }
    # The parent division must already exist.
    my $divsion_id = $self->db->resultset('Division')->search( { name => $division_name } )->get_column('id')->single();
    if (!defined($divsion_id)) {
        return $self->alert("division[". $division_name . "] does not exist.");
    }
    my $insert = $self->db->resultset('Region')->create(
        {
            name => $params->{name},
            division => $divsion_id
        } );
    # NOTE(review): DBIx::Class resultset create() already inserts the row,
    # so this extra insert() looks redundant -- confirm before removing.
    $insert->insert();

    # Re-read the row and echo it back to the client.
    my $response;
    my $rs = $self->db->resultset('Region')->find( { id => $insert->id } );
    if (defined($rs)) {
        $response->{id} = $rs->id;
        $response->{name} = $rs->name;
        $response->{division_name} = $division_name;
        # NOTE(review): "divsion_id" is misspelled, but clients may already
        # rely on this response key -- renaming it would be an API change.
        $response->{divsion_id} = $rs->division->id;
        return $self->success($response);
    }
    return $self->alert("create region ". $params->{name}." failed.");
}
1;
| PSUdaemon/traffic_control | traffic_ops/app/lib/API/Region.pm | Perl | apache-2.0 | 2,625 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V9::Resources::DomainCategory;
# Plain data object for the Google Ads API "DomainCategory" resource;
# field names mirror the API's JSON keys.
use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);

use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;

sub new {
  # Copy the recognised fields out of %$args, drop the keys the caller
  # left unset (keeps the serialised JSON payload small), and bless.
  my ($class, $args) = @_;
  my $self = {
    campaign                => $args->{campaign},
    category                => $args->{category},
    categoryRank            => $args->{categoryRank},
    coverageFraction        => $args->{coverageFraction},
    domain                  => $args->{domain},
    hasChildren             => $args->{hasChildren},
    languageCode            => $args->{languageCode},
    recommendedCpcBidMicros => $args->{recommendedCpcBidMicros},
    resourceName            => $args->{resourceName}};

  # Delete the unassigned fields in this object for a more concise JSON payload
  remove_unassigned_fields($self, $args);

  bless $self, $class;
  return $self;
}

1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V9/Resources/DomainCategory.pm | Perl | apache-2.0 | 1,477 |
#!/usr/bin/env perl
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveCleanFastaHeaders;
use strict;
use warnings;
use feature 'say';
use Bio::Seq;
use Bio::SeqIO;
use Bio::EnsEMBL::KillList::HiveKillList;
use parent ('Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveBaseRunnableDB');
sub fetch_input {
  # Nothing to fetch for this runnable; all the work happens in run().
  my ($self) = @_;
  return 1;
}
sub run {
  # Delegate to clean_headers(), which performs all the filtering work.
  my ($self) = @_;
  $self->clean_headers;
  return 1;
}
sub write_output {
  # Flow the cleaned-file path (and, when present, the chunk directory
  # name) to the next analysis on branch 1.
  my ($self) = @_;

  my %output = ( 'iid' => $self->output_file_path );

  # chunk_dir_name is forwarded so that a downstream chunking analysis can
  # pick a per-file chunk directory when several input ids reach it.
  if ($self->param('chunk_dir_name')) {
    $output{'chunk_dir_name'} = $self->param('chunk_dir_name');
  }

  $self->dataflow_output_id(\%output, 1);
  return 1;
}
sub clean_headers {
  # Read a FASTA file, rewrite each header to a bare versioned accession,
  # drop sequences that are too short / unparseable / duplicated /
  # kill-listed, and write the survivors (uppercased) to the output file.
  # Prints summary counts at the end.
  my ($self) = @_;

  my $input_file;
  my $output_file;
  my $min_seq_length = 10;
  my $skip_broken_headers = 0;
  # NOTE(review): $header_source is declared but never assigned; the code
  # below reads $self->param('header_source') directly.
  my $header_source;
  my $use_killlist = 0;
  my $kill_list_object;
  my %kill_list;

  # Either take the input file path from the parameters hash if it exists,
  # otherwise parse it from the input id. Throw if neither file exists.
  if($self->param_is_defined('input_file_path')) {
    $input_file = $self->param('input_file_path');
    unless(-e $input_file) {
      $self->throw("You have specified a path to the protein file in the pipeline config, but this file does not exist. Note that ".
                   "specifying a path overwrites any input id that might be present, so remove the path from the config/analysis_base ".
                   "table if you want to use the input id as the path");
    }
  } else {
    $input_file = $self->input_id;
    unless(-e $input_file) {
      $self->throw("You have not specified an input_file_path variable in your hive config, so the input id was used instead. When the ".
                   "input id was parsed the resulting file path does not exist.\nInput id (unparsed):\n".$self->input_id."\nParsed input id:\n".
                   $input_file);
    }
  }

  # Open output file, either from parameters hash first or failing that by
  # putting _clean on the input file path.
  if($self->param_is_defined('output_file_path')) {
    say "You have defined an output file path to write to";
    $output_file = $self->param('output_file_path');
  } else {
    $output_file = $input_file."_clean";
  }

  # Set this for write_output
  $self->output_file_path($output_file);

  # Set the min seq length if defined, otherwise use the default (10)
  if($self->param_is_defined('min_seq_length')) {
    $min_seq_length = $self->param('min_seq_length');
  }

  # Unless a source is defined throw
  unless($self->param_is_defined('header_source')) {
    $self->throw("You have not defined the header_source in your config file (e.g. header_source => 'uniprot')");
  }

  # Set the flag, unless the string is 'no'. Note that if it's 0 it will get
  # set through this but will be considered false later and therefore not used.
  if($self->param_is_defined('skip_broken_headers') && ($self->param('skip_broken_headers') eq "1" ||
     $self->param('skip_broken_headers') eq "yes" || $self->param('skip_broken_headers') eq "YES")) {
    $skip_broken_headers = 1;
  }

  # Use the killlist or not. When enabled, the kill-list db connection
  # details and molecule type must also be present in the config.
  if($self->param_is_defined('use_killlist') && ($self->param('use_killlist') eq "1" ||
     $self->param('use_killlist') eq "yes" || $self->param('use_killlist') eq "YES")) {
    say "Using the killlist";
    unless($self->param_is_defined('killlist_type')) {
      $self->throw("You have selected to use the killlist but haven't defined a killlist_type in your pipeline config, e.g ".
                   " 'killlist_type' => 'protein'");
    }
    unless($self->param_is_defined('killlist_db')) {
      $self->throw("You have selected to use the killlist but haven't defined a killlist_db in your pipeline config, e.g ".
                   "'killlist_db' => $self->o('killlist_db')");
    }
    unless($self->param_is_defined('KILL_LIST_FILTER')) {
      say "You have selected to use the killlist but haven't defined a KILL_LIST_FILTER hash in your pipeline config, ".
          "the HiveKillList module will look for a default hash for your molecule type";
    }
    say "Killlist molecule type set to:\n".$self->param('killlist_type');
    $use_killlist = 1;
    $kill_list_object = Bio::EnsEMBL::KillList::HiveKillList->new(-TYPE => $self->param('killlist_type'),
                                                                  -KILL_LIST_DB => $self->param('killlist_db'),
                                                                  -FILTER_PARAMS => $self->param('KILL_LIST_FILTER'),
                                                                 );
    %kill_list = %{ $kill_list_object->get_kill_list() };
  } else {
    say "Not using the killlist";
  }

  say "Reading from input file:\n".$input_file;
  say "Will write cleaned seqs to:\n".$output_file;
  say "Min seq length set to:\n".$min_seq_length;
  say "Skip broken headers set to:\n".$skip_broken_headers;
  say "Source set to:\n".$self->param('header_source');

  my $seqin = new Bio::SeqIO( -file => "<$input_file",
                              -format => "Fasta",
                            );
  my $seqout = new Bio::SeqIO( -file => ">$output_file",
                               -format => "Fasta"
                             );

  # Some counts for the stats at the end
  my $input_seq_count = 0;
  my $output_seq_count = 0;
  my $short_count = 0;
  my $skip_count = 0;
  my $duplicate_count = 0;
  my $killed_count = 0;

  # This hash is just used to track duplicate accessions
  my %uniprot_accession_hash = ();

  # Loop through the set of sequences
  while(my $prot = $seqin->next_seq) {
    $input_seq_count++;

    # If it's below the min seq length then skip it
    if($prot->length <= $min_seq_length) {
      say STDERR "Length < ".$min_seq_length.": rejecting ".$prot->display_id." with length ".$prot->length;
      $short_count++;
      next;
    }

    # Try and parse the accession out of the raw display id.
    my $display_id = $prot->display_id;
    my $uniprot_accession;
    $uniprot_accession = $self->match_against_source($display_id);

    # If nothing was parsed then check if the skip headers flag is in use.
    # If it is then skip, otherwise throw.
    unless($uniprot_accession) {
      if($skip_broken_headers) {
        say STDERR "Skipping the following header/sequence as accession can't be parsed and skip_broken_headers is in use:\n".
                   $display_id;
        $skip_count++;
        next;
      } else {
        $self->throw("Could not match a uniprot accession in the header. Header:\n".$display_id);
      }
    }

    # Check if the accession is already in the hash, if it is then skip
    if($uniprot_accession_hash{$uniprot_accession}) {
      say STDERR "Skipping accession as it has already been seen. Accession:\n".$uniprot_accession;
      $duplicate_count++;
      next;
    }

    # If it's in the killlist then remove
    if ($use_killlist) {
      if (exists( $kill_list{$uniprot_accession})) {
        say STDERR "$uniprot_accession is present in kill list DB, discarded.\n";
        $killed_count++;
        next;
      }
    }

    # At this point this is the first time the accession was seen so set it
    $uniprot_accession_hash{$uniprot_accession} = 1;

    # Write out the cleaned sequence: accession-only header, no
    # description, uppercased residues.
    $prot->display_id($uniprot_accession);
    $prot->desc("");
    $prot->seq(uc($prot->seq));
    $seqout->write_seq($prot);
    $output_seq_count++;
  }

  say "After cleaning:";
  say "Input sequence count: ".$input_seq_count;
  say "Output sequence count: ".$output_seq_count;
  say "Skipped sequence count: ".$skip_count;
  say "Short sequence count: ".$short_count;
  say "Duplicate sequence count: ".$duplicate_count;
  say "Killed sequence count: ".$killed_count;
}
sub match_against_source {
  # Extract a versioned accession ("ACCESSION.SV") from a FASTA display
  # id according to the configured header_source. Returns undef when no
  # accession can be parsed out of the header.
  my ($self, $display_id) = @_;

  my $header_source = $self->param('header_source');
  unless($header_source eq 'uniprot') {
    $self->throw("You have entered a source that is not supported. The code must be updated to ".
                 "deal with it. Source:\n".$header_source);
  }

  my $uniprot_accession;
  if($self->param('header_pre_cleaned')) {
    # Headers were already rewritten to "ACC.VERSION" by an earlier
    # processing step, so just take the leading accession token.
    ($uniprot_accession) = $display_id =~ /^(.+\.\d+)/;
  } elsif($display_id =~ /^(sp|tr)\|([^\|]+)\|.+ SV\=(\d+)/) {
    # Raw UniProt header: the accession is the second pipe-separated
    # field and the sequence version comes from the SV= tag.
    $uniprot_accession = $2.'.'.$3;
  }

  return($uniprot_accession);
}
sub output_file_path {
  # Combined getter/setter for the cleaned-output file path, backed by
  # the '_output_file_path' pipeline parameter.
  my ($self, $path) = @_;
  $self->param('_output_file_path', $path) if defined($path);
  return($self->param('_output_file_path'));
}
1;
| mn1/ensembl-analysis | modules/Bio/EnsEMBL/Analysis/Hive/RunnableDB/HiveCleanFastaHeaders.pm | Perl | apache-2.0 | 9,634 |
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Centreon plugin mode: lists the departments configured in a Kayako
# helpdesk by querying its SQL database directly.
package apps::kayako::sql::mode::listdepartment;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
# Constructor: delegates to centreon::plugins::mode; no extra CLI options.
sub new {
my ($class, %options) = @_;
my $self = $class->SUPER::new(package => __PACKAGE__, %options);
bless $self, $class;
return $self;
}
# Standard option validation hook; nothing mode-specific to check.
sub check_options {
my ($self, %options) = @_;
$self->SUPER::init(%options);
}
# Queries the swdepartments table and emits one long_msg line per
# department, then exits OK (listing modes never alarm).
sub run {
my ($self, %options) = @_;
$self->{sql} = $options{sql};
$self->{sql}->connect();
$self->{sql}->query(query => "SELECT departmentid, title FROM swdepartments");
while ((my $row = $self->{sql}->fetchrow_hashref())) {
$self->{output}->output_add(long_msg => "'" . $row->{title} . "' [id = " . $row->{departmentid} . "]");
}
$self->{output}->output_add(severity => 'OK',
short_msg => 'List departments:');
# force_long_output so the per-department lines are always shown
$self->{output}->display(nolabel => 1, force_ignore_perfdata => 1, force_long_output => 1);
$self->{output}->exit();
}
1;
__END__
=head1 MODE

List departments of Kayako.

=cut
| Sims24/centreon-plugins | apps/kayako/sql/mode/listdepartment.pm | Perl | apache-2.0 | 1,804 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
package AI::MXNet::Random;
use strict;
use warnings;
use AI::MXNet::Base;
use AI::MXNet::NDArray::Base;
use AI::MXNet::Function::Parameters;
=head1 NAME
AI::MXNet::Random - Handling of randomization in MXNet.
=cut
=head1 DESCRIPTION
Handling of randomization in MXNet.
=cut
=head2 seed
Seed the random number generators in mxnet.
This seed will affect behavior of functions in this module,
as well as results from executors that contains Random number
such as Dropout operators.
Parameters
----------
seed_state : int
The random number seed to set to all devices.
Notes
-----
The random number generator of mxnet is by default device specific.
This means if you set the same seed, the random number sequence
generated from GPU0 can be different from CPU.
=cut
# Thin wrapper over the C API: forwards the seed to MXRandomSeed via
# check_call, which dies on a non-zero return code.
method seed(Int $seed_state)
{
check_call(AI::MXNetCAPI::RandomSeed($seed_state));
}
# Dispatch any unknown method (e.g. AI::MXNet::Random->uniform(...)) to
# the matching NDArray operator with a "_random_" prefix
# (AI::MXNet::NDArray->_random_uniform(...)), dropping the invocant.
sub AUTOLOAD {
my $sub = $AI::MXNet::Random::AUTOLOAD;
# strip the package qualifier, keep only the method name
$sub =~ s/.*:://;
$sub = "_random_$sub";
shift;
return AI::MXNet::NDArray->$sub(@_);
}
1;
| sergeykolychev/mxnet | perl-package/AI-MXNet/lib/AI/MXNet/Random.pm | Perl | apache-2.0 | 1,876 |
#!/usr/bin/perl -w
use strict;
use FindBin;
BEGIN {
push( @INC, "$FindBin::RealBin" ); ## Path to local modules
}
use Text::Template;
use Data::Dumper;
# Example:
# clear; sudo ./importiso.pl --distro ubuntu --release 1404 /vagrant/isos/ubuntu-14.04-server-amd64.iso
# clear; sudo ./importiso.pl --distro fedora --release 19 /vagrant/isos/Fedora-19-x86_64-DVD.iso
# clear; ./importiso.pl --distro fedora --release 20 my.iso
#TODO C add a relative path to the script dir and use it as an include path form modules.
use BootServerConfigFile;
use CliOptionHandling;
use CommandOptionHandling;
use IsoInfoFile;
use KickstartConfig;
use YamlDistroConfigFile;
# EXAMPLE: ./importiso.rb --combo DIPM /vagrant/isos/DMICRO_SERVER-R03.00.05.05.DISK1.iso
# clear;./importiso.rb --distro fedora --release 20
# Guiding moto: Do one thing and do it well.
# ===========================================================================
# V A R I A B L E S
# ===========================================================================
# This is a fictisious command, it is here to make this script compatible
# with the common functions.
# Pseudo command name so this script can reuse the shared option-handling
# helpers that key their rules by command.
my $f_szImportCommand = "import";
# Built-in defaults; may later be overridden by config file / environment.
my %f_hDefaultValues;
$f_hDefaultValues{"BS_HOME_DIRECTORY"} = "/home/vagrant";
$f_hDefaultValues{"BS_TMP_MOUNT_POINT"} = $f_hDefaultValues{"BS_HOME_DIRECTORY"} . "/mnt";
$f_hDefaultValues{"BS_BOOT_KERNEL_BASE_DIRECTORY"} = "/var/tftp";
$f_hDefaultValues{"BS_BOOT_KERNEL_BASE_DIRECTORY_OWNER"} = "nobody";
$f_hDefaultValues{"BS_IMAGE_BASE_DIRECTORY"} = "/var/ks/images";
$f_hDefaultValues{"BS_CONFIG_BASE_DIRECTORY"} = "/var/ks/configs";
# Working copy of the settings, seeded from the defaults above.
my %f_hFinishedValues;
$f_hFinishedValues{"BS_HOME_DIRECTORY"} = $f_hDefaultValues{"BS_HOME_DIRECTORY"};
$f_hFinishedValues{"BS_TMP_MOUNT_POINT"} = $f_hDefaultValues{"BS_TMP_MOUNT_POINT"};
$f_hFinishedValues{"BS_BOOT_KERNEL_BASE_DIRECTORY"} = $f_hDefaultValues{"BS_BOOT_KERNEL_BASE_DIRECTORY"};
$f_hFinishedValues{"BS_BOOT_KERNEL_BASE_DIRECTORY_OWNER"} = $f_hDefaultValues{"BS_BOOT_KERNEL_BASE_DIRECTORY_OWNER"};
$f_hFinishedValues{"BS_IMAGE_BASE_DIRECTORY"} = $f_hDefaultValues{"BS_IMAGE_BASE_DIRECTORY"};
$f_hFinishedValues{"BS_CONFIG_BASE_DIRECTORY"} = $f_hDefaultValues{"BS_CONFIG_BASE_DIRECTORY"};
$f_hFinishedValues{"BS_DISTRO_CONFIGURATION_FILE"} = "/var/ks/distros.yaml";
# This is a definition of all the valid options.
my %f_hValidOption;
my $nHaveParameter = 1;
AddValidOption(\%f_hValidOption, "--distro", [ $f_szImportCommand ], $nHaveParameter, "Distribution name. E.g. 'fedora'", undef);
AddValidOption(\%f_hValidOption, "--arch", [ $f_szImportCommand ], $nHaveParameter, "Architecture. e.g. 'x86_64'", "x86_64");
AddValidOption(\%f_hValidOption, "--release", [ $f_szImportCommand ], $nHaveParameter, "Release ID, e.g. '20' or '10u8'", undef);
AddValidOption(\%f_hValidOption, "--mountpoint", [ $f_szImportCommand ], $nHaveParameter, "temporary mountpoint for the ISO.", $f_hDefaultValues{"BS_TMP_MOUNT_POINT"});
#print Dumper(%f_hValidOption);
# Cross-option constraints (mutual exclusion, necessity) for the command.
my %f_hOptionInteractionForCommand;
# This variable assignement is to make the copy and paste work from bootmgmt.rb
# 'required' here means, the option has to be available when performing the command.
# it does not mean that the option is mandatory on the CLI.
my $szCommand = $f_szImportCommand;
# --distro and --combo are a "OneOf" pair: exactly one of them must be given.
$f_hOptionInteractionForCommand{$szCommand}{"OptionList"}{"--distro"} = "OneOf";
$f_hOptionInteractionForCommand{$szCommand}{"OptionInfo"}{"--distroOneOf"}{"exclude"} = [ "--combo" ];
$f_hOptionInteractionForCommand{$szCommand}{"OptionInfo"}{"--distroOneOf"}{"necessity"} = "required";
$f_hOptionInteractionForCommand{$szCommand}{"OptionList"}{"--combo"} = "OneOf";
$f_hOptionInteractionForCommand{$szCommand}{"OptionInfo"}{"--comboOneOf"}{"exclude"} = [ "--distro" ];
$f_hOptionInteractionForCommand{$szCommand}{"OptionInfo"}{"--comboOneOf"}{"necessity"} = "required";
$f_hOptionInteractionForCommand{$szCommand}{"OptionList"}{"--release"} = "required";
$f_hOptionInteractionForCommand{$szCommand}{"OptionList"}{"--arch"} = "required";
$f_hOptionInteractionForCommand{$szCommand}{"OptionList"}{"--mountpoint"} = "required";
#print Dumper(%f_hOptionInteractionForCommand);
# ============================================================
# F U N C T I O N S
# ============================================================
# -----------------------------------------------------------------
# ---------------
# Top-level handler for the "import" command. Mounts the ISO with fuseiso,
# copies boot kernel files and the full ISO content into the boot-server
# directories, records the distro in the YAML config file, and unmounts.
# Dies on any failed step (mount still held in that case — caller beware).
sub CommandHandlingForImport {
my $refhcombinedData = shift;
my %hCombinedData = %$refhcombinedData;
#print Dumper(%hPopulatedOptionList);
#print "---\n";
#print Dumper(%hFinishedValues);
#print Dumper(\%hCombinedData);
if ( ! -f $hCombinedData{"IsoImageName"} ) {
die("!!! ISO image does not exist: " . $hCombinedData{"IsoImageName"});
}
DieIfIsoAlreadyMounted(\%hCombinedData);
# Mount the ISO read-only in user space at the temporary mount point.
DieIfExecuteFails("fuseiso " . $hCombinedData{"IsoImageName"} . " " . $hCombinedData{"BS_TMP_MOUNT_POINT"});
# TODO Validate the kernel target dir exists
# If Distribution is given, then call that function, and the distribution can then
# call any OS/Arch specific functions.
# If this is a --combo, then populate the ISO kernel data.
if ( ! exists $hCombinedData{"--distro"} ) {
die("!!! Developer: Please implement handling of missing --distro, that would be extracting the info based on the --combo.");
}
# Normalise CLI options into the keys the copy helpers expect.
$hCombinedData{"BootDistroName"} = $hCombinedData{"--distro"};
$hCombinedData{"BootDistroId"} = $hCombinedData{"--release"};
$hCombinedData{"Arch"} = $hCombinedData{"--arch"};
# based on --distro get: KernelFileList and RelativeKernelSource.
IifPutiIsoInfoDataInHash(\%hCombinedData);
CopyBootKernel(\%hCombinedData);
CopyIsoContent(\%hCombinedData);
# TODO V Only do this if it exists. In F20 it does not exist.
#CopyDefaultKickStartCfg();
#UpdateBootDataFiles();
print "III Writing config file to: $hCombinedData{BS_DISTRO_CONFIGURATION_FILE}\n";
UpdateDistroConfigFile($hCombinedData{BS_DISTRO_CONFIGURATION_FILE}, \%hCombinedData);
# Brief pause before unmount — presumably to let fuseiso settle; TODO confirm.
sleep 1;
DieIfExecuteFails("fusermount -u $hCombinedData{'BS_TMP_MOUNT_POINT'}");
} # end CommandHandlingForImport
# -----------------------------------------------------------------
# ---------------
# Copy the PXE boot kernel files (listed in KernelFiles, relative to
# RelativeKernelSource on the mounted ISO) into
# BS_BOOT_KERNEL_BASE_DIRECTORY/<distro>_<release>_<arch>, then chown the
# tree to the configured owner. Skips with a warning if the target
# directory already exists.
sub CopyBootKernel {
my $refhFinishedValues = shift;
my %hFinishedValues = %$refhFinishedValues;
#print Dumper(%hFinishedValues);
# TODO V verify predefinitions, like BootDistroName having been defined.
# TODO C Populate $f_hCardinalIsoData
my $szBootKernelTarget = $hFinishedValues{"BootDistroName"} . "_" . $hFinishedValues{"BootDistroId"} . "_" . $hFinishedValues{"Arch"};
my $szDestinationDirectory = $hFinishedValues{"BS_BOOT_KERNEL_BASE_DIRECTORY"} ."/${szBootKernelTarget}";
if ( ! -d $szDestinationDirectory ) {
# Create target directory if it does not exists.
DieIfExecuteFails("mkdir $szDestinationDirectory");
# copy all boot kernel files.
#print Dumper(\%hFinishedValues);
print "III Copy boot kernel files to $szDestinationDirectory\n";
foreach my $szKernelFile (@{$hFinishedValues{"KernelFiles"}}) {
#print "DDD szKernelFile: ${szKernelFile}\n";
DieIfExecuteFails("cp $hFinishedValues{'BS_TMP_MOUNT_POINT'}/$hFinishedValues{'RelativeKernelSource'}/${szKernelFile} $szDestinationDirectory");
}
# TFTP serves as 'nobody' by default, hence the ownership change.
DieIfExecuteFails("chown -R $hFinishedValues{'BS_BOOT_KERNEL_BASE_DIRECTORY_OWNER'} $szDestinationDirectory");
} else {
# TODO C Unless clobber is in effect then if the files exists at the target then fail.
print "WWW Boot kernel files not copied, since target directory exists: $hFinishedValues{'BS_BOOT_KERNEL_BASE_DIRECTORY'}/${szBootKernelTarget}\n";
}
}
# -----------------------------------------------------------------
# copy the packages
# ---------------
# Copy the entire mounted ISO tree into
# BS_IMAGE_BASE_DIRECTORY/<distro>_<release>_<arch>. Unlike
# CopyBootKernel this uses ExecuteCmd, so failures are NOT fatal — the
# return codes are ignored. Skips with a warning if the target exists.
sub CopyIsoContent {
my $refhFinishedValues = shift;
my %hFinishedValues = %$refhFinishedValues;
my $szIsoContentTarget = $hFinishedValues{"BootDistroName"} . "_" . $hFinishedValues{"BootDistroId"} . "_" . $hFinishedValues{"Arch"};
my $szDestinationDirectory = $hFinishedValues{"BS_IMAGE_BASE_DIRECTORY"} ."/" . $szIsoContentTarget;
if ( ! -d $szDestinationDirectory ) {
# Create target directory if it does not exist.
ExecuteCmd("mkdir $szDestinationDirectory");
# copy all files.
#Trace(7, "DDD ISO content to #{$f_szImagesBaseDirectory}/#{$f_hCardinalIsoData["ImageTarget"]}")
print "III Copy ISO content to $szDestinationDirectory\n";
# Trailing "/." makes cp copy the mount point's contents, not the dir itself.
ExecuteCmd("cp -r $hFinishedValues{'BS_TMP_MOUNT_POINT'}/. $szDestinationDirectory");
} else {
# TODO C Unless clobber is in effect then if the files exists at the target then fail.
print "WWW ISO content files are not copied, since target directory exists: $szDestinationDirectory\n";
}
} # end CopyIsoContent
# -----------------------------------------------------------------
# ---------------
sub DieIfIsoAlreadyMounted {
    # Abort when the fuseiso mount table already lists our temporary mount
    # point, so a second ISO is never stacked on top of a mounted one.
    my ($refhValues) = @_;

    my $szMountPoint = $refhValues->{"BS_TMP_MOUNT_POINT"};
    my $szMtabFile   = $refhValues->{"BS_HOME_DIRECTORY"} . "/.mtab.fuseiso";

    my @arMatches = `grep $szMountPoint $szMtabFile`;
    if ( defined($arMatches[0]) && $arMatches[0] =~ /$szMountPoint/ ) {
        die("!!! The tmp mount point is in use. Please unmount; fusermount -u $szMountPoint");
    }
}
# -----------------------------------------------------------------
# ---------------
sub DieIfExecuteFails {
    # Run a shell command via backticks and die with the captured output
    # if it exits non-zero. stderr is NOT captured (no 2>&1 redirect).
    my ($szCmd) = @_;

    # Resolves the old TODO: refuse an empty/undefined command instead of
    # silently running an empty shell.
    if ( !defined($szCmd) || $szCmd eq '' ) {
        die("!!! DieIfExecuteFails called without a command");
    }

    my @arOutput = `$szCmd`;
    if ( $? != 0 ) {
        # typo fix: "operaiont" -> "operation"
        die("!!! operation failed '$szCmd': @arOutput");
    }
} # end DieIfExecuteFails
# -----------------------------------------------------------------
# ---------------
sub ExecuteCmd {
    # Run a shell command with stderr folded into stdout and return the raw
    # exit status ($?, unshifted). The captured output is currently unused,
    # and the audit flag is accepted but not yet wired to anything.
    my ($szCommand, $nAuditFlag) = @_;

    $nAuditFlag = 0 unless defined $nAuditFlag;

    my @arCapturedOutput = `$szCommand 2>&1`;
    my $nExitStatus      = $?;

    return $nExitStatus;
} # end ExecuteCmd
# -----------------------------------------------------------------
# ---------------
sub LoadVariableFromEnvironment {
    # Overlay selected BS_* settings from the process environment onto the
    # supplied hash ref of finished values; keys absent from %ENV are left
    # untouched.
    my ($refhValues) = @_;

    my @arOverridableNames = qw(
        BS_IMAGE_BASE_DIRECTORY
        BS_BOOT_KERNEL_BASE_DIRECTORY
        BS_CONFIG_BASE_DIRECTORY
        BS_DISTRO_CONFIGURATION_FILE
    );

    for my $szName (@arOverridableNames) {
        next unless exists $ENV{$szName};
        # TODO C validate that this is a clean dir and not some command.
        $refhValues->{$szName} = $ENV{$szName};
    }
}
# ============================================================
# # # ### # #
## ## # # # ## #
# # # # # # # # # #
# # # # # # # # #
# # ####### # # # #
# # # # # # ##
# # # # ### # #
# ============================================================
# Load the Configurations from the config_boot_server_tool.yaml
# 1) Load settings from config_boot_server_tool.yaml, then let the
#    environment override selected BS_* values.
BscPutConfigDataInHash(\%f_hFinishedValues);
LoadVariableFromEnvironment(\%f_hFinishedValues);
#print Dumper(%f_hValidOption);
#print Dumper(%f_hValidOption);
# 2) Parse the CLI against the declared option table.
my %hProvidedParameters = HandleCommandLine(\%f_hValidOption, 1);
#print Dumper(%f_hValidOption);
#print Dumper(%hProvidedParameters);
#my %tmp = ( "--combo", "Alpha" );
#$tmp{"--release"} = "20";
#print Dumper(%tmp);
#print "Release: $tmp{'--release'}\n";
# 3) Validate options syntactically, then semantically (OneOf/required
#    rules), yielding the populated option list with defaults applied.
DieOnInvalidOptionsForCommand($f_szImportCommand, \%f_hValidOption, \%hProvidedParameters);
my %hPopulatedOptionList = DieOnSemanticErrorsOfOptionsForCommand($f_szImportCommand, \%f_hValidOption, \%hProvidedParameters, \%f_hOptionInteractionForCommand);
#print Dumper(%hProvidedParameters);
#print "---\n";
#print Dumper(%hPopulatedOptionList);
# 4) The trailing positional argument is the ISO path; then merge options
#    and settings and run the import.
$f_hFinishedValues{"IsoImageName"} = GetNextCliArgument("!!! You must provide the name of the ISO image at the end.");
my %hCombinedData = ( %hPopulatedOptionList, %f_hFinishedValues);
CommandHandlingForImport(\%hCombinedData);
| henk52/OPSbst | bin/importiso.pl | Perl | apache-2.0 | 12,902 |
# Auto-generated Paws result class for EC2 CreateEgressOnlyInternetGateway.
# The NameInRequest traits map Moose attribute names to the wire names
# used in the EC2 query response.
package Paws::EC2::CreateEgressOnlyInternetGatewayResult;
use Moose;
has ClientToken => (is => 'ro', isa => 'Str', request_name => 'clientToken', traits => ['NameInRequest',]);
has EgressOnlyInternetGateway => (is => 'ro', isa => 'Paws::EC2::EgressOnlyInternetGateway', request_name => 'egressOnlyInternetGateway', traits => ['NameInRequest',]);
has _request_id => (is => 'ro', isa => 'Str');
1;
### main pod documentation begin ###
=head1 NAME
Paws::EC2::CreateEgressOnlyInternetGatewayResult
=head1 ATTRIBUTES
=head2 ClientToken => Str
Unique, case-sensitive identifier you provide to ensure the idempotency
of the request.
=head2 EgressOnlyInternetGateway => L<Paws::EC2::EgressOnlyInternetGateway>
Information about the egress-only Internet gateway.
=head2 _request_id => Str
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/EC2/CreateEgressOnlyInternetGatewayResult.pm | Perl | apache-2.0 | 810 |
# Copyright 2015 Electric Cloud, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##########################
# deletecfg.pl
##########################
use ElectricCommander;
use ElectricCommander::PropDB;
use constant {
SUCCESS => 0,
ERROR => 1,
};
my $PLUGIN_NAME = 'EC-S3';
my $projName = "@PLUGIN_KEY@-@PLUGIN_VERSION@";

# Sanity check retained from the plugin template; with the literal
# assignment above it can never fire, but it is harmless.
if (!defined $PLUGIN_NAME) {
    print "PLUGIN_NAME must be defined\n";
    exit ERROR;
}

# get an EC object (direct method-call syntax instead of the discouraged
# indirect "new ElectricCommander()" form)
my $ec = ElectricCommander->new();
$ec->abortOnError(0);

# Single declaration of $opts — the original declared it twice, which
# produces a '"my" variable masks earlier declaration' warning.
my $opts;
$opts->{config} = "$[config]";

if (!defined $opts->{config} || "$opts->{config}" eq '') {
    print "config parameter must exist and be non-blank\n";
    exit ERROR;
}

# check to see if a config with this name already exists before
# we do anything else
my $xpath = $ec->getProperty("/myProject/s3_cfgs/$opts->{config}");
my $property = $xpath->findvalue("//response/property/propertyName");
if (!defined $property || "$property" eq '') {
    my $errMsg = "Error: A configuration named '$opts->{config}' does not exist.";
    $ec->setProperty("/myJob/configError", $errMsg);
    print $errMsg;
    exit ERROR;
}

# Remove both the stored configuration and its attached credential.
$ec->deleteProperty("/myProject/s3_cfgs/$opts->{config}");
$ec->deleteCredential($projName, $opts->{config});

exit SUCCESS;
| electric-cloud/EC-S3 | src/main/resources/project/procedures/DeleteConfiguration/deletecfg.pl | Perl | apache-2.0 | 1,765 |
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package cloud::azure::network::frontdoor::mode::backendhealth;
use base qw(cloud::azure::custom::mode);
use strict;
use warnings;
sub get_metrics_mapping {
    # Describe the single Azure Monitor metric handled by this mode:
    # the Front Door backend health percentage (0-100 %).
    my ($self, %options) = @_;

    my $backend_health = {
        'output' => 'Backend Health Percentage',
        'label'  => 'backend-health-percentage',
        'nlabel' => 'frontdoor.backend.health.percentage',
        'unit'   => '%',
        'min'    => '0',
        'max'    => '100'
    };

    return { 'backendhealthpercentage' => $backend_health };
}
# Constructor: registers the mode-specific CLI options on top of the
# shared azure custom-mode options; force_new_perfdata enables the
# nlabel-style perfdata naming.
sub new {
my ($class, %options) = @_;
my $self = $class->SUPER::new(package => __PACKAGE__, %options, force_new_perfdata => 1);
bless $self, $class;
$options{options}->add_options(arguments => {
'filter-metric:s' => { name => 'filter_metric' },
'resource:s' => { name => 'resource' },
'resource-group:s' => { name => 'resource_group' }
});
return $self;
}
# Validate CLI options and derive the az_* query parameters. Accepts
# either a bare resource name (requires --resource-group) or a full
# Azure resource id, from which group and name are extracted.
sub check_options {
my ($self, %options) = @_;
$self->SUPER::check_options(%options);
if (!defined($self->{option_results}->{resource}) || $self->{option_results}->{resource} eq '') {
$self->{output}->add_option_msg(short_msg => 'Need to specify either --resource <name> with --resource-group option or --resource <id>.');
$self->{output}->option_exit();
}
my $resource = $self->{option_results}->{resource};
my $resource_group = defined($self->{option_results}->{resource_group}) ? $self->{option_results}->{resource_group} : '';
# Full resource id form: pull group ($1) and frontdoor name ($2) out of it.
if ($resource =~ /^\/subscriptions\/.*\/resourceGroups\/(.*)\/providers\/Microsoft\.Network\/frontdoors\/(.*)$/) {
$resource_group = $1;
$resource = $2;
}
$self->{az_resource} = $resource;
$self->{az_resource_group} = $resource_group;
$self->{az_resource_type} = 'frontdoors';
$self->{az_resource_namespace} = 'Microsoft.Network';
# Defaults: 15-minute timeframe sampled at 5-minute intervals.
$self->{az_timeframe} = defined($self->{option_results}->{timeframe}) ? $self->{option_results}->{timeframe} : 900;
$self->{az_interval} = defined($self->{option_results}->{interval}) ? $self->{option_results}->{interval} : 'PT5M';
$self->{az_aggregations} = ['Average'];
if (defined($self->{option_results}->{aggregation})) {
$self->{az_aggregations} = [];
foreach my $stat (@{$self->{option_results}->{aggregation}}) {
if ($stat ne '') {
# Azure expects capitalised aggregation names (e.g. 'Average').
push @{$self->{az_aggregations}}, ucfirst(lc($stat));
}
}
}
# Honour --filter-metric against the declared metric mapping.
foreach my $metric (keys %{$self->{metrics_mapping}}) {
next if (defined($self->{option_results}->{filter_metric}) && $self->{option_results}->{filter_metric} ne ''
&& $metric !~ /$self->{option_results}->{filter_metric}/);
push @{$self->{az_metrics}}, $metric;
}
}
1;
__END__
=head1 MODE
Check Azure Front Door backend health.
Example:
Using resource name :
perl centreon_plugins.pl --plugin=cloud::azure::network::frontdoor::plugin --mode=backendhealth --custommode=api
--resource=<frontdoor_id> --resource-group=<resourcegroup_id> --aggregation='average'
--warning-backend-health-percentage='100:' --critical-backend-health-percentage='90:'
Using resource id :
perl centreon_plugins.pl --plugin=cloud::azure::network::frontdoor::plugin --mode=backendhealth --custommode=api
--resource='/subscriptions/<subscription_id>/resourceGroups/<resourcegroup_id>/providers/Microsoft.Network/frontdoors/<frontdoor_id>'
--aggregation='average' --warning-backend-health-percentage='100:' --critical-backend-health-percentage='90:'
Default aggregation: 'average' / 'minimum', 'maximum' and 'total' are valid.
=over 8
=item B<--resource>
Set resource name or id (Required).
=item B<--resource-group>
Set resource group (Required if resource's name is used).
=item B<--warning-backend-health-percentage>
Warning threshold.
=item B<--critical-backend-health-percentage>
Critical threshold.
=back
=cut
| Tpo76/centreon-plugins | cloud/azure/network/frontdoor/mode/backendhealth.pm | Perl | apache-2.0 | 4,711 |
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::lenovo::flexsystem::snmp::mode::components::faultled;
use strict;
use warnings;
# SNMP integer -> human-readable LED state.
my %map_faultled_states = ( 1 => 'on', 2 => 'off' );
# No OIDs to preload: check() issues its own get_leef request.
sub load {}
# Report one fault-LED reading: always logs the state as a long message,
# and raises the short message only when the configured severity mapping
# for section 'faultled' resolves to a non-OK status.
sub check_faultled {
my ($self, %options) = @_;
$self->{components}->{faultled}->{total}++;
$self->{output}->output_add(long_msg =>
sprintf(
"Fault LED state is %s",
$options{value}
)
);
my $exit = $self->get_severity(section => 'faultled', value => $options{value});
if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
$self->{output}->output_add(
severity => $exit,
short_msg => sprintf(
"Fault LED state is %s",
$options{value}
)
);
}
}
# Component entry point: fetch the fault-LED OID and hand the mapped
# on/off state to check_faultled. Silently returns when the section is
# filtered out or the OID is absent from the SNMP response.
sub check {
my ($self) = @_;
$self->{output}->output_add(long_msg => 'checking fault LED');
$self->{components}->{faultled} = { name => 'faultled', total => 0, skip => 0 };
return if ($self->check_filter(section => 'faultled'));
# mmspFaultLED from the Lenovo Flex System MIB
my $oid_mmspFaultLED = '.1.3.6.1.4.1.20301.2.5.1.3.10.12.0';
my $results = $self->{snmp}->get_leef(oids => [$oid_mmspFaultLED]);
return if (!defined($results->{$oid_mmspFaultLED}));
check_faultled($self, value => $map_faultled_states{$results->{$oid_mmspFaultLED}});
}
1;
| centreon/centreon-plugins | network/lenovo/flexsystem/snmp/mode/components/faultled.pm | Perl | apache-2.0 | 2,091 |
#!/usr/bin/env perl
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2022] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Bio::EnsEMBL::Analysis::Hive::RunnableDB::HiveCreateRefineGenesJobs;
use strict;
use warnings;
use base ('Bio::EnsEMBL::Hive::RunnableDB::JobFactory');
=head2 param_defaults
Arg [1] : None
Description: Returns the default parameters:
_ln_gene_ext => 'gene',
_ln_introns_ext => 'daf',
Returntype : Hash ref
Exceptions : None
=cut
sub param_defaults {
my ($self) = @_;
return {
# keep everything inherited from JobFactory, then add our logic-name suffixes
%{$self->SUPER::param_defaults},
_ln_gene_ext => 'gene',
_ln_introns_ext => 'daf',
}
}
=head2 fetch_input
Arg [1] : None
Description: Creates input id based on a custom table 'csvfile_table' in the hive database
It will generate the parameters for creating the models with the RefineSolexaGenes
module. If you specify 'single_tissue' to 1 it will generate input ids for the merged
set AND for each samples in 'sample_column'
It stores the input ids in 'inputlist'
Returntype : None
Exceptions : None
=cut
# Build one RefineSolexaGenes input id for the merged RNA-seq set and,
# when 'single_tissue' is enabled, one per tissue sample found in the
# 'csvfile_table' hive table. Results go into 'inputlist'/'column_names'
# for the inherited JobFactory machinery.
sub fetch_input {
my $self = shift;
my @output_ids;
my $gene_ext = $self->param('_ln_gene_ext');
my $introns_ext = $self->param('_ln_introns_ext');
if ($self->param('single_tissue')) {
my $table_adaptor = $self->db->get_NakedTableAdaptor;
$table_adaptor->table_name($self->param('csvfile_table'));
# Map tissue name -> set of sample ids (hash-of-hashes used as a set).
my %tissue_hash;
my $results = $table_adaptor->fetch_all();
foreach my $result (@$results) {
$tissue_hash{$result->{$self->param('sample_column')}}->{$result->{$self->param('sample_id_column')}} = 1;
}
# One job per tissue, restricted to that tissue's BAM read groups.
foreach my $key (keys %tissue_hash) {
push(@output_ids, [$self->param('iid'), [{file => $self->param('wide_intron_bam_file').'.bam', groupname => [keys %{$tissue_hash{$key}}], depth => 0, mixed_bam => 0}], $self->param('wide_species').'_'.$key.'_rnaseq_'.$gene_ext, $self->param('wide_species').'_'.$key.'_rnaseq_'.$introns_ext, "best_$key", "single_$key", '', '']);
}
}
# The merged job (empty groupname list = all read groups) is always created.
push(@output_ids, [$self->param('iid'), [{file => $self->param('wide_intron_bam_file').'.bam', groupname => [], depth => 0, mixed_bam => 0}], $self->param('wide_species').'_merged_rnaseq_'.$gene_ext, $self->param('wide_species').'_merged_rnaseq_'.$introns_ext, "best", "single", '', '']);
$self->param('inputlist', \@output_ids);
$self->param('column_names', ['iid', 'intron_bam_files', 'logic_name', 'introns_logic_name', 'best_score', 'single_exon_model', 'other_isoforms', 'bad_models']);
}
1;
| Ensembl/ensembl-analysis | modules/Bio/EnsEMBL/Analysis/Hive/RunnableDB/HiveCreateRefineGenesJobs.pm | Perl | apache-2.0 | 3,249 |
#
# Copyright 2015 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package os::linux::local::mode::traffic;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
use centreon::plugins::misc;
use centreon::plugins::statefile;
use Digest::MD5 qw(md5_hex);
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    # Mode version string reported by the framework.
    $self->{version} = '1.0';

    # Command-line options accepted by this mode, declared in a named
    # hashref and registered in a single call below.
    my $arguments = {
        # Remote-execution plumbing
        'hostname:s'        => { name => 'hostname' },
        'remote'            => { name => 'remote' },
        'ssh-option:s@'     => { name => 'ssh_option' },
        'ssh-path:s'        => { name => 'ssh_path' },
        'ssh-command:s'     => { name => 'ssh_command', default => 'ssh' },
        'timeout:s'         => { name => 'timeout', default => 30 },
        'sudo'              => { name => 'sudo' },
        # Command used to collect the interface counters
        'command:s'         => { name => 'command', default => 'ifconfig' },
        'command-path:s'    => { name => 'command_path', default => '/sbin' },
        'command-options:s' => { name => 'command_options', default => '-a 2>&1' },
        # Interface selection
        'filter-state:s'    => { name => 'filter_state', },
        'name:s'            => { name => 'name' },
        'regexp'            => { name => 'use_regexp' },
        'regexp-isensitive' => { name => 'use_regexpi' },
        'no-loopback'       => { name => 'no_loopback', },
        'skip'              => { name => 'skip' },
        # Thresholds and units
        'warning-in:s'      => { name => 'warning_in' },
        'critical-in:s'     => { name => 'critical_in' },
        'warning-out:s'     => { name => 'warning_out' },
        'critical-out:s'    => { name => 'critical_out' },
        'units:s'           => { name => 'units', default => 'B' },
        'speed:s'           => { name => 'speed' },
    };
    $options{options}->add_options(arguments => $arguments);

    # Per-interface parse results, filled by manage_selection().
    $self->{result} = {};
    # Hostname used in the statefile cache key; set in check_options().
    $self->{hostname} = undef;
    # Persistent cache holding the previous run's counters/timestamp.
    $self->{statefile_value} = centreon::plugins::statefile->new(%options);

    return $self;
}
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::init(%options);

    # Validate the four in/out traffic thresholds. The perfdata label uses
    # a dash ('warning-in') while the corresponding option key uses an
    # underscore ('warning_in'); derive both from a single list instead of
    # four copy-pasted validation blocks. Error messages are identical to
    # the original per-threshold ones.
    foreach my $label (qw(warning-in critical-in warning-out critical-out)) {
        my ($severity, $direction) = split(/-/, $label);
        my $value = $self->{option_results}->{$severity . '_' . $direction};
        if (($self->{perfdata}->threshold_validate(label => $label, value => $value)) == 0) {
            $self->{output}->add_option_msg(short_msg => "Wrong $severity '$direction' threshold '" . $value . "'.");
            $self->{output}->option_exit();
        }
    }

    # --speed must be a positive integer or float (in Mb) when provided.
    if (defined($self->{option_results}->{speed}) && $self->{option_results}->{speed} ne '' && $self->{option_results}->{speed} !~ /^[0-9]+(\.[0-9]+){0,1}$/) {
        $self->{output}->add_option_msg(short_msg => "Speed must be a positive number '" . $self->{option_results}->{speed} . "' (can be a float also).");
        $self->{output}->option_exit();
    }
    # Percent thresholds are relative to the interface speed, so --speed
    # is mandatory with --units=%.
    if (defined($self->{option_results}->{units}) && $self->{option_results}->{units} eq '%' &&
        (!defined($self->{option_results}->{speed}) || $self->{option_results}->{speed} eq '')) {
        $self->{output}->add_option_msg(short_msg => "To use percent, you need to set --speed option.");
        $self->{output}->option_exit();
    }

    $self->{statefile_value}->check_options(%options);

    # Hostname is only used to build the statefile cache key; fall back to
    # a fixed token for local execution.
    $self->{hostname} = $self->{option_results}->{hostname};
    if (!defined($self->{hostname})) {
        $self->{hostname} = 'me';
    }
}
sub manage_selection {
    my ($self, %options) = @_;

    # Run the configured command (default: /sbin/ifconfig -a 2>&1),
    # locally or through ssh/sudo depending on the options.
    my $stdout = centreon::plugins::misc::execute(output => $self->{output},
                                                  options => $self->{option_results},
                                                  sudo => $self->{option_results}->{sudo},
                                                  command => $self->{option_results}->{command},
                                                  command_path => $self->{option_results}->{command_path},
                                                  command_options => $self->{option_results}->{command_options});

    # Each ifconfig stanza starts with the interface name at column 0 and
    # ends at a blank line (or at the end of the output).
    while ($stdout =~ /^(\S+)(.*?)(\n\n|\n$)/msg) {
        my ($interface_name, $values) = ($1, $2);
        # State flags: 'R' = RUNNING, 'U' = UP (matched against
        # --filter-state, and required by run()'s /RU/ check).
        my $states = '';
        $states .= 'R' if ($values =~ /RUNNING/ms);
        $states .= 'U' if ($values =~ /UP/ms);

        # Selection filters: loopback exclusion, state filter, then name
        # matching (regexp case-insensitive, regexp, or exact).
        next if (defined($self->{option_results}->{no_loopback}) && $values =~ /LOOPBACK/ms);
        next if (defined($self->{option_results}->{filter_state}) && $self->{option_results}->{filter_state} ne '' &&
            $states !~ /$self->{option_results}->{filter_state}/);
        next if (defined($self->{option_results}->{name}) && defined($self->{option_results}->{use_regexp}) && defined($self->{option_results}->{use_regexpi})
            && $interface_name !~ /$self->{option_results}->{name}/i);
        next if (defined($self->{option_results}->{name}) && defined($self->{option_results}->{use_regexp}) && !defined($self->{option_results}->{use_regexpi})
            && $interface_name !~ /$self->{option_results}->{name}/);
        next if (defined($self->{option_results}->{name}) && !defined($self->{option_results}->{use_regexp}) && !defined($self->{option_results}->{use_regexpi})
            && $interface_name ne $self->{option_results}->{name});

        # BUGFIX: the previous code read $1/$2 after an *unchecked* match.
        # In Perl a failed match leaves $1/$2 holding the captures of the
        # last successful match, so an interface without 'RX bytes'/'TX
        # bytes' counters silently inherited the counters of the
        # previously parsed interface. Guard the match so 'in'/'out' stay
        # undef when no counters are present; run() already skips
        # interfaces whose 'in' counter is undef.
        my ($in_bytes, $out_bytes);
        if ($values =~ /RX bytes:(\S+).*?TX bytes:(\S+)/msi) {
            ($in_bytes, $out_bytes) = ($1, $2);
        }
        $self->{result}->{$interface_name} = {state => $states, in => $in_bytes, out => $out_bytes};
    }

    # Abort with an explicit message when nothing matched the selection.
    if (scalar(keys %{$self->{result}}) <= 0) {
        if (defined($self->{option_results}->{name})) {
            $self->{output}->add_option_msg(short_msg => "No interface found for name '" . $self->{option_results}->{name} . "'.");
        } else {
            $self->{output}->add_option_msg(short_msg => "No interface found.");
        }
        $self->{output}->option_exit();
    }
}
# Main entry point: parse interfaces, compute per-second traffic rates
# from the difference with the cached previous run, check thresholds and
# emit output + perfdata.
sub run {
    my ($self, %options) = @_;
    # Populates $self->{result} (per-interface state + raw byte counters).
    $self->manage_selection();
    my $new_datas = {};
    # Rates need two samples: load the counters persisted by the previous
    # run. The cache key includes hostname, mode and an md5 of --name so
    # different selections do not share state.
    $self->{statefile_value}->read(statefile => "cache_linux_local_" . $self->{hostname} . '_' . $self->{mode} . '_' . (defined($self->{option_results}->{name}) ? md5_hex($self->{option_results}->{name}) : md5_hex('all')));
    $new_datas->{last_timestamp} = time();
    my $old_timestamp = $self->{statefile_value}->get(name => 'last_timestamp');
    # Global OK baseline when checking several interfaces; per-interface
    # problems added below override it.
    if (!defined($self->{option_results}->{name}) || defined($self->{option_results}->{use_regexp})) {
        $self->{output}->output_add(severity => 'OK',
                                    short_msg => 'All traffic are ok.');
    }
    foreach my $name (sort(keys %{$self->{result}})) {
        # Interface must be both Up and Running ('RU' flags set by
        # manage_selection); otherwise CRITICAL, or skipped with --skip.
        if ($self->{result}->{$name}->{state} !~ /RU/) {
            if (!defined($self->{option_results}->{skip})) {
                $self->{output}->output_add(severity => 'CRITICAL',
                                            short_msg => "Interface '" . $name . "' is not up or/and running");
            } else {
                # Avoid getting "buffer creation..." alone
                if (defined($self->{option_results}->{name}) && !defined($self->{option_results}->{use_regexp})) {
                    $self->{output}->output_add(severity => 'OK',
                                                short_msg => "Interface '" . $name . "' is not up or/and running (normal state)");
                }
                $self->{output}->output_add(long_msg => "Skip interface '" . $name . "': not up or/and running.");
            }
            next;
        }
        # Some interface are running but not have bytes in/out
        if (!defined($self->{result}->{$name}->{in})) {
            if (defined($self->{option_results}->{name}) && !defined($self->{option_results}->{use_regexp})) {
                $self->{output}->output_add(severity => 'OK',
                                            short_msg => "Interface '" . $name . "' is up and running but can't get traffic (no values)");
            }
            $self->{output}->output_add(long_msg => "Skip interface '" . $name . "': can't get traffic.");
            next;
        }
        # Counters are stored in bits (bytes * 8) for b/s rate output.
        $new_datas->{'in_' . $name} = $self->{result}->{$name}->{in} * 8;
        $new_datas->{'out_' . $name} = $self->{result}->{$name}->{out} * 8;
        my $old_in = $self->{statefile_value}->get(name => 'in_' . $name);
        my $old_out = $self->{statefile_value}->get(name => 'out_' . $name);
        # First run for this interface: nothing to diff against yet.
        if (!defined($old_timestamp) || !defined($old_in) || !defined($old_out)) {
            next;
        }
        # Counter went backwards: assume a reboot/counter reset and
        # restart the diff from zero.
        if ($new_datas->{'in_' . $name} < $old_in) {
            # We set 0. Has reboot.
            $old_in = 0;
        }
        if ($new_datas->{'out_' . $name} < $old_out) {
            # We set 0. Has reboot.
            $old_out = 0;
        }
        my $time_delta = $new_datas->{last_timestamp} - $old_timestamp;
        if ($time_delta <= 0) {
            # At least one second. two fast calls ;)
            $time_delta = 1;
        }
        # Per-second rates in bits/s.
        my $in_absolute_per_sec = ($new_datas->{'in_' . $name} - $old_in) / $time_delta;
        my $out_absolute_per_sec = ($new_datas->{'out_' . $name} - $old_out) / $time_delta;
        my ($exit, $interface_speed, $in_prct, $out_prct);
        # With --speed (in Mb) we can compute utilisation percentages;
        # thresholds are checked against % when --units=%.
        if (defined($self->{option_results}->{speed}) && $self->{option_results}->{speed} ne '') {
            $interface_speed = $self->{option_results}->{speed} * 1000000;
            $in_prct = $in_absolute_per_sec * 100 / ($self->{option_results}->{speed} * 1000000);
            $out_prct = $out_absolute_per_sec * 100 / ($self->{option_results}->{speed} * 1000000);
            if ($self->{option_results}->{units} eq '%') {
                my $exit1 = $self->{perfdata}->threshold_check(value => $in_prct, threshold => [ { label => 'critical-in', 'exit_litteral' => 'critical' }, { label => 'warning-in', exit_litteral => 'warning' } ]);
                my $exit2 = $self->{perfdata}->threshold_check(value => $out_prct, threshold => [ { label => 'critical-out', 'exit_litteral' => 'critical' }, { label => 'warning-out', exit_litteral => 'warning' } ]);
                $exit = $self->{output}->get_most_critical(status => [ $exit1, $exit2 ]);
            }
            $in_prct = sprintf("%.2f", $in_prct);
            $out_prct = sprintf("%.2f", $out_prct);
        } else {
            # No --speed: percentages cannot be computed, shown as '-'.
            $in_prct = '-';
            $out_prct = '-';
        }
        # With absolute units, thresholds are checked against the raw b/s
        # rates instead of the percentages.
        if ($self->{option_results}->{units} ne '%') {
            my $exit1 = $self->{perfdata}->threshold_check(value => $in_absolute_per_sec, threshold => [ { label => 'critical-in', 'exit_litteral' => 'critical' }, { label => 'warning-in', exit_litteral => 'warning' } ]);
            my $exit2 = $self->{perfdata}->threshold_check(value => $out_absolute_per_sec, threshold => [ { label => 'critical-out', 'exit_litteral' => 'critical' }, { label => 'warning-out', exit_litteral => 'warning' } ]);
            $exit = $self->{output}->get_most_critical(status => [ $exit1, $exit2 ]);
        }
        ###########
        # Manage Output
        ###########
        # Human-readable rate with auto-scaled unit (b/Kb/Mb/...).
        my ($in_value, $in_unit) = $self->{perfdata}->change_bytes(value => $in_absolute_per_sec, network => 1);
        my ($out_value, $out_unit) = $self->{perfdata}->change_bytes(value => $out_absolute_per_sec, network => 1);
        $self->{output}->output_add(long_msg => sprintf("Interface '%s' Traffic In : %s/s (%s %%), Out : %s/s (%s %%) ", $name,
                                                        $in_value . $in_unit, $in_prct,
                                                        $out_value . $out_unit, $out_prct));
        # Short message only for non-OK states, or always when a single
        # named interface is checked.
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1) || (defined($self->{option_results}->{name}) && !defined($self->{option_results}->{use_regexp}))) {
            $self->{output}->output_add(severity => $exit,
                                        short_msg => sprintf("Interface '%s' Traffic In : %s/s (%s %%), Out : %s/s (%s %%) ", $name,
                                                             $in_value . $in_unit, $in_prct,
                                                             $out_value . $out_unit, $out_prct));
        }
        # Suffix perfdata labels with the interface name unless a single
        # exact-named interface is being checked.
        my $extra_label = '';
        $extra_label = '_' . $name if (!defined($self->{option_results}->{name}) || defined($self->{option_results}->{use_regexp}));
        $self->{output}->perfdata_add(label => 'traffic_in' . $extra_label, unit => 'b/s',
                                      value => sprintf("%.2f", $in_absolute_per_sec),
                                      warning => $self->{perfdata}->get_perfdata_for_output(label => 'warning-in', total => $interface_speed),
                                      critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical-in', total => $interface_speed),
                                      min => 0, max => $interface_speed);
        $self->{output}->perfdata_add(label => 'traffic_out' . $extra_label, unit => 'b/s',
                                      value => sprintf("%.2f", $out_absolute_per_sec),
                                      warning => $self->{perfdata}->get_perfdata_for_output(label => 'warning-out', total => $interface_speed),
                                      critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical-out', total => $interface_speed),
                                      min => 0, max => $interface_speed);
    }
    # Persist current counters/timestamp for the next run's diff.
    $self->{statefile_value}->write(data => $new_datas);
    # Very first run ever: no previous timestamp, so only the cache was
    # primed ("buffer creation").
    if (!defined($old_timestamp)) {
        $self->{output}->output_add(severity => 'OK',
                                    short_msg => "Buffer creation...");
    }
    $self->{output}->display();
    $self->{output}->exit();
}
1;
__END__
=head1 MODE
Check Traffic
=over 8
=item B<--remote>
Execute the command remotely via 'ssh'.
=item B<--hostname>
Hostname to query (need --remote).
=item B<--ssh-option>
Specify multiple options like the user (example: --ssh-option='-l=centreon-engine' --ssh-option='-p=52').
=item B<--ssh-path>
Specify ssh command path (default: none)
=item B<--ssh-command>
Specify ssh command (default: 'ssh'). Useful to use 'plink'.
=item B<--timeout>
Timeout in seconds for the command (Default: 30).
=item B<--sudo>
Use 'sudo' to execute the command.
=item B<--command>
Command to get information (Default: 'ifconfig').
Can be changed if you have output in a file.
=item B<--command-path>
Command path (Default: '/sbin').
=item B<--command-options>
Command options (Default: '-a 2>&1').
=item B<--warning-in>
Threshold warning in percent for 'in' traffic.
=item B<--critical-in>
Threshold critical in percent for 'in' traffic.
=item B<--warning-out>
Threshold warning in percent for 'out' traffic.
=item B<--critical-out>
Threshold critical in percent for 'out' traffic.
=item B<--units>
Units of thresholds (Default: 'B') ('%', 'B').
Percent can be used only if --speed is set.
=item B<--name>
Set the interface name (empty means 'check all interfaces')
=item B<--regexp>
Allows using a regexp to filter interfaces (with option --name).
=item B<--regexp-isensitive>
Allows to use regexp non case-sensitive (with --regexp).
=item B<--filter-state>
Filter interfaces by their state flags ('R' = running, 'U' = up; a regexp can be used).
=item B<--skip>
Skip errors on interface status (not up and running).
=item B<--speed>
Set interface speed (in Mb).
=item B<--no-loopback>
Don't display loopback interfaces.
=back
=cut | s-duret/centreon-plugins | os/linux/local/mode/traffic.pm | Perl | apache-2.0 | 17,458 |
#!/usr/bin/env perl
# See the NOTICE file distributed with this work for additional information
# regarding copyright ownership.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 NAME
copy_all_core_databases.pl
=head1 DESCRIPTION
This script automatically discovers the core databases available on the
staging server and submits a job to copy them onto the vertannot-staging
server.
The script doesn't do the copy itself but uses the Ensembl Production
REST API. For convenience you will need a checkout of ensembl-prodinf-tools
under $ENSEMBL_ROOT_DIR.
It can work without any arguments if your environment is set properly,
i.e. ENSEMBL_ROOT_DIR and ENSADMIN_PSW are defined, otherwise the
options are listed below.
=head1 OPTIONS
=head2 GETTING HELP
=over
=item B<[--help]>
Prints help message and exits.
=back
=head2 GENERAL CONFIGURATION
=over
=item B<[-s|--source_host]>
Where to get the databases from. Defaults to mysql-ens-sta-1 for vertebrates
and mysql-ens-sta-3 for non-vertebrate divisions.
=item B<[-t|--target_host]>
Where to copy the databases to. Defaults to mysql-ens-vertannot-staging:4573.
=item B<[-u|--endpoint_uri]>
The URI of the Ensembl Production Self-Service REST API. Defaults to
http://production-services.ensembl.org/api/${division}/db/
=item B<[-c|--db_copy_client]>
Path to the dbcopy_client.py script. Defaults to
${ENSEMBL_ROOT_DIR}/ensembl-prodinf-tools/src/scripts/dbcopy_client.py
=back
=cut
use strict;
use warnings;
use Getopt::Long;
use Pod::Usage;
use Bio::EnsEMBL::ApiVersion;
use Bio::EnsEMBL::Registry;
## Command-line options
# These are file-scoped lexicals: they are read by the copy logic further
# down this script, so their names must not change.
my ($db_copy_client, $endpoint_uri, $source_host, $target_host, $force, $division, $release, $dry_mode, $help);
GetOptions(
    's|source_host=s' => \$source_host,
    't|target_host=s' => \$target_host,
    'u|endpoint_uri=s' => \$endpoint_uri,
    'c|db_copy_client=s' => \$db_copy_client,
    'f|force!' => \$force,
    'd|division=s' => \$division,
    'r|release=i' => \$release,
    'y|dry_mode!' => \$dry_mode,
    'h|help' => \$help,
);
# --help: print the full embedded POD and exit successfully.
if ($help) {
    pod2usage({-exitvalue => 0, -verbose => 2});
}
# Default location of the Production REST client script, relative to the
# Ensembl checkout root.
if (not $db_copy_client) {
    if (not $ENV{ENSEMBL_ROOT_DIR}) {
        die "--db_copy_client is not given, and cannot find \$ENSEMBL_ROOT_DIR in the environment\n";
    }
    $db_copy_client = $ENV{ENSEMBL_ROOT_DIR} . '/ensembl-prodinf-tools/src/scripts/dbcopy_client.py';
}
# Sanity checks before doing any real work.
die "'$db_copy_client' is not executable (or doesn't exist ?)\n" unless -x $db_copy_client;
die "--division <division> must be provided\n" unless $division;
# Defaults: release from the installed API version, division-specific
# REST endpoint, and division-specific staging server as the source.
$release ||= software_version();
$endpoint_uri ||= "http://production-services.ensembl.org/api/$division/db/";
$source_host ||= $division eq 'vertebrates' ? 'mysql-ens-sta-1:4519' : 'mysql-ens-sta-3:4160';
$target_host ||= "mysql-ens-vertannot-staging:4573";
# Connect to the target server (read-only 'ensro' user) and index the core
# databases already present there, keyed by Registry species name.
Bio::EnsEMBL::Registry->load_registry_from_url('mysql://ensro@' . $target_host . '/');
my %existing_target_species; # Hash of Registry names, not production names (usually the same, though)
foreach my $db_adaptor (@{Bio::EnsEMBL::Registry->get_all_DBAdaptors(-GROUP => 'core')}) {
    push @{ $existing_target_species{ $db_adaptor->species } }, $db_adaptor->dbc->dbname;
}
my @databases_to_copy;  # every core db reported by the metadata service
my @db_clash;           # [wanted_db, \@other_dbs_for_same_species_on_target]
my @existing_dbs;       # dbs already present on the target with the same name
print "Running on check meta mode\n";
# Ask the metadata server for the core databases of this division/release.
# The command string deliberately goes through a shell: $ENSEMBL_ROOT_DIR
# and $(mysql-ens-meta-prod-1 details script) are expanded at run time.
# NOTE(review): $division/$release are interpolated unquoted -- assumed
# safe since they come from validated command-line options.
my $meta_script = "\$ENSEMBL_ROOT_DIR/ensembl-metadata/misc_scripts/get_list_databases_for_division.pl";
my $metadata_script_options = "\$(mysql-ens-meta-prod-1 details script) --division $division --release $release";
my $cmd = "perl $meta_script $metadata_script_options | grep _core_";
my $meta_run = qx/$cmd/;
my @dbs_from_meta = split( /\s+/, $meta_run );
# Map species (db name prefix before '_core_') to its core db, flagging
# species that appear more than once in the metadata listing.
my %meta_hash;
my $repeated_db = 0;
foreach my $db (@dbs_from_meta) {
    my $species_name = $db;
    $species_name =~ s/_core_.*//;
    if (exists $meta_hash{$species_name}){
        print "\tMultiple databases for $species_name\t$db\t$meta_hash{$species_name}\n";
        $repeated_db = 1;
    }
    else{
        $meta_hash{$species_name} = $db;
    }
    push @databases_to_copy, $db;
}
die "There are multiple databases for the same species, sort out with Production before progressing" if $repeated_db;
# Compare the wanted databases against what is already on the target:
# exact same name -> "existing", different name for the same species ->
# "clash" (would confuse the Registry).
foreach my $species_name (keys %meta_hash){
    if ($existing_target_species{$species_name}) {
        my $all_dbs = $existing_target_species{ $species_name };
        my @same_dbs = grep {$_ eq $meta_hash{$species_name}} @$all_dbs;
        my @diff_dbs = grep {$_ ne $meta_hash{$species_name}} @$all_dbs;
        if (@same_dbs) {
            push @existing_dbs, $meta_hash{$species_name};
        }
        if (@diff_dbs) {
            push @db_clash, [$meta_hash{$species_name}, \@diff_dbs];
        }
    }
}
if (@existing_dbs) {
    warn "These databases already exist on '$target_host' ! Check with the genebuilders that the assembly and geneset it contains are correct.\n";
    warn join("\n", map {"\t$_"} @existing_dbs), "\n";
}
if (@db_clash) {
    warn "These species have databases on '$target_host' with a different name ! The Registry may be confused ! Check with the genebuilders what they are and whether they can be dropped.\n";
    # NOTE(review): '$a' shadows sort()'s package variable; harmless here
    # (no sort block in scope) but a more distinctive name would be safer.
    foreach my $a (@db_clash) {
        warn "\t", $a->[0], "\t", join(" ", @{$a->[1]}), "\n";
    }
}
print "\n";
# Refuse to proceed past warnings unless --force was given.
die "Add the --force option if you want to carry on with the copy of the databases\n" if !$force && (@existing_dbs || @db_clash);
# Build the dbcopy_client.py submission; --force adds the DROP flag so
# pre-existing target databases are overwritten.
my @base_cmd = ($db_copy_client, '-a' => 'submit', '-u' => $endpoint_uri);
if ($force) {
    push @base_cmd, ('-w' => 'DROP');
}
# List-form system() below bypasses the shell, so the arguments are safe.
my @cmd = ( @base_cmd, '-s' => $source_host, '-t' => $target_host, '-i' => join(',', @databases_to_copy), '-r' => $ENV{USER}, '-e' => $ENV{USER} . '@ebi.ac.uk' );
if ($dry_mode) {
    print join( " ", @cmd ), "\n";
} elsif ( system(@cmd) ) {
    die "Could not run the command: ", join( " ", @cmd ), "\n";
}
| Ensembl/ensembl-compara | scripts/pipeline/copy_all_core_databases.pl | Perl | apache-2.0 | 6,461 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.