code stringlengths 2 1.05M | repo_name stringlengths 5 101 | path stringlengths 4 991 | language stringclasses 3 values | license stringclasses 5 values | size int64 2 1.05M |
|---|---|---|---|---|---|
#!/usr/bin/env perl
use strict;
use warnings;
use v5.20;
use Dancer;
use Dancer::Plugin::SecureHeaders;
use JSON::Parse qw(valid_json parse_json);
use Data::Printer;
use lib '../lib';
use Product;
use Category;
use User;
use Order;
# Application settings. The listen port is left at Dancer's default; the
# commented-out line shows how it could be overridden.
#setting( port => 2000 );
setting( log => 'debug' );
setting( content_type => 'application/json' );
setting( charset => 'UTF-8' );
setting( logger => 'console' );
setting( traces => 1 );
# Do not advertise the server software/version in response headers.
setting( server_tokens => 0 );
# Filesystem location where category/product images are stored.
setting( img_path => '../admin/public/i/' );
# Unbuffered output
#$| = 1;
# POST /login -- authenticate a user from 'email' and 'pass' parameters.
# Responds 200 with the user record on success, 403 with an error string
# otherwise.
post '/login' => sub {
    my $user = User->new(
        {
            email => params->{'email'},
            token => params->{'pass'},
        }
    );
    my $result = $user->authenticate();

    # Start pessimistic and flip to success only when authentication worked.
    my ( $ok, $code ) = ( 0, 403 );
    ( $ok, $code ) = ( 1, 200 ) if $result->{'ok'};

    status( $code );
    return $result->{'user'} if $ok;
    return "User not authenticated";
};
# Category routes: CRUD plus product association, starring and image removal.
prefix '/category' => sub {

    # GET /category/list -- all categories.
    get '/list' => sub {
        my $result = Category->list();
        my $status = $result->[0];
        my $categories = $result->[1];
        status( $status->{'code'} );
        return $categories if $status->{'ok'};
        return $status->{'status'};
    };

    # GET /category/:id -- one category; products are appended when the
    # model returns them as a third element. The model already returns
    # serialized JSON fragments, hence the string concatenation.
    get '/:id' => sub {
        my $category = Category->new( { id => params->{'id'} } );
        my $result = $category->get();
        status( $result->[0]->{'code'} );
        return '{"category":'.$result->[1].'}' unless $result->[2];
        return '{"category":'.$result->[1].',"products":'.$result->[2].'}';
    };

    # GET /category/:id/starred -- starred products in this category.
    get '/:id/starred' => sub {
        my $category = Category->new( { id => params->{'id'} } );
        my $result = $category->get_starred();
        my $status = $result->[0];
        my $products = $result->[1];
        status( $status->{'code'} );
        return $products if $status->{'ok'};
        return $status->{'status'};
    };

    # POST /category/ -- create a category from JSON in params->{'category'}.
    post '/' => sub {
        my $status = { ok => 0, status => 'Please, give me a json-formatted category to be added', code => 400 };
        my $input = params->{'category'};
        if( !valid_json( $input ) ) {
            status( $status->{'code'} );
            return $status->{'status'};
        }
        my $category = Category->new( parse_json( $input ) );
        $status = $category->save();
        status( $status->{'code'} );
        return $status->{'cid'} if $status->{'ok'};
        return $status->{'status'};
    };

    # POST /category/:id -- update category :id from JSON in params->{'category'}.
    post '/:id' => sub {
        my $id = param 'id';
        my $input = param 'category';
        my $status = { ok => 0, status => 'Please, give me a json-formatted category', code => 400 };
        if( !valid_json( $input ) ) {
            status( $status->{'code'} );
            return $status->{'status'};
        }
        $input = parse_json( $input );
        $input->{'id'} = $id;
        my $category = Category->new( $input );
        $status = $category->update();
        status( $status->{'code'} );
        return if $status->{'ok'};
        return $status->{'status'};
    };

    # DELETE /category/:id -- remove a category.
    del '/:id' => sub {
        my $input = params->{'id'};
        my $category = Category->new( { id => $input } );
        my $status = $category->delete();
        status( $status->{'code'} );
        return if $status->{'ok'};
        return $status->{'status'};
    };

    # OPTIONS /category/:id -- CORS preflight; only the headers matter.
    options '/:id' => sub {
        return;
    };

    # POST /category/:id/products -- associate a JSON list of product ids
    # with category :id.
    post '/:id/products' => sub {
        my $id = param 'id';
        my $input = param 'products';
        # Validate the payload up front, like the other POST routes do;
        # previously malformed JSON would make parse_json() die with a 500.
        if( !valid_json( $input ) ) {
            status( 400 );
            return 'Please, give me a json-formatted list of products';
        }
        my $products = parse_json( $input );
        my $category = Category->new( { id => $id } );
        # Sensible default for an empty list (previously status() was
        # called on an undef $status in that case).
        my $status = { ok => 0, status => 'Please, give me a non-empty list of products', code => 400 };
        for my $product ( @{ $products } ) {
            $status = $category->add_product( $product );
            # Stop at the first failure so its error is not masked by
            # later, successful additions (resolves the old XXX note).
            last unless $status->{'ok'};
        }
        status( $status->{'code'} );
        return if $status->{'ok'};
        return $status->{'status'};
    };

    # GET /category/:id/add/:pid -- add product :pid to category :id.
    get '/:id/add/:pid' => sub {
        my $category = Category->new( { id => params->{'id'} } );
        my $status = $category->add_product( params->{'pid'} );
        status( $status->{'code'} );
        return if $status->{'ok'};
        return $status->{'status'};
    };

    # GET /category/:id/del/:pid -- remove product :pid from category :id.
    get '/:id/del/:pid' => sub {
        my $category = Category->new( { id => params->{'id'} } );
        my $status = $category->del_product( params->{'pid'} );
        status( $status->{'code'} );
        return if $status->{'ok'};
        return $status->{'status'};
    };

    # GET /category/:id/star -- toggle the "starred" flag.
    get '/:id/star' => sub {
        my $category = Category->new( { id => params->{'id'} } );
        my $status = $category->toggle_star();
        status( $status->{'code'} );
        return if $status->{'ok'};
        return $status->{'status'};
    };

    # DELETE /category/:id/products -- drop all product associations.
    del '/:id/products' => sub {
        my $id = param 'id';
        my $category = Category->new( { id => $id } );
        my $status = $category->del_products();
        status( $status->{'code'} );
        return if $status->{'ok'};
        return $status->{'status'};
    };

    # DELETE /category/:id/images/:image -- delete one stored image.
    # XXX: the category is fetched and then re-instantiated from its own
    # serialized JSON, because del_image() needs the full stored record.
    del '/:id/images/:image' => sub {
        my $id = params->{'id'};
        my $image_key = params->{'image'};
        my $category = Category->new( { id => $id } );
        my $status = $category->get();
        $category = Category->new( parse_json( $status->[1] ) );
        $status = $category->del_image( $image_key, config->{'img_path'} );
        status( $status->{'code'} );
        return if $status->{'ok'};
        return $status->{'status'};
    };

    # OPTIONS /category/:id/images/:image -- ajax issues an OPTIONS request
    # (CORS preflight) before the real one; it only needs the headers.
    options '/:id/images/:image' => sub {
        return;
    };
};
# Product routes: listing, CRUD, starring, category links and image removal.
prefix '/product' => sub {
# GET /product/list -- every product.
get '/list' => sub {
my $result = Product->list();
my $status = $result->[0];
my $products = $result->[1];
status( $status->{'code'} );
return $products if $status->{'ok'};
return $status->{'status'};
};
# GET /product/list/:id -- products belonging to one category.
get '/list/:id' => sub {
# :id is a category's id
my $result = Product->list( params->{'id'} );
my $status = $result->[0];
my $products = $result->[1];
status( $status->{'code'} );
return $products if $status->{'ok'};
return $status->{'status'};
};
# GET /product/:id -- a single product. Note that the body ($result->[1])
# is returned regardless of the status code.
get '/:id' => sub {
my $product = Product->new( { id => params->{'id'} } );
my $result = $product->get();
status( $result->[0]->{'code'} );
return $result->[1];
};
# POST /product/ -- create a product from JSON in params->{'product'};
# returns the new product id on success.
post '/' => sub {
# create a new product with params->{'product'}
my $status = { ok => 0, status => 'Please, give me a json-formatted product to be added', code => 400 };
my $input = params->{'product'};
if( !valid_json( $input ) ) {
status( $status->{'code'} );
return $status->{'status'};
}
my $product = Product->new( parse_json( $input ) );
$status = $product->save();
status( $status->{'code'} );
return $status->{'pid'} if $status->{'ok'};
return $status->{'status'};
};
# POST /product/:id -- update product :id from JSON in params->{'product'};
# the route id overrides any id inside the JSON payload.
post '/:id' => sub {
# update product with id :id using data in params->{'product'}
my $id = params->{'id'};
my $input = params->{'product'};
my $status = { ok => 0, status => 'Please, give me a json-formatted product', code => 400 };
if( !valid_json( $input ) ) {
status( $status->{'code'} );
return $status->{'status'};
}
$input = parse_json( $input );
$input->{'id'} = $id;
my $product = Product->new( $input );
$status = $product->update();
status( $status->{'code'} );
return if $status->{'ok'};
return $status->{'status'};
};
# GET /product/:id/star -- toggle the "starred" flag.
get '/:id/star' => sub {
my $product = Product->new( { id => params->{'id'} } );
my $status = $product->toggle_star();
status( $status->{'code'} );
return if $status->{'ok'};
return $status->{'status'};
};
# DELETE /product/:id -- remove a product.
del '/:id' => sub {
my $input = params->{'id'};
my $product = Product->new( { id => $input } );
my $status = $product->delete();
status( $status->{'code'} );
return if $status->{'ok'};
return $status->{'status'};
};
# OPTIONS /product/:id -- CORS preflight; only the headers matter.
options '/:id' => sub {
return;
};
# DELETE /product/:id/categories -- drop all category associations.
del '/:id/categories' => sub {
# Remove all associations of this product with its categories.
my $input = params->{'id'};
my $product = Product->new( { id => $input } );
my $status = $product->del_categories();
status( $status->{'code'} );
return if $status->{'ok'};
return $status->{'status'};
};
# GET /product/:id/categories -- categories this product belongs to.
get '/:id/categories' => sub {
# Get categories associated with this product.
my $input = params->{'id'};
my $product = Product->new( { id => $input } );
my $status = $product->get_categories();
status( $status->{'code'} );
return $status->{'categories'} if $status->{'ok'};
return $status->{'status'};
};
# DELETE /product/:id/images/:image -- delete one stored image.
del '/:id/images/:image' => sub {
my $id = params->{'id'};
my $image_key = params->{'image'};
# XXX: This sucks. The product is fetched, then re-instantiated from its
# own serialized JSON, because del_image() needs the full stored record.
my $product = Product->new( { id => $id } );
my $status = $product->get();
$product = Product->new( parse_json( $status->[1] ) );
$status = $product->del_image( $image_key, config->{'img_path'} );
status( $status->{'code'} );
return if $status->{'ok'};
return $status->{'status'};
};
options '/:id/images/:image' => sub {
# ajax first issues an OPTIONS request (CORS preflight), then the real one.
# It just needs to see the headers.
return;
};
};
# User routes: listing, CRUD, cart storage, and (currently disabled)
# account activation.
prefix '/user' => sub {
# GET /user/list -- every user.
get '/list' => sub {
my $result = User->list();
my $status = $result->[0];
my $users = $result->[1];
status( $status->{'code'} );
return $users if $status->{'ok'};
return $status->{'status'};
};
# GET /user/:email -- fetch one user by email address.
get '/:email' => sub {
my $user = User->new( { email => params->{'email'} } );
my $status = $user->get();
status( $status->{'code'} );
return $status->{'user'} if $status->{'ok'};
return $status->{'status'};
};
# POST /user/ -- create a user from JSON in params->{'user'};
# returns the new user id on success.
post '/' => sub {
# create a new user with params->{'user'}
my $status = { ok => 0, status => 'Please, give me a json-formatted user to be added', code => 400 };
my $input = params->{'user'};
if( !valid_json( $input ) ) {
status( $status->{'code'} );
return $status->{'status'};
}
my $user = User->new( parse_json( $input ) );
$status = $user->save();
status( $status->{'code'} );
return $status->{'uid'} if $status->{'ok'};
return $status->{'status'};
};
# POST /user/:email -- update the user identified by :email; the route
# email overrides any email inside the JSON payload.
post '/:email' => sub {
# update user with email :email using data in params->{'user'}
my $email = params->{'email'};
my $input = params->{'user'};
my $status = { ok => 0, status => 'Please, give me a json-formatted user', code => 400 };
if( !valid_json( $input ) ) {
status( $status->{'code'} );
return $status->{'status'};
}
$input = parse_json( $input );
$input->{'email'} = $email;
my $user = User->new( $input );
$status = $user->update();
status( $status->{'code'} );
return if $status->{'ok'};
return $status->{'status'};
};
# DELETE /user/:email -- remove a user.
del '/:email' => sub {
my $input = params->{'email'};
my $user = User->new( { email => $input } );
my $status = $user->delete();
status( $status->{'code'} );
return if $status->{'ok'};
return $status->{'status'};
};
options '/:email' => sub {
# ajax first issues an OPTIONS request (CORS preflight), then the real one.
# It just needs to see the headers.
return;
};
# TODO - cart:
# Cart is client-side, then stored server-side for resiliency.
# GET /user/:email/cart -- return the user's stored cart.
get '/:email/cart' => sub {
my $email = params->{'email'};
my $user = User->new( { email => $email } );
#TODO
# get user's cart
my $cart = $user->getCart();
# NOTE(review): Data::Printer debug dump left in -- consider removing.
p $cart;
# return it (as json)
return $cart;
};
# POST /user/:email/cart/update -- store a JSON cart for the user.
post '/:email/cart/update' => sub {
my $email = params->{'email'};
my $cart = params->{'cart'};
my $status = { ok => 0, status => 'Please, give me an email and a json cart', code => 400 };
if( !valid_json( $cart ) ) {
status( $status->{'code'} );
return $status->{'status'};
}
my $user = User->new( { email => $email, cart => $cart } );
$status = $user->update();
status( $status->{'code'} );
return if $status->{'ok'};
return $status->{'status'};
};
options '/:email/cart/update' => sub {
return;
};
# NOTE: everything between the following '=cut' and the closing '=cut' is
# DISABLED code: any '=' directive at column 0 opens a POD block, so perl
# skips these activation routes entirely.
=cut
## User authentication, for frontend users and client administrators
#
# NOTE This is *not* api authentication. That's done by the webserver. NOTE
#
post '/activate' => sub {
my $email = params->{'email'};
my $user = User->new( { email => $email } );
my $status = $user->newToken();
status( $status->{'code'} );
return if $status->{'ok'};
return $status->{'status'};
};
post '/activate/:token' => sub {
my $email = params->{'email'};
my $token = params->{'token'};
my $user = User->new( { email => $email } );
my $status = $user->activate( $token );
status( $status->{'code'} );
return if $status->{'ok'};
return $status->{'status'};
};
post '/deactivate/:token' => sub {
my $email = params->{'email'};
my $token = params->{'token'};
my $user = User->new( { email => $email } );
my $status = $user->deactivate( $token );
status( $status->{'code'} );
return if $status->{'ok'};
return $status->{'status'};
};
=cut
};
# Order routes: plain CRUD, mirroring the product/category endpoints.
prefix '/order' => sub {

    # GET /order/list -- every order.
    get '/list' => sub {
        my $result = Order->list();
        my ( $status, $orders ) = @{ $result }[ 0, 1 ];
        status( $status->{'code'} );
        return $orders if $status->{'ok'};
        return $status->{'status'};
    };

    # GET /order/:id -- a single order.
    get '/:id' => sub {
        my $order = Order->new( { id => params->{'id'} } );
        my $status = $order->get();
        status( $status->{'code'} );
        return $status->{'order'} if $status->{'ok'};
        return $status->{'status'};
    };

    # POST /order/ -- create an order from JSON in params->{'order'};
    # returns the new order id on success.
    post '/' => sub {
        my $input = params->{'order'};
        unless ( valid_json( $input ) ) {
            status( 400 );
            return 'Please, give me a json-formatted order to be added';
        }
        my $status = Order->new( parse_json( $input ) )->save();
        status( $status->{'code'} );
        return $status->{'oid'} if $status->{'ok'};
        return $status->{'status'};
    };

    # POST /order/:id -- update order :id from JSON in params->{'order'};
    # the route id overrides any id inside the payload.
    post '/:id' => sub {
        my $input = params->{'order'};
        unless ( valid_json( $input ) ) {
            status( 400 );
            return 'Please, give me a json-formatted order';
        }
        my $data = parse_json( $input );
        $data->{'id'} = params->{'id'};
        my $status = Order->new( $data )->update();
        status( $status->{'code'} );
        return if $status->{'ok'};
        return $status->{'status'};
    };

    # DELETE /order/:id -- remove an order.
    del '/:id' => sub {
        my $status = Order->new( { id => params->{'id'} } )->delete();
        status( $status->{'code'} );
        return if $status->{'ok'};
        return $status->{'status'};
    };

    # OPTIONS /order/:id -- CORS preflight; only the headers matter.
    options '/:id' => sub {
        return;
    };
};

# Hand control over to Dancer's request loop.
start;
| Mikrobit/shop | api/api.pl | Perl | bsd-3-clause | 16,559 |
=head1 NAME
rbpf-slam - Robot map building with Rao-Blackwellized Particle Filters
=head1 SYNOPSIS
rbpf-slam I<config_file.ini>
=head1 DESCRIPTION
B<rbpf-slam> is a command-line application which processes a rawlog and
creates a map using the particle filter methods specified in the config file.
=head1 BUGS
Please report bugs at https://github.com/MRPT/mrpt/issues
=head1 SEE ALSO
The application wiki page at https://www.mrpt.org/Applications
=head1 AUTHORS
B<rbpf-slam> is part of the Mobile Robot Programming Toolkit (MRPT), and was originally
written by the MAPIR laboratory (University of Malaga).
This manual page was written by Jose Luis Blanco <joseluisblancoc@gmail.com>.
=head1 COPYRIGHT
This program is free software; you can redistribute it and/or modify it
under the terms of the BSD License.
On Debian GNU/Linux systems, the complete text of the BSD License can be
found in `/usr/share/common-licenses/BSD'.
=cut
| MRPT/mrpt | doc/man-pages/pod/rbpf-slam.pod | Perl | bsd-3-clause | 949 |
package Explain::Controller;
use Mojo::Base 'Mojolicious::Controller';
use English -no_match_vars;
use Pg::Explain;
use Encode;
use Email::Valid;
use Config;
sub logout {
    my $self = shift;

    # Drop both authentication markers from the session, then send the
    # visitor back to the landing page.
    delete $self->session->{ $_ } for qw( user admin );

    $self->redirect_to( 'new-explain' );
}
# Show the logged-in user's own plan history, with optional keyset
# pagination ('direction' = before/after plus a 'key' anchor).
sub user_history {
    my $self = shift;

    # Anonymous visitors get the public history instead. The 'return' is
    # essential: without it the handler kept running after the redirect
    # and queried the database with an undef user.
    return $self->redirect_to( 'history' ) unless $self->session->{ 'user' };

    my @args = ( $self->session->{ 'user' } );

    # Optional pagination: sort direction and the anchor key.
    if (   ( $self->param( 'direction' ) )
        && ( $self->param( 'direction' ) =~ m{\A(?:before|after)\z} )
        && ( $self->param( 'key' ) ) )
    {
        push @args, $self->param( 'direction' ) eq 'before' ? 'DESC' : 'ASC';
        push @args, $self->param( 'key' );
    }

    my $data = $self->database->get_user_history( @args );
    $self->stash->{ 'plans' } = $data;
    return $self->render();
}
# User settings page: render the form, or change the password when the
# form was submitted (old password plus two matching copies of the new).
sub user {
    my $self = shift;

    my $old  = $self->req->param( 'old-pw' );
    my $new  = $self->req->param( 'new-pw' );
    my $new2 = $self->req->param( 'new-pw2' );

    # First visit, nothing submitted yet: just show the form.
    return $self->render unless defined $old;

    if (   ( !defined $new )
        || ( !defined $new2 )
        || ( $new ne $new2 ) )
    {
        $self->stash->{ 'message' } = 'You have to provide two identical copies of new password!';
        return;
    }

    my $status = $self->database->user_change_password( $self->session->{ 'user' }, $old, $new );
    if ( $status ) {
        $self->flash( 'message' => 'Password changed.' );
        # 'return' added so the failure message below is no longer set
        # unconditionally after the redirect has been rendered.
        return $self->redirect_to( 'new-explain' );
    }
    $self->stash->{ 'message' } = 'Changing the password failed.';
    return;
}
# Apply changes to a plan owned by the logged-in user: delete it, retitle
# it, toggle its public flag, or anonymize its text. Redirects back to the
# page given in the 'return' parameter.
sub plan_change {
    my $self = shift;

    # Only logged-in users may change plans. The added 'return's in this
    # sub are essential: previously the handler kept executing after
    # redirect_to() -- so an unauthorized request could still reach the
    # delete/update calls below.
    unless ( $self->session->{ 'user' } ) {
        $self->app->log->error( 'User tried to access plan change without being logged!' );
        return $self->redirect_to( 'new-explain' );
    }
    return $self->redirect_to( 'new-explain' ) unless $self->req->param( 'return' );

    my $plan = $self->database->get_plan_data( $self->param( 'id' ) );

    # The plan must belong to the current user.
    if (   ( !defined $plan->{ 'added_by' } )
        || ( $plan->{ 'added_by' } ne $self->session->{ 'user' } ) )
    {
        $self->app->log->error( 'User tried to access plan change for plan [' . $plan->{ 'id' } . '] of another user: ' . $self->session->{ 'user' } );
        return $self->redirect_to( 'logout' );
    }

    # All looks fine. Current plan data are in $plan.

    # Deletion requested?
    if (   ( $self->req->param( 'delete' ) )
        && ( $self->req->param( 'delete' ) eq 'yes' ) )
    {
        $self->database->delete_plan( $plan->{ 'id' }, $plan->{ 'delete_key' } );
        return $self->redirect_to( $self->req->param( 'return' ) );
    }

    # Collect only the attributes that actually changed.
    my %changes = ();
    if ( $plan->{ 'title' } ne ( $self->req->param( 'title' ) // '' ) ) {
        $changes{ 'title' } = ( $self->req->param( 'title' ) // '' );
    }
    if (   ( $plan->{ 'is_public' } )
        && ( !$self->req->param( 'is_public' ) ) )
    {
        $changes{ 'is_public' } = 0;
    }
    elsif (( !$plan->{ 'is_public' } )
        && ( $self->req->param( 'is_public' ) ) )
    {
        $changes{ 'is_public' } = 1;
    }
    # Anonymization is one-way: once anonymized, a plan stays that way.
    if (   ( !$plan->{ 'is_anonymized' } )
        && ( $self->req->param( 'is_anonymized' ) ) )
    {
        my $explain = Pg::Explain->new( source => $plan->{ 'plan' } );
        $explain->anonymize();
        $changes{ 'plan' }          = $explain->as_text();
        $changes{ 'is_anonymized' } = 1;
    }

    return $self->redirect_to( $self->req->param( 'return' ) ) if 0 == scalar keys %changes;

    $self->database->update_plan( $plan->{ 'id' }, \%changes );
    return $self->redirect_to( $self->req->param( 'return' ) );
}
# Log an existing user in or, when 'is_registration' is set, register a
# new account. Renders the combined login/registration form when no
# username was submitted.
sub login {
    my $self = shift;

    # If there is no username - there is nothing to do
    my $username = $self->req->param( 'username' );
    return $self->render unless defined $username;

    if ( 30 < length( $username ) ) {
        $self->stash->{ 'message' } = 'Username cannot be longer than 30 characters. Really?!';
        return;
    }

    my $password  = $self->req->param( 'password' );
    my $password2 = $self->req->param( 'password2' );
    if ( ( !defined $password ) || ( '' eq $password ) ) {
        $self->stash->{ 'message' } = 'There has to be some password!';
        return;
    }

    # Registration
    if ( $self->req->param( 'is_registration' ) ) {
        if (   ( !defined $password2 )
            || ( $password2 ne $password ) )
        {
            $self->stash->{ 'message' } = 'You have to repeat password correctly!';
            return;
        }
        my $status = $self->database->user_register( $username, $password );
        if ( $status ) {
            $self->flash( 'message' => 'User registered.' );
            $self->session( 'user' => $username );
            # 'return' added so we do not fall through to the failure
            # message after the redirect has been rendered.
            return $self->redirect_to( 'new-explain' );
        }
        $self->stash->{ 'message' } = 'Registration failed.';
        return;
    }

    # Plain login.
    if ( my $user = $self->database->user_login( $username, $password ) ) {
        $self->flash( 'message' => 'User logged in.' );
        $self->session( 'user' => $username );
        $self->session( 'admin' => $user->{ 'admin' } );
        return $self->redirect_to( 'new-explain' );
    }
    $self->stash->{ 'message' } = 'Bad username or password.';
    return;
}
# Show the submission form preconfigured as an optimization attempt of an
# existing plan.
sub new_optimization {
    my $self = shift;

    # Plan ids are plain alphanumeric tokens; anything else goes home.
    my $original_plan_id = $self->req->param( 'original' ) // '';
    return $self->redirect_to( 'new-explain' ) unless $original_plan_id =~ m{\A[a-zA-Z0-9]+\z};

    my ( $original_plan, $original_title ) = $self->database->get_plan( $original_plan_id );
    return $self->redirect_to( 'new-explain', status => 404 ) unless $original_plan;

    # Reuse the regular submission template, flagged as optimization mode.
    @{ $self->stash }{ qw( optimization original_plan_id original_title ) } =
        ( 1, $original_plan_id, $original_title );

    return $self->render( 'controller/index' );
}
# Front page: render the submission form, or accept a posted plan,
# optionally anonymize it, store it, and redirect to its show page.
sub index {
my $self = shift;
# plan
my $plan = $self->req->param( 'plan' );
# nothing to do... (no plan submitted -> just render the form)
return $self->render unless $plan;
# request entity too large
return $self->render( message => 'Your plan is too long.', status => 413 )
if 10_000_000 < length $plan;
# Get id of parent plan; silently dropped unless it really exists.
my $parent_id = $self->req->param( 'optimization_for' );
if ( defined $parent_id ) {
$parent_id = undef unless $self->database->plan_exists( $parent_id );
}
# public
my $is_public = $self->req->param( 'is_public' ) ? 1 : 0;
# anonymization
my $is_anon = $self->req->param( 'is_anon' ) ? 1 : 0;
# plan title; the form's placeholder text counts as empty
my $title = $self->req->param( 'title' );
$title = '' unless defined $title;
$title = '' if 'Optional title' eq $title;
# try
eval {
# make "explain"
my $explain = Pg::Explain->new( source => $plan );
# something goes wrong...
die q|Can't create explain! Explain "top_node" is undef!|
unless defined $explain->top_node;
# Anonymize plan, when requested; the anonymized text (not the
# original) is what gets stored below.
if ( $is_anon ) {
$explain->anonymize();
$plan = $explain->as_text();
}
};
# catch
if ( $EVAL_ERROR ) {
# log message
$self->app->log->info( $EVAL_ERROR );
# leave...
return $self->render( message => q|Failed to parse your plan| );
}
# save to database
my ( $id, $delete_key ) = $self->database->save_with_random_name( $title, $plan, $is_public, $is_anon, $self->session->{ 'user' }, $parent_id, );
# redirect to /show/:id; the delete key is shown exactly once, via flash
$self->flash( delete_key => $delete_key );
return $self->redirect_to( 'show', id => $id );
}
# Delete a plan identified by its id and the secret delete key handed out
# at submission time.
sub delete {
    my $self = shift;

    # Route placeholders "/:id" and "/:key".
    my $id  = $self->stash->{ id }  // '';
    my $key = $self->stash->{ key } // '';

    # Both must be plain alphanumeric tokens; otherwise go home.
    for my $token ( $id, $key ) {
        return $self->redirect_to( 'new-explain' ) unless $token =~ m{\A[a-zA-Z0-9]+\z};
    }

    my $delete_worked = $self->database->delete_plan( $id, $key );
    return $self->redirect_to( 'new-explain', status => 404 ) unless $delete_worked;

    $self->flash( message => sprintf( 'Plan %s deleted.', $id ) );
    return $self->redirect_to( 'new-explain' );
}
# Render a stored plan: parse it with Pg::Explain, collect per-node-type
# and per-table statistics, and push everything the template needs into
# the stash.
sub show {
my $self = shift;
# value of "/:id" param
my $id = defined $self->stash->{ id } ? $self->stash->{ id } : '';
# missing or invalid
return $self->redirect_to( 'new-explain' ) unless $id =~ m{\A[a-zA-Z0-9]+\z};
# get plan source from database
my ( $plan, $title ) = $self->database->get_plan( $id );
# not found in database
return $self->redirect_to( 'new-explain', status => 404 ) unless $plan;
# make explanation
my $explain = eval { Pg::Explain->new( source => $plan ); };
# plans are validated before save, so this should never happen
if ( $EVAL_ERROR ) {
$self->app->log->error( $EVAL_ERROR );
return $self->redirect_to( 'new-explain' );
}
# validate explain
eval { $explain->top_node; };
# as above, should never happen
if ( $EVAL_ERROR ) {
$self->app->log->error( $EVAL_ERROR );
return $self->redirect_to( 'new-explain' );
}
# Get stats from plan: walk every node iteratively (including CTEs,
# init-plans and sub-plans), using @elements as the work queue.
my $stats = { 'tables' => {} };
my @elements = ( $explain->top_node );
while ( my $e = shift @elements ) {
push @elements, values %{ $e->ctes } if $e->ctes;
push @elements, @{ $e->sub_nodes } if $e->sub_nodes;
push @elements, @{ $e->initplans } if $e->initplans;
push @elements, @{ $e->subplans } if $e->subplans;
# Per node-type counters: occurrences and total exclusive time.
$stats->{ 'nodes' }->{ $e->type }->{ 'count' }++;
$stats->{ 'nodes' }->{ $e->type }->{ 'time' } += $e->total_exclusive_time if $e->total_exclusive_time;
# Table-level stats only apply to nodes that scan a named table.
next unless $e->scan_on;
next unless $e->scan_on->{ 'table_name' };
$stats->{ 'tables' }->{ $e->scan_on->{ 'table_name' } } ||= {};
my $S = $stats->{ 'tables' }->{ $e->scan_on->{ 'table_name' } };
$S->{ $e->{ 'type' } }->{ 'count' }++;
$S->{ ':total' }->{ 'count' }++;
if ( defined( my $t = $e->total_exclusive_time ) ) {
$S->{ $e->type }->{ 'time' } += $t;
$S->{ ':total' }->{ 'time' } += $t;
}
}
# put explain and title to stash
$self->stash->{ explain } = $explain;
$self->stash->{ title } = $title;
$self->stash->{ stats } = $stats;
# Fetch path of optimizations (ancestor chain) and direct children.
$self->stash->{ optimization_path } = $self->database->get_optimization_path( $id );
$self->stash->{ suboptimizations } = $self->database->get_optimizations_for( $id );
# render will be called automatically
return;
}
# Public history listing, paged by date.
sub history {
    my $self = shift;

    # Reject dates from before the site existed.
    my $date = $self->param( 'date' );
    return $self->redirect_to( '/' ) if $date && $date lt '2008-12-01';

    # Hand the paged result set straight to the template.
    $self->stash( rs => $self->database->get_public_list_paged( $date ) );
    return;
}
# Contact form: render it, or validate the sender address, mail the
# message to the site owner, and redirect (GET after POST).
sub contact {
my $self = shift;
# nothing to do... (no message submitted -> just render the form)
return unless $self->req->param( 'message' );
# invalid email address
return $self->render( error => 'Invalid email address' )
unless Email::Valid->address( $self->req->param( 'email' ) || '' );
# send the message, annotated with sender address and client details
$self->send_mail(
{
msg => sprintf(
"\nMessage from: %s <%s>" . "\nPosted from: %s with %s" . "\n****************************************\n\n" . "%s",
$self->req->param( 'name' ) || '',
$self->req->param( 'email' ),
$self->tx->remote_address,
$self->req->headers->user_agent,
$self->req->param( 'message' )
)
}
);
# mail sent message
$self->flash( message => 'Mail sent' );
# get after post
$self->redirect_to( 'contact' );
}
# Admin-only diagnostics page: versions of every loaded Perl module plus
# the interpreter version and binary path.
sub info {
    my $self = shift;

    # The added 'return's matter: previously the module scan below still
    # ran (after the redirect) for non-admin visitors.
    return $self->redirect_to( 'new-explain' ) unless $self->session->{ 'user' };
    return $self->redirect_to( 'new-explain' ) unless $self->session->{ 'admin' };

    # Collect name/version pairs for every module in %INC.
    my @versions = ();
    for my $module ( sort keys %INC ) {
        # Skip entries loaded by path (do/require of plain files).
        next if $module =~ m{^\.?/};
        $module =~ s/\.pm$//;
        $module =~ s#/#::#g;
        push @versions, {
            'module'  => $module,
            'version' => $module->VERSION,
        };
    }
    $self->stash( 'modules' => \@versions );
    $self->stash( 'perl' => {
            'version' => $PERL_VERSION,
            'binary'  => $Config{'perlpath'} . $Config{'_exe'},
        }
    );
}
# Static help page; the template does all the work.
sub help {
    my $self = shift;
    return $self->render;
}

1;
| depesz/explain.depesz.com | lib/Explain/Controller.pm | Perl | bsd-3-clause | 12,772 |
# DMOZ::Mapper::SingleNodeMapper -- a DMOZ::Mapper that follows exactly one
# target path down the category hierarchy, collecting requested fields along
# the way, and fires a callback once when the full path has been matched.
package DMOZ::Mapper::SingleNodeMapper;
use strict;
use warnings;
# computes perplexity of a set of DMOZ entries wrt specified model
# NOTE(review): the comment above looks inherited from a different mapper;
# this class only visits one path and runs a callback there -- confirm.
use DMOZ::Mapper;
use base qw(DMOZ::Mapper);
# constructor
# Args: target path string ("A/B/C"), arrayref of field names to collect at
# each matched node, and a callback invoked as $func->($path, $data) when
# the target node is reached.
sub new {
my $that = shift;
my $target_path = shift;
my $wanted_fields = shift;
my $func = shift;
# instantiate super class
my $ref = $that->SUPER::new();
my @path_components = split /\//, $target_path;
$ref->{_target_path } = \@path_components;
$ref->{_path_cursor} = 0;
$ref->{_wanted_fields} = $wanted_fields;
$ref->{_func} = $func;
return $ref;
}
# begin method -- called once before traversal; stores the hierarchy and
# resets the fired-callback flag.
sub begin {
my $this = shift;
my $hierarchy = shift;
$this->{_hierarchy} = $hierarchy;
$this->{_evaluated_function} = 0;
}
# pre process method -- called on the way down. Returns the wanted field
# values while the node is still on the target path, or undef to prune any
# other branch. Once the whole path has been consumed, runs the callback
# exactly once.
sub pre_process {
my $this = shift;
my $node = shift;
my $path = shift;
my $data = shift;
if ( $this->{_path_cursor} >= scalar(@{$this->{_target_path}}) ) {
# evaluate the target function (only the first time we get here)
if ( ! $this->{_evaluated_function} ) {
$this->{_func}->($path,$data);
$this->{_evaluated_function} = 1;
}
return undef;
}
# The expected node name is the target path truncated at the cursor depth.
my @expected_name_components;
for (my $i=0; $i<=$this->{_path_cursor}; $i++) {
push @expected_name_components, $this->{_target_path}->[$i];
}
my $expected_name = join('/',@expected_name_components);
if ( $node->name() ne $expected_name ) {
return undef;
}
my @wanted_data;
foreach my $field (@{ $this->{_wanted_fields}}) {
push @wanted_data, $node->get($field);
}
# NOTE(review): _path_cursor only ever advances and is never rewound in
# post_process, so an instance appears to be single-use -- confirm.
$this->{_path_cursor}++;
return \@wanted_data;
}
# post process method -- called on the way back up; intentionally a no-op.
sub post_process {
my $this = shift;
my $node = shift;
my $path = shift;
my $data = shift;
my $recursion_outputs = shift;
# nothing
}
# end method -- called once after traversal; no cleanup needed.
sub end {
my $this = shift;
my $hierarchy = shift;
}
1;
| ypetinot/web-summarization | models/topic-models/confusius-tm/src/DMOZ/Mapper/SingleNodeMapper.pm | Perl | apache-2.0 | 1,895 |
#!/usr/bin/perl -w
use strict;
use lib ".";
use PAsm;
my $USAGE = "confirm-starts.pl reads.fa graph.txt\n";

# First argument is the tab-separated reads file; any remaining arguments
# (the graph files) are consumed later through <>.
my $readfasta = shift @ARGV or die $USAGE;

# Three-argument open with a lexical handle; the old two-argument bareword
# form ("< $readfasta") allowed mode injection through the filename.
open my $reads_fh, '<', $readfasta or die "Can't open $readfasta ($!)\n";

## Load the reads: each line is "<id>\t<sequence>".
my %readseq;
while ( my $line = <$reads_fh> )
{
    chomp $line;
    my ( $id, $seq ) = split /\t/, $line;
    $readseq{$id} = $seq;
}
close $reads_fh or die "Can't close $readfasta ($!)\n";

my $numreads = scalar keys %readseq;
print STDERR "Loaded $numreads reads\n";
## Now scan the graph (remaining files in @ARGV, via <>), confirming that
## every R5 read placement matches the node's reconstructed sequence.
my $nodeschecked = 0;
my $readschecked = 0;
my $fwd_good = 0;
my $fwd_err  = 0;
my $rc_good  = 0;
my $rc_err   = 0;

while (<>)
{
    chomp;

    my $node = {};
    my @vals = split /\t/, $_;
    my $nodeid  = shift @vals;
    my $msgtype = shift @vals;    ## nodemsg

    if ($msgtype eq $NODEMSG)
    {
        parse_node($node, \@vals);
    }
    else
    {
        die "Unknown msg: $_\n";
    }

    $nodeschecked++;
    my $nodestr = node_str($node);

    next unless exists $node->{$R5};

    # Each R5 entry is "<readid>:<offset>"; a leading '~' on the id marks
    # a reverse-complement alignment. The loop variable was previously
    # called $readstr and then shadowed by an inner "my $readstr", which
    # silently gave the name two different meanings; renamed for clarity.
    foreach my $readref (@{$node->{$R5}})
    {
        $readschecked++;

        my ($read, $offset) = split /:/, $readref;

        my $rc = 0;
        my $readstr;      # full read sequence
        my $nodestr_s;    # node substring to compare
        my $readstr_s;    # read substring to compare

        if ($read =~ /^~/)
        {
            $rc = 1;
            $read =~ s/^~//;
            $readstr   = $readseq{$read};
            # Reverse-complement placement: compare the read's prefix
            # against the reverse complement of the node's prefix.
            $nodestr_s = substr($nodestr, 0, $offset+1);
            $readstr_s = substr($readstr, 0, length($nodestr_s));
            $nodestr_s = substr(rc($nodestr_s), 0, length($readstr_s));
        }
        else
        {
            $readstr   = $readseq{$read};
            $nodestr_s = substr($nodestr, $offset, length($readstr));
            $readstr_s = substr($readstr, 0, length($nodestr_s));
        }

        # Case-insensitive comparison: sequences may mix upper/lower case.
        if (uc($nodestr_s) ne uc($readstr_s))
        {
            print ">$read $nodeid $rc $offset fail\n";
            print "n: $nodestr_s $nodestr\n";
            print "r: $readstr_s $readstr\n";
            if ($rc) { $rc_err++; } else { $fwd_err++; }
        }
        else
        {
            if ($rc) { $rc_good++; } else { $fwd_good++; }
        }
    }
}

print STDERR "Checked $readschecked reads in $nodeschecked nodes\n";
print STDERR "fwd: $fwd_good good $fwd_err err\n";
print STDERR "rev: $rc_good good $rc_err err\n";
| julianlau/contrail-emr | support/check-starts.pl | Perl | apache-2.0 | 2,265 |
# Result class for the EC2 DescribeIdFormat API call.
package Paws::EC2::DescribeIdFormatResult;
use Moose;
# One IdFormat entry per resource type; serialized on the wire as 'statusSet'.
has Statuses => (is => 'ro', isa => 'ArrayRef[Paws::EC2::IdFormat]', request_name => 'statusSet', traits => ['NameInRequest',]);
# AWS request id, useful when contacting support or debugging.
has _request_id => (is => 'ro', isa => 'Str');
1;
### main pod documentation begin ###
=head1 NAME
Paws::EC2::DescribeIdFormatResult
=head1 ATTRIBUTES
=head2 Statuses => ArrayRef[L<Paws::EC2::IdFormat>]
Information about the ID format for the resource.
=head2 _request_id => Str
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/EC2/DescribeIdFormatResult.pm | Perl | apache-2.0 | 487 |
#!/usr/bin/env perl

# ARM assembler distiller by <appro>.

my $flavour = shift;
my $output  = shift;
# BUGFIX: the original "open STDOUT,\">$output\" || die ..." bound || to the
# filename string (always true), so a failed open was silently ignored.
# Low-precedence "or" makes the die actually fire.
open STDOUT, '>', $output or die "can't open $output: $!";

$flavour = "linux32" if (!$flavour or $flavour eq "void");

my %GLOBALS;    # original symbol name -> platform-mangled name
my $dotinlocallabels = ($flavour =~ /linux/) ? 1 : 0;
################################################################
# directives which need special treatment on different platforms
################################################################
# Each handler receives the directive's arguments and returns the text to
# emit for the current $flavour (empty string suppresses the directive).
# They are looked up by name via string eval in the main loop below.
my $arch = sub {
    if ($flavour =~ /linux/) { ".arch\t" . join(',', @_); }
    else                     { ""; }
};
my $fpu = sub {
    if ($flavour =~ /linux/) { ".fpu\t" . join(',', @_); }
    else                     { ""; }
};
my $hidden = sub {
    if ($flavour =~ /ios/) { ".private_extern\t" . join(',', @_); }
    else                   { ".hidden\t" . join(',', @_); }
};
my $comm = sub {
    my @args   = split(/,\s*/, shift);
    my $name   = $args[0];    # was @args[0]: one-element slice, warns under -w
    my $global = \$GLOBALS{$name};
    my $ret;

    if ($flavour =~ /ios32/) {
        # Mach-O: emit the common symbol with a leading underscore plus a
        # non-lazy symbol pointer under the original name.
        $ret  = ".comm\t_$name,$args[1]\n";
        $ret .= ".non_lazy_symbol_pointer\n";
        $ret .= "$name:\n";
        $ret .= ".indirect_symbol\t_$name\n";
        $ret .= ".long\t0";
        $name = "_$name";
    } else {
        $ret = ".comm\t" . join(',', @args);
    }

    $$global = $name;    # remember the mangled name for later references
    $ret;
};
my $globl = sub {
    my $name   = shift;
    my $global = \$GLOBALS{$name};
    my $ret;

    SWITCH: for ($flavour) {
        /ios/ && do {
            $name = "_$name";    # Mach-O prefixes globals with an underscore
            last;
        };
    }

    $ret = ".globl $name" if (!$ret);
    $$global = $name;
    $ret;
};
my $global = $globl;
my $extern = sub {
    &$globl(@_);
    return;    # return nothing
};
my $type = sub {
    if ($flavour =~ /linux/) { ".type\t" . join(',', @_); }
    else                     { ""; }
};
my $size = sub {
    if ($flavour =~ /linux/) { ".size\t" . join(',', @_); }
    else                     { ""; }
};
my $inst = sub {
    if ($flavour =~ /linux/) { ".inst\t" . join(',', @_); }
    else                     { ".long\t" . join(',', @_); }
};
my $asciz = sub {
    my $line = join(",", @_);
    # Expand a quoted string into explicit NUL-terminated bytes.
    if ($line =~ /^"(.*)"$/)
         { ".byte " . join(",", unpack("C*", $1), 0) . "\n.align 2"; }
    else { ""; }
};
# Expand a register range into a comma-separated list, e.g.
# range('r', '', 0, 3) -> "r0,r1,r2,r3" (suffix goes after each number).
sub range {
    my ($reg, $suffix, $first, $last) = @_;
    my @expanded = map { "$reg$_$suffix" } ($first .. $last);
    return join(',', @expanded);
}
# Expand shorthand inside one assembler line:
#  - stops at a comment leader (@ or //)
#  - expands register ranges inside {...} lists, e.g. {q0-q3} -> {q0,q1,q2,q3}
#  - skips over quoted strings untouched
#  - finally rewrites any symbol that was platform-mangled (%GLOBALS)
sub expand_line {
    my $line = shift;
    my @ret = ();    # NOTE(review): never filled or returned -- apparently vestigial

    pos($line)=0;

    # Walk the line with \G-anchored matches so pos() tracks our position.
    while ($line =~ m/\G[^@\/\{\"]*/g) {
        if ($line =~ m/\G(@|\/\/|$)/gc) {
            last;    # comment or end of line: stop scanning
        }
        elsif ($line =~ m/\G\{/gc) {
            # Inside a {...} register list: expand an "r0-r3"-style range in
            # place, then restore pos() (the s/// resets it) and skip to "}".
            my $saved_pos = pos($line);
            $line =~ s/\G([rdqv])([0-9]+)([^\-]*)\-\1([0-9]+)\3/range($1,$3,$2,$4)/e;
            pos($line) = $saved_pos;
            $line =~ m/\G[^\}]*\}/g;
        }
        elsif ($line =~ m/\G\"/gc) {
            $line =~ m/\G[^\"]*\"/g;    # skip over a quoted string
        }
    }

    # Substitute platform-mangled global names (e.g. leading _ on iOS).
    $line =~ s/\b(\w+)/$GLOBALS{$1} or $1/ge;

    return $line;
}
# Main pass: read assembler source, translate directives/mnemonics through the
# handler closures defined above, and emit the platform-specific result.
while($line=<>) {    # NOTE(review): $line is an undeclared package global (file has no "use strict")
    # Pass comment-only lines straight through.
    if ($line =~ m/^\s*(#|@|\/\/)/) { print $line; next; }

    $line =~ s|/\*.*\*/||;    # get rid of C-style comments...
    $line =~ s|^\s+||;        # ... and skip white spaces in beginning...
    $line =~ s|\s+$||;        # ... and at the end

    {
        # Normalize local-label spelling; re-add the leading dot on Linux.
        $line =~ s|[\b\.]L(\w{2,})|L$1|g;    # common denominator for Locallabel
        $line =~ s|\bL(\w{2,})|\.L$1|g if ($dotinlocallabels);
    }

    {
        # Peel off a leading "label:" and print it, using the mangled name
        # from %GLOBALS when the symbol was renamed for this platform.
        $line =~ s|(^[\.\w]+)\:\s*||;
        my $label = $1;
        if ($label) {
            printf "%s:",($GLOBALS{$label} or $label);
        }
    }

    if ($line !~ m/^[#@]/) {
        # Split off the mnemonic/directive; $c remembers whether it had a
        # leading dot (no dot -> plain instruction, indent with a tab).
        $line =~ s|^\s*(\.?)(\S+)\s*||;
        my $c = $1; $c = "\t" if ($c eq "");
        my $mnemonic = $2;
        my $opcode;
        # Look up a handler closure by name (string eval sees the lexical
        # closures above), e.g. ".globl" -> $globl, "word.arch" -> $word_arch.
        if ($mnemonic =~ m/([^\.]+)\.([^\.]+)/) {
            $opcode = eval("\$$1_$2");
        } else {
            $opcode = eval("\$$mnemonic");
        }

        my $arg=expand_line($line);

        if (ref($opcode) eq 'CODE') {
            $line = &$opcode($arg);    # handler produces the replacement text
        } elsif ($mnemonic) {
            $line = $c.$mnemonic;      # no handler: emit verbatim
            $line.= "\t$arg" if ($arg);
        }
    }

    print $line if ($line);
    print "\n";
}

close STDOUT;
| GaloisInc/hacrypto | src/C/openssl/openssl-fips-ecp-2.0.11/crypto/perlasm/arm-xlate.pl | Perl | bsd-3-clause | 3,673 |
interface(root).
interface(node).
interfaceAttribute(node, w).
interfaceAttribute(node, lineh).
interfaceAttribute(node, canvas).
interfaceAttribute(node, absy).
interfaceAttribute(node, rely).
interfaceAttribute(node, h).
interfaceAttribute(node, x).
interfaceAttribute(node, render).
class(top, root).
class(leaf, node).
class(hbox, node).
class(vbox, node).
class(wrapbox, node).
classChild(top, child, node).
classChild(hbox, childs, node).
classChild(vbox, childs, node).
classChild(wrapbox, childs, node).
classField(gensymattrib, gensymattrib) :- false.
classField(top, gensymattrib).
classField(leaf, gensymattrib).
classField(hbox, gensymattrib).
classField(vbox, gensymattrib).
classField(wrapbox, gensymattrib).
classField(wrapbox, width).
interfaceField(root, w).
interfaceField(root, display).
interfaceField(root, refname).
interfaceField(root, h).
interfaceField(node, floating).
interfaceField(node, display).
interfaceField(node, refname).
assignment(top, child, lineh, child, h).
assignment(top, child, absy, self, gensymattrib).
assignment(top, child, rely, self, gensymattrib).
assignment(top, child, canvas, child, w).
assignment(top, child, canvas, child, h).
assignment(top, child, x, self, gensymattrib).
assignment(leaf, self, w, self, gensymattrib).
assignment(leaf, self, h, self, gensymattrib).
assignment(leaf, self, render, self, w).
assignment(leaf, self, render, self, canvas).
assignment(leaf, self, render, self, absy).
assignment(leaf, self, render, self, h).
assignment(leaf, self, render, self, x).
assignment(hbox, self, render, self, w).
assignment(hbox, self, render, self, canvas).
assignment(hbox, self, render, self, absy).
assignment(hbox, self, render, self, h).
assignment(hbox, self, render, self, x).
assignment(hbox, self, numchilds_step, self, gensymattrib).
assignment(hbox, self, numchilds_last, self, numchilds_step).
assignment(hbox, self, numchilds, self, numchilds_step).
assignment(hbox, self, w_step, self, numchilds_last).
assignment(hbox, self, w_step, self, childs_w_step).
assignment(hbox, self, w_step, self, gensymattrib).
assignment(hbox, self, w_last, self, w_step).
assignment(hbox, self, w, self, w_step).
assignment(hbox, self, h_step, self, childs_h_step).
assignment(hbox, self, h_step, self, gensymattrib).
assignment(hbox, self, h_last, self, h_step).
assignment(hbox, self, h, self, h_step).
assignment(hbox, self, childs_rely_step, self, gensymattrib).
assignment(hbox, self, childs_rely_last, self, childs_rely_step).
assignment(hbox, childs, rely, self, childs_rely_step).
assignment(hbox, self, childs_absy_step, self, absy).
assignment(hbox, self, childs_absy_step, self, childs_rely_step).
assignment(hbox, self, childs_absy_step, self, gensymattrib).
assignment(hbox, self, childs_absy_last, self, childs_absy_step).
assignment(hbox, childs, absy, self, childs_absy_step).
assignment(hbox, self, childs_x_step, self, x).
assignment(hbox, self, childs_x_step, self, childs_w_step).
assignment(hbox, self, childs_x_step, self, gensymattrib).
assignment(hbox, self, childs_x_last, self, childs_x_step).
assignment(hbox, childs, x, self, childs_x_step).
assignment(hbox, self, childs_canvas_step, self, render).
assignment(hbox, self, childs_canvas_step, self, gensymattrib).
assignment(hbox, self, childs_canvas_last, self, childs_canvas_step).
assignment(hbox, childs, canvas, self, childs_canvas_step).
assignment(hbox, self, childs_lineh_step, self, gensymattrib).
assignment(hbox, self, childs_lineh_last, self, childs_lineh_step).
assignment(hbox, childs, lineh, self, childs_lineh_step).
assignment(hbox, self, childs_h_step, childs, h).
assignment(hbox, self, childs_w_step, childs, w).
assignment(vbox, self, render, self, w).
assignment(vbox, self, render, self, canvas).
assignment(vbox, self, render, self, absy).
assignment(vbox, self, render, self, h).
assignment(vbox, self, render, self, x).
assignment(vbox, self, numchilds_step, self, gensymattrib).
assignment(vbox, self, numchilds_last, self, numchilds_step).
assignment(vbox, self, numchilds, self, numchilds_step).
assignment(vbox, self, h_step, self, numchilds_last).
assignment(vbox, self, h_step, self, childs_h_step).
assignment(vbox, self, h_step, self, gensymattrib).
assignment(vbox, self, h_last, self, h_step).
assignment(vbox, self, h, self, h_step).
assignment(vbox, self, w_step, self, childs_w_step).
assignment(vbox, self, w_step, self, gensymattrib).
assignment(vbox, self, w_last, self, w_step).
assignment(vbox, self, w, self, w_step).
assignment(vbox, self, childs_x_step, self, x).
assignment(vbox, self, childs_x_step, self, gensymattrib).
assignment(vbox, self, childs_x_last, self, childs_x_step).
assignment(vbox, childs, x, self, childs_x_step).
assignment(vbox, self, childs_rely_step, self, childs_h_step).
assignment(vbox, self, childs_rely_step, self, gensymattrib).
assignment(vbox, self, childs_rely_last, self, childs_rely_step).
assignment(vbox, childs, rely, self, childs_rely_step).
assignment(vbox, self, childs_absy_step, self, absy).
assignment(vbox, self, childs_absy_step, self, childs_rely_step).
assignment(vbox, self, childs_absy_step, self, gensymattrib).
assignment(vbox, self, childs_absy_last, self, childs_absy_step).
assignment(vbox, childs, absy, self, childs_absy_step).
assignment(vbox, self, childs_canvas_step, self, render).
assignment(vbox, self, childs_canvas_step, self, gensymattrib).
assignment(vbox, self, childs_canvas_last, self, childs_canvas_step).
assignment(vbox, childs, canvas, self, childs_canvas_step).
assignment(vbox, self, childs_lineh_step, self, gensymattrib).
assignment(vbox, self, childs_lineh_last, self, childs_lineh_step).
assignment(vbox, childs, lineh, self, childs_lineh_step).
assignment(vbox, self, childs_h_step, childs, h).
assignment(vbox, self, childs_w_step, childs, w).
assignment(wrapbox, self, render, self, w).
assignment(wrapbox, self, render, self, canvas).
assignment(wrapbox, self, render, self, absy).
assignment(wrapbox, self, render, self, h).
assignment(wrapbox, self, render, self, x).
assignment(wrapbox, self, w, self, width).
assignment(wrapbox, self, childs_x_step, self, x).
assignment(wrapbox, self, childs_x_step, self, w).
assignment(wrapbox, self, childs_x_step, self, childs_w_step).
assignment(wrapbox, self, childs_x_step, self, childs_w_step).
assignment(wrapbox, self, childs_x_step, self, x).
assignment(wrapbox, self, childs_x_step, self, gensymattrib).
assignment(wrapbox, self, childs_x_last, self, childs_x_step).
assignment(wrapbox, childs, x, self, childs_x_step).
assignment(wrapbox, self, childs_lineh_step, self, childs_x_step).
assignment(wrapbox, self, childs_lineh_step, self, childs_h_step).
assignment(wrapbox, self, childs_lineh_step, self, x).
assignment(wrapbox, self, childs_lineh_step, self, gensymattrib).
assignment(wrapbox, self, childs_lineh_last, self, childs_lineh_step).
assignment(wrapbox, childs, lineh, self, childs_lineh_step).
assignment(wrapbox, self, childs_rely_step, self, childs_x_step).
assignment(wrapbox, self, childs_rely_step, self, childs_lineh_step).
assignment(wrapbox, self, childs_rely_step, self, x).
assignment(wrapbox, self, childs_rely_step, self, gensymattrib).
assignment(wrapbox, self, childs_rely_last, self, childs_rely_step).
assignment(wrapbox, childs, rely, self, childs_rely_step).
assignment(wrapbox, self, childs_absy_step, self, absy).
assignment(wrapbox, self, childs_absy_step, self, childs_rely_step).
assignment(wrapbox, self, childs_absy_step, self, gensymattrib).
assignment(wrapbox, self, childs_absy_last, self, childs_absy_step).
assignment(wrapbox, childs, absy, self, childs_absy_step).
assignment(wrapbox, self, childs_canvas_step, self, render).
assignment(wrapbox, self, childs_canvas_step, self, gensymattrib).
assignment(wrapbox, self, childs_canvas_last, self, childs_canvas_step).
assignment(wrapbox, childs, canvas, self, childs_canvas_step).
assignment(wrapbox, self, h_step, self, childs_lineh_step).
assignment(wrapbox, self, h_step, self, childs_rely_step).
assignment(wrapbox, self, h_step, self, gensymattrib).
assignment(wrapbox, self, h_last, self, h_step).
assignment(wrapbox, self, h, self, h_step).
assignment(wrapbox, self, childs_h_step, childs, h).
assignment(wrapbox, self, childs_w_step, childs, w).
classAttribute(hbox, numchilds).
classAttribute(hbox, childs_h_step).
classAttribute(hbox, childs_absy_step).
classAttribute(hbox, childs_canvas_step).
classAttribute(hbox, childs_w_step).
classAttribute(hbox, childs_x_step).
classAttribute(hbox, childs_rely_step).
classAttribute(hbox, childs_lineh_step).
classAttribute(hbox, childs_lineh_step).
classAttribute(hbox, childs_lineh_last).
classAttribute(hbox, childs_x_step).
classAttribute(hbox, childs_x_last).
classAttribute(hbox, childs_absy_step).
classAttribute(hbox, childs_absy_last).
classAttribute(hbox, childs_rely_step).
classAttribute(hbox, childs_rely_last).
classAttribute(hbox, h_step).
classAttribute(hbox, h_last).
classAttribute(hbox, childs_canvas_step).
classAttribute(hbox, childs_canvas_last).
classAttribute(hbox, numchilds_step).
classAttribute(hbox, numchilds_last).
classAttribute(hbox, w_step).
classAttribute(hbox, w_last).
classAttribute(vbox, numchilds).
classAttribute(vbox, childs_h_step).
classAttribute(vbox, childs_absy_step).
classAttribute(vbox, childs_canvas_step).
classAttribute(vbox, childs_w_step).
classAttribute(vbox, childs_x_step).
classAttribute(vbox, childs_rely_step).
classAttribute(vbox, childs_lineh_step).
classAttribute(vbox, childs_lineh_step).
classAttribute(vbox, childs_lineh_last).
classAttribute(vbox, childs_absy_step).
classAttribute(vbox, childs_absy_last).
classAttribute(vbox, childs_rely_step).
classAttribute(vbox, childs_rely_last).
classAttribute(vbox, childs_x_step).
classAttribute(vbox, childs_x_last).
classAttribute(vbox, h_step).
classAttribute(vbox, h_last).
classAttribute(vbox, childs_canvas_step).
classAttribute(vbox, childs_canvas_last).
classAttribute(vbox, numchilds_step).
classAttribute(vbox, numchilds_last).
classAttribute(vbox, w_step).
classAttribute(vbox, w_last).
classAttribute(wrapbox, childs_h_step).
classAttribute(wrapbox, childs_absy_step).
classAttribute(wrapbox, childs_canvas_step).
classAttribute(wrapbox, childs_w_step).
classAttribute(wrapbox, childs_x_step).
classAttribute(wrapbox, childs_lineh_step).
classAttribute(wrapbox, childs_rely_step).
classAttribute(wrapbox, childs_absy_step).
classAttribute(wrapbox, childs_absy_last).
classAttribute(wrapbox, childs_rely_step).
classAttribute(wrapbox, childs_rely_last).
classAttribute(wrapbox, childs_lineh_step).
classAttribute(wrapbox, childs_lineh_last).
classAttribute(wrapbox, childs_x_step).
classAttribute(wrapbox, childs_x_last).
classAttribute(wrapbox, h_step).
classAttribute(wrapbox, h_last).
classAttribute(wrapbox, childs_canvas_step).
classAttribute(wrapbox, childs_canvas_last).
| Superconductor/superconductor | compiler/attrib-gram-evaluator-swipl/Tutorial/output/float.pl | Perl | bsd-3-clause | 10,862 |
:- dynamic final_positive/2.
| frapontillo/ai-dialysis-uniba | prolog/runs_ktv_qb/rules_6.pl | Perl | apache-2.0 | 31 |
package Mojolicious::Command::daemon;
use Mojo::Base 'Mojolicious::Command';

use Getopt::Long qw(GetOptionsFromArray :config no_auto_abbrev no_ignore_case);
use Mojo::Server::Daemon;

# Command metadata shown by the command list: a one-line summary, and usage
# text extracted lazily from this file's POD SYNOPSIS.
has description => 'Start application with HTTP and WebSocket server';
has usage => sub { shift->extract_usage };
# Parse command line options into a Mojo::Server::Daemon and start it.
# Option values are applied to the server as they are parsed.
sub run {
    my ($self, @args) = @_;

    # Fresh daemon bound to this command's application.
    my $server = Mojo::Server::Daemon->new(app => $self->app);

    my @listen_locations;
    GetOptionsFromArray \@args,
        'b|backlog=i'            => sub { $server->backlog($_[1]) },
        'c|clients=i'            => sub { $server->max_clients($_[1]) },
        'i|inactivity-timeout=i' => sub { $server->inactivity_timeout($_[1]) },
        'l|listen=s'             => \@listen_locations,
        'p|proxy'                => sub { $server->reverse_proxy(1) },
        'r|requests=i'           => sub { $server->max_requests($_[1]) };

    # Only override the default listen locations when some were given.
    $server->listen(\@listen_locations) if @listen_locations;

    $server->run;
}
1;
=encoding utf8
=head1 NAME
Mojolicious::Command::daemon - Daemon command
=head1 SYNOPSIS
Usage: APPLICATION daemon [OPTIONS]
./myapp.pl daemon
./myapp.pl daemon -m production -l http://*:8080
./myapp.pl daemon -l http://127.0.0.1:8080 -l https://[::]:8081
./myapp.pl daemon -l 'https://*:443?cert=./server.crt&key=./server.key'
Options:
-b, --backlog <size> Listen backlog size, defaults to
SOMAXCONN
-c, --clients <number> Maximum number of concurrent
connections, defaults to 1000
-h, --help Show this summary of available options
--home <path> Path to home directory of your
application, defaults to the value of
MOJO_HOME or auto-detection
-i, --inactivity-timeout <seconds> Inactivity timeout, defaults to the
value of MOJO_INACTIVITY_TIMEOUT or 15
-l, --listen <location> One or more locations you want to
listen on, defaults to the value of
MOJO_LISTEN or "http://*:3000"
-m, --mode <name> Operating mode for your application,
defaults to the value of
MOJO_MODE/PLACK_ENV or "development"
-p, --proxy Activate reverse proxy support,
defaults to the value of
MOJO_REVERSE_PROXY
-r, --requests <number> Maximum number of requests per
keep-alive connection, defaults to 100
=head1 DESCRIPTION
L<Mojolicious::Command::daemon> starts applications with the
L<Mojo::Server::Daemon> backend.
This is a core command, which means it is always enabled, and its code is a
good example for learning to build new commands; you're welcome to fork it.
See L<Mojolicious::Commands/"COMMANDS"> for a list of commands that are
available by default.
=head1 ATTRIBUTES
L<Mojolicious::Command::daemon> inherits all attributes from
L<Mojolicious::Command> and implements the following new ones.
=head2 description
my $description = $daemon->description;
$daemon = $daemon->description('Foo');
Short description of this command, used for the command list.
=head2 usage
my $usage = $daemon->usage;
$daemon = $daemon->usage('Foo');
Usage information for this command, used for the help screen.
=head1 METHODS
L<Mojolicious::Command::daemon> inherits all methods from
L<Mojolicious::Command> and implements the following new ones.
=head2 run
$daemon->run(@ARGV);
Run this command.
=head1 SEE ALSO
L<Mojolicious>, L<Mojolicious::Guides>, L<http://mojolicious.org>.
=cut
| ashkanx/binary-mt | scripts/local/lib/perl5/Mojolicious/Command/daemon.pm | Perl | apache-2.0 | 3,843 |
# !!!!!!! DO NOT EDIT THIS FILE !!!!!!!
# This file is machine-generated by mktables from the Unicode
# database, Version 6.1.0. Any changes made here will be lost!
# !!!!!!! INTERNAL PERL USE ONLY !!!!!!!
# This file is for internal use by core Perl only. The format and even the
# name or existence of this file are subject to change without notice. Don't
# use it directly.
return <<'END';
2C80 2CFF
END
| Dokaponteam/ITF_Project | xampp/perl/lib/unicore/lib/Blk/Coptic.pl | Perl | mit | 421 |
#! /usr/bin/env perl
# Toggle a per-node marker file: if it exists, remove it and succeed;
# otherwise create it and report failure (exit 1).  This makes a DAGMan
# node fail on its first run and succeed on the rescue/recovery retry.
use strict;
use warnings;

my $nodename = shift @ARGV;
die "usage: $0 nodename\n" unless defined $nodename;

my $file = "job_dagman_rescue_recov-$nodename.works";
if (-e $file) {
    unlink $file or die "Unlink failed: $!";
    print "$nodename succeeded\n";
} else {
    # Create the marker directly instead of shelling out to touch(1).
    open my $fh, '>', $file or die "Can't create $file: $!";
    close $fh or die "Can't close $file: $!";
    print "$nodename failed\n";
    exit 1;
}
| djw8605/htcondor | src/condor_tests/job_dagman_rescue_recov-node_conditional.pl | Perl | apache-2.0 | 260 |
###############################################################################
#
# Package: NaturalDocs::Topics::Type
#
###############################################################################
#
# A class storing information about a <TopicType>.
#
###############################################################################
# This file is part of Natural Docs, which is Copyright (C) 2003-2008 Greg Valure
# Natural Docs is licensed under the GPL
use strict;
use integer;
package NaturalDocs::Topics::Type;
use NaturalDocs::DefineMembers 'NAME', 'Name()',
'PLURAL_NAME', 'PluralName()', 'SetPluralName()',
'INDEX', 'Index()', 'SetIndex()',
'SCOPE', 'Scope()', 'SetScope()',
'PAGE_TITLE_IF_FIRST', 'PageTitleIfFirst()', 'SetPageTitleIfFirst()',
'BREAK_LISTS', 'BreakLists()', 'SetBreakLists()',
'CLASS_HIERARCHY', 'ClassHierarchy()', 'SetClassHierarchy()',
'CAN_GROUP_WITH';
# Dependency: New() depends on the order of these and that there are no parent classes.
use base 'Exporter';
our @EXPORT = ('SCOPE_NORMAL', 'SCOPE_START', 'SCOPE_END', 'SCOPE_ALWAYS_GLOBAL');
#
# Constants: Members
#
# The object is implemented as a blessed arrayref, with the following constants as its indexes.
#
# NAME - The topic's name.
# PLURAL_NAME - The topic's plural name.
# INDEX - Whether the topic is indexed.
# SCOPE - The topic's <ScopeType>.
# PAGE_TITLE_IF_FIRST - Whether the topic becomes the page title if it's first in a file.
# BREAK_LISTS - Whether list topics should be broken into individual topics in the output.
# CLASS_HIERARCHY - Whether the topic is part of the class hierarchy.
# CAN_GROUP_WITH - The existence hashref of <TopicTypes> the type can be grouped with.
#
###############################################################################
# Group: Types
#
# Constants: ScopeType
#
# The possible values for <Scope()>.
#
# SCOPE_NORMAL - The topic stays in the current scope without affecting it.
# SCOPE_START - The topic starts a scope.
# SCOPE_END - The topic ends a scope, returning it to global.
# SCOPE_ALWAYS_GLOBAL - The topic is always global, but it doesn't affect the current scope.
#
use constant SCOPE_NORMAL => 1;
use constant SCOPE_START => 2;
use constant SCOPE_END => 3;
use constant SCOPE_ALWAYS_GLOBAL => 4;
###############################################################################
# Group: Functions
#
# Function: New
#
# Creates and returns a new object.
#
# Parameters:
#
# name - The topic name.
# pluralName - The topic's plural name.
# index - Whether the topic is indexed.
# scope - The topic's <ScopeType>.
# pageTitleIfFirst - Whether the topic becomes the page title if it's the first one in a file.
# breakLists - Whether list topics should be broken into individual topics in the output.
#
#
#   Function: New
#
#   Creates and returns a new object.  The positional parameters are stored
#   directly as the array elements, so their order must keep matching the
#   member-constant order (and there must be no parent classes).
#
sub New #(name, pluralName, index, scope, pageTitleIfFirst, breakLists)
    {
    my ($class, @fields) = @_;

    # Dependency: parameter order matches the member order; no parent classes.
    return bless [ @fields ], $class;
    };
#
# Functions: Accessors
#
# Name - Returns the topic name.
# PluralName - Returns the topic's plural name.
# SetPluralName - Replaces the topic's plural name.
# Index - Whether the topic is indexed.
# SetIndex - Sets whether the topic is indexed.
# Scope - Returns the topic's <ScopeType>.
# SetScope - Replaces the topic's <ScopeType>.
# PageTitleIfFirst - Returns whether the topic becomes the page title if it's first in the file.
# SetPageTitleIfFirst - Sets whether the topic becomes the page title if it's first in the file.
# BreakLists - Returns whether list topics should be broken into individual topics in the output.
# SetBreakLists - Sets whether list topics should be broken into individual topics in the output.
# ClassHierarchy - Returns whether the topic is part of the class hierarchy.
# SetClassHierarchy - Sets whether the topic is part of the class hierarchy.
#
#
# Function: CanGroupWith
#
# Returns whether the type can be grouped with the passed <TopicType>.
#
#
#   Function: CanGroupWith
#
#   Returns whether the type can be grouped with the passed <TopicType>.
#   CAN_GROUP_WITH holds an existence hashref, or undef when never set.
#
sub CanGroupWith #(TopicType type) -> bool
    {
    my ($self, $topicType) = @_;

    my $allowed = $self->[CAN_GROUP_WITH];
    return (defined $allowed && exists $allowed->{$topicType});
    };
#
# Function: SetCanGroupWith
#
# Sets the list of <TopicTypes> the type can be grouped with.
#
#
#   Function: SetCanGroupWith
#
#   Sets the list of <TopicTypes> the type can be grouped with, replacing
#   any previous set.  Stored as an existence hashref for O(1) lookup.
#
sub SetCanGroupWith #(TopicType[] types)
    {
    my ($self, $types) = @_;

    my %allowed = map { $_ => 1 } @$types;
    $self->[CAN_GROUP_WITH] = \%allowed;
    };
1;
| spaceappchile/team13 | web/vendor/jqplot/extras/NaturalDocs/Modules/NaturalDocs/Topics/Type.pm | Perl | mit | 5,141 |
package Archive::Tar::Constant;
BEGIN {
require Exporter;
$VERSION= '0.02';
@ISA = qw[Exporter];
@EXPORT = qw[
FILE HARDLINK SYMLINK CHARDEV BLOCKDEV DIR FIFO SOCKET UNKNOWN
BUFFER HEAD READ_ONLY WRITE_ONLY UNPACK PACK TIME_OFFSET ZLIB
BLOCK_SIZE TAR_PAD TAR_END ON_UNIX BLOCK CAN_READLINK MAGIC
TAR_VERSION UNAME GNAME CAN_CHOWN MODE CHECK_SUM UID GID
GZIP_MAGIC_NUM MODE_READ LONGLINK LONGLINK_NAME PREFIX_LENGTH
LABEL NAME_LENGTH STRIP_MODE ON_VMS
];
require Time::Local if $^O eq "MacOS";
}
use constant FILE => 0;
use constant HARDLINK => 1;
use constant SYMLINK => 2;
use constant CHARDEV => 3;
use constant BLOCKDEV => 4;
use constant DIR => 5;
use constant FIFO => 6;
use constant SOCKET => 8;
use constant UNKNOWN => 9;
use constant LONGLINK => 'L';
use constant LABEL => 'V';
use constant BUFFER => 4096;
use constant HEAD => 512;
use constant BLOCK => 512;
use constant BLOCK_SIZE => sub { my $n = int($_[0]/BLOCK); $n++ if $_[0] % BLOCK; $n * BLOCK };
use constant TAR_PAD => sub { my $x = shift || return; return "\0" x (BLOCK - ($x % BLOCK) ) };
use constant TAR_END => "\0" x BLOCK;
use constant READ_ONLY => sub { shift() ? 'rb' : 'r' };
use constant WRITE_ONLY => sub { $_[0] ? 'wb' . shift : 'w' };
use constant MODE_READ => sub { $_[0] =~ /^r/ ? 1 : 0 };
# Pointless assignment to make -w shut up
my $getpwuid; $getpwuid = 'unknown' unless eval { my $f = getpwuid (0); };
my $getgrgid; $getgrgid = 'unknown' unless eval { my $f = getgrgid (0); };
use constant UNAME => sub { $getpwuid || scalar getpwuid( shift() ) || '' };
use constant GNAME => sub { $getgrgid || scalar getgrgid( shift() ) || '' };
use constant UID => $>;
use constant GID => (split ' ', $) )[0];
use constant MODE => do { 0666 & (0777 & ~umask) };
use constant STRIP_MODE => sub { shift() & 0777 };
use constant CHECK_SUM => " ";
use constant UNPACK => 'A100 A8 A8 A8 A12 A12 A8 A1 A100 A6 A2 A32 A32 A8 A8 A155 x12';
use constant PACK => 'a100 a8 a8 a8 a12 a12 A8 a1 a100 a6 a2 a32 a32 a8 a8 a155 x12';
use constant NAME_LENGTH => 100;
use constant PREFIX_LENGTH => 155;
use constant TIME_OFFSET => ($^O eq "MacOS") ? Time::Local::timelocal(0,0,0,1,0,70) : 0;
use constant MAGIC => "ustar";
use constant TAR_VERSION => "00";
use constant LONGLINK_NAME => '././@LongLink';
### allow ZLIB to be turned off using ENV
### DEBUG only
use constant ZLIB => do { !$ENV{'PERL5_AT_NO_ZLIB'} and
eval { require IO::Zlib };
$ENV{'PERL5_AT_NO_ZLIB'} || $@ ? 0 : 1 };
use constant GZIP_MAGIC_NUM => qr/^(?:\037\213|\037\235)/;
use constant CAN_CHOWN => do { ($> == 0 and $^O ne "MacOS" and $^O ne "MSWin32") };
use constant CAN_READLINK => ($^O ne 'MSWin32' and $^O !~ /RISC(?:[ _])?OS/i and $^O ne 'VMS');
use constant ON_UNIX => ($^O ne 'MSWin32' and $^O ne 'MacOS' and $^O ne 'VMS');
use constant ON_VMS => $^O eq 'VMS';
1;
| leighpauls/k2cro4 | third_party/cygwin/lib/perl5/5.10/Archive/Tar/Constant.pm | Perl | bsd-3-clause | 3,403 |
use strict;

# Count records per (country, latitude, longitude) in each tab-separated
# input file, then print one row per location with a count column per file.
my %count;    # country -> lat -> lon -> filename -> record count

foreach my $f (@ARGV) {
    open(my $in, '<', $f) || die "$! $f";
    my $header = <$in>;
    chomp $header;    # BUGFIX: without chomp, a wanted column in last position never matched
    my @cols = split /\t/, $header;
    my ($country, $lat, $lon);
    foreach my $i (0 .. $#cols) {
        if ($cols[$i] eq "country")   { $country = $i }
        if ($cols[$i] eq "latitude")  { $lat = $i }
        if ($cols[$i] eq "longitude") { $lon = $i }
    }
    # BUGFIX: the original truth test ($country && $lat && $lon) wrongly died
    # when any wanted column sat at index 0; test definedness instead.
    unless (defined $country && defined $lat && defined $lon) {
        die "Can't find one of country/latitude/longitude columns in $f\n";
    }
    while (<$in>) {
        chomp;
        my @a = split /\t/;
        $count{ $a[$country] }->{ $a[$lat] }->{ $a[$lon] }->{$f}++;
    }
    close($in);
}

foreach my $c (keys %count) {
    foreach my $lat (keys %{ $count{$c} }) {
        foreach my $lon (keys %{ $count{$c}->{$lat} }) {
            print join("\t", $c, $lat, $lon);
            foreach my $f (@ARGV) {
                # Missing file entries mean zero records at this location.
                my $n = $count{$c}->{$lat}->{$lon}->{$f} || 0;
                print "\t$n";
            }
            print "\n";
        }
    }
}
# Test driver: verifies that a DB model class constructs, that its table
# exists, and that every getModel(...) call found in its source resolves.
# Invoked as: 400_model_basic_db.pl <index> <configPath> <sourceContext> <className>
use strict;
use lib '..';
use lib '../..';
use lib '../../platform/lib';
use lib '../../platform-utils/lib';
use lib '../lib';
use lib '../../lib';

use Test::More;
use Data::Dumper;
use Eldhelm::Test::Mock::Worker;
use Eldhelm::Util::FileSystem;

# sourceContext: path to the source file to scan; className: model under test
my ($index, $configPath, $sourceContext, $className) = @ARGV;

# Without a class to test there is nothing to plan.
unless ($className) {
    plan skip_all => 'This test can not run without a class context';
} else {
    plan 'no_plan';
}

my $config = do($configPath || '../../config.pl') or die 'Can not read config!';
# Mock worker supplies the runtime environment (config, DB handles) models expect.
my $worker = Eldhelm::Test::Mock::Worker->new(config => $config);

diag('Verifying construction');
my $model = Eldhelm::Util::Factory->instance($className);
ok($model->{table}, 'table defined');

# desc() hits the database; a throw here means the table is missing.
eval {
    my $desc = $model->desc;
    ok(ref $desc eq 'ARRAY', 'table exists');
    1;
} or do {
    diag($@);
    fail('table does not exist');
};

my $source = Eldhelm::Util::FileSystem->getFileContents($sourceContext);
my @allResources;

diag('Verifying models');
# Collect the distinct argument lists of every getModel(...) call in the source.
my %models = map { +$_ => 1 } $source =~ /getModel\((.*?)\)/g;
foreach (keys %models) {
    # The captured text is Perl source for the arg list; eval it into data.
    my $val = eval("[$_]");
    next unless $val;

    my ($name, $args) = @$val;
    push @allResources, $name;
    eval {
        my $m = $model->getModel($name, $args);
        ok($m, "Model $name ok");
    } or do {
        note($@);
        fail("Model $name is missing!");
    };
}
diag(Dumper \@allResources); | wastedabuser/eldhelm-platform | test/t/400_model_basic_db.pl | Perl | mit | 1,323 |
#
# Copyright 2014 Mike Cappella (mike@cappella.us)
package Utils::PIF 1.04;
our @ISA = qw(Exporter);
our @EXPORT = qw(create_pif_record create_pif_file add_new_field explode_normalized get_items_from_1pif typename_to_typekey);
#our @EXPORT_OK = qw();
use v5.14;
use utf8;
use strict;
use warnings;
use diagnostics;
use JSON::PP;
use UUID::Tiny ':std';
use Date::Calc qw(check_date);
use Utils::Utils qw(verbose debug bail pluralize unfold_and_chop myjoin);
# UUID string used by the 1PIF format to separate individual entries.
# DaveT: "You know what, I wish we had thought that far ahead and made this an Easter Egg of sorts.
# Alas, the truth isn't that exciting :-)"
my $agilebits_1pif_entry_sep_uuid_str = '***5642bee8-a5ff-11dc-8314-0800200c9a66***';
# Maps this converter's short type keys (e.g. 'login') to the 1PIF
# 'typeName' identifier and a human-readable title.
our %typeMap = (
    bankacct       => { typeName => 'wallet.financial.BankAccountUS',   title => 'Bank Account' },
    creditcard     => { typeName => 'wallet.financial.CreditCard',      title => 'Credit Card' },
    database       => { typeName => 'wallet.computer.Database',         title => 'Database' },
    driverslicense => { typeName => 'wallet.government.DriversLicense', title => 'Drivers License' },
    email          => { typeName => 'wallet.onlineservices.Email.v2',   title => 'Email' },
    identity       => { typeName => 'identities.Identity',              title => 'Identity' },
    login          => { typeName => 'webforms.WebForm',                 title => 'Login' },
    membership     => { typeName => 'wallet.membership.Membership',     title => 'Membership' },
    note           => { typeName => 'securenotes.SecureNote',           title => 'Secure Note' },
    outdoorlicense => { typeName => 'wallet.government.HuntingLicense', title => 'Outdoor License' },
    passport       => { typeName => 'wallet.government.Passport',       title => 'Passport' },
    rewards        => { typeName => 'wallet.membership.RewardProgram',  title => 'Reward Program' },
    server         => { typeName => 'wallet.computer.UnixServer',       title => 'Server' },
    socialsecurity => { typeName => 'wallet.government.SsnUS',          title => 'Social Security Number' },
    software       => { typeName => 'wallet.computer.License',          title => 'Software License' },
    wireless       => { typeName => 'wallet.computer.Router',           title => 'Wireless Router' },
);

# reverse index: maps typeName --> key from %typeMap above
my %typenames_to_typekeys;
$typenames_to_typekeys{$typeMap{$_}{'typeName'}} = $_ for keys %typeMap;
# Map a full 1PIF typeName (e.g. 'webforms.WebForm') back to its short key
# in %typeMap (e.g. 'login'); returns undef for unrecognized type names.
sub typename_to_typekey {
    my ($typename) = @_;
    return $typenames_to_typekeys{$typename};
}
# Section descriptors, encoded as 'name.Title': the part before the first
# '.' becomes the 1PIF section 'name' and the remainder its display 'title'
# (split apart when sections are emitted in create_pif_record). $sn_main
# ('.') denotes the unnamed top-level section.
our $sn_main         = '.';
our $sn_branchInfo   = 'branchInfo.Branch Information';
our $sn_contactInfo  = 'contactInfo.Contact Information';
our $sn_details      = 'details.Additional Details';
our $sn_smtp         = 'SMTP.SMTP';
our $sn_eContactInfo = 'Contact Information.Contact Information';
our $sn_adminConsole = 'admin_console.Admin Console';
our $sn_hostProvider = 'hosting_provider_details.Hosting Provider';
our $sn_customer     = 'customer.Customer';
our $sn_publisher    = 'publisher.Publisher';
our $sn_order        = 'order.Order';
our $sn_extra        = 'extra.More Information';
our $sn_address      = 'address.Address';
our $sn_internet     = 'internet.Internet Details';
our $sn_identity     = 'name.Identification';

# 1PIF field kinds: the 'k' value written for each section field.
our $k_string    = 'string';
our $k_menu      = 'menu';
our $k_concealed = 'concealed';
our $k_date      = 'date';
our $k_gender    = 'gender';
our $k_cctype    = 'cctype';
our $k_monthYear = 'monthYear';
our $k_phone     = 'phone';
our $k_url       = 'URL';
our $k_email     = 'email';
our $k_address   = 'address';

# Character sets used as 'clipboardFilter' attribute values in %pif_table.
my $f_nums      = join('', "0" .. "9");
my $f_alphanums = join('', $f_nums, "A" .. "Z", "a" .. "z");

# global 1PIF folder tree for records mapping UUIDs to folder names
my $gFolders = {};
# Per-type field definitions. Each row describes one exportable field:
#   [ n (key), section, k (kind), t (display label), %attrs... ]
# where %attrs are optional 1PIF field attributes ('guarded', 'generate',
# 'clipboardFilter', 'multiline') validated by check_pif_table().
my %pif_table = (
    # n=key       section    k=kind       t=text label
    bankacct => [
        [ 'bankName',       $sn_main,       $k_string,      'bank name' ],
        [ 'owner',          $sn_main,       $k_string,      'name on account' ],
        [ 'accountType',    $sn_main,       $k_menu,        'type' ],
        # implement converter functions for above 'menu' type?
        [ 'routingNo',      $sn_main,       $k_string,      'routing number' ],
        [ 'accountNo',      $sn_main,       $k_string,      'account number' ],
        [ 'swift',          $sn_main,       $k_string,      'SWIFT' ],
        [ 'iban',           $sn_main,       $k_string,      'IBAN' ],
        [ 'telephonePin',   $sn_main,       $k_concealed,   'PIN', 'generate'=>'off' ],
        [ 'branchPhone',    $sn_branchInfo, $k_string,      'phone' ],
        [ 'branchAddress',  $sn_branchInfo, $k_string,      'address' ],
    ],
    creditcard => [
        [ 'cardholder',     $sn_main,        $k_string,    'cardholder name', 'guarded'=>'yes' ],
        [ 'type',           $sn_main,        $k_cctype,    'type', 'guarded'=>'yes' ],
        [ 'ccnum',          $sn_main,        $k_string,    'number', 'guarded'=>'yes', 'clipboardFilter'=>$f_nums ],
        [ 'cvv',            $sn_main,        $k_concealed, 'verification number', 'guarded'=>'yes', 'generate'=>'off' ],
        [ 'expiry',         $sn_main,        $k_monthYear, 'expiry date', 'guarded'=>'yes' ],
        [ 'validFrom',      $sn_main,        $k_monthYear, 'valid from', 'guarded'=>'yes' ],
        [ 'bank',           $sn_contactInfo, $k_string,    'issuing bank' ],
        [ 'phoneLocal',     $sn_contactInfo, $k_phone,     'phone (local)' ],
        [ 'phoneTollFree',  $sn_contactInfo, $k_phone,     'phone (toll free)' ],
        [ 'phoneIntl',      $sn_contactInfo, $k_phone,     'phone (intl)' ],
        [ 'website',        $sn_contactInfo, $k_url,       'website' ],
        [ 'pin',            $sn_details,     $k_concealed, 'PIN', 'guarded'=>'yes' ],
        [ 'creditLimit',    $sn_details,     $k_string,    'credit limit' ],
        [ 'cashLimit',      $sn_details,     $k_string,    'cash withdrawal limit' ],
        [ 'interest',       $sn_details,     $k_string,    'interest rate' ],
        [ 'issuenumber',    $sn_details,     $k_string,    'issue number' ],
    ],
    database => [
        [ 'database_type',  $sn_main,       $k_menu,        'type' ],
        [ 'hostname',       $sn_main,       $k_string,      'server' ],
        [ 'port',           $sn_main,       $k_string,      'port' ],
        [ 'database',       $sn_main,       $k_string,      'database' ],
        [ 'username',       $sn_main,       $k_string,      'username' ],
        [ 'password',       $sn_main,       $k_concealed,   'password' ],
        [ 'sid',            $sn_main,       $k_string,      'SID' ],
        [ 'alias',          $sn_main,       $k_string,      'alias' ],
        [ 'options',        $sn_main,       $k_string,      'connection options' ],
    ],
    driverslicense => [
        [ 'fullname',       $sn_main,       $k_string,      'full name' ],
        [ 'address',        $sn_main,       $k_string,      'address' ],
        [ 'birthdate',      $sn_main,       $k_date,        'date of birth' ],
        # implement date conversions: explodes into key_dd, key_mm, key_yy; main value stored as integer
        [ 'sex',            $sn_main,       $k_gender,      'sex' ],
        # implement gender conversions
        [ 'height',         $sn_main,       $k_string,      'height' ],
        [ 'number',         $sn_main,       $k_string,      'number' ],
        [ 'class',          $sn_main,       $k_string,      'license class' ],
        [ 'conditions',     $sn_main,       $k_string,      'conditions / restrictions' ],
        [ 'state',          $sn_main,       $k_string,      'state' ],
        [ 'country',        $sn_main,       $k_string,      'country' ],
        [ 'expiry_date',    $sn_main,       $k_monthYear,   'expiry date' ],
    ],
    email => [
        [ 'pop_type',            $sn_main,         $k_menu,      'type' ],
        [ 'pop_username',        $sn_main,         $k_string,    'username' ],
        [ 'pop_server',          $sn_main,         $k_string,    'server' ],
        [ 'pop_port',            $sn_main,         $k_string,    'port number' ],
        [ 'pop_password',        $sn_main,         $k_concealed, 'password' ],
        [ 'pop_security',        $sn_main,         $k_menu,      'security' ],
        [ 'pop_authentication',  $sn_main,         $k_menu,      "auth\x{200b} method" ],
        [ 'smtp_server',         $sn_smtp,         $k_string,    'SMTP server' ],
        [ 'smtp_port',           $sn_smtp,         $k_string,    'port number' ],
        [ 'smtp_username',       $sn_smtp,         $k_string,    'username' ],
        [ 'smtp_password',       $sn_smtp,         $k_concealed, 'password' ],
        [ 'smtp_security',       $sn_smtp,         $k_menu,      'security' ],
        [ 'smtp_authentication', $sn_smtp,         $k_menu,      "auth\x{200b} method" ],
        # handle menu types above?
        [ 'provider',            $sn_eContactInfo, $k_string,    'provider' ],
        [ 'provider_website',    $sn_eContactInfo, $k_string,    'provider\'s website' ],
        [ 'phone_local',         $sn_eContactInfo, $k_string,    'phone (local)' ],
        [ 'phone_tollfree',      $sn_eContactInfo, $k_string,    'phone (toll free)' ],
    ],
    identity => [
        [ 'firstname',      $sn_identity,   $k_string,      'first name', 'guarded'=>'yes' ],
        [ 'initial',        $sn_identity,   $k_string,      'initial', 'guarded'=>'yes' ],
        [ 'lastname',       $sn_identity,   $k_string,      'last name', 'guarded'=>'yes' ],
        [ 'sex',            $sn_identity,   $k_menu,        'sex', 'guarded'=>'yes' ],
        [ 'birthdate',      $sn_identity,   $k_date,        'birth date', 'guarded'=>'yes' ],
        [ 'occupation',     $sn_identity,   $k_string,      'occupation', 'guarded'=>'yes' ],
        [ 'company',        $sn_identity,   $k_string,      'company', 'guarded'=>'yes' ],
        [ 'department',     $sn_identity,   $k_string,      'department', 'guarded'=>'yes' ],
        [ 'jobtitle',       $sn_identity,   $k_string,      'job title', 'guarded'=>'yes' ],
        [ 'address',        $sn_address,    $k_address,     'address', 'guarded'=>'yes' ],
        # k_address types expand to city, country, state, street, zip
        [ 'defphone',       $sn_address,    $k_phone,       'default phone', 'guarded'=>'yes' ],
        [ 'homephone',      $sn_address,    $k_phone,       'home', 'guarded'=>'yes' ],
        [ 'cellphone',      $sn_address,    $k_phone,       'cell', 'guarded'=>'yes' ],
        [ 'busphone',       $sn_address,    $k_phone,       'business', 'guarded'=>'yes' ],
        # *phone expands to *phone_local at top level (maybe due to phone type?)
        [ 'username',       $sn_internet,   $k_string,      'username', 'guarded'=>'yes' ],
        [ 'reminderq',      $sn_internet,   $k_string,      'reminder question', 'guarded'=>'yes' ],
        [ 'remindera',      $sn_internet,   $k_string,      'reminder answer', 'guarded'=>'yes' ],
        [ 'email',          $sn_internet,   $k_string,      'email', 'guarded'=>'yes' ],
        [ 'website',        $sn_internet,   $k_string,      'website', 'guarded'=>'yes' ],
        [ 'icq',            $sn_internet,   $k_string,      'ICQ', 'guarded'=>'yes' ],
        [ 'skype',          $sn_internet,   $k_string,      'skype', 'guarded'=>'yes' ],
        [ 'aim',            $sn_internet,   $k_string,      'AOL/AIM', 'guarded'=>'yes' ],
        [ 'yahoo',          $sn_internet,   $k_string,      'Yahoo', 'guarded'=>'yes' ],
        [ 'msn',            $sn_internet,   $k_string,      'MSN', 'guarded'=>'yes' ],
        [ 'forumsig',       $sn_internet,   $k_string,      'forum signature', 'guarded'=>'yes' ],
    ],
    # login rows use the bare 'T'/'P' pseudo-kinds and an undef section:
    # they are emitted into the webforms 'fields'/'URLs' arrays instead of
    # a named section (see create_pif_record).
    login => [
        [ 'username',       undef,          'T',            'username' ],
        [ 'password',       undef,          'P',            'password' ],
        [ 'url',            undef,          $k_string,      'website' ],
    ],
    membership => [
        [ 'org_name',       $sn_main,       $k_string,      'group' ],
        [ 'website',        $sn_main,       $k_url,         'website' ],
        [ 'phone',          $sn_main,       $k_phone,       'telephone' ],
        [ 'member_name',    $sn_main,       $k_string,      'member name' ],
        [ 'member_since',   $sn_main,       $k_monthYear,   'member since' ],
        [ 'expiry_date',    $sn_main,       $k_monthYear,   'expiry date' ],
        [ 'membership_no',  $sn_main,       $k_string,      'member ID' ],
        [ 'pin',            $sn_main,       $k_concealed,   'password' ],
    ],
    # Secure Notes carry no structured fields; everything lands in notesPlain.
    note => [
    ],
    outdoorlicense => [
        [ 'name',           $sn_main,       $k_string,      'full name' ],
        [ 'valid_from',     $sn_main,       $k_date,        'valid from' ],
        [ 'expires',        $sn_main,       $k_date,        'expires' ],
        [ 'game',           $sn_main,       $k_string,      'approved wildlife' ],
        [ 'quota',          $sn_main,       $k_string,      'maximum quota' ],
        [ 'state',          $sn_main,       $k_string,      'state' ],
        [ 'country',        $sn_main,       $k_string,      'country' ],
    ],
    passport => [
        [ 'type',              $sn_main,    $k_string,      'passport type' ],
        [ 'issuing_country',   $sn_main,    $k_string,      'issuing country' ],
        [ 'number',            $sn_main,    $k_string,      'number' ],
        [ 'fullname',          $sn_main,    $k_string,      'full name' ],
        [ 'sex',               $sn_main,    $k_gender,      'sex' ],
        [ 'nationality',       $sn_main,    $k_string,      'nationality' ],
        [ 'issuing_authority', $sn_main,    $k_string,      'issuing authority' ],
        [ 'birthdate',         $sn_main,    $k_date,        'date of birth' ],
        [ 'birthplace',        $sn_main,    $k_string,      'place of birth' ],
        [ 'issue_date',        $sn_main,    $k_date,        'issued on' ],
        [ 'expiry_date',       $sn_main,    $k_date,        'expiry date' ],
    ],
    rewards => [
        [ 'company_name',           $sn_main,  $k_string,    'company name' ],
        [ 'member_name',            $sn_main,  $k_string,    'member name' ],
        [ 'membership_no',          $sn_main,  $k_string,    'member ID', 'clipboardFilter' => $f_alphanums ],
        [ 'pin',                    $sn_main,  $k_concealed, 'PIN' ],
        [ 'additional_no',          $sn_extra, $k_string,    'member ID (additional)' ],
        [ 'member_since',           $sn_extra, $k_monthYear, 'member since' ],
        [ 'customer_service_phone', $sn_extra, $k_string,    'customer service phone' ],
        # BUGFIX: this label was single-quoted, so the \x{200b} zero-width
        # space stayed literal text; double quotes interpolate it, matching
        # the "auth\x{200b} method" labels in the email table.
        [ 'reservations_phone',     $sn_extra, $k_phone,     "phone for reserva\x{200b}tions" ],
        [ 'website',                $sn_extra, $k_url,       'website' ],
    ],
    server => [
        [ 'url',                    $sn_adminConsole ? $sn_main : $sn_main, $k_string, 'URL' ],
        [ 'username',               $sn_main,         $k_string,    'username' ],
        [ 'password',               $sn_main,         $k_concealed, 'password' ],
        [ 'admin_console_url',      $sn_adminConsole, $k_string,    'admin console URL' ],
        [ 'admin_console_username', $sn_adminConsole, $k_string,    'admin console username' ],
        [ 'admin_console_password', $sn_adminConsole, $k_concealed, 'console password' ],
        [ 'name',                   $sn_hostProvider, $k_string,    'name' ],
        [ 'website',                $sn_hostProvider, $k_string,    'website' ],
        [ 'support_contact_url',    $sn_hostProvider, $k_string,    'support URL' ],
        [ 'support_contact_phone',  $sn_hostProvider, $k_string,    'support phone' ],
    ],
    socialsecurity => [
        [ 'name',           $sn_main,       $k_string,      'name' ],
        [ 'number',         $sn_main,       $k_concealed,   'number', 'generate'=>'off' ],
    ],
    software => [
        [ 'product_version',   $sn_main,      $k_string, 'version' ],
        [ 'reg_code',          $sn_main,      $k_string, 'license key', 'guarded'=>'yes', 'multiline'=>'yes' ],
        [ 'reg_name',          $sn_customer,  $k_string, 'licensed to' ],
        [ 'reg_email',         $sn_customer,  $k_email,  'registered email' ],
        [ 'company',           $sn_customer,  $k_string, 'company' ],
        [ 'download_link',     $sn_publisher, $k_url,    'download page' ],
        [ 'publisher_name',    $sn_publisher, $k_string, 'publisher' ],
        [ 'publisher_website', $sn_publisher, $k_url,    'website' ],
        [ 'retail_price',      $sn_publisher, $k_string, 'retail price' ],
        [ 'support_email',     $sn_publisher, $k_email,  'support email' ],
        [ 'order_date',        $sn_order,     $k_date,   'purchase date' ],
        [ 'order_number',      $sn_order,     $k_string, 'order number' ],
        [ 'order_total',       $sn_order,     $k_string, 'order total' ],
    ],
    wireless => [
        [ 'name',              $sn_main,    $k_string,    'base station name' ],
        [ 'password',          $sn_main,    $k_concealed, 'base station password' ],
        [ 'server',            $sn_main,    $k_string,    'server / IP address' ],
        [ 'airport_id',        $sn_main,    $k_string,    'AirPort ID' ],
        [ 'network_name',      $sn_main,    $k_string,    'network name' ],
        [ 'wireless_security', $sn_main,    $k_menu,      'wireless security' ],
        [ 'wireless_password', $sn_main,    $k_concealed, 'wireless network password' ],
        [ 'disk_password',     $sn_main,    $k_concealed, 'attached storage password' ],
    ],
);
# Country name / ISO 3166 code patterns used by country_to_code() and the
# $k_address conversion in type_conversions().
# NOTE(review): each pattern '^xx|Name$' groups as '(^xx)|(Name$)' because
# '|' binds looser than the anchors, so any input merely *starting* with a
# two-letter code (e.g. 'Ireland' matches the '^ir' half of the Iran entry)
# or *ending* with a country name can match; combined with unordered hash
# iteration in country_to_code(), ambiguous inputs can map
# nondeterministically. Consider '^(?:xx|Name)$' if exact matching is
# intended - left unchanged here to preserve behavior.
my %country_codes = (
    ad => qr/^ad|Andorra$/i,
    ae => qr/^ae|United Arab Emirates$/i,
    af => qr/^af|Afghanistan$/i,
    ag => qr/^ag|Antigua and Barbuda$/i,
    al => qr/^al|Albania$/i,
    am => qr/^am|Armenia$/i,
    ao => qr/^ao|Angola$/i,
    ar => qr/^ar|Argentina$/i,
    at => qr/^at|Austria$/i,
    au => qr/^au|Australia$/i,
    az => qr/^az|Azerbaijan$/i,
    ba => qr/^ba|Bosnia and Herzegovina$/i,
    bb => qr/^bb|Barbados$/i,
    bd => qr/^bd|Bangladesh$/i,
    be => qr/^be|Belgium$/i,
    bf => qr/^bf|Burkina Faso$/i,
    bg => qr/^bg|Bulgaria$/i,
    bh => qr/^bh|Bahrain$/i,
    bi => qr/^bi|Burundi$/i,
    bj => qr/^bj|Benin$/i,
    bl => qr/^bl|Saint Barthélemy$/i,
    bm => qr/^bm|Bermuda$/i,
    bn => qr/^bn|Brunei Darussalam$/i,
    bo => qr/^bo|Bolivia$/i,
    br => qr/^br|Brazil$/i,
    bs => qr/^bs|The Bahamas$/i,
    bt => qr/^bt|Bhutan$/i,
    bw => qr/^bw|Botswana$/i,
    by => qr/^by|Belarus$/i,
    bz => qr/^bz|Belize$/i,
    ca => qr/^ca|Canada$/i,
    cd => qr/^cd|Democratic Republic of the Congo$/i,
    cf => qr/^cf|Central African Republic$/i,
    cg => qr/^cg|Republic of the Congo$/i,
    ch => qr/^ch|Switzerland$/i,
    ci => qr/^ci|Côte d’Ivoire$/i,
    cl => qr/^cl|Chile$/i,
    cm => qr/^cm|Cameroon$/i,
    cn => qr/^cn|China$/i,
    co => qr/^co|Colombia$/i,
    cr => qr/^cr|Costa Rica$/i,
    cs => qr/^cs|Czech Republic$/i,
    cu => qr/^cu|Cuba$/i,
    cv => qr/^cv|Cape Verde$/i,
    cy => qr/^cy|Cyprus$/i,
    cz => qr/^cz|Czech Republic$/i,
    de => qr/^de|Germany$/i,
    dj => qr/^dj|Djibouti$/i,
    dk => qr/^dk|Denmark$/i,
    dm => qr/^dm|Dominica$/i,
    do => qr/^do|Dominican Republic$/i,
    dz => qr/^dz|Algeria$/i,
    ec => qr/^ec|Ecuador$/i,
    ee => qr/^ee|Estonia$/i,
    eg => qr/^eg|Egypt$/i,
    er => qr/^er|Eritrea$/i,
    es => qr/^es|Spain$/i,
    et => qr/^et|Ethiopia$/i,
    fi => qr/^fi|Finland$/i,
    fj => qr/^fj|Fiji$/i,
    fk => qr/^fk|Falkland Islands$/i,
    fm => qr/^fm|Micronesia$/i,
    fo => qr/^fo|Faroe Islands$/i,
    fr => qr/^fr|France$/i,
    ga => qr/^ga|Gabon$/i,
    gd => qr/^gd|Grenada$/i,
    ge => qr/^ge|Georgia$/i,
    gh => qr/^gh|Ghana$/i,
    gi => qr/^gi|Gibraltar$/i,
    gl => qr/^gl|Greenland$/i,
    gm => qr/^gm|The Gambia$/i,
    gn => qr/^gn|Guinea$/i,
    gp => qr/^gp|Guadeloupe$/i,
    gq => qr/^gq|Equatorial Guinea$/i,
    gr => qr/^gr|Greece$/i,
    gs => qr/^gs|South Georgia and South Sandwich Islands$/i,
    gt => qr/^gt|Guatemala$/i,
    gw => qr/^gw|Guinea-Bissau$/i,
    gy => qr/^gy|Guyana$/i,
    hk => qr/^hk|Hong Kong$/i,
    hn => qr/^hn|Honduras$/i,
    hr => qr/^hr|Croatia$/i,
    ht => qr/^ht|Haiti$/i,
    hu => qr/^hu|Hungary$/i,
    id => qr/^id|Indonesia$/i,
    ie => qr/^ie|Ireland$/i,
    il => qr/^il|Israel$/i,
    im => qr/^im|Isle of Man$/i,
    in => qr/^in|India$/i,
    iq => qr/^iq|Iraq$/i,
    ir => qr/^ir|Iran$/i,
    is => qr/^is|Iceland$/i,
    it => qr/^it|Italy$/i,
    jm => qr/^jm|Jamaica$/i,
    jo => qr/^jo|Jordan$/i,
    jp => qr/^jp|Japan$/i,
    ke => qr/^ke|Kenya$/i,
    kg => qr/^kg|Kyrgyzstan$/i,
    kh => qr/^kh|Cambodia$/i,
    ki => qr/^ki|Kiribati$/i,
    km => qr/^km|Comoros$/i,
    kn => qr/^kn|Saint Kitts and Nevis$/i,
    kp => qr/^kp|North Korea$/i,
    kr => qr/^kr|South Korea$/i,
    kw => qr/^kw|Kuwait$/i,
    ky => qr/^ky|Cayman Islands$/i,
    kz => qr/^kz|Kazakhstan$/i,
    la => qr/^la|Laos$/i,
    lb => qr/^lb|Lebanon$/i,
    lc => qr/^lc|Saint Lucia$/i,
    li => qr/^li|Liechtenstein$/i,
    lk => qr/^lk|Sri Lanka$/i,
    lr => qr/^lr|Liberia$/i,
    ls => qr/^ls|Lesotho$/i,
    lt => qr/^lt|Lithuania$/i,
    lu => qr/^lu|Luxembourg$/i,
    lv => qr/^lv|Latvia$/i,
    ly => qr/^ly|Libya$/i,
    ma => qr/^ma|Morocco$/i,
    mc => qr/^mc|Monaco$/i,
    md => qr/^md|Moldova$/i,
    me => qr/^me|Montenegro$/i,
    mf => qr/^mf|Saint Martin$/i,
    mg => qr/^mg|Madagascar$/i,
    mh => qr/^mh|Marshall Islands$/i,
    mk => qr/^mk|Macedonia$/i,
    ml => qr/^ml|Mali$/i,
    mm => qr/^mm|Myanmar$/i,
    mn => qr/^mn|Mongolia$/i,
    mo => qr/^mo|Macau$/i,
    mq => qr/^mq|Martinique$/i,
    mr => qr/^mr|Mauritania$/i,
    mt => qr/^mt|Malta$/i,
    mu => qr/^mu|Mauritius$/i,
    mv => qr/^mv|Maldives$/i,
    mw => qr/^mw|Malawi$/i,
    mx => qr/^mx|Mexico$/i,
    my => qr/^my|Malaysia$/i,
    mz => qr/^mz|Mozambique$/i,
    na => qr/^na|Namibia$/i,
    nc => qr/^nc|New Caledonia$/i,
    ne => qr/^ne|Niger$/i,
    ng => qr/^ng|Nigeria$/i,
    ni => qr/^ni|Nicaragua$/i,
    nl => qr/^nl|Netherlands$/i,
    no => qr/^no|Norway$/i,
    np => qr/^np|Nepal$/i,
    nr => qr/^nr|Nauru$/i,
    nz => qr/^nz|New Zealand$/i,
    om => qr/^om|Oman$/i,
    pa => qr/^pa|Panama$/i,
    pe => qr/^pe|Peru$/i,
    pf => qr/^pf|French Polynesia$/i,
    pg => qr/^pg|Papua New Guinea$/i,
    ph => qr/^ph|Philippines$/i,
    pk => qr/^pk|Pakistan$/i,
    pl => qr/^pl|Poland$/i,
    pr => qr/^pr|Puerto Rico$/i,
    ps => qr/^ps|Palestinian Territories$/i,
    pt => qr/^pt|Portugal$/i,
    pw => qr/^pw|Palau$/i,
    py => qr/^py|Paraguay$/i,
    qa => qr/^qa|Qatar$/i,
    re => qr/^re|Réunion$/i,
    ro => qr/^ro|Romania$/i,
    rs => qr/^rs|Serbia$/i,
    ru => qr/^ru|Russia$/i,
    rw => qr/^rw|Rwanda$/i,
    sa => qr/^sa|Saudi Arabia$/i,
    sb => qr/^sb|Solomon Islands$/i,
    sc => qr/^sc|Seychelles$/i,
    sd => qr/^sd|Sudan$/i,
    se => qr/^se|Sweden$/i,
    sg => qr/^sg|Singapore$/i,
    sh => qr/^sh|Saint Helena$/i,
    si => qr/^si|Slovenia$/i,
    sk => qr/^sk|Slovakia$/i,
    sl => qr/^sl|Sierra Leone$/i,
    sm => qr/^sm|San Marino$/i,
    sn => qr/^sn|Senegal$/i,
    so => qr/^so|Somalia$/i,
    sr => qr/^sr|Suriname$/i,
    st => qr/^st|Sao Tome and Principe$/i,
    sv => qr/^sv|El Salvador$/i,
    sy => qr/^sy|Syria$/i,
    sz => qr/^sz|Swaziland$/i,
    td => qr/^td|Chad$/i,
    tg => qr/^tg|Togo$/i,
    th => qr/^th|Thailand$/i,
    tj => qr/^tj|Tajikistan$/i,
    tl => qr/^tl|Timor-Leste$/i,
    tm => qr/^tm|Turkmenistan$/i,
    tn => qr/^tn|Tunisia$/i,
    to => qr/^to|Tonga$/i,
    tr => qr/^tr|Turkey$/i,
    tt => qr/^tt|Trinidad and Tobago$/i,
    tv => qr/^tv|Tuvalu$/i,
    tw => qr/^tw|Taiwan$/i,
    tz => qr/^tz|Tanzania$/i,
    ua => qr/^ua|Ukraine$/i,
    ug => qr/^ug|Uganda$/i,
    uk => qr/^uk|United Kingdom$/i,
    us => qr/^us|United States$/i,
    uy => qr/^uy|Uruguay$/i,
    uz => qr/^uz|Uzbekistan$/i,
    va => qr/^va|Vatican$/i,
    vc => qr/^vc|Saint Vincent and the Grenadines$/i,
    ve => qr/^ve|Venezuela$/i,
    vi => qr/^vi|U.S. Virgin Islands$/i,
    vn => qr/^vn|Vietnam$/i,
    vu => qr/^vu|Vanuatu$/i,
    ws => qr/^ws|Samoa$/i,
    ye => qr/^ye|Yemen$/i,
    yu => qr/^yu|Serbia and Montenegro$/i,
    za => qr/^za|South Africa$/i,
    zm => qr/^zm|Zambia$/i,
    zw => qr/^zw|Zimbabwe$/i,
);
# Convert one normalized card of the given $type into a 1PIF record and
# return it as a JSON-encoded string.
#
# $type - a key of %pif_table / %typeMap (e.g. 'login', 'creditcard')
# $card - normalized card hash: title, fields (aref of field hashes),
#         notes, tags, folder, modified
#
# NB: destructively consumes $card->{'fields'} (entries are popped off).
sub create_pif_record {
    my ($type, $card) = @_;
    my $rec = {};

    # cycle in order through the definitions for the given type, testing if the key exists in the imported values hash %cardh.
    my @ordered_sections = ();
    my @to_notes;                               # fields routed to notesPlain
    my $defs = $pif_table{$type};

    $rec->{'title'} = $card->{'title'} // 'Untitled';
    debug "Title: ", $rec->{'title'};

    # move out fields that are not defined in the pif_table, to be added to notes later
    my %cardh;
    while (my $f = pop @{$card->{'fields'}}) {
        my @found = grep { $f->{'outkey'} eq $_->[0] } @$defs;
        if (@found) {
            # turn fields array into hash for easier processing
            $cardh{$f->{'outkey'}} = $f;
            # 'keep' fields are mapped AND duplicated into the notes
            push @to_notes, $f if $f->{'keep'};
            @found > 1 and
                die "Duplicate card key detected - please report: $f->{'outkey'}: ", map {$_->[0] . " "} @found;
        }
        else {
            push @to_notes, $f;
        }
    }

    for my $def (@$defs) {
        my $key = $def->[0];
        debug "   key test($key)", ! exists $cardh{$key} ?
            (', ', 'Not found') :
            (': ', to_string($cardh{$key}{'value'}));
        next if !exists $cardh{$key};

        # Logins place username/password in the webforms 'fields' array and
        # the URL in 'URLs'; they then also fall through to the generic
        # type_conversions() handling below.
        if ($type eq 'login') {
            if ($cardh{$key}{'value'} ne '') {
                if ($key eq 'username' or $key eq 'password') {
                    push @{$rec->{'secureContents'}{'fields'}}, {
                        'designation' => $key, name => $def->[3], 'type' => $def->[2], 'value' => $cardh{$key}{'value'}
                    };
                }
                elsif ($key eq 'url') {
                    push @{$rec->{'secureContents'}{'URLs'}}, { 'label' => $def->[3], 'url' => $cardh{$key}{'value'} };
                    # Need to add Location field so that the item appears in 1Password for Windows' extension.
                    $rec->{'location'} = $cardh{$key}{'value'};
                }
            }
        }

        if (my @kv_pairs = type_conversions($def->[2], $key, \%cardh)) {
            # add key/value pairs to top level secureContents.
            while (@kv_pairs) {
                $rec->{'secureContents'}{$kv_pairs[0]} = $kv_pairs[1];
                shift @kv_pairs; shift @kv_pairs;
            }

            # add entry to secureContents.sections when defined
            if (defined $def->[1]) {
                my $href = { 'n' => $key, 'k' => $def->[2], 't' => $def->[3], 'v' => $cardh{$key}{'value'} };
                # add any attributes
                $href->{'a'} = { @$def[4..$#$def] } if @$def > 4;

                # maintain the section order for later output
                my $section_name = join '.', 'secureContents', $def->[1];
                push @ordered_sections, $section_name if !exists $rec->{'_sections'}{$section_name};
                push @{$rec->{'_sections'}{join '.', 'secureContents', $def->[1]}}, $href;
            }
        }
        else {
            # failed kind conversions
            push @to_notes, $cardh{$key};
            delete $cardh{$key};
        }
    }

    # emit the accumulated sections in first-seen order
    for (@ordered_sections) {
        my (undef, $name, $title) = split /\./, $_;
        my $href = { 'name' => $name, 'title' => $title, 'fields' => $rec->{'_sections'}{$_} };
        push @{$rec->{'secureContents'}{'sections'}}, $href;
    }
    delete $rec->{'_sections'};

    if (exists $card->{'notes'}) {
        $rec->{'secureContents'}{'notesPlain'} = ref($card->{'notes'}) eq 'ARRAY' ? join("\n", @{$card->{'notes'}}) : $card->{'notes'};
        debug "  notes: ", unfold_and_chop $rec->{'secureContents'}{'notesPlain'};
    }

    # unknown types fall back to a Secure Note record
    $rec->{'typeName'} = $typeMap{$type}{'typeName'} // $typeMap{'note'}{'typeName'};

    if (exists $card->{'tags'}) {
        push @{$rec->{'openContents'}{'tags'}}, ref($card->{'tags'}) eq 'ARRAY' ? (@{$card->{'tags'}}) : $card->{'tags'};
        debug "  tags: ", unfold_and_chop ref($card->{'tags'}) eq 'ARRAY' ? join('; ', @{$card->{'tags'}}) : $card->{'tags'};
    }

    if ($main::opts{'folders'} and exists $card->{'folder'} and @{$card->{'folder'}}) {
        add_to_folder_tree(\$gFolders, @{$card->{'folder'}});
        my $uuid = uuid_from_path(\$gFolders, @{$card->{'folder'}});
        $rec->{'folderUuid'} = $uuid if defined $uuid;
    }

    # map any remaining fields to notes
    if (exists $rec->{'secureContents'}{'notesPlain'} and $rec->{'secureContents'}{'notesPlain'} ne '' and @to_notes) {
        $rec->{'secureContents'}{'notesPlain'} .= "\n"
    }
    for (@to_notes) {
        my $valuekey = $_->{'keep'} ? 'valueorig' : 'value';
        next if $_->{$valuekey} eq '';
        debug " *unmapped card field pushed to notes: $_->{'inkey'}";
        $rec->{'secureContents'}{'notesPlain'} .= "\n" if exists $rec->{'secureContents'}{'notesPlain'} and $rec->{'secureContents'}{'notesPlain'} ne '';
        $rec->{'secureContents'}{'notesPlain'} .= join ': ', $_->{'inkey'}, $_->{$valuekey};
    }

    ($rec->{'uuid'} = create_uuid_as_string(UUID::Tiny->UUID_RANDOM())) =~ s/-//g;

    # force updatedAt and createdAt to be ints, not strings
    if (exists $card->{'modified'} and defined $card->{'modified'}) {
        $rec->{'updatedAt'} = 0 + $card->{'modified'} if $main::opts{'modified'};
    }
    # set the created time to 1/1/2000 to help trigger Watchtower checks, unless --nowatchtower was specified
    $rec->{'createdAt'} = 946713600 if $type eq 'login' and $main::opts{'watchtower'};

    # for output file comparison testing
    if ($main::opts{'testmode'}) {
        $rec->{'uuid'} = '0';
        $rec->{'createdAt'} = 0 if exists $rec->{'createdAt'};
        # BUGFIX: previously tested exists $rec->{'modified'}, a key never set
        # on $rec (the timestamp is stored as 'updatedAt' above), so updatedAt
        # was never normalized in test mode.
        $rec->{'updatedAt'} = 0 if exists $rec->{'updatedAt'};
    }

    return encode_json $rec;
}
# Write every card in $cardlist (hash: type => aref of normalized cards) to
# $outfile in 1PIF format, one JSON record per line separated by the
# AgileBits separator UUID. When $types (hash of lowercased type names) is
# supplied, only those types are exported. Any folder records accumulated
# in $gFolders are appended at the end.
sub create_pif_file {
    my ($cardlist, $outfile, $types) = @_;

    # check the pif table since a module may have added (incorrect) entries via add_new_field()
    check_pif_table();

    open my $outfh, ">", $outfile or
        bail "Cannot create 1pif output file: $outfile\n$!";

    my $ntotal = 0;
    for my $type (keys %$cardlist) {
        next if $types and not exists $types->{lc $type};

        # BUGFIX: was 'my $n;' - it stayed undef when a type exported zero
        # items, producing uninitialized-value warnings in '$ntotal += $n'
        # and in the verbose message below.
        my $n = 0;
        for my $card (@{$cardlist->{$type}}) {
            # create_pif_record() empties $card->{'fields'}, so capture the
            # title up front for the failure message.
            my $saved_title = $card->{'title'} // 'Untitled';
            if (my $encoded = create_pif_record($type, $card)) {
                print $outfh $encoded, "\n", $agilebits_1pif_entry_sep_uuid_str, "\n";
                $n++;
            }
            else {
                warn "PIF encoding failed for item '$saved_title', type '$type'";
            }
        }
        $ntotal += $n;
        verbose "Exported $n $type item", pluralize($n);
    }
    verbose "Exported $ntotal total item", pluralize($ntotal);

    if ($gFolders) {
        output_folder_records($outfh, $gFolders, undef);
    }
    # buffered write errors only surface at close - check it
    close $outfh or
        bail "Failed to close 1pif output file: $outfile\n$!";
}
# Insert a folder path (a list of folder names) into the folder tree,
# creating any missing intermediate nodes. Each new node gets an empty
# 'children' hashref and a dashless random 'uuid'.
# $folder_tree is a reference to a hashref (e.g. \$gFolders).
sub add_to_folder_tree {
    my ($folder_tree, $folder_name) = (shift, shift);

    return unless defined $folder_name;

    if (exists $$folder_tree->{$folder_name}) {
        # node already present - descend with the remaining path components
        add_to_folder_tree(\$$folder_tree->{$folder_name}{'children'}, @_);
    }
    else {
        # create new folder_tree node
        $$folder_tree->{$folder_name}{'children'} = {};
        # NOTE(review): the 'cappella.us' name argument looks superfluous for
        # random-style UUID generation - confirm against UUID::Tiny docs.
        ($$folder_tree->{$folder_name}{'uuid'} = create_uuid_as_string(UUID::Tiny->UUID_RANDOM(), 'cappella.us')) =~ s/-//g;
        if (@_) {
            add_to_folder_tree(\$$folder_tree->{$folder_name}{'children'}, @_);
        }
    }
}
# Walk the folder tree along the given path of folder names and return the
# 'uuid' stored at the final component, or undef when any component is
# missing (or the path is empty / contains a false name, mirroring the
# original truthiness-based loop). $tree_ref is a reference to a hashref
# as built by add_to_folder_tree().
sub uuid_from_path {
    my ($tree_ref, @path) = @_;

    my $name = shift @path;
    return undef unless $name;                          # end of (or empty) path
    return undef unless exists $$tree_ref->{$name};     # unknown folder

    # last component: hand back its uuid; otherwise descend into children
    return $$tree_ref->{$name}{'uuid'} unless @path;
    return uuid_from_path(\$$tree_ref->{$name}{'children'}, @path);
}
# Recursively emit one 'system.folder.Regular' 1PIF record per node in the
# folder tree $folders to the already-open handle $outfh, each followed by
# the AgileBits separator line. Children carry their parent's uuid in
# 'folderUuid'; top-level folders (undef $parent_uuid) omit it.
sub output_folder_records {
    my ($outfh, $folders, $parent_uuid) = @_;

    return if !defined $folders;

    for my $title (keys %$folders) {
        my %record = (
            uuid     => $folders->{$title}{'uuid'},
            title    => $title,
            typeName => 'system.folder.Regular',
        );
        $record{'folderUuid'} = $parent_uuid if defined $parent_uuid;

        print $outfh encode_json(\%record), "\n", $agilebits_1pif_entry_sep_uuid_str, "\n";

        output_folder_records($outfh, $folders->{$title}{'children'}, $folders->{$title}{'uuid'})
            if $folders->{$title}{'children'};
    }
}
# Append a new field definition row to %pif_table for the given record type.
#
# Arguments: $type, $key, $section, $kind, $text, followed by optional
# attribute key/value pairs (e.g. 'guarded' => 'yes') which are stored at
# the end of the row, matching the static %pif_table row shape:
#     [ key, section, kind, label, %attrs ]
# Dies when $type is not a known %pif_table type. Returns true.
sub add_new_field {
    my ($type, $key, $section, $kind, $text) = (shift, shift, shift, shift, shift);

    die "add_new_field: unsupported type '$type' in %pif_table" if !exists $pif_table{$type};

    # An earlier variant inserted the row after a caller-supplied key; it was
    # disabled with a fragile '=cut ... =cut' POD comment-out hack that also
    # left a $DB::single debugger hook behind. That dead code has been
    # removed: new rows are simply appended.
    push @{$pif_table{$type}}, [$key, $section, $kind, $text, @_];
    1;
}
# Performs various conversions on key, value pairs, depending upon type=k values.
# Some key/values will be exploded into multiple key/value pairs.
#
# Returns a (possibly multi-pair) key => value list on success, or the empty
# list when the value cannot be converted - in which case the caller routes
# the original field to the card's notes.
sub type_conversions {
    my ($type, $key, $cref) = @_;

    return () if not defined $type;

    # k_date values must already be an integer; anything else is
    # unconvertible. Valid integer dates skip every branch below and are
    # passed through unchanged by the final 'else'.
    if ($type eq $k_date and $cref->{$key}{'value'} !~ /^-?\d+$/) {
        return ();
    }
    if ($type eq $k_gender) {
        # any value containing an 'f'/'F' is treated as female
        return ( $key => $cref->{$key}{'value'} =~ /F/i ? 'female' : 'male' );
    }
    if ($type eq $k_monthYear) {
        # monthYear types are split into two top level keys: keyname_mm and keyname_yy
        # their value is stored as YYYYMM
        # XXX validate the date w/a module?
        if (my ($year, $month) = ($cref->{$key}{'value'} =~ /^(\d{4})(\d{2})$/)) {
            if (check_date($year,$month,1)) {   # validate the date
                return ( join('_', $key, 'yy') => $year,
                         join('_', $key, 'mm') => $month );
            }
        }
        # malformed/invalid dates fall through to the final return ()
    }
    elsif ($type eq $k_cctype) {
        # normalize free-form credit card type names to 1Password's fixed set
        my %cctypes = (
            mc           => qr/(?:master(?:card)?)|\Amc\z/i,
            visa         => qr/visa/i,
            amex         => qr/american express|amex/i,
            diners       => qr/diners club|\Adc\z/i,
            carteblanche => qr/carte blanche|\Acb\z/i,
            discover     => qr/discover/i,
            jcb          => qr/jcb/i,
            maestro      => qr/(?:(?:mastercard\s*)?maestro)|\Amm\z/i,
            visaelectron => qr/(?:(?:visa\s*)?electron)|\Ave\z/i,
            laser        => qr/laser/i,
            unionpay     => qr/union\s*pay|\Aup\z/i,
        );
        # NOTE(review): keys %cctypes is unordered, so a value matching more
        # than one pattern maps to whichever key is found first.
        if (my @matched = grep { $cref->{$key}{'value'} =~ $cctypes{$_} } keys %cctypes) {
            return ( $key => $matched[0] );
        }
    }
    elsif ($type eq $k_address and $key eq 'address') {
        # address is expected to be in hash w/keys: street city state country zip
        my $h = $cref->{'address'}{'value'};
        # at the top level in secureContents, key 'address1' is used instead of key 'street'
        $h->{'country'} = country_to_code($h->{'country'}) if $h->{'country'} and !exists $country_codes{$h->{'country'}};
        my %ret = ( 'address1' => $h->{'street'}, map { exists $h->{$_} ? ($_ => $h->{$_}) : () } qw/city state zip country/ );
        return %ret;
    }
    else {
        # all remaining kinds (including already-integer k_date values)
        # pass through unchanged
        return ( $key => $cref->{$key}{'value'} );
    }

    # unhandled - unmapped items will ultimately go to a card's notes field
    return ();
}
# explodes normalized card data into one or more normalized cards, based on the 'outtype' value in
# the normalized card data. The exploded card list is returned as a per-type hash.
#
# NB: destructively consumes $norm_card->{'fields'} (entries are popped off);
# field hashes are shallow-copied into the output cards.
sub explode_normalized {
    my ($itype, $norm_card) = @_;

    my (%oc, $nc);      # NOTE(review): $nc appears unused here
    # special case - Notes cards type have no 'fields', but $norm_card->{'notes'} will contain the notes
    if (not exists $norm_card->{'fields'}) {
        for (qw/title tags notes folder modified/) {
            # trigger the for() loop below
            $oc{'note'}{$_} = 1 if exists $norm_card->{$_} and defined $norm_card->{$_} and $norm_card->{$_} ne '';
        }
    }
    else {
        # bucket each field into its destination card type
        while (my $field = pop @{$norm_card->{'fields'}}) {
            push @{$oc{$field->{'outtype'}}{'fields'}}, { %$field };
        }
    }

    # for each of the output card types
    for my $type (keys %oc) {
        my $new_title;
        # look for and use any title replacements
        if (my @found = grep { $_->{'as_title'}} @{$oc{$type}{'fields'}}) {
            @found > 1 and die "More than one 'as_title' keywords found for type '$type' - please report";
            $new_title = $found[0]->{'as_title'};
            debug "\t\tnew title for exploded card type '$type': $new_title";
        }

        # add any supplemental title additions
        my $added_title = myjoin('', map { $_->{'to_title'} } @{$oc{$type}{'fields'}});
        $oc{$type}{'title'} = ($new_title || $norm_card->{'title'} || 'Untitled') . $added_title;

        # carry card-level metadata over to each exploded card
        for (qw/tags notes folder modified/) {
            $oc{$type}{$_} = $norm_card->{$_} if exists $norm_card->{$_} and defined $norm_card->{$_} and $norm_card->{$_} ne '';
        }
    }

    return \%oc;
}
# Do some internal checking that the %pif_table has expected values.
# Prints a diagnostic and dies when any row carries an unknown attribute
# name or an unsupported attribute value; run before exporting because
# add_new_field() lets converter modules extend the table at runtime.
sub check_pif_table {
    my %all_nkeys;      # NOTE(review): populated nowhere - appears unused
    my %valid_attrs = (
        generate        => 'off',
        guarded         => 'yes',
        clipboardFilter => [ $f_nums, $f_alphanums ],
        multiline       => 'yes',
    );
    my $errors;
    for my $type (keys %pif_table) {
        for (@{$pif_table{$type}}) {
            # report any typos or unsupported attributes/values
            if (scalar @$_ > 4) {
                # row elements 4..end are attribute key/value pairs
                my %a = (@$_)[4..$#$_];
                for my $key (keys %a) {
                    if (! exists $valid_attrs{$key}) {
                        say "Internal error: unsupported attribute '$key'";
                        $errors++;
                    }
                    # a value is valid when it equals the single allowed
                    # string, or is a member of the allowed list; the inner
                    # grep's $_ shadows the row's $_ only inside the braces
                    elsif (! grep { $a{$key} eq $_ } ref($valid_attrs{$key}) eq 'ARRAY' ? @{$valid_attrs{$key}} : ($valid_attrs{$key})) {
                        say "Internal error: type $type\{$_->[0]\} has an unsupported attribute value '$a{$key}' for attribute '$key'";
                        $errors++;
                    }
                }
            }
        }
    }
    $errors and die "Errors in pif_table - please report";
}
# Read a 1PIF file and return an aref of decoded item hashes.
# Separator lines and items marked '"trashed":true' are skipped.
sub get_items_from_1pif {
    my $file = shift;
    my @items;

    open my $io, "<", $file
        or bail "Unable to open 1PIF file: $file\n$!";

    # BUGFIX: read into a lexical. The previous 'while ($_ = <$io>)' wrote
    # the global $_ without localizing it, clobbering the caller's $_.
    while (my $line = <$io>) {
        chomp $line;
        next if $line eq $agilebits_1pif_entry_sep_uuid_str;
        next if $line =~ /"trashed":true[,}]/;          # skip items in the trash
        push @items, decode_json $line;
    }
    close $io;
    return \@items;
}
# Render a value for debug output: plain scalars are returned unchanged,
# hashrefs are flattened to a 'key: value; key: value' string (pair order
# follows Perl's unordered hash iteration).
sub to_string {
    my ($val) = @_;
    return $val if ref($val) eq '';
    my @rendered = map { "$_: $val->{$_}" } keys %{$val};
    return join('; ', @rendered);
}
# Convert a country name (or code) to its two-letter code using the
# %country_codes patterns; returns the input unchanged when nothing matches.
#
# NOTE(review): the patterns have the form qr/^xx|Name$/i, which groups as
# (^xx)|(Name$) - so inputs merely *starting* with a code or *ending* with a
# name can match, and since hash key order is unordered the first match
# found is nondeterministic for ambiguous inputs (e.g. 'Ireland' also
# matches the '^ir' half of the Iran entry). See the note on %country_codes.
sub country_to_code {
    for (keys %country_codes) {
        if ($_[0] =~ $country_codes{$_}) {
            debug "\tcountry conversion: $_[0] --> $_";
            return $_
        }
    }
    # no pattern matched - hand back the original value
    return $_[0];
}
1;
| Bastika07/onepassword-utilities | convert_to_1p4/Utils/PIF.pm | Perl | mit | 34,703 |
#!/usr/bin/env perl
# Minimal Net::DBus client example: point DBUS_SESSION_BUS_ADDRESS at a
# session bus socket, look up a service and object, and invoke a method.
use strict;
use warnings FATAL => 'all';
use feature "say";

use Net::DBus;

# Switch the below two lines (uncomment) if using launchd:
# my $address = local $ENV{DBUS_SESSION_BUS_ADDRESS} = "launchd:env=DBUS_LAUNCHD_SESSION_BUS_SOCKET";
# NOTE(review): "unix:path=tst" looks like a placeholder socket path -
# replace it with a real bus address before use. 'local' keeps the env
# override scoped to this program's run.
my $address = local $ENV{DBUS_SESSION_BUS_ADDRESS} = "unix:path=tst";

say "Using address: $address";

my $bus = Net::DBus->find;
# Service/object names below are example identifiers for the matching
# server script in this repository.
my $service = $bus->get_service("com.website.service.identifier");
my $object = $service->get_object("/object/path");
| zbentley/dbus-osx-examples | examples/perl/net-dbus/client.pl | Perl | mit | 548 |
=pod
=head1 NAME
SSL_get_ciphers, SSL_get_cipher_list - get list of available SSL_CIPHERs
=head1 SYNOPSIS
#include <openssl/ssl.h>
STACK_OF(SSL_CIPHER) *SSL_get_ciphers(const SSL *ssl);
STACK_OF(SSL_CIPHER) *SSL_get_client_ciphers(const SSL *ssl);
const char *SSL_get_cipher_list(const SSL *ssl, int priority);
=head1 DESCRIPTION
SSL_get_ciphers() returns the stack of available SSL_CIPHERs for B<ssl>,
sorted by preference. If B<ssl> is NULL or no ciphers are available, NULL
is returned.
SSL_get_client_ciphers() returns the stack of available SSL_CIPHERS matching the
list sent by the client for B<ssl>. If B<ssl> is NULL, no ciphers are
available, or B<ssl> is not operating in server mode, NULL is returned.
SSL_get_cipher_list() returns a pointer to the name of the SSL_CIPHER
listed for B<ssl> with B<priority>. If B<ssl> is NULL, no ciphers are
available, or there are fewer ciphers than B<priority> available, NULL
is returned.
=head1 NOTES
The details of the ciphers obtained by SSL_get_ciphers() can be obtained using
the L<SSL_CIPHER_get_name(3)> family of functions.
Call SSL_get_cipher_list() with B<priority> starting from 0 to obtain the
sorted list of available ciphers, until NULL is returned.
Note: SSL_get_ciphers() and SSL_get_client_ciphers() return a pointer
to an internal cipher stack, which will be freed later on when the SSL
or SSL_SESSION object is freed. Therefore, the calling code B<MUST
NOT> free the return value itself.
=head1 RETURN VALUES
See DESCRIPTION
=head1 SEE ALSO
L<ssl(3)>, L<SSL_CTX_set_cipher_list(3)>,
L<SSL_CIPHER_get_name(3)>
=cut
| vbloodv/blood | extern/openssl.orig/doc/ssl/SSL_get_ciphers.pod | Perl | mit | 1,603 |
/*************************************************************************
File: experiment2.pl
Copyright (C) 2004 Patrick Blackburn & Johan Bos
This file is part of BB1, version 1.2 (August 2005).
BB1 is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
BB1 is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with BB1; if not, write to the Free Software Foundation, Inc.,
59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*************************************************************************/
% Load term-display helpers for printing semantic representations.
:- use_module(comsemPredicates,[infix/0,prefix/0]).

/*========================================================================
Syntax-semantics rules
========================================================================*/

% A sentence's semantics is built by the NP, which takes the VP meaning
% (over the shared variable X) as its nuclear scope.
s(Sem)--> np(X,SemVP,Sem), vp(X,SemVP).

% Quantified NP: the determiner combines the noun restriction with the scope.
np(X,Scope,Sem)--> det(X,Restr,Scope,Sem), noun(X,Restr).
% Proper-name NP: the name is plugged in directly; scope passes through.
np(SemPN,Sem,Sem)--> pn(SemPN).

% VP is either an intransitive verb, or a transitive verb plus an object NP
% (the object NP scopes over the transitive-verb semantics).
vp(X,Sem)--> iv(X,Sem).
vp(X,Sem)--> tv(X,Y,SemTV), np(Y,SemTV,Sem).

/*========================================================================
Proper Names
========================================================================*/

pn(vincent)--> [vincent].
pn(mia)--> [mia].

/*========================================================================
Transitive Verbs
========================================================================*/

tv(Y,Z,love(Y,Z))--> [loves].
tv(Y,Z,like(Y,Z))--> [likes].

/*========================================================================
Intransitive Verbs
========================================================================*/

iv(Y,snort(Y))--> [snorts].
iv(Y,walk(Y))--> [walks].

/*========================================================================
Determiners
========================================================================*/

% "a"     -> existential:  some(X, and(Restr, Scope))
% "every" -> universal:    all(X, imp(Restr, Scope))
det(X,Restr,Scope,some(X,and(Restr,Scope)))--> [a].
det(X,Restr,Scope,all(X,imp(Restr,Scope)))--> [every].

/*========================================================================
Nouns
========================================================================*/

noun(X,woman(X))--> [woman].
noun(X,footmassage(X))--> [foot,massage].
| TeamSPoon/logicmoo_workspace | packs_sys/logicmoo_nlu/ext/CURT/bb1/experiment2.pl | Perl | mit | 2,604 |
=pod
=head1 NAME
SSL_new - create a new SSL structure for a connection
=head1 SYNOPSIS
#include <openssl/ssl.h>
SSL *SSL_new(SSL_CTX *ctx);
=head1 DESCRIPTION
SSL_new() creates a new B<SSL> structure which is needed to hold the
data for a TLS/SSL connection. The new structure inherits the settings
of the underlying context B<ctx>: connection method,
options, verification settings, timeout settings.
=head1 RETURN VALUES
The following return values can occur:
=over 4
=item NULL
The creation of a new SSL structure failed. Check the error stack to
find out the reason.
=item Pointer to an SSL structure
The return value points to an allocated SSL structure.
=back
=head1 SEE ALSO
L<SSL_free(3)>, L<SSL_clear(3)>,
L<SSL_CTX_set_options(3)>,
L<SSL_get_SSL_CTX(3)>,
L<ssl(3)>
=cut
| vbloodv/blood | extern/openssl.orig/doc/ssl/SSL_new.pod | Perl | mit | 800 |
# This file is auto-generated by the Perl DateTime Suite time zone
# code generator (0.07) This code generator comes with the
# DateTime::TimeZone module distribution in the tools/ directory
#
# Generated from debian/tzdata/europe. Olson data version 2008c
#
# Do not edit this file directly.
#
package DateTime::TimeZone::Asia::Irkutsk;
use strict;
use Class::Singleton;
use DateTime::TimeZone;
use DateTime::TimeZone::OlsonDB;
@DateTime::TimeZone::Asia::Irkutsk::ISA = ( 'Class::Singleton', 'DateTime::TimeZone' );
my $spans =
[
[
DateTime::TimeZone::NEG_INFINITY,
59295517360,
DateTime::TimeZone::NEG_INFINITY,
59295542400,
25040,
0,
'LMT'
],
[
59295517360,
60559808560,
59295542400,
60559833600,
25040,
0,
'IMT'
],
[
60559808560,
60888128400,
60559833760,
60888153600,
25200,
0,
'IRKT'
],
[
60888128400,
62490585600,
60888157200,
62490614400,
28800,
0,
'IRKT'
],
[
62490585600,
62506393200,
62490618000,
62506425600,
32400,
1,
'IRKST'
],
[
62506393200,
62522121600,
62506422000,
62522150400,
28800,
0,
'IRKT'
],
[
62522121600,
62537929200,
62522154000,
62537961600,
32400,
1,
'IRKST'
],
[
62537929200,
62553657600,
62537958000,
62553686400,
28800,
0,
'IRKT'
],
[
62553657600,
62569465200,
62553690000,
62569497600,
32400,
1,
'IRKST'
],
[
62569465200,
62585280000,
62569494000,
62585308800,
28800,
0,
'IRKT'
],
[
62585280000,
62601012000,
62585312400,
62601044400,
32400,
1,
'IRKST'
],
[
62601012000,
62616736800,
62601040800,
62616765600,
28800,
0,
'IRKT'
],
[
62616736800,
62632461600,
62616769200,
62632494000,
32400,
1,
'IRKST'
],
[
62632461600,
62648186400,
62632490400,
62648215200,
28800,
0,
'IRKT'
],
[
62648186400,
62663911200,
62648218800,
62663943600,
32400,
1,
'IRKST'
],
[
62663911200,
62679636000,
62663940000,
62679664800,
28800,
0,
'IRKT'
],
[
62679636000,
62695360800,
62679668400,
62695393200,
32400,
1,
'IRKST'
],
[
62695360800,
62711085600,
62695389600,
62711114400,
28800,
0,
'IRKT'
],
[
62711085600,
62726810400,
62711118000,
62726842800,
32400,
1,
'IRKST'
],
[
62726810400,
62742535200,
62726839200,
62742564000,
28800,
0,
'IRKT'
],
[
62742535200,
62758260000,
62742567600,
62758292400,
32400,
1,
'IRKST'
],
[
62758260000,
62773984800,
62758288800,
62774013600,
28800,
0,
'IRKT'
],
[
62773984800,
62790314400,
62774017200,
62790346800,
32400,
1,
'IRKST'
],
[
62790314400,
62806039200,
62790343200,
62806068000,
28800,
0,
'IRKT'
],
[
62806039200,
62821767600,
62806068000,
62821796400,
28800,
1,
'IRKST'
],
[
62821767600,
62831444400,
62821792800,
62831469600,
25200,
0,
'IRKT'
],
[
62831444400,
62837478000,
62831473200,
62837506800,
28800,
0,
'IRKT'
],
[
62837478000,
62853199200,
62837510400,
62853231600,
32400,
1,
'IRKST'
],
[
62853199200,
62868938400,
62853228000,
62868967200,
28800,
0,
'IRKT'
],
[
62868938400,
62884663200,
62868970800,
62884695600,
32400,
1,
'IRKST'
],
[
62884663200,
62900388000,
62884692000,
62900416800,
28800,
0,
'IRKT'
],
[
62900388000,
62916112800,
62900420400,
62916145200,
32400,
1,
'IRKST'
],
[
62916112800,
62931837600,
62916141600,
62931866400,
28800,
0,
'IRKT'
],
[
62931837600,
62947562400,
62931870000,
62947594800,
32400,
1,
'IRKST'
],
[
62947562400,
62963892000,
62947591200,
62963920800,
28800,
0,
'IRKT'
],
[
62963892000,
62982036000,
62963924400,
62982068400,
32400,
1,
'IRKST'
],
[
62982036000,
62995341600,
62982064800,
62995370400,
28800,
0,
'IRKT'
],
[
62995341600,
63013485600,
62995374000,
63013518000,
32400,
1,
'IRKST'
],
[
63013485600,
63026791200,
63013514400,
63026820000,
28800,
0,
'IRKT'
],
[
63026791200,
63044935200,
63026823600,
63044967600,
32400,
1,
'IRKST'
],
[
63044935200,
63058240800,
63044964000,
63058269600,
28800,
0,
'IRKT'
],
[
63058240800,
63076989600,
63058273200,
63077022000,
32400,
1,
'IRKST'
],
[
63076989600,
63089690400,
63077018400,
63089719200,
28800,
0,
'IRKT'
],
[
63089690400,
63108439200,
63089722800,
63108471600,
32400,
1,
'IRKST'
],
[
63108439200,
63121140000,
63108468000,
63121168800,
28800,
0,
'IRKT'
],
[
63121140000,
63139888800,
63121172400,
63139921200,
32400,
1,
'IRKST'
],
[
63139888800,
63153194400,
63139917600,
63153223200,
28800,
0,
'IRKT'
],
[
63153194400,
63171338400,
63153226800,
63171370800,
32400,
1,
'IRKST'
],
[
63171338400,
63184644000,
63171367200,
63184672800,
28800,
0,
'IRKT'
],
[
63184644000,
63202788000,
63184676400,
63202820400,
32400,
1,
'IRKST'
],
[
63202788000,
63216093600,
63202816800,
63216122400,
28800,
0,
'IRKT'
],
[
63216093600,
63234842400,
63216126000,
63234874800,
32400,
1,
'IRKST'
],
[
63234842400,
63247543200,
63234871200,
63247572000,
28800,
0,
'IRKT'
],
[
63247543200,
63266292000,
63247575600,
63266324400,
32400,
1,
'IRKST'
],
[
63266292000,
63278992800,
63266320800,
63279021600,
28800,
0,
'IRKT'
],
[
63278992800,
63297741600,
63279025200,
63297774000,
32400,
1,
'IRKST'
],
[
63297741600,
63310442400,
63297770400,
63310471200,
28800,
0,
'IRKT'
],
[
63310442400,
63329191200,
63310474800,
63329223600,
32400,
1,
'IRKST'
],
[
63329191200,
63342496800,
63329220000,
63342525600,
28800,
0,
'IRKT'
],
[
63342496800,
63360640800,
63342529200,
63360673200,
32400,
1,
'IRKST'
],
[
63360640800,
63373946400,
63360669600,
63373975200,
28800,
0,
'IRKT'
],
[
63373946400,
63392090400,
63373978800,
63392122800,
32400,
1,
'IRKST'
],
[
63392090400,
63405396000,
63392119200,
63405424800,
28800,
0,
'IRKT'
],
[
63405396000,
63424144800,
63405428400,
63424177200,
32400,
1,
'IRKST'
],
[
63424144800,
63436845600,
63424173600,
63436874400,
28800,
0,
'IRKT'
],
[
63436845600,
63455594400,
63436878000,
63455626800,
32400,
1,
'IRKST'
],
[
63455594400,
63468295200,
63455623200,
63468324000,
28800,
0,
'IRKT'
],
[
63468295200,
63487044000,
63468327600,
63487076400,
32400,
1,
'IRKST'
],
[
63487044000,
63500349600,
63487072800,
63500378400,
28800,
0,
'IRKT'
],
[
63500349600,
63518493600,
63500382000,
63518526000,
32400,
1,
'IRKST'
],
[
63518493600,
63531799200,
63518522400,
63531828000,
28800,
0,
'IRKT'
],
[
63531799200,
63549943200,
63531831600,
63549975600,
32400,
1,
'IRKST'
],
[
63549943200,
63563248800,
63549972000,
63563277600,
28800,
0,
'IRKT'
],
[
63563248800,
63581392800,
63563281200,
63581425200,
32400,
1,
'IRKST'
],
[
63581392800,
63594698400,
63581421600,
63594727200,
28800,
0,
'IRKT'
],
[
63594698400,
63613447200,
63594730800,
63613479600,
32400,
1,
'IRKST'
],
[
63613447200,
63626148000,
63613476000,
63626176800,
28800,
0,
'IRKT'
],
[
63626148000,
63644896800,
63626180400,
63644929200,
32400,
1,
'IRKST'
],
[
63644896800,
63657597600,
63644925600,
63657626400,
28800,
0,
'IRKT'
],
[
63657597600,
63676346400,
63657630000,
63676378800,
32400,
1,
'IRKST'
],
[
63676346400,
63689652000,
63676375200,
63689680800,
28800,
0,
'IRKT'
],
[
63689652000,
63707796000,
63689684400,
63707828400,
32400,
1,
'IRKST'
],
];
# Version of the Olson (tz) database this data was generated from.
sub olson_version { '2008c' }

# Number of DST transitions captured in the generated span table.
sub has_dst_changes { 39 }

# Last year covered by the pre-generated spans; later dates are derived
# from the observance/rules data below.
sub _max_year { 2018 }

sub _new_instance
{
    # Singleton constructor: initialise with the pre-computed span table.
    return shift->_init( @_, spans => $spans );
}

# UTC offset in seconds (+08:00) in effect after the final generated span.
sub _last_offset { 28800 }
my $last_observance = bless( {
'format' => 'IRK%sT',
'gmtoff' => '8:00',
'local_start_datetime' => bless( {
'formatter' => undef,
'local_rd_days' => 727216,
'local_rd_secs' => 10800,
'offset_modifier' => 0,
'rd_nanosecs' => 0,
'tz' => bless( {
'name' => 'floating',
'offset' => 0
}, 'DateTime::TimeZone::Floating' ),
'utc_rd_days' => 727216,
'utc_rd_secs' => 10800,
'utc_year' => 1993
}, 'DateTime' ),
'offset_from_std' => 0,
'offset_from_utc' => 28800,
'until' => [],
'utc_start_datetime' => bless( {
'formatter' => undef,
'local_rd_days' => 727215,
'local_rd_secs' => 68400,
'offset_modifier' => 0,
'rd_nanosecs' => 0,
'tz' => bless( {
'name' => 'floating',
'offset' => 0
}, 'DateTime::TimeZone::Floating' ),
'utc_rd_days' => 727215,
'utc_rd_secs' => 68400,
'utc_year' => 1993
}, 'DateTime' )
}, 'DateTime::TimeZone::OlsonDB::Observance' )
;
# Observance (format IRK%sT, gmtoff 8:00) in effect beyond the spans table.
sub _last_observance { $last_observance }
my $rules = [
bless( {
'at' => '2:00s',
'from' => '1996',
'in' => 'Oct',
'letter' => '',
'name' => 'Russia',
'offset_from_std' => 0,
'on' => 'lastSun',
'save' => '0',
'to' => 'max',
'type' => undef
}, 'DateTime::TimeZone::OlsonDB::Rule' ),
bless( {
'at' => '2:00s',
'from' => '1993',
'in' => 'Mar',
'letter' => 'S',
'name' => 'Russia',
'offset_from_std' => 3600,
'on' => 'lastSun',
'save' => '1:00',
'to' => 'max',
'type' => undef
}, 'DateTime::TimeZone::OlsonDB::Rule' )
]
;
# DST rule set ("Russia") used to compute transitions past _max_year.
sub _rules { $rules }
1;
| carlgao/lenga | images/lenny64-peon/usr/share/perl5/DateTime/TimeZone/Asia/Irkutsk.pm | Perl | mit | 9,127 |
package MIP::Recipes::Build::Transcript_annotation_prerequisites;
use 5.026;
use Carp;
use charnames qw{ :full :short };
use English qw{ -no_match_vars };
use File::Spec::Functions qw{ catdir catfile };
use open qw{ :encoding(UTF-8) :std };
use Params::Check qw{ allow check last_error };
use warnings;
use warnings qw{ FATAL utf8 };
## CPANM
use autodie qw{ :all };
use Readonly;
## MIPs lib/
use MIP::Constants qw{ $DASH $DOT $NEWLINE $PIPE $SPACE $UNDERSCORE };
BEGIN {
require Exporter;
use base qw{ Exporter };
# Functions and variables which can be optionally exported
our @EXPORT_OK = qw{ build_transcript_annotation_prerequisites };
}
sub build_transcript_annotation_prerequisites {

## Function : Creates the transcript annotation associated files (bed,
##            refFlat and rRNA interval_list) before the recipe that needs
##            them executes.
## Returns  : 1
## Arguments: $active_parameter_href        => Active parameters for this analysis hash {REF}
##          : $case_id                      => Family ID
##          : $filehandle                   => Filehandle to write to
##          : $file_info_href               => File info hash {REF}
##          : $job_id_href                  => Job id hash {REF}
##          : $log                          => Log object
##          : $parameter_build_suffixes_ref => Exome target bed associated file endings
##          : $parameter_href               => Parameter hash {REF}
##          : $profile_base_command         => Submission profile base command
##          : $recipe_name                  => Program name
##          : $sample_info_href             => Info on samples and case hash {REF}
##          : $temp_directory               => Temporary directory

    my ($arg_href) = @_;

    ## Flatten argument(s)
    my $active_parameter_href;
    my $filehandle;
    my $file_info_href;
    my $job_id_href;
    my $log;
    my $parameter_build_suffixes_ref;
    my $parameter_href;
    my $profile_base_command;
    my $recipe_name;
    my $sample_info_href;

    ## Default(s)
    my $case_id;
    my $temp_directory;

    my $tmpl = {
        active_parameter_href => {
            default     => {},
            defined     => 1,
            required    => 1,
            store       => \$active_parameter_href,
            strict_type => 1,
        },
        case_id => {
            default     => $arg_href->{active_parameter_href}{case_id},
            store       => \$case_id,
            strict_type => 1,
        },
        filehandle     => { store => \$filehandle, },
        file_info_href => {
            default     => {},
            defined     => 1,
            required    => 1,
            store       => \$file_info_href,
            strict_type => 1,
        },
        job_id_href => {
            default     => {},
            defined     => 1,
            required    => 1,
            store       => \$job_id_href,
            strict_type => 1,
        },
        log                          => { store => \$log, },
        parameter_build_suffixes_ref => {
            default     => [],
            defined     => 1,
            required    => 1,
            store       => \$parameter_build_suffixes_ref,
            strict_type => 1,
        },
        parameter_href => {
            default     => {},
            defined     => 1,
            required    => 1,
            store       => \$parameter_href,
            strict_type => 1,
        },
        profile_base_command => {
            default     => q{sbatch},
            store       => \$profile_base_command,
            strict_type => 1,
        },
        recipe_name => {
            defined     => 1,
            required    => 1,
            store       => \$recipe_name,
            strict_type => 1,
        },
        sample_info_href => {
            default     => {},
            defined     => 1,
            required    => 1,
            store       => \$sample_info_href,
            strict_type => 1,
        },
        temp_directory => {
            default     => $arg_href->{active_parameter_href}{temp_directory},
            store       => \$temp_directory,
            strict_type => 1,
        },
    };

    check( $tmpl, $arg_href, 1 ) or croak q{Could not parse arguments!};

    use MIP::Language::Shell qw{ check_exist_and_move_file };
    use MIP::Processmanagement::Processes qw{ submit_recipe };
    use MIP::Program::Ucsc qw{ ucsc_gtf_to_genepred };
    use MIP::Recipe qw{ parse_recipe_prerequisites };
    use MIP::Script::Setup_script qw{ setup_script };

    ## Constants
    # NOTE(review): 100_00 is 10_000 — the unusual digit grouping suggests
    # 100_000 may have been intended; confirm the intended magnitude.
    Readonly my $MAX_RANDOM_NUMBER => 100_00;

    my $recipe_file_path;
    my $submit_switch;

    ## Unpack parameters
    my %recipe = parse_recipe_prerequisites(
        {
            active_parameter_href => $active_parameter_href,
            parameter_href        => $parameter_href,
            recipe_name           => $recipe_name,
        }
    );

    ## Generate a random integer (used to give temporary files unique names).
    my $random_integer = int rand $MAX_RANDOM_NUMBER;

    my $annotation_file_path = $active_parameter_href->{transcript_annotation};
    my $annotation_file_path_random = $annotation_file_path . $UNDERSCORE . $random_integer;

    ## No supplied filehandle i.e. create new sbatch script
    if ( not defined $filehandle ) {

        # Remember that we own the filehandle so we close and submit below.
        $submit_switch = 1;

        ## Create anonymous filehandle
        $filehandle = IO::Handle->new();

        ## Creates recipe directories (info & data & script), recipe script filenames and writes sbatch header
        ($recipe_file_path) = setup_script(
            {
                active_parameter_href           => $active_parameter_href,
                filehandle                      => $filehandle,
                directory_id                    => $case_id,
                job_id_href                     => $job_id_href,
                recipe_directory                => $recipe_name,
                recipe_name                     => $recipe_name,
                source_environment_commands_ref => $recipe{load_env_ref},
            }
        );
    }

    $log->warn( q{Will try to create required }
          . $annotation_file_path
          . q{ associated file(s) before executing }
          . $recipe_name );

    ## Dispatch table: suffix -> builder sub plus the extra arguments it needs.
    my %build_transcript_annotation = (
        q{.bed} => {
            extra_arg_href => {},
            method         => \&_build_bed,
        },
        q{.refflat} => {
            extra_arg_href => {
                annotation_file_path_random => $annotation_file_path_random,
            },
            method => \&_build_refflat,
        },
        q{.rrna.interval_list} => {
            extra_arg_href => {
                active_parameter_href       => $active_parameter_href,
                annotation_file_path_random => $annotation_file_path_random,
            },
            method => \&_build_rrna_interval_list,
        },
    );

  ANNOTATION_SUFFIX:
    foreach my $annotation_suffix ( @{$parameter_build_suffixes_ref} ) {

        my $intended_file_path = $annotation_file_path . $annotation_suffix;
        my $temp_file_path     = $annotation_file_path_random . $annotation_suffix;

        ## Build annotation (writes the build commands to $filehandle)
        $build_transcript_annotation{$annotation_suffix}{method}->(
            {
                %{ $build_transcript_annotation{$annotation_suffix}{extra_arg_href} },
                annotation_file_path => $annotation_file_path,
                filehandle           => $filehandle,
                temp_file_path       => $temp_file_path,
            }
        );

        ## Checks if a file exists and moves the file in place if file is lacking or has a size of 0 bytes.
        check_exist_and_move_file(
            {
                filehandle          => $filehandle,
                intended_file_path  => $intended_file_path,
                temporary_file_path => $temp_file_path,
            }
        );
    }

    ## Only create once
    $parameter_href->{transcript_annotation_file_endings}{build_file} = 0;

    ## Unless filehandle was supplied close filehandle and submit
    if ($submit_switch) {

        close $filehandle;

        if ( $recipe{mode} == 1 ) {

            submit_recipe(
                {
                    base_command       => $profile_base_command,
                    dependency_method  => q{island_to_samples},
                    case_id            => $case_id,
                    job_id_href        => $job_id_href,
                    log                => $log,
                    job_id_chain       => q{MAIN},
                    recipe_file_path   => $recipe_file_path,
                    sample_ids_ref     => \@{ $active_parameter_href->{sample_ids} },
                    submission_profile => $active_parameter_href->{submission_profile},
                }
            );
        }
    }
    return 1;
}
sub _build_bed {

## Function : Emit shell commands that convert the transcript annotation (gtf)
##            into bed format.
## Returns  :
## Arguments: $annotation_file_path => Annotation (gtf) file path
##          : $filehandle           => Filehandle to write the commands to
##          : $temp_file_path       => Temporary outfile for the bed data

    my ($arg_href) = @_;

    ## Flatten argument(s)
    my ( $annotation_file_path, $filehandle, $temp_file_path );

    my $arg_template = {
        annotation_file_path => {
            required    => 1,
            store       => \$annotation_file_path,
            strict_type => 1,
        },
        filehandle     => { store => \$filehandle, },
        temp_file_path => {
            required    => 1,
            store       => \$temp_file_path,
            strict_type => 1,
        },
    };

    check( $arg_template, $arg_href, 1 ) or croak q{Could not parse arguments!};

    use MIP::Program::Gtf2bed qw{ gtf2bed };

    say {$filehandle} q{## Converting to bed format};
    gtf2bed(
        {
            filehandle      => $filehandle,
            infile_path     => $annotation_file_path,
            stdoutfile_path => $temp_file_path,
        }
    );
    say {$filehandle} $NEWLINE;
    return;
}
sub _build_refflat {

## Function : Creates the transcript annotation refflat file (gtf ->
##            extended genePred -> refFlat), writing the commands to the
##            supplied filehandle.
## Returns  :
## Arguments: $annotation_file_path        => Annotation file path
##          : $annotation_file_path_random => Annotation suffix (temp-file prefix)
##          : $filehandle                  => Filehandle to write to
##          : $temp_file_path              => Temp file

    my ($arg_href) = @_;

    ## Flatten argument(s)
    my $annotation_file_path;
    my $annotation_file_path_random;
    my $filehandle;
    my $temp_file_path;

    my $tmpl = {
        annotation_file_path => {
            required    => 1,
            store       => \$annotation_file_path,
            strict_type => 1,
        },
        annotation_file_path_random => {
            required    => 1,
            store       => \$annotation_file_path_random,
            strict_type => 1,
        },
        filehandle     => { store => \$filehandle, },
        temp_file_path => {
            required    => 1,
            store       => \$temp_file_path,
            strict_type => 1,
        },
    };

    check( $tmpl, $arg_href, 1 ) or croak q{Could not parse arguments!};

    use MIP::Language::Perl qw{ perl_nae_oneliners };
    use MIP::Program::Gnu::Coreutils qw{ gnu_rm };
    use MIP::Program::Ucsc qw{ ucsc_gtf_to_genepred };

    ## Set file names (intermediate genePred file, removed at the end)
    my $temp_genepred_file_path = $annotation_file_path_random . $DOT . q{genePred};

    say {$filehandle} q{## Convert gtf to extended genePred };
    ucsc_gtf_to_genepred(
        {
            extended_genepred  => 1,
            filehandle         => $filehandle,
            gene_name_as_name2 => 1,
            infile_path        => $annotation_file_path,
            outfile_path       => $temp_genepred_file_path,
        }
    );
    say {$filehandle} $NEWLINE;

    say {$filehandle} q{## Convert genePred to refFlat};
    perl_nae_oneliners(
        {
            filehandle      => $filehandle,
            oneliner_name   => q{genepred_to_refflat},
            stdinfile_path  => $temp_genepred_file_path,
            stdoutfile_path => $temp_file_path,
        }
    );
    say {$filehandle} $NEWLINE;

    say {$filehandle} q{## Remove temporary files};
    gnu_rm(
        {
            filehandle  => $filehandle,
            force       => 1,
            infile_path => $temp_genepred_file_path,
        }
    );
    print {$filehandle} $NEWLINE;
    return;
}
sub _build_rrna_interval_list {

## Function : Creates the transcript annotation ribosomal RNA interval_list
##            (rRNA transcripts -> bed -> interval_list via a sequence
##            dictionary), writing the commands to the supplied filehandle.
## Returns  :
## Arguments: $active_parameter_href       => Active parameter hash {REF}
##          : $annotation_file_path        => Annotation file path
##          : $annotation_file_path_random => Annotation suffix (temp-file prefix)
##          : $filehandle                  => Filehandle to write to
##          : $temp_file_path              => Temp file

    my ($arg_href) = @_;

    ## Flatten argument(s)
    my $active_parameter_href;
    my $annotation_file_path;
    my $annotation_file_path_random;
    my $filehandle;
    my $temp_file_path;

    my $tmpl = {
        active_parameter_href => {
            default     => {},
            required    => 1,
            store       => \$active_parameter_href,
            strict_type => 1,
        },
        annotation_file_path => {
            required    => 1,
            store       => \$annotation_file_path,
            strict_type => 1,
        },
        annotation_file_path_random => {
            required    => 1,
            store       => \$annotation_file_path_random,
            strict_type => 1,
        },
        filehandle     => { store => \$filehandle, },
        temp_file_path => {
            required    => 1,
            store       => \$temp_file_path,
            strict_type => 1,
        },
    };

    check( $tmpl, $arg_href, 1 ) or croak q{Could not parse arguments!};

    use MIP::Language::Perl qw{ perl_nae_oneliners };
    use MIP::Program::Gtf2bed qw{ gtf2bed };
    use MIP::Program::Picardtools qw{
      picardtools_bedtointervallist
      picardtools_createsequencedictionary
    };
    use MIP::Program::Gnu::Coreutils qw{ gnu_rm };

    ## Set file names (both removed at the end)
    my $temp_rrna_bed_file_path = $annotation_file_path_random . $DOT . q{rrna.bed};
    my $temp_dict_file_path     = $annotation_file_path_random . $DOT . q{dict};

    say {$filehandle} q{## Getting rRNA transcripts and converting to bed format};
    perl_nae_oneliners(
        {
            filehandle     => $filehandle,
            oneliner_name  => q{get_rrna_transcripts},
            stdinfile_path => $annotation_file_path,
        }
    );
    # Pipe the rRNA transcript stream straight into gtf2bed.
    print {$filehandle} $PIPE . $SPACE;

    gtf2bed(
        {
            filehandle      => $filehandle,
            infile_path     => $DASH,
            stdoutfile_path => $temp_rrna_bed_file_path,
        }
    );
    say {$filehandle} $NEWLINE;

    # Build a sequence dictionary from the reference genome; required by
    # picardtools BedToIntervalList below.
    picardtools_createsequencedictionary(
        {
            filehandle => $filehandle,
            java_jar => catfile( $active_parameter_href->{picardtools_path}, q{picard.jar} ),
            java_use_large_pages => $active_parameter_href->{java_use_large_pages},
            memory_allocation    => q{Xmx2g},
            outfile_path         => $temp_dict_file_path,
            referencefile_path   => $active_parameter_href->{human_genome_reference},
            temp_directory       => $active_parameter_href->{temp_directory},
        }
    );
    say {$filehandle} $NEWLINE;

    say {$filehandle} q{## Convert bed to interval_list format};
    picardtools_bedtointervallist(
        {
            filehandle  => $filehandle,
            infile_path => $temp_rrna_bed_file_path,
            java_jar => catfile( $active_parameter_href->{picardtools_path}, q{picard.jar} ),
            java_use_large_pages => $active_parameter_href->{java_use_large_pages},
            memory_allocation    => q{Xmx2g},
            outfile_path         => $temp_file_path,
            sequence_dictionary  => $temp_dict_file_path,
            temp_directory       => $active_parameter_href->{temp_directory},
        }
    );
    say {$filehandle} $NEWLINE;

    say {$filehandle} q{## Remove temporary files};
    foreach my $temp_file ( $temp_dict_file_path, $temp_rrna_bed_file_path ) {

        gnu_rm(
            {
                filehandle  => $filehandle,
                force       => 1,
                infile_path => $temp_file,
            }
        );
        print {$filehandle} $NEWLINE;
    }
    return;
}
1;
| henrikstranneheim/MIP | lib/MIP/Recipes/Build/Transcript_annotation_prerequisites.pm | Perl | mit | 16,459 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V10::Services::FeedItemSetLinkService::MutateFeedItemSetLinkResult;

# Result of a single feed item set link mutate operation. Only the resource
# name of the mutated entity is carried.

use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);

use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;

# Construct a result object from an args hash ref holding `resourceName`.
sub new {
  my ($class, $args) = @_;
  my $self = {resourceName => $args->{resourceName}};

  # Delete the unassigned fields in this object for a more concise JSON payload
  remove_unassigned_fields($self, $args);

  bless $self, $class;
  return $self;
}

1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V10/Services/FeedItemSetLinkService/MutateFeedItemSetLinkResult.pm | Perl | apache-2.0 | 1,066 |
package Google::Ads::AdWords::v201809::IncomeRange;

# Auto-generated SOAP complexType class for the AdWords v201809 IncomeRange
# criterion (see the POD below for the service-level description).

use strict;
use warnings;

__PACKAGE__->_set_element_form_qualified(1);

# XML namespace this type is declared in.
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201809' };

our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;

# This type carries no XML attributes, so no attribute class is provided.
sub __get_attr_class {
    return $XML_ATTRIBUTE_CLASS;
}

use base qw(Google::Ads::AdWords::v201809::Criterion);
# Variety: sequence
use Class::Std::Fast::Storable constructor => 'none';
use base qw(Google::Ads::SOAP::Typelib::ComplexType);

{ # BLOCK to scope variables

# Inside-out attribute storage (Class::Std::Fast::Storable), one hash per field.
my %id_of :ATTR(:get<id>);
my %type_of :ATTR(:get<type>);
my %Criterion__Type_of :ATTR(:get<Criterion__Type>);
my %incomeRangeType_of :ATTR(:get<incomeRangeType>);

# Register fields with the typelib factory: names, storage, XSD types and
# the XML element names they (de)serialize to/from.
__PACKAGE__->_factory(
    [ qw(        id
        type
        Criterion__Type
        incomeRangeType

    ) ],
    {
        'id' => \%id_of,
        'type' => \%type_of,
        'Criterion__Type' => \%Criterion__Type_of,
        'incomeRangeType' => \%incomeRangeType_of,
    },
    {
        'id' => 'SOAP::WSDL::XSD::Typelib::Builtin::long',
        'type' => 'Google::Ads::AdWords::v201809::Criterion::Type',
        'Criterion__Type' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
        'incomeRangeType' => 'Google::Ads::AdWords::v201809::IncomeRange::IncomeRangeType',
    },
    {

        'id' => 'id',
        'type' => 'type',
        'Criterion__Type' => 'Criterion.Type',
        'incomeRangeType' => 'incomeRangeType',
    }
);

} # end BLOCK

1;
=pod
=head1 NAME
Google::Ads::AdWords::v201809::IncomeRange
=head1 DESCRIPTION
Perl data type class for the XML Schema defined complexType
IncomeRange from the namespace https://adwords.google.com/api/adwords/cm/v201809.
Income range criterion allows to target and exclude predefined income percentile ranges. <p>A criterion of this type can only be created using an ID. A criterion of this type is only excludable. <span class="constraint AdxEnabled">This is disabled for AdX when it is contained within Operators: ADD, SET.</span>
=head2 PROPERTIES
The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:
=over
=item * incomeRangeType
=back
=head1 METHODS
=head2 new
Constructor. The following data structure may be passed to new():
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| googleads/googleads-perl-lib | lib/Google/Ads/AdWords/v201809/IncomeRange.pm | Perl | apache-2.0 | 2,303 |
package Google::Ads::AdWords::v201809::SharedSetReturnValue;

# Auto-generated SOAP complexType class for the AdWords v201809
# SharedSetReturnValue (container for SharedSetService mutate results).

use strict;
use warnings;

__PACKAGE__->_set_element_form_qualified(1);

# XML namespace this type is declared in.
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201809' };

our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;

# This type carries no XML attributes, so no attribute class is provided.
sub __get_attr_class {
    return $XML_ATTRIBUTE_CLASS;
}

use base qw(Google::Ads::AdWords::v201809::ListReturnValue);
# Variety: sequence
use Class::Std::Fast::Storable constructor => 'none';
use base qw(Google::Ads::SOAP::Typelib::ComplexType);

{ # BLOCK to scope variables

# Inside-out attribute storage (Class::Std::Fast::Storable), one hash per field.
my %ListReturnValue__Type_of :ATTR(:get<ListReturnValue__Type>);
my %value_of :ATTR(:get<value>);
my %partialFailureErrors_of :ATTR(:get<partialFailureErrors>);

# Register fields with the typelib factory: names, storage, XSD types and
# the XML element names they (de)serialize to/from.
__PACKAGE__->_factory(
    [ qw(        ListReturnValue__Type
        value
        partialFailureErrors

    ) ],
    {
        'ListReturnValue__Type' => \%ListReturnValue__Type_of,
        'value' => \%value_of,
        'partialFailureErrors' => \%partialFailureErrors_of,
    },
    {
        'ListReturnValue__Type' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
        'value' => 'Google::Ads::AdWords::v201809::SharedSet',
        'partialFailureErrors' => 'Google::Ads::AdWords::v201809::ApiError',
    },
    {

        'ListReturnValue__Type' => 'ListReturnValue.Type',
        'value' => 'value',
        'partialFailureErrors' => 'partialFailureErrors',
    }
);

} # end BLOCK

1;
=pod
=head1 NAME
Google::Ads::AdWords::v201809::SharedSetReturnValue
=head1 DESCRIPTION
Perl data type class for the XML Schema defined complexType
SharedSetReturnValue from the namespace https://adwords.google.com/api/adwords/cm/v201809.
A container for return values from the {@link SharedSetService#mutate} call.
=head2 PROPERTIES
The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:
=over
=item * value
=item * partialFailureErrors
=back
=head1 METHODS
=head2 new
Constructor. The following data structure may be passed to new():
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| googleads/googleads-perl-lib | lib/Google/Ads/AdWords/v201809/SharedSetReturnValue.pm | Perl | apache-2.0 | 2,048 |
=head1 LICENSE
Copyright (c) 1999-2011 The European Bioinformatics Institute and
Genome Research Limited. All rights reserved.
This software is distributed under a modified Apache license.
For license details, please see
http://www.ensembl.org/info/about/code_licence.html
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <dev@ensembl.org>.
Questions may also be sent to the Ensembl help desk at
<helpdesk@ensembl.org>.
=cut
=head1 NAME
Bio::EnsEMBL::DBSQL::KaryotypeBandAdaptor
=head1 SYNOPSIS
$kary_adaptor = $db_adaptor->get_KaryotypeBandAdaptor();
foreach $band ( @{ $kary_adaptor->fetch_all_by_Slice($slice) } ) {
# do something with band
}
$band = $kary_adaptor->fetch_by_dbID($id);
my @bands = @{ $kary_adaptor->fetch_all_by_chr_name('X') };
my $band = $kary_adaptor->fetch_by_chr_band( '4', 'q23' );
=head1 DESCRIPTION
Database adaptor to provide access to KaryotypeBand objects
=head1 METHODS
=cut
package Bio::EnsEMBL::DBSQL::KaryotypeBandAdaptor;
use strict;
use vars qw(@ISA);
use Bio::EnsEMBL::KaryotypeBand;
use Bio::EnsEMBL::Utils::Exception qw(throw warning deprecate);
use Bio::EnsEMBL::DBSQL::BaseFeatureAdaptor;
@ISA = qw(Bio::EnsEMBL::DBSQL::BaseFeatureAdaptor);
#_tables
#
# Arg [1] : none
# Example : none
# Description: PROTECTED Implementation of abstract superclass method to
# provide the name of the tables to query
# Returntype : string
# Exceptions : none
# Caller : internal
sub _tables {
  my ($self) = @_;

  # Karyotype band data lives in a single table, aliased 'k' for the
  # column list used by the generic BaseFeatureAdaptor query machinery.
  return ( [ 'karyotype', 'k' ] );
}
#_columns
# Arg [1] : none
# Example : none
# Description: PROTECTED Implementation of abstract superclass method to
# provide the name of the columns to query
# Returntype : list of strings
# Exceptions : none
# Caller : internal
sub _columns {
  my ($self) = @_;

  # NB: _objs_from_sth binds result columns positionally, so this order
  # must not change.
  return ( 'k.karyotype_id',     'k.seq_region_id',
           'k.seq_region_start', 'k.seq_region_end',
           'k.band',             'k.stain' );
}
# _objs_from_sth
#
#  Arg [1]    : DBI statement handle $sth, already executed, with result
#               columns in the exact order returned by _columns()
#  Description: PROTECTED Builds KaryotypeBand features from the rows of an
#               executed statement handle. Slices are cached per seq_region
#               so every band on the same region shares one Slice object.
#  Returntype : listref of Bio::EnsEMBL::KaryotypeBand
#  Exceptions : none
#  Caller     : internal (BaseFeatureAdaptor generic fetch methods)
sub _objs_from_sth {
  my ($self, $sth) = @_;
  my $db = $self->db();
  my $slice_adaptor = $db->get_SliceAdaptor();
  my @features;
  my %slice_cache;
  # Bound variables follow the column order defined in _columns()
  my($karyotype_id,$seq_region_id,$seq_region_start,$seq_region_end,
     $band,$stain);
  $sth->bind_columns(\$karyotype_id, \$seq_region_id, \$seq_region_start,
                     \$seq_region_end, \$band, \$stain);
  while ( $sth->fetch() ) {
    #need to get the internal_seq_region, if present
    $seq_region_id = $self->get_seq_region_id_internal($seq_region_id);
    # ||= caches the slice so it is fetched only once per seq_region
    my $slice = $slice_cache{$seq_region_id} ||=
      $slice_adaptor->fetch_by_seq_region_id($seq_region_id);
    push( @features,
          $self->_create_feature( 'Bio::EnsEMBL::KaryotypeBand', {
                                    -START   => $seq_region_start,
                                    -END     => $seq_region_end,
                                    -SLICE   => $slice,
                                    -ADAPTOR => $self,
                                    -DBID    => $karyotype_id,
                                    -NAME    => $band,
                                    -STAIN   => $stain
                                  } ) );
  }
  return \@features;
}
=head2 fetch_all_by_chr_name
Arg [1] : string $chr_name
Name of the chromosome from which to retrieve band objects
Example : @bands=@{$karyotype_band_adaptor->fetch_all_by_chr_name('X')};
Description: Fetches all the karyotype band objects from the database for the
given chromosome.
Returntype : listref of Bio::EnsEMBL::KaryotypeBand in chromosomal
(assembly) coordinates
Exceptions : none
Caller : general
Status : Stable
=cut
sub fetch_all_by_chr_name {
  my ($self, $chr_name) = @_;

  throw('Chromosome name argument expected') if (!$chr_name);

  # Resolve the chromosome name to a slice; the coordinate system is left
  # undefined so the highest-ranking matching system is used.
  my $slice_adaptor = $self->db->get_SliceAdaptor();
  my $slice         = $slice_adaptor->fetch_by_region(undef, $chr_name);

  if (!defined $slice) {
    warning("Cannot retrieve chromosome $chr_name");
    return;
  }

  return $self->fetch_all_by_Slice($slice);
}
=head2 fetch_all_by_chr_band

  Arg [1]    : string $chr_name
               Name of the chromosome from which to retrieve bands
  Arg [2]    : string $band
               Band name prefix to match; e.g. 'q23' also matches 'q23.1'
  Example    : @bands = @{ $kary_adaptor->fetch_all_by_chr_band('4','q23') };
  Description: Fetches all karyotype bands on the given chromosome whose
               band name starts with $band (fuzzy LIKE match).
  Returntype : listref of Bio::EnsEMBL::KaryotypeBand
  Exceptions : throws if the chromosome name or band argument is missing
  Caller     : general

=cut

sub fetch_all_by_chr_band {
  my ($self, $chr_name, $band) = @_;

  throw('Chromosome name argument expected') if(!$chr_name);
  throw('Band argument expected') if(!$band);

  my $slice = $self->db->get_SliceAdaptor->fetch_by_region(undef,
                                                           $chr_name);

  # Be consistent with fetch_all_by_chr_name: warn and return empty-handed
  # rather than passing an undefined slice downstream and failing obscurely.
  if (!defined $slice) {
    warning("Cannot retrieve chromosome $chr_name");
    return;
  }

  # NOTE(review): $band is interpolated directly into the SQL constraint;
  # callers must not pass untrusted input here.
  my $constraint = "k.band like '$band%'";

  return $self->fetch_all_by_Slice_constraint($slice,$constraint);
}
=head2 fetch_by_chr_band
Arg [1] : string $chr_name
Name of the chromosome from which to retrieve the band
Arg [2] : string $band
The name of the band to retrieve from the specified chromosome
Example : @bands = @{$kary_adaptor->fetch_all_by_chr_band('4', 'q23')};
Description: Fetches the karyotype band object from the database
for the given chromosome and band name. If no such band
exists, undef is returned instead. This function uses fuzzy
matching of the band name. For example the bands 'q23.1' and
'q23.4' could be matched by fetch_all_by_chr_band('20', 'q23');
Returntype : Bio::EnsEMBL::KaryotypeBand in chromosomal coordinates.
Exceptions : throws if chr or band is missing in arguments
Caller : general
Status : Stable
=cut
sub fetch_by_chr_band {
  my ($self, @args) = @_;

  deprecate('Use fetch_all_by_chr_band instead.');

  # Delegate to the list-returning variant and hand back the first hit
  # (undef when no band matched).
  my $bands = $self->fetch_all_by_chr_band(@args);
  return $bands->[0];
}
=head2 list_dbIDs
Arg [1] : none
Example : @kary_ids = @{$karyotype_band_adaptor->list_dbIDs()};
Description: Gets an array of internal ids for all karyotype bands in the
current db
Arg[1] : <optional> int. not 0 for the ids to be sorted by the seq_region.
Returntype : reference to a list of ints
Exceptions : none
Caller : ?
Status : Stable
=cut
sub list_dbIDs {
  # Bug fix: the original unpacked arguments with '= shift', which only
  # assigns $self and leaves $ordered permanently undefined, so the
  # optional seq_region-ordering flag documented above was silently
  # ignored. Unpack from @_ instead.
  my ($self, $ordered) = @_;

  return $self->_list_dbIDs("karyotype", undef, $ordered);
}
1;
| adamsardar/perl-libs-custom | EnsemblAPI/ensembl/modules/Bio/EnsEMBL/DBSQL/KaryotypeBandAdaptor.pm | Perl | apache-2.0 | 6,187 |
#!/usr/bin/env perl
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2022] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# call
#
# perl ./genebuild/prepare_proteome.pl
# -proteome_file proteome_output_file.fa
# -file_info protein_input_file.fa='(\S+)'
#
# use regex '/(\S+)/' if header format of your fasta is
# >ABC234.1
# >ABC234.1
#
use strict;
use warnings;
use Bio::EnsEMBL::KillList::KillList;
use Bio::EnsEMBL::KillList::DBSQL::DBAdaptor;
use Bio::SeqIO;
use Bio::EnsEMBL::Utils::Exception qw(throw warning);
use Bio::EnsEMBL::Analysis::Tools::Logger qw (logger_info logger_verbosity);
use Getopt::Long;
# ---- Command-line options and their defaults -------------------------------
my $output_file;                 # combined proteome fasta (appended to)
my @file_info;                   # one or more "file=regex[=refseq|uniprot]" specs
my $skip_xs = 5;                 # skip seqs containing a run of this many Xs
my $use_killlist = 1;            # drop accessions present on the kill list
my $skip_xps = 1;                # NOTE(review): declared but never used below
my $logger_verbosity = "NONE";
my $min_length = 15;             # minimum peptide length to keep

# Plain GetOptions() call; the original used the legacy &GetOptions form,
# whose leading & sigil is unnecessary and disables prototype checking.
GetOptions( "proteome_file|pmatch_output_file:s" => \$output_file,
            "file_info:s@" => \@file_info,
            "skip_xs:s" => \$skip_xs,
            "use_killlist!" => \$use_killlist,
            "min_length:i" => \$min_length,
            "logger_verbosity:s" => \$logger_verbosity, );
logger_verbosity($logger_verbosity);

# Kill list: accessions of proteins known to cause annotation problems
my $kill_list_object = Bio::EnsEMBL::KillList::KillList->new(-TYPE => 'PROTEIN');
my %kill_list = %{$kill_list_object->get_kill_list()};

my %files;     # input file => id-parsing regex
my %refseq;    # input files flagged as RefSeq
my %uniprot;   # input files flagged as UniProt
# Parse each "file=regex[=refseq|uniprot]" specification into the lookup
# hashes used by the main loop below.
foreach my $file_info (@file_info) {
  #print "Have file info ".$file_info."\n";
  my ( $file, $regex, $other ) = split /\=/, $file_info;
  #print "HAVE FILE ".$file." and regex ".$regex."\n";
  throw( "Need both a file " . $file . " and a regex " . $regex )
    if ( !$file || !$regex );
  $files{$file} = $regex;
  # The optional third field marks the source database; this enables
  # source-specific id filtering/fixing later on.
  if ( $other && $other =~ /^refseq$/i ) {
    $refseq{$file} = 1;
  } elsif ( $other && $other =~ /^uniprot$/i ) {
    $uniprot{$file} = 1;
  }
}
# If the output file already exists, interactively ask whether to append,
# abort, or delete it and start fresh.
if(-e $output_file){
  print "\nProtein file '" . $output_file . "' already exists, these ".
  "entries will be appended to the end of the file.\n";
  print "Do you want this? Answer y/n or d to delete the file: ";
  my $reply = get_input_arg();
  if($reply =~ m/n/i) {
    print "\nYou must delete or rename '" . $output_file . "' before you continue.\n";
    exit;
  } elsif($reply =~ m/d/i) {
    print "\nRunning rm -f '$output_file'\n";
    system("rm -f $output_file");
    print "File '$output_file' deleted.\n";
  } else {
    # anything other than n/d (including y) means append
    print "\nWill append new entries to '" . $output_file . "'.\n";
  }
}
# Open the output fasta in append mode (see the prompt handling above)
my $output_io = Bio::SeqIO->new( -format => 'fasta',
                                 -file   => ">>" . $output_file, );
# ids already written, used to de-duplicate across input files
my %ids;
# Read each input fasta, filter and normalise its entries, and append the
# survivors to the combined proteome file.
foreach my $file (keys(%files)) {
  my $is_refseq  = $refseq{$file};
  my $is_uniprot = $uniprot{$file};
  my $regex      = $files{$file};
  print "\nFile '" . $file . "' with regex: '" . $regex . "' was specified.\n\n";

  # A run of $skip_xs consecutive Xs marks a sequence as too ambiguous
  my $x_string;
  if ($skip_xs) {
    $x_string = "X" x $skip_xs;
  }

  my $io = Bio::SeqIO->new( -format => 'fasta',
                            -file   => $file, );

 SEQ: while (my $seq = $io->next_seq) {
    my $parseable_string = $seq->id;
    $parseable_string .= " " . $seq->desc if $seq->desc();

    # bit of a hack: pull a trailing "SV=<n>" sequence version if present
    my $sequence_version;
    if ($parseable_string =~ /SV=(\d+)$/) {
      $sequence_version = $1;
    }

    my ($id) = $parseable_string =~ /$regex/;

    # Bug fix: check definedness BEFORE any further matching on $id. The
    # original ran the '^1$' sanity check first, raising uninitialised
    # value warnings whenever the regex failed to capture anything.
    if (!$id) {
      warn($regex . " failed to parse an id out of " .
           $parseable_string);
      next SEQ;
    }

    # Sanity check: a bare '1' almost certainly means the regex captured
    # the match-success flag instead of an accession.
    if ($id =~ m/^1$/) {
      print "got " . $id . " from " . $parseable_string . "\n";
      die;
    }

    if ($x_string) {
      if ($seq->seq =~ /$x_string/) {
        logger_info($id . " contains too many X characters, SKIPPING");
        next SEQ;
      }
    }

    if (length($seq->seq) < $min_length) {
      logger_info($id . " is shorter than $min_length");
      next SEQ;
    }

    # RefSeq inputs: keep curated protein records (NP_*) only
    if ($is_refseq && !($id =~ /^NP/)) {
      logger_info($id . " isn't an NP so skipping");
      next SEQ;
    }

    if ($is_uniprot && !($id =~ /^sp/ || $id =~ /^tr/)) {
      logger_info($id . " isn't a sp or tr ");
      throw("Don't know what to do with uniprot id $id");
    } elsif ($is_uniprot && $regex eq '(\S+)') {
      # need to fix the header because it will look like
      # sp|Q9I9D5|RX1_ASTFA
      # tr|O42292|O42292_ASTMX
      #
      # Bug fix: only overwrite $id on a successful match. The original
      # assigned $2 unconditionally, corrupting $id with a stale capture
      # whenever the match failed.
      if ($id =~ /^(sp|tr)\|(\w+)\|\w+$/) {
        $id = $2;
      } else {
        throw("uniprot id looks wrong: $id");
      }
      if ($id !~ /\.\d+/ && defined $sequence_version) {
        $id .= "." . $sequence_version;
      }
      if ($id !~ /\w+/) {
        throw("uniprot id looks wrong: $id");
      }
    }

    if ($seq->seq =~ /(B|Z|J)/) {
      warn("AMBIGUITY CODE FOUND!!! $id contains at least one residue of code $1. May interfere with Pmatch.");
    }

    # Kill-list lookups are versionless
    my $no_version_id = $id;
    $no_version_id =~ s/\.\d+//;
    if ($use_killlist && exists $kill_list{$no_version_id}) {
      logger_info($id . " on kill list as $no_version_id");
      next SEQ;
    }

    # De-duplicate across all input files
    if (exists($ids{$id})) {
      logger_info($id . " has already been stored");
      next SEQ;
    }
    $ids{$id} = 1;

    # Normalise the record: bare id header, selenocysteine (U) -> X
    $seq->desc("");
    $seq->id($id);
    my $seq_string = $seq->seq;
    $seq_string =~ s/U/X/g;
    $seq->seq($seq_string);
    $output_io->write_seq($seq);
  }
}
# Read one line of interactive user input (the y/n/d reply for the prompt
# above) and return it, newline included.
sub get_input_arg {
  # Bug fix: read explicitly from STDIN. The original used the magic <>
  # handle, which reads from any files left in @ARGV instead of the
  # terminal when stray arguments survive option parsing.
  my $line = <STDIN>;
  return $line;
}
| Ensembl/ensembl-analysis | scripts/genebuild/prepare_proteome.pl | Perl | apache-2.0 | 5,720 |
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package apps::voip::cisco::meetingplace::mode::videoports;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    # Thresholds are percentages of the maximum available video ports.
    $options{options}->add_options(arguments => {
        "warning:s"  => { name => 'warning',  default => '60' },
        "critical:s" => { name => 'critical', default => '70' },
    });

    return $self;
}
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::init(%options);

    $self->{warning}  = $self->{option_results}->{warning};
    $self->{critical} = $self->{option_results}->{critical};

    # Validate both thresholds up front so a malformed value aborts with a
    # clear message instead of failing later inside threshold_check().
    if (($self->{perfdata}->threshold_validate(label => 'warning', value => $self->{warning})) == 0) {
        $self->{output}->add_option_msg(short_msg => "Wrong warning threshold '" . $self->{warning} . "'.");
        $self->{output}->option_exit();
    }
    if (($self->{perfdata}->threshold_validate(label => 'critical', value => $self->{critical})) == 0) {
        # Message fix: the original read "threshold'" with the space missing
        # before the opening quote.
        $self->{output}->add_option_msg(short_msg => "Wrong critical threshold '" . $self->{critical} . "'.");
        $self->{output}->option_exit();
    }
}
sub run {
    my ($self, %options) = @_;
    $self->{snmp} = $options{snmp};

    # Number of video ports currently in use
    my $oid_cmpVideoPortsUsage = '.1.3.6.1.4.1.9.9.733.1.4.1.0';
    # Maximum number of video ports available
    my $oid_cmpMaxVideoPortsAvailable = '.1.3.6.1.4.1.9.9.733.1.4.2.0';

    my $result = $self->{snmp}->get_leef(oids => [$oid_cmpVideoPortsUsage, $oid_cmpMaxVideoPortsAvailable], nothing_quit => 1);
    my $max_ports = $result->{$oid_cmpMaxVideoPortsAvailable};

    # Bug fix: guard against a zero or missing maximum. The original divided
    # blindly and would die with "Illegal division by zero".
    if (!defined($max_ports) || $max_ports == 0) {
        $self->{output}->output_add(severity => 'UNKNOWN',
                                    short_msg => 'Maximum number of available video ports is 0 or not reported.');
        $self->{output}->display();
        $self->{output}->exit();
    }

    my $prct = $result->{$oid_cmpVideoPortsUsage} / $max_ports * 100;
    # Convert percentage thresholds to absolute port counts for perfdata
    my $abs_warning  = $self->{option_results}->{warning}  / 100 * $max_ports;
    my $abs_critical = $self->{option_results}->{critical} / 100 * $max_ports;

    my $exit = $self->{perfdata}->threshold_check(value => $prct,
                  threshold => [ { label => 'critical', 'exit_litteral' => 'critical' }, { label => 'warning', exit_litteral => 'warning' } ]
               );

    $self->{output}->output_add(severity => $exit,
                                short_msg => sprintf("%.2f%% of video ports are in use. (%d max)",
                                                     $prct, $max_ports));
    $self->{output}->perfdata_add(label => "video-ports", unit => 'ports',
                                  value => $result->{$oid_cmpVideoPortsUsage},
                                  warning => $abs_warning,
                                  critical => $abs_critical,
                                  min => 0,
                                  max => $max_ports);
    $self->{output}->display();
    $self->{output}->exit();
}
1;
__END__
=head1 MODE
Check the percentage of video ports used on this cisco meeting place platform.
=over 8
=item B<--warning>
Threshold warning: Percentage value of video ports usage resulting in a warning state
=item B<--critical>
Threshold critical: Percentage value of video ports usage resulting in a critical state
=back
=cut
| Sims24/centreon-plugins | apps/voip/cisco/meetingplace/mode/videoports.pm | Perl | apache-2.0 | 4,120 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V10::Services::BiddingSeasonalityAdjustmentService::MutateBiddingSeasonalityAdjustmentsRequest;
use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);
use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;
sub new {
  my ($class, $args) = @_;

  # Copy the recognised request fields from the caller-supplied hashref.
  my $self = {
    customerId          => $args->{customerId},
    operations          => $args->{operations},
    partialFailure      => $args->{partialFailure},
    responseContentType => $args->{responseContentType},
    validateOnly        => $args->{validateOnly},
  };

  # Delete the unassigned fields in this object for a more concise JSON payload
  remove_unassigned_fields($self, $args);

  return bless $self, $class;
}
1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V10/Services/BiddingSeasonalityAdjustmentService/MutateBiddingSeasonalityAdjustmentsRequest.pm | Perl | apache-2.0 | 1,311 |
package Net::Statsd;
# ABSTRACT: Sends statistics to the stats daemon over UDP
# Cosimo Streppone <cosimo@cpan.org>
use strict;
use warnings;
use Carp ();
use IO::Socket ();
our $HOST = 'localhost';
our $PORT = 8125;
my $SOCK;
my $SOCK_PEER;
=head1 NAME
Net::Statsd - Perl client for Etsy's statsd daemon
=head1 SYNOPSIS
# Configure where to send events
# That's where your statsd daemon is listening.
$Net::Statsd::HOST = 'localhost'; # Default
$Net::Statsd::PORT = 8125; # Default
#
# Keep track of events as counters
#
Net::Statsd::increment('site.logins');
Net::Statsd::increment('database.connects');
#
# Log timing of events, ex. db queries
#
use Time::HiRes;
my $start_time = [ Time::HiRes::gettimeofday ];
# do the complex database query
# note: time value sent to timing should
# be in milliseconds.
Net::Statsd::timing(
'database.complexquery',
Time::HiRes::tv_interval($start_time) * 1000
);
#
# Log metric values
#
Net::Statsd::gauge('core.temperature' => 55);
=head1 DESCRIPTION
This module implement a UDP client for the B<statsd> statistics
collector daemon in use at Etsy.com.
You want to use this module to track statistics in your Perl
application, such as how many times a certain event occurs
(user logins in a web application, or database queries issued),
or you want to time and then graph how long certain events take,
like database queries execution time or time to download a
certain file, etc...
If you're uncertain whether you'd want to use this module or
statsd, then you can read some background information here:
http://codeascraft.etsy.com/2011/02/15/measure-anything-measure-everything/
The github repository for statsd is:
http://github.com/etsy/statsd
By default the client will try to send statistic metrics to
C<localhost:8125>, but you can change the default hostname and port
with:
$Net::Statsd::HOST = 'your.statsd.hostname.net';
$Net::Statsd::PORT = 9999;
just after including the C<Net::Statsd> module.
=head1 ABOUT SAMPLING
A note about sample rate: A sample rate of < 1 instructs this
library to send only the specified percentage of the samples to
the server. As such, the application code should call this module
for every occurence of each metric and allow this library to
determine which specific measurements to deliver, based on the
sample_rate value. (e.g. a sample rate of 0.5 would indicate that
approximately only half of the metrics given to this module would
actually be sent to statsd).
=head1 FUNCTIONS
=cut
=head2 C<timing($name, $time, $sample_rate = 1)>
Log timing information.
B<Time is assumed to be in milliseconds (ms)>.
Net::Statsd::timing('some.timer', 500);
=cut
sub timing {
    my ($name, $time, $sample_rate) = @_;

    $sample_rate = 1 unless defined $sample_rate;

    # Timers are encoded as "<ms>|ms"; $time is truncated to an integer.
    return Net::Statsd::send(
        { $name => sprintf "%d|ms", $time },
        $sample_rate,
    );
}
=head2 C<increment($counter, $sample_rate=1)>
=head2 C<increment(\@counter, $sample_rate=1)>
Increments one or more stats counters
# +1 on 'some.int'
Net::Statsd::increment('some.int');
# 0.5 = 50% sampling
Net::Statsd::increment('some.int', 0.5);
To increment more than one counter at a time,
you can B<pass an array reference>:
Net::Statsd::increment(['grue.dinners', 'room.lamps'], 1);
B<You can also use "inc()" instead of "increment()" to type less>.
=cut
sub increment {
    my ($counters, $sample_rate) = @_;
    # An increment is just a counter update with a fixed delta of +1.
    return Net::Statsd::update_stats($counters, 1, $sample_rate);
}

*inc = *increment;    # short alias
=head2 C<decrement($counter, $sample_rate=1)>
Same as increment, but decrements. Yay.
Net::Statsd::decrement('some.int')
B<You can also use "dec()" instead of "decrement()" to type less>.
=cut
sub decrement {
    my ($counters, $sample_rate) = @_;
    # A decrement is just a counter update with a fixed delta of -1.
    return Net::Statsd::update_stats($counters, -1, $sample_rate);
}

*dec = *decrement;    # short alias
=head2 C<update_stats($stats, $delta=1, $sample_rate=1)>
Updates one or more stats counters by arbitrary amounts
Net::Statsd::update_stats('some.int', 10)
equivalent to:
Net::Statsd::update_stats('some.int', 10, 1)
A sampling rate less than 1 means only update the stats
every x number of times (0.1 = 10% of the times).
=cut
sub update_stats {
    my ($stats, $delta, $sample_rate) = @_;

    $delta       = 1 unless defined $delta;
    $sample_rate = 1 unless defined $sample_rate;

    # Accept a single counter name or an arrayref of names; a hashref is a
    # usage error here.
    my $reftype = ref $stats;
    if ($reftype eq 'HASH') {
        Carp::croak("Usage: update_stats(\$str, ...) or update_stats(\\\@list, ...)");
    }
    my @names = $reftype ? @{ $stats } : ($stats);

    # Counters are encoded as "<delta>|c"
    my %data;
    $data{$_} = sprintf "%s|c", $delta for @names;

    return Net::Statsd::send(\%data, $sample_rate);
}
=head2 C<gauge($name, $value)>
Log arbitrary values, as a temperature, or server load.
Net::Statsd::gauge('core.temperature', 55);
=cut
sub gauge {
    my ($name, $value) = @_;

    # Gauges accept arbitrary values; an undefined value becomes 0.
    $value = 0 if ! defined $value;

    # Didn't use '%d' because values might be floats
    return Net::Statsd::send({ $name => sprintf "%s|g", $value }, 1);
}
=head2 C<send(\%data, $sample_rate = 1)>
Squirt the metrics over UDP.
Net::Statsd::send({ 'some.int' => 1 });
=cut
sub send {
    my ($data, $sample_rate) = @_;

    my $sampled_data = _sample_data($data, $sample_rate);

    # No sampled_data can happen when:
    # 1) No $data came in
    # 2) Sample rate was low enough that we don't want to send events
    if (! $sampled_data) {
        return;
    }

    # cache the socket to avoid dns and socket creation overheads
    # (this boosts performance from ~6k to >60k sends/sec)
    if (!$SOCK || !$SOCK_PEER || "$HOST:$PORT" ne $SOCK_PEER) {
        $SOCK = IO::Socket::INET->new(
            Proto    => 'udp',
            PeerAddr => $HOST,
            PeerPort => $PORT,
        ) or do {
            # Warn only once per host:port pair to avoid log spam
            Carp::carp("Net::Statsd can't create a socket to $HOST:$PORT: $!")
                unless our $_warn_once->{"$HOST:$PORT"}++;
            return
        };
        $SOCK_PEER = "$HOST:$PORT";
        # We don't want to die if Net::Statsd::send() doesn't work...
        # We could though:
        #
        # or die "Could not create UDP socket: $!\n";
    }

    my $all_sent = 1;

    keys %{ $sampled_data }; # reset iterator
    while ( my ($stat, $value) = each %{ $sampled_data } ) {
        # One statsd datagram per metric: "<name>:<value>"
        my $packet = "$stat:$value";
        # NOTE: this resolves to Perl's builtin send() (CORE::send), not a
        # recursive call to this sub - user-declared subs do not override
        # named built-in operators.
        # send() returns the number of characters sent, or undef on error.
        my $r = send($SOCK, $packet, 0);
        if (!defined $r) {
            #warn "Net::Statsd send error: $!";
            $all_sent = 0;
        }
        elsif ($r != length($packet)) {
            #warn "Net::Statsd send truncated: $!";
            $all_sent = 0;
        }
    }

    return $all_sent;
}
=head2 C<_sample_data(\%data, $sample_rate = 1)>
B<This method is used internally, it's not part of the public interface.>
Takes care of transforming a hash of metrics data into
a B<sampled> hash of metrics data, according to the given
C<$sample_rate>.
If C<$sample_rate == 1>, then sampled data is exactly the
incoming data.
If C<$sample_rate = 0.2>, then every metric value will be I<marked>
with the given sample rate, so the Statsd server will automatically
scale it. For example, with a sample rate of 0.2, the metric values
will be multiplied by 5.
=cut
sub _sample_data {
    my ($data, $sample_rate) = @_;

    # Metrics must arrive as a plain hashref of name => encoded-value
    Carp::croak("No data?") if ! $data || ref $data ne 'HASH';

    $sample_rate = 1 unless defined $sample_rate;

    # A full (or greater) sample rate means every metric is sent verbatim;
    # the input hashref is returned untouched.
    return $data if $sample_rate >= 1;

    # Client-side sampling: with probability $sample_rate keep the whole
    # batch, tagging each value with "|@<rate>" so the statsd server can
    # scale counts back up; otherwise return undef so nothing is sent.
    my $sampled_data;
    if (rand() <= $sample_rate) {
        for my $stat (keys %{ $data }) {
            # Lazily create the result so an empty input yields undef
            $sampled_data ||= {};
            $sampled_data->{$stat} = sprintf "%s|@%s", $data->{$stat}, $sample_rate;
        }
    }

    return $sampled_data;
}
1;
| eoly/statsd-perl-scripts | lib/Net/Statsd.pm | Perl | apache-2.0 | 8,487 |
#!/usr/bin/perl
# MLSA
# Put .mgc files in gen folder along with their aschii pitch files
# Sankar Mukherjee
# CET IITKGP
# Settings ==============================
%ordr = ('mgc' => '35', # feature order
'lf0' => '1',
'dur' => '5');
# Speech Analysis/Synthesis Setting ==============
# speech analysis
$sr = 44100; # sampling rate (Hz)
$fs = 220; # frame period (point)
$fw = 0.55; # frequency warping
$gm = 0; # pole/zero representation weight
$lg = 1; # use log gain instead of linear gain
$fr = $fs/$sr; # frame period (sec)
# speech synthesis
$pf = 1.4; # postfiltering factor
$fl = 4096; # length of impulse response
$co = 2047; # order of cepstrum to approximate mel-generalized cepstrum
# Modeling/Generation Setting ==============
# generation
$maxEMiter = 20; # max EM iteration
$EMepsilon = 0.0001; # convergence factor for EM iteration
$useGV = 1; # turn on GV
$maxGViter = 50; # max GV iteration
$GVepsilon = 0.0001; # convergence factor for GV iteration
$minEucNorm = 0.01; # minimum Euclid norm for GV iteration
$stepInit = 1.0; # initial step size
$stepInc = 1.2; # step size acceleration factor
$stepDec = 0.5; # step size deceleration factor
$hmmWeight = 1.0; # weight for HMM output prob.
$gvWeight = 1.0; # weight for GV output prob.
$optKind = 'NEWTON'; # optimization method (STEEPEST, NEWTON, or LBFGS)
$nosilgv = 1; # GV without silent and pause phoneme
$cdgv = 1; # context-dependent GV
# Directories & Commands ===============
# project directories
use Cwd;
$prjdir = cwd();
$datdir = "$prjdir/data";
# Perl
$PERL = '/usr/bin/perl';
# wc
$WC = '/usr/bin/wc';
# tee
$TEE = '/usr/bin/tee';
# HTS commands
$HCOMPV = '/usr/local/HTS-2.2beta/bin/HCompV';
$HLIST = '/usr/local/HTS-2.2beta/bin/HList';
$HINIT = '/usr/local/HTS-2.2beta/bin/HInit';
$HREST = '/usr/local/HTS-2.2beta/bin/HRest';
$HEREST = '/usr/local/HTS-2.2beta/bin/HERest';
$HHED = '/usr/local/HTS-2.2beta/bin/HHEd';
$HSMMALIGN = '/usr/local/HTS-2.2beta/bin/HSMMAlign';
$HMGENS = '/usr/local/HTS-2.2beta/bin/HMGenS';
$ENGINE = '/usr/local/bin/hts_engine';
# SPTK commands
$X2X = '/usr/local/SPTK/bin/x2x';
$FREQT = '/usr/local/SPTK/bin/freqt';
$C2ACR = '/usr/local/SPTK/bin/c2acr';
$VOPR = '/usr/local/SPTK/bin/vopr';
$MC2B = '/usr/local/SPTK/bin/mc2b';
$SOPR = '/usr/local/SPTK/bin/sopr';
$B2MC = '/usr/local/SPTK/bin/b2mc';
$EXCITE = '/usr/local/SPTK/bin/excite';
$LSP2LPC = '/usr/local/SPTK/bin/lsp2lpc';
$MGC2MGC = '/usr/local/SPTK/bin/mgc2mgc';
$MGLSADF = '/usr/local/SPTK/bin/mglsadf';
$MERGE = '/usr/local/SPTK/bin/merge';
$BCP = '/usr/local/SPTK/bin/bcp';
$LSPCHECK = '/usr/local/SPTK/bin/lspcheck';
$BCUT = '/usr/local/SPTK/bin/bcut';
$VSTAT = '/usr/local/SPTK/bin/vstat';
$NAN = '/usr/local/SPTK/bin/nan';
$DFS = '/usr/local/SPTK/bin/dfs';
$SWAB = '/usr/local/SPTK/bin/swab';
# SoX (to add RIFF header)
$SOX = '/usr/bin/sox';
$SOXOPTION = '2';
#==============================================Main Program=====================================================
# For every .mgc file under gen/, convert its companion ASCII pitch file
# (named after the .mgc base) to 32-bit floats, then synthesise waveforms.
$line = `ls gen/*.mgc`;
@FILE = split( '\n', $line );
foreach $file (@FILE) {
   $base = `basename $file .mgc`;
   chomp($base);
   $line = "$X2X +af gen/$base > gen/$base.pit"; #change to float value for the .mgs name file pitch
   shell($line);
}
gen_wave("$prjdir/gen");
#========================================================================================================
# sub routine for speech synthesis from log f0 and Mel-cepstral coefficients
# Synthesise a waveform for every <base>.mgc / <base>.pit pair found in
# $gendir: an excitation signal (voiced + unvoiced bands) is generated
# from the pitch file and filtered through an MGLSA filter driven by the
# mel-generalised cepstral coefficients, then wrapped as a RIFF wav.
# Relies on the SPTK/SoX command paths and analysis settings defined at
# the top of this script.
sub gen_wave($) {
   my ($gendir) = @_;
   my ( $line, @FILE, $file, $base );

   $line = `ls $gendir/*.mgc`;
   @FILE = split( '\n', $line );
   # NB: $lgopt is a package global (script runs without 'use strict')
   $lgopt = "-l" if ($lg);

   print "Processing directory $gendir:\n";
   foreach $file (@FILE) {
      $base = `basename $file .mgc`;
      chomp($base);
      # Only process pairs where both feature and pitch files are non-empty
      if ( -s $file && -s "$gendir/$base.pit" ) {
         print " Synthesizing a speech waveform from $base.mgc and $base.lf0...";

         # convert log F0 to pitch
         #lf02pitch( $base, $gendir );

         if ( $gm > 0 ) {
            # MGC-LSPs -> MGC coefficients
            $line = "$LSPCHECK -m " . ( $ordr{'mgc'} - 1 ) . " -s " . ( $sr / 1000 ) . " -r 0.1 $file | ";
            $line .= "$LSP2LPC -m " . ( $ordr{'mgc'} - 1 ) . " -s " . ( $sr / 1000 ) . " $lgopt | ";
            $line .= "$MGC2MGC -m " . ( $ordr{'mgc'} - 1 ) . " -a $fw -c $gm -n -u -M " . ( $ordr{'mgc'} - 1 ) . " -A $fw -C $gm " . " > $gendir/$base.c_mgc";
            shell($line);
            $mgc = "$gendir/$base.c_mgc";
         }
         else {
            # apply postfiltering
            if ( $gm == 0 && $pf != 1.0 && $useGV == 0 ) {
               postfiltering( $base, $gendir );
               $mgc = "$gendir/$base.p_mgc";
            }
            else {
               $mgc = $file;
            }
         }

         # synthesize waveform
         # Build low/high-pass filters for the voiced/unvoiced split
         $lfil = `$PERL $datdir/scripts/makefilter.pl $sr 0`;
         $hfil = `$PERL $datdir/scripts/makefilter.pl $sr 1`;
         # Unvoiced band: zero pitch -> white-noise excitation, high-passed
         $line = "$SOPR -m 0 $gendir/$base.pit | $EXCITE -p $fs | $DFS -b $hfil > $gendir/$base.unv";
         shell($line);
         # Voiced band + unvoiced residue -> MGLSA filter -> 16-bit wav
         $line = "$EXCITE -p $fs $gendir/$base.pit | ";
         $line .= "$DFS -b $lfil | $VOPR -a $gendir/$base.unv | ";
         $line .= "$MGLSADF -m " . ( $ordr{'mgc'} - 1 ) . " -p $fs -a $fw -c $gm $mgc | ";
         $line .= "$X2X +fs -o | ";
         $line .= "$SOX -c 1 -s -$SOXOPTION -t raw -r $sr - -c 1 -s -$SOXOPTION -t wav -r $sr $gendir/$base.wav";
         shell($line);

         $line = "rm -f $gendir/$base.unv";
         shell($line);

         print "done\n";
      }
   }
   print "done\n";
}
# sub routine for log f0 -> f0 conversion
# Convert a binary log-F0 file (<base>.lf0, 32-bit floats) in $gendir into
# a pitch-period file <base>.pit (period in samples = $sr / F0). Unvoiced
# frames are encoded in the input as -1.0e+10 and map to a pitch of 0.
# NOTE(review): relies on the script-global sample rate $sr.
sub lf02pitch($$) {
   my ( $base, $gendir ) = @_;

   # Fix: @STAT, @frq and @out were package globals in the original (the
   # script runs without 'use strict'), leaking stale state between calls;
   # they are now lexicals scoped to this sub.
   # read log f0 file
   open( IN, "$gendir/${base}.lf0" );
   my @STAT = stat(IN);
   my $data;
   read( IN, $data, $STAT[7] );
   close(IN);

   # log f0 -> pitch conversion (one 4-byte float per frame)
   my $T   = $STAT[7] / 4;
   my @frq = unpack( "f$T", $data );
   my @out;
   for ( my $t = 0 ; $t < $T ; $t++ ) {
      if ( $frq[$t] == -1.0e+10 ) {
         # magic value marks an unvoiced frame
         $out[$t] = 0.0;
      }
      else {
         $out[$t] = $sr / exp( $frq[$t] );
      }
   }
   $data = pack( "f$T", @out );

   # output data
   open( OUT, ">$gendir/${base}.pit" );
   print OUT $data;
   close(OUT);
}
# sub routine for formant emphasis in Mel-cepstral domain
# Formant emphasis (postfiltering) in the mel-cepstral domain: produces
# <base>.p_mgc from <base>.mgc by scaling cepstral coefficients 2..m-1 by
# the factor $pf, while recomputing the 0th (gain) coefficient from the
# ratio of the original and postfiltered autocorrelations so overall
# signal power is preserved. All commands come from the SPTK paths
# configured at the top of this script.
sub postfiltering($$) {
   my ( $base, $gendir ) = @_;
   my ( $i, $line );

   # output postfiltering weight coefficient
   # (coefficients 0 and 1 stay at weight 1, the rest get $pf)
   $line = "echo 1 1 ";
   for ( $i = 2 ; $i < $ordr{'mgc'} ; $i++ ) {
      $line .= "$pf ";
   }
   $line .= "| $X2X +af > $gendir/weight";
   shell($line);

   # calculate auto-correlation of original mcep
   $line = "$FREQT -m " . ( $ordr{'mgc'} - 1 ) . " -a $fw -M $co -A 0 < $gendir/${base}.mgc | ";
   $line .= "$C2ACR -m $co -M 0 -l $fl > $gendir/${base}.r0";
   shell($line);

   # calculate auto-correlation of postfiltered mcep
   $line = "$VOPR -m -n " . ( $ordr{'mgc'} - 1 ) . " < $gendir/${base}.mgc $gendir/weight | ";
   $line .= "$FREQT -m " . ( $ordr{'mgc'} - 1 ) . " -a $fw -M $co -A 0 | ";
   $line .= "$C2ACR -m $co -M 0 -l $fl > $gendir/${base}.p_r0";
   shell($line);

   # calculate MLSA coefficients from postfiltered mcep
   $line = "$VOPR -m -n " . ( $ordr{'mgc'} - 1 ) . " < $gendir/${base}.mgc $gendir/weight | ";
   $line .= "$MC2B -m " . ( $ordr{'mgc'} - 1 ) . " -a $fw | ";
   $line .= "$BCP -n " . ( $ordr{'mgc'} - 1 ) . " -s 0 -e 0 > $gendir/${base}.b0";
   shell($line);

   # calculate 0.5 * log(acr_orig/acr_post)) and add it to 0th MLSA coefficient
   $line = "$VOPR -d < $gendir/${base}.r0 $gendir/${base}.p_r0 | ";
   $line .= "$SOPR -LN -d 2 | ";
   $line .= "$VOPR -a $gendir/${base}.b0 > $gendir/${base}.p_b0";
   shell($line);

   # generate postfiltered mcep
   $line = "$VOPR -m -n " . ( $ordr{'mgc'} - 1 ) . " < $gendir/${base}.mgc $gendir/weight | ";
   $line .= "$MC2B -m " . ( $ordr{'mgc'} - 1 ) . " -a $fw | ";
   $line .= "$BCP -n " . ( $ordr{'mgc'} - 1 ) . " -s 1 -e " . ( $ordr{'mgc'} - 1 ) . " | ";
   $line .= "$MERGE -n " . ( $ordr{'mgc'} - 2 ) . " -s 0 -N 0 $gendir/${base}.p_b0 | ";
   $line .= "$B2MC -m " . ( $ordr{'mgc'} - 1 ) . " -a $fw > $gendir/${base}.p_mgc";
   shell($line);
}
# Run a shell command and die with a diagnostic if it fails.
sub shell($) {
   my ($command) = @_;

   my $exit = system($command);

   # system() returns 0 on success, -1 if the command could not be started,
   # and otherwise the raw wait status (exit code and signal number packed
   # together). The original tested ($exit / 256 != 0), a roundabout way of
   # testing for any non-zero status; compare directly instead.
   if ( $exit != 0 ) {
      die "Error in $command\n";
   }
}
| sankar-mukherjee/DBM-TGP-F0 | MLSA/mlsa.pl | Perl | apache-2.0 | 8,470 |
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2017] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
=pod
=head1 NAME
Bio::EnsEMBL::Pipeline::RunnableDB::ProteinAnnotation::PIRSF
=head1 SYNOPSIS
my $seg = Bio::EnsEMBL::Pipeline::RunnableDB::ProteinAnnotation::PIRSF->
new ( -db => $db,
-input_id => $input_id,
-analysis => $analysis,
);
$seg->fetch_input; # gets sequence from DB
$seg->run;
$seg->write_output; # writes features to to DB
=head1 DESCRIPTION
This object wraps Bio::EnsEMBL::Pipeline::Runnable::Hmmpfam
to add functionality to read and write to databases in
a Pfam-specific way.
=head1 CONTACT
=cut
package Bio::EnsEMBL::Analysis::RunnableDB::ProteinAnnotation::PIRSF;
use warnings ;
use strict;
use vars qw(@ISA);
use Bio::EnsEMBL::Analysis::RunnableDB::ProteinAnnotation;
use Bio::EnsEMBL::Analysis::Runnable::ProteinAnnotation::PIRSF;
@ISA = qw(Bio::EnsEMBL::Analysis::RunnableDB::ProteinAnnotation);
sub fetch_input {
  my ($self) = @_;

  # Let the generic ProteinAnnotation RunnableDB fetch the query peptide
  $self->SUPER::fetch_input;

  # Build the PIRSF-specific runnable against the HMM library file
  # configured on the analysis object.
  my $runnable = Bio::EnsEMBL::Analysis::Runnable::ProteinAnnotation::PIRSF->new(
      -query    => $self->query,
      -analysis => $self->analysis,
      -database => $self->analysis->db_file,
      %{ $self->parameters_hash },
  );

  $self->runnable($runnable);
}
| james-monkeyshines/ensembl-analysis | modules/Bio/EnsEMBL/Analysis/RunnableDB/ProteinAnnotation/PIRSF.pm | Perl | apache-2.0 | 2,024 |
#
# Copyright 2018 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package apps::protocols::smtp::plugin;
use strict;
use warnings;
use base qw(centreon::plugins::script_simple);
# Constructor: build the SMTP plugin object and register the modes it
# provides (login, message).
sub new {
    my ($class, %options) = @_;

    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '0.1';

    my %modes = (
        login   => 'apps::protocols::smtp::mode::login',
        message => 'apps::protocols::smtp::mode::message',
    );
    %{ $self->{modes} } = %modes;

    return $self;
}
1;
__END__
=head1 PLUGIN DESCRIPTION
Check a SMTP server.
=cut
| wilfriedcomte/centreon-plugins | apps/protocols/smtp/plugin.pm | Perl | apache-2.0 | 1,352 |
#
# Copyright 2018 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::juniper::common::junos::mode::cpurouting;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
# Constructor: declare the command-line options this mode accepts
# (--warning / --critical threshold strings).
sub new {
    my ($class, %options) = @_;

    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '1.0';

    $options{options}->add_options(arguments => {
        'warning:s'  => { name => 'warning' },
        'critical:s' => { name => 'critical' },
    });

    return $self;
}
# Validate the --warning and --critical thresholds; exit with an error
# message if either fails to parse as a threshold expression.
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::init(%options);

    for my $label ('warning', 'critical') {
        my $value = $self->{option_results}->{$label};
        next unless $self->{perfdata}->threshold_validate(label => $label, value => $value) == 0;
        $self->{output}->add_option_msg(short_msg => "Wrong $label threshold '" . $value . "'.");
        $self->{output}->option_exit();
    }
}
# Poll the device over SNMP and report CPU usage plus 1/5/15-minute load
# averages for every jnxOperatingTable entry whose description contains
# "routing" (the routing engine(s)). Emits plugin output + perfdata and
# exits via $self->{output}.
sub run {
    my ($self, %options) = @_;
    $self->{snmp} = $options{snmp};

    # jnxOperatingTable columns (Juniper enterprise MIB)
    my $oid_jnxOperatingDescr = '.1.3.6.1.4.1.2636.3.1.13.1.5';
    my $oid_jnxOperatingCPU = '.1.3.6.1.4.1.2636.3.1.13.1.8';
    my $oid_jnxOperating1MinLoadAvg = '.1.3.6.1.4.1.2636.3.1.13.1.20';
    my $oid_jnxOperating5MinLoadAvg = '.1.3.6.1.4.1.2636.3.1.13.1.21';
    my $oid_jnxOperating15MinLoadAvg = '.1.3.6.1.4.1.2636.3.1.13.1.22';

    my $result = $self->{snmp}->get_table(oid => $oid_jnxOperatingDescr, nothing_quit => 1);

    # collect every Descr OID whose value mentions "routing"
    my $routing_engine_find = 0;
    my @oids_routing_engine = ();
    foreach my $oid (keys %$result) {
        if ($result->{$oid} =~ /routing/i) {
            $routing_engine_find = 1;
            push @oids_routing_engine, $oid;
        }
    }

    if ($routing_engine_find == 0) {
        $self->{output}->add_option_msg(short_msg => "Cannot find operating with 'routing' in description.");
        $self->{output}->option_exit();
    }

    # with several routing engines, emit one global OK short message and
    # only surface per-engine short messages on non-OK status below
    my $multiple = 0;
    if (scalar(@oids_routing_engine) > 1) {
        $multiple = 1;
        $self->{output}->output_add(severity => 'OK',
                                    short_msg => sprintf("All CPU(s) average usages are ok"));
    }

    # bulk-load CPU + load-average columns for just the matched instances
    $self->{snmp}->load(oids => [$oid_jnxOperatingCPU, $oid_jnxOperating1MinLoadAvg, $oid_jnxOperating5MinLoadAvg, $oid_jnxOperating15MinLoadAvg],
                        instances => \@oids_routing_engine,
                        instance_regexp => "^" . $oid_jnxOperatingDescr . '\.(.+)');
    my $result2 = $self->{snmp}->get_leef();

    foreach my $oid_routing_engine (@oids_routing_engine) {
        # instance index = OID suffix after the Descr column OID
        $oid_routing_engine =~ /^$oid_jnxOperatingDescr\.(.+)/;
        my $instance = $1;

        my $description = $result->{$oid_jnxOperatingDescr . '.' . $instance};
        my $cpu_usage = $result2->{$oid_jnxOperatingCPU . '.' . $instance};
        my $cpu_load1 = $result2->{$oid_jnxOperating1MinLoadAvg . '.' . $instance};
        my $cpu_load5 = $result2->{$oid_jnxOperating5MinLoadAvg . '.' . $instance};
        my $cpu_load15 = $result2->{$oid_jnxOperating15MinLoadAvg . '.' . $instance};

        # thresholds apply to the CPU usage percentage only
        my $exit_code = $self->{perfdata}->threshold_check(value => $cpu_usage,
                                                           threshold => [ { label => 'critical', exit_litteral => 'critical' }, { label => 'warning', exit_litteral => 'warning' } ]);
        $self->{output}->output_add(long_msg => sprintf("CPU(s) '%s' average usage is: %s%%", $description, $cpu_usage));
        # short message only for a single engine, or when status is not OK
        if ($multiple == 0 || !$self->{output}->is_status(value => $exit_code, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity => $exit_code,
                                        short_msg => sprintf("CPU(s) '%s' average usage is: %s%%", $description, $cpu_usage));
        }

        # suffix perfdata labels with the engine description when multiple
        my $extra_label = '';
        $extra_label = '_' . $description if ($multiple == 1);
        $self->{output}->perfdata_add(label => 'cpu' . $extra_label, unit => '%',
                                      value => $cpu_usage,
                                      warning => $self->{perfdata}->get_perfdata_for_output(label => 'warning'),
                                      critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical'),
                                      min => 0, max => 100);
        $self->{output}->perfdata_add(label => 'load1' . $extra_label,
                                      value => $cpu_load1,
                                      min => 0);
        $self->{output}->perfdata_add(label => 'load5' . $extra_label,
                                      value => $cpu_load5,
                                      min => 0);
        $self->{output}->perfdata_add(label => 'load15' . $extra_label,
                                      value => $cpu_load15,
                                      min => 0);
    }

    $self->{output}->display();
    $self->{output}->exit();
}
1;
__END__
=head1 MODE
Check CPU Usage of routing engine.
=over 8
=item B<--warning>
Threshold warning in percent.
=item B<--critical>
Threshold critical in percent.
=back
=cut
| wilfriedcomte/centreon-plugins | network/juniper/common/junos/mode/cpurouting.pm | Perl | apache-2.0 | 6,186 |
=head1 LICENSE
Copyright [1999-2014] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
# EnsEMBL module for Bio::EnsEMBL::Variation::Utils::Sequence
#
#
=head1 NAME
Bio::EnsEMBL::Variation::Utils::VEP - Methods used by the Variant Effect Predictor
=head1 SYNOPSIS
use Bio::EnsEMBL::Variation::Utils::VEP qw(configure);
my $config = configure();
=head1 METHODS
=cut
use strict;
use warnings;
package Bio::EnsEMBL::Variation::Utils::VEP;
# module list
use Getopt::Long;
use FileHandle;
use File::Path qw(mkpath);
use Storable qw(nstore_fd fd_retrieve freeze thaw);
use Scalar::Util qw(weaken);
use Digest::MD5 qw(md5_hex);
use IO::Socket;
use IO::Select;
use Bio::EnsEMBL::Registry;
use Bio::EnsEMBL::Variation::VariationFeature;
use Bio::EnsEMBL::Variation::DBSQL::VariationFeatureAdaptor;
use Bio::EnsEMBL::Variation::Utils::VariationEffect qw(MAX_DISTANCE_FROM_TRANSCRIPT overlap);
use Bio::EnsEMBL::Utils::Sequence qw(reverse_comp);
use Bio::EnsEMBL::Variation::Utils::Sequence qw(unambiguity_code SO_variation_class);
use Bio::EnsEMBL::Variation::Utils::EnsEMBL2GFF3;
use Bio::EnsEMBL::Variation::StructuralVariationFeature;
use Bio::EnsEMBL::Variation::DBSQL::StructuralVariationFeatureAdaptor;
use Bio::EnsEMBL::Variation::TranscriptStructuralVariation;
# we need to manually include all these modules for caching to work
use Bio::EnsEMBL::CoordSystem;
use Bio::EnsEMBL::Transcript;
use Bio::EnsEMBL::Translation;
use Bio::EnsEMBL::Exon;
use Bio::EnsEMBL::ProteinFeature;
use Bio::EnsEMBL::Analysis;
use Bio::EnsEMBL::DBSQL::GeneAdaptor;
use Bio::EnsEMBL::DBSQL::SliceAdaptor;
use Bio::EnsEMBL::DBSQL::TranslationAdaptor;
use Bio::EnsEMBL::DBSQL::TranscriptAdaptor;
use Bio::EnsEMBL::DBSQL::MetaContainer;
use Bio::EnsEMBL::DBSQL::CoordSystemAdaptor;
use Exporter;
use vars qw(@ISA @EXPORT_OK);
@ISA = qw(Exporter);
@EXPORT_OK = qw(
&detect_format
&parse_line
&vf_to_consequences
&validate_vf
&read_cache_info
&dump_adaptor_cache
&load_dumped_adaptor_cache
&load_dumped_variation_cache
&get_all_consequences
&get_slice
&build_slice_cache
&build_full_cache
®ions_from_hash
&get_time
&debug
&convert_to_vcf
&progress
&end_progress
@REG_FEAT_TYPES
@OUTPUT_COLS
%COL_DESCS
@VEP_WEB_CONFIG
%FILTER_SHORTCUTS
);
# Columns printed in the default VEP output format, in order.
our @OUTPUT_COLS = qw(
    Uploaded_variation
    Location
    Allele
    Gene
    Feature
    Feature_type
    Consequence
    cDNA_position
    CDS_position
    Protein_position
    Amino_acids
    Codons
    Existing_variation
    Extra
);

# Human-readable description of each output column / Extra field key;
# used when writing the output file header.
our %COL_DESCS = (
    'Uploaded_variation' => 'Identifier of uploaded variant',
    'ID'                 => 'Identifier of uploaded variant',
    'Location'           => 'Location of variant in standard coordinate format (chr:start or chr:start-end)',
    'Allele'             => 'The variant allele used to calculate the consequence',
    'Gene'               => 'Ensembl stable ID of affected gene',
    'Feature'            => 'Ensembl stable ID of feature',
    'Feature_type'       => 'Type of feature - Transcript, RegulatoryFeature or MotifFeature',
    'Consequence'        => 'Consequence type',
    'cDNA_position'      => 'Relative position of base pair in cDNA sequence',
    'CDS_position'       => 'Relative position of base pair in coding sequence',
    'Protein_position'   => 'Relative position of amino acid in protein',
    'Amino_acids'        => 'Reference and variant amino acids',
    'Codons'             => 'Reference and variant codon sequence',
    'Existing_variation' => 'Identifier(s) of co-located known variants',
    'CANONICAL'          => 'Indicates if transcript is canonical for this gene',
    'CCDS'               => 'Indicates if transcript is a CCDS transcript',
    'SYMBOL'             => 'Gene symbol (e.g. HGNC)',
    'SYMBOL_SOURCE'      => 'Source of gene symbol',
    'ENSP'               => 'Ensembl protein identifer',
    'HGVSc'              => 'HGVS coding sequence name',
    'HGVSp'              => 'HGVS protein sequence name',
    'SIFT'               => 'SIFT prediction and/or score',
    'PolyPhen'           => 'PolyPhen prediction and/or score',
    'EXON'               => 'Exon number(s) / total',
    'INTRON'             => 'Intron number(s) / total',
    'DOMAINS'            => 'The source and identifer of any overlapping protein domains',
    'MOTIF_NAME'         => 'The source and identifier of a transcription factor binding profile (TFBP) aligned at this position',
    'MOTIF_POS'          => 'The relative position of the variation in the aligned TFBP',
    'HIGH_INF_POS'       => 'A flag indicating if the variant falls in a high information position of the TFBP',
    'MOTIF_SCORE_CHANGE' => 'The difference in motif score of the reference and variant sequences for the TFBP',
    'CELL_TYPE'          => 'List of cell types and classifications for regulatory feature',
    'IND'                => 'Individual name',
    'ZYG'                => 'Zygosity of individual genotype at this locus',
    'SV'                 => 'IDs of overlapping structural variants',
    'FREQS'              => 'Frequencies of overlapping variants used in filtering',
    'GMAF'               => 'Minor allele and frequency of existing variant in 1000 Genomes Phase 1 combined population',
    'AFR_MAF'            => 'Frequency of existing variant in 1000 Genomes Phase 1 combined African population',
    'AMR_MAF'            => 'Frequency of existing variant in 1000 Genomes Phase 1 combined American population',
    'ASN_MAF'            => 'Frequency of existing variant in 1000 Genomes Phase 1 combined Asian population',
    'EUR_MAF'            => 'Frequency of existing variant in 1000 Genomes Phase 1 combined European population',
    'AA_MAF'             => 'Frequency of existing variant in NHLBI-ESP African American population',
    'EA_MAF'             => 'Frequency of existing variant in NHLBI-ESP European American population',
    'DISTANCE'           => 'Shortest distance from variant to transcript',
    'CLIN_SIG'           => 'Clinical significance of variant from dbSNP',
    'BIOTYPE'            => 'Biotype of transcript',
    'PUBMED'             => 'Pubmed ID(s) of publications that cite existing variant',
    'ALLELE_NUM'         => 'Allele number from input; 0 is reference, 1 is first alternate etc',
    'STRAND'             => 'Strand of the feature (1/-1)',
);

# Regulatory feature types considered when --regulatory is enabled.
our @REG_FEAT_TYPES = qw(
    RegulatoryFeature
    MotifFeature
);

# Config keys that are exposed to / settable from the VEP web interface.
our @VEP_WEB_CONFIG = qw(
    format
    check_existing
    coding_only
    core_type
    symbol
    protein
    hgvs
    terms
    check_frequency
    freq_filter
    freq_gt_lt
    freq_freq
    freq_pop
    filter_common
    sift
    polyphen
    regulatory
);

# Field order for entries in the serialised variation cache files.
our @VAR_CACHE_COLS = qw(
    variation_name
    failed
    start
    end
    allele_string
    strand
    minor_allele
    minor_allele_freq
);

# Maps --filter shortcut names to the set of consequence SO terms they select.
our %FILTER_SHORTCUTS = (
    upstream => {
        '5KB_upstream_variant' => 1,
        '2KB_upstream_variant' => 1,
    },
    downstream => {
        '5KB_downstream_variant'  => 1,
        '2KB_downstream_variant'  => 1,
        '500B_downstream_variant' => 1,
    },
    utr => {
        '5_prime_UTR_variant' => 1,
        '3_prime_UTR_variant' => 1,
    },
    splice => {
        splice_donor_variant    => 1,
        splice_acceptor_variant => 1,
        splice_region_variant   => 1,
    },
    coding_change => {
        stop_lost            => 1,
        stop_gained          => 1,
        missense_variant     => 1,
        frameshift_variant   => 1,
        inframe_insertion    => 1,
        inframe_deletion     => 1,
    },
    regulatory => {
        regulatory_region_variant => 1,
        TF_binding_site_variant   => 1,
    },
);
# parses a line of input, returns VF object(s)
sub parse_line {
my $config = shift;
my $line = shift;
# find out file format - will only do this on first line
if(!defined($config->{format}) || (defined($config->{format}) && $config->{format} eq 'guess')) {
$config->{format} = &detect_format($line);
debug("Detected format of input file as ", $config->{format}) unless defined($config->{quiet});
# HGVS and ID formats need DB
die("ERROR: Can't use ".uc($config->{format})." format in offline mode") if $config->{format} =~ /id|hgvs/ && defined($config->{offline});
# force certain options if format is VEP output
if($config->{format} eq 'vep') {
$config->{no_consequence} = 1;
delete $config->{regulatory};
debug("Forcing no consequence calculation") unless defined($config->{quiet});
}
}
# check that format is vcf when using --individual
die("ERROR: --individual only compatible with VCF input files\n") if defined($config->{individual}) && $config->{format} ne 'vcf';
my $parse_method = 'parse_'.$config->{format};
$parse_method =~ s/vep_//;
my $method_ref = \&$parse_method;
my $vfs = &$method_ref($config, $line);
$vfs = add_lrg_mappings($config, $vfs) if defined($config->{lrg});
return $vfs;
}
# sub-routine to detect format of input
sub detect_format {
my $line = shift;
my @data = split /\s+/, $line;
# HGVS: ENST00000285667.3:c.1047_1048insC
if (
scalar @data == 1 &&
$data[0] =~ /^([^\:]+)\:.*?([cgmrp]?)\.?([\*\-0-9]+.*)$/i
) {
return 'hgvs';
}
# variant identifier: rs123456
elsif (
scalar @data == 1
) {
return 'id';
}
# VCF: 20 14370 rs6054257 G A 29 0 NS=58;DP=258;AF=0.786;DB;H2 GT:GQ:DP:HQ
elsif (
$data[0] =~ /(chr)?\w+/ &&
$data[1] =~ /^\d+$/ &&
$data[3] =~ /^[ACGTN\-\.]+$/i &&
$data[4] && $data[4] =~ /^([\.ACGTN\-]+\,?)+|(\<[A-Z]+\>)$/i
) {
return 'vcf';
}
# pileup: chr1 60 T A
elsif (
$data[0] =~ /(chr)?\w+/ &&
$data[1] =~ /^\d+$/ &&
$data[2] =~ /^[\*ACGTN-]+$/i &&
$data[3] =~ /^[\*ACGTNRYSWKM\+\/-]+$/i
) {
return 'pileup';
}
# ensembl: 20 14370 14370 A/G +
elsif (
$data[0] =~ /\w+/ &&
$data[1] =~ /^\d+$/ &&
$data[2] =~ /^\d+$/ &&
$data[3] =~ /(ins|dup|del)|([ACGTN-]+\/[ACGTN-]+)/i
) {
return 'ensembl';
}
# vep output: ID 1:142849179 - - - - INTERGENIC
elsif (
$data[0] =~ /\w+/ &&
$data[1] =~ /^\w+?\:\d+(\-\d+)*$/ &&
scalar @data == 14
) {
return 'vep';
}
else {
die("ERROR: Could not detect input file format\n");
}
}
# parse a line of Ensembl format input into a variation feature object
sub parse_ensembl {
my $config = shift;
my $line = shift;
my ($chr, $start, $end, $allele_string, $strand, $var_name) = split /\s+/, $line;
my $vf;
# sv?
if($allele_string !~ /\//) {
my $so_term;
# convert to SO term
my %terms = (
INS => 'insertion',
DEL => 'deletion',
TDUP => 'tandem_duplication',
DUP => 'duplication'
);
$so_term = defined $terms{$allele_string} ? $terms{$allele_string} : $allele_string;
$vf = Bio::EnsEMBL::Variation::StructuralVariationFeature->new_fast({
start => $start,
end => $end,
strand => $strand,
adaptor => $config->{svfa},
variation_name => $var_name,
chr => $chr,
class_SO_term => $so_term,
});
}
# normal vf
else {
$vf = Bio::EnsEMBL::Variation::VariationFeature->new_fast({
start => $start,
end => $end,
allele_string => $allele_string,
strand => $strand,
map_weight => 1,
adaptor => $config->{vfa},
variation_name => $var_name,
chr => $chr,
});
}
return [$vf];
}
# parse a line of VCF input into a variation feature object
sub parse_vcf {
my $config = shift;
my $line = shift;
my @data = split /\s+/, $line;
# non-variant
my $non_variant = 0;
if($data[4] eq '.') {
if(defined($config->{allow_non_variant})) {
$non_variant = 1;
}
else {
return [];
}
}
# get relevant data
my ($chr, $start, $end, $ref, $alt) = ($data[0], $data[1], $data[1], $data[3], $data[4]);
# some VCF files have a GRCh37 pos defined in GP flag in INFO column
# if user has requested, we can use that as the position instead
if(defined $config->{gp}) {
$chr = undef;
$start = undef;
foreach my $pair(split /\;/, $data[7]) {
my ($key, $value) = split /\=/, $pair;
if($key eq 'GP') {
($chr, $start) = split /\:/, $value;
$end = $start;
}
}
unless(defined($chr) and defined($start)) {
warn "No GP flag found in INFO column" unless defined $config->{quiet};
return [];
}
}
# adjust end coord
$end += (length($ref) - 1);
# structural variation
if((defined($data[7]) && $data[7] =~ /SVTYPE/) || $alt =~ /\<|\[|\]|\>/) {
# parse INFO field
my %info = ();
foreach my $bit(split /\;/, $data[7]) {
my ($key, $value) = split /\=/, $bit;
$info{$key} = $value;
}
# like indels, SVs have the base before included for reference
$start++;
# work out the end coord
if(defined($info{END})) {
$end = $info{END};
}
elsif(defined($info{SVLEN})) {
$end = $start + abs($info{SVLEN}) - 1;
}
# check for imprecise breakpoints
my ($min_start, $max_start, $min_end, $max_end);
if(defined($info{CIPOS})) {
my ($low, $high) = split /\,/, $info{CIPOS};
$min_start = $start + $low;
$max_start = $start + $high;
}
if(defined($info{CIEND})) {
my ($low, $high) = split /\,/, $info{CIEND};
$min_end = $end + $low;
$max_end = $end + $high;
}
# get type
my $type;
if($alt =~ /\<|\[|\]|\>/) {
$type = $alt;
$type =~ s/\<|\>//g;
$type =~ s/\:.+//g;
if($start >= $end && $type =~ /del/i) {
warn "WARNING: VCF line on line ".$config->{line_number}." looks incomplete, skipping:\n$line\n";
return [];
}
}
else {
$type = $info{SVTYPE};
}
my $so_term;
if(defined($type)) {
# convert to SO term
my %terms = (
INS => 'insertion',
DEL => 'deletion',
TDUP => 'tandem_duplication',
DUP => 'duplication'
);
$so_term = defined $terms{$type} ? $terms{$type} : $type;
}
my $svf = Bio::EnsEMBL::Variation::StructuralVariationFeature->new_fast({
start => $start,
inner_start => $max_start,
outer_start => $min_start,
end => $end,
inner_end => $min_end,
outer_end => $max_end,
strand => 1,
adaptor => $config->{svfa},
variation_name => $data[2] eq '.' ? undef : $data[2],
chr => $chr,
class_SO_term => $so_term,
});
return [$svf];
}
# normal variation
else {
# find out if any of the alt alleles make this an insertion or a deletion
my ($is_indel, $is_sub, $ins_count, $total_count);
foreach my $alt_allele(split /\,/, $alt) {
$is_indel = 1 if $alt_allele =~ /D|I/;
$is_indel = 1 if length($alt_allele) != length($ref);
$is_sub = 1 if length($alt_allele) == length($ref);
$ins_count++ if length($alt_allele) > length($ref);
$total_count++;
}
# multiple alt alleles?
if($alt =~ /\,/) {
if($is_indel) {
my @alts;
if($alt =~ /D|I/) {
foreach my $alt_allele(split /\,/, $alt) {
# deletion (VCF <4)
if($alt_allele =~ /D/) {
push @alts, '-';
}
elsif($alt_allele =~ /I/) {
$alt_allele =~ s/^I//g;
push @alts, $alt_allele;
}
}
}
else {
$ref = substr($ref, 1) || '-';
$start++;
foreach my $alt_allele(split /\,/, $alt) {
$alt_allele = substr($alt_allele, 1);
$alt_allele = '-' if $alt_allele eq '';
push @alts, $alt_allele;
}
}
$alt = join "/", @alts;
}
else {
# for substitutions we just need to replace ',' with '/' in $alt
$alt =~ s/\,/\//g;
}
}
elsif($is_indel) {
# deletion (VCF <4)
if($alt =~ /D/) {
my $num_deleted = $alt;
$num_deleted =~ s/\D+//g;
$end += $num_deleted - 1;
$alt = "-";
# get ref seq from slice
my $tmp_chr = $chr;
$tmp_chr =~ s/chr//ig;
my $slice = get_slice($config, $tmp_chr);
$ref .= $slice ? $slice->sub_Slice($start + 1, $start + $num_deleted - 1)->seq : ("N" x ($num_deleted - 1)) unless length($ref) > 1 || $start == $end;
}
# insertion (VCF <4)
elsif($alt =~ /I/) {
$ref = '-';
$alt =~ s/^I//g;
$start++;
}
# insertion or deletion (VCF 4+)
elsif(substr($ref, 0, 1) eq substr($alt, 0, 1)) {
# chop off first base
$ref = substr($ref, 1) || '-';
$alt = substr($alt, 1) || '-';
$start++;
}
}
# create VF object
my $vf = Bio::EnsEMBL::Variation::VariationFeature->new_fast({
start => $start,
end => $end,
allele_string => $non_variant ? $ref : $ref.'/'.$alt,
strand => 1,
map_weight => 1,
adaptor => $config->{vfa},
variation_name => $data[2] eq '.' ? undef : $data[2],
chr => $chr,
});
# flag as non-variant
$vf->{non_variant} = 1 if $non_variant;
# individuals?
if(defined($config->{individual})) {
my @alleles = split /\//, $ref.'/'.$alt;
my @return;
foreach my $ind(keys %{$config->{ind_cols}}) {
# get alleles present in this individual
my @bits;
my $gt = (split /\:/, $data[$config->{ind_cols}->{$ind}])[0];
my $phased = ($gt =~ /\|/ ? 1 : 0);
foreach my $bit(split /\||\/|\\/, $gt) {
push @bits, $alleles[$bit] unless $bit eq '.';
}
# shallow copy VF
my $vf_copy = { %$vf };
bless $vf_copy, ref($vf);
# get non-refs
my %non_ref = map {$_ => 1} grep {$_ ne $ref} @bits;
# construct allele_string
if(scalar keys %non_ref) {
$vf_copy->{allele_string} = $ref."/".(join "/", keys %non_ref);
}
else {
$vf_copy->{allele_string} = $ref;
$vf_copy->{hom_ref} = 1;
if(defined($config->{process_ref_homs})) {
$vf_copy->{allele_string} .= "/".$ref;
}
else {
$vf_copy->{non_variant} = 1;
}
}
# store phasing info
$vf_copy->{phased} = defined($config->{phased} ? 1 : $phased);
# store GT
$vf_copy->{genotype} = \@bits;
# store individual name
$vf_copy->{individual} = $ind;
push @return, $vf_copy;
}
return \@return;
}
else {
return [$vf];
}
}
}
# parse a line of pileup input into variation feature objects
sub parse_pileup {
my $config = shift;
my $line = shift;
my @data = split /\s+/, $line;
# pileup can produce more than one VF per line
my @return;
# normal variant
if($data[2] ne "*"){
my $var;
if($data[3] =~ /^[A|C|G|T]$/) {
$var = $data[3];
}
else {
($var = (unambiguity_code($data[3]) || $data[3])) =~ s/$data[2]//ig;
}
for my $alt(split //, $var){
push @return, Bio::EnsEMBL::Variation::VariationFeature->new_fast({
start => $data[1],
end => $data[1],
allele_string => $data[2].'/'.$alt,
strand => 1,
map_weight => 1,
adaptor => $config->{vfa},
chr => $data[0],
});
}
}
# in/del
else {
my %tmp_hash = map {$_ => 1} split /\//, $data[3];
my @genotype = keys %tmp_hash;
foreach my $allele(@genotype){
if(substr($allele,0,1) eq "+") { #ins
push @return, Bio::EnsEMBL::Variation::VariationFeature->new_fast({
start => $data[1] + 1,
end => $data[1],
allele_string => '-/'.substr($allele, 1),
strand => 1,
map_weight => 1,
adaptor => $config->{vfa},
chr => $data[0],
});
}
elsif(substr($allele,0,1) eq "-"){ #del
push @return, Bio::EnsEMBL::Variation::VariationFeature->new_fast({
start => $data[1] + 1,
end => $data[1] + length(substr($allele, 1)),
allele_string => substr($allele, 1).'/-',
strand => 1,
map_weight => 1,
adaptor => $config->{vfa},
chr => $data[0],
});
}
elsif($allele ne "*"){
warn("WARNING: invalid pileup indel genotype: $line\n") unless defined $config->{quiet};
}
}
}
return \@return;
}
# parse a line of HGVS input into a variation feature object
sub parse_hgvs {
my $config = shift;
my $line = shift;
my $vf;
# not all hgvs notations are supported yet, so we have to wrap it in an eval
eval { $vf = $config->{vfa}->fetch_by_hgvs_notation($line, $config->{sa}, $config->{ta}) };
if((!defined($vf) || (defined $@ && length($@) > 1)) && defined($config->{coordinator})) {
eval { $vf = $config->{vfa}->fetch_by_hgvs_notation($line, $config->{ofsa}, $config->{ofta}) };
}
if(!defined($vf) || (defined $@ && length($@) > 1)) {
warn("WARNING: Unable to parse HGVS notation \'$line\'\n$@") unless defined $config->{quiet};
return [];
}
# get whole chromosome slice
my $slice = $vf->slice->adaptor->fetch_by_region($vf->slice->coord_system->name, $vf->slice->seq_region_name);
$vf = $vf->transfer($slice);
# name it after the HGVS
$vf->{variation_name} = $line;
# add chr attrib
$vf->{chr} = $vf->slice->seq_region_name;
return [$vf];
}
# parse a variation identifier e.g. a dbSNP rsID
sub parse_id {
my $config = shift;
my $line = shift;
my $v_obj = $config->{va}->fetch_by_name($line);
return [] unless defined $v_obj;
my @vfs = @{$v_obj->get_all_VariationFeatures};
for(@vfs) {
delete $_->{dbID};
delete $_->{overlap_consequences};
$_->{chr} = $_->seq_region_name;
$config->{slice_cache}->{$_->{chr}} = $_->slice;
}
return \@vfs;
}
# parse a line of VEP output
sub parse_vep {
my $config = shift;
my $line = shift;
my @data = split /\t/, $line;
my ($chr, $start, $end) = split /\:|\-/, $data[1];
$end ||= $start;
# might get allele string from ID
my $allele_string;
if($data[0] =~ /^\w\_\w\_\w$/) {
my @split = split /\_/, $data[0];
$allele_string = $split[-1] if $split[-1] =~ /[ACGTN-]+\/[ACGTN-]+/;
}
$allele_string ||= 'N/'.($data[6] =~ /intergenic/ ? 'N' : $data[2]);
my $vf = Bio::EnsEMBL::Variation::VariationFeature->new_fast({
start => $start,
end => $end,
allele_string => $allele_string,
strand => 1,
map_weight => 1,
adaptor => $config->{vfa},
chr => $chr,
variation_name => $data[0],
});
return [$vf];
}
# converts to VCF format
sub convert_to_vcf {
my $config = shift;
my $vf = shift;
# look for imbalance in the allele string
if($vf->isa('Bio::EnsEMBL::Variation::VariationFeature')) {
my %allele_lengths;
my @alleles = split /\//, $vf->allele_string;
map {reverse_comp(\$_)} @alleles if $vf->strand < 0;
foreach my $allele(@alleles) {
$allele =~ s/\-//g;
$allele_lengths{length($allele)} = 1;
}
# in/del/unbalanced
if(scalar keys %allele_lengths > 1) {
# we need the ref base before the variation
# default to N in case we can't get it
my $prev_base = 'N';
unless(defined($config->{cache})) {
my $slice = $vf->slice->sub_Slice($vf->start - 1, $vf->start - 1);
$prev_base = $slice->seq if defined($slice);
}
for my $i(0..$#alleles) {
$alleles[$i] =~ s/\-//g;
$alleles[$i] = $prev_base.$alleles[$i];
}
return [
$vf->{chr} || $vf->seq_region_name,
$vf->start - 1,
$vf->variation_name,
shift @alleles,
(join ",", @alleles),
'.', '.', '.'
];
}
# balanced sub
else {
return [
$vf->{chr} || $vf->seq_region_name,
$vf->start,
$vf->variation_name,
shift @alleles,
(join ",", @alleles),
'.', '.', '.'
];
}
}
# SV
else {
# convert to SO term
my %terms = (
'insertion' => 'INS',
'deletion' => 'DEL',
'tandem_duplication' => 'TDUP',
'duplication' => 'DUP'
);
my $alt = '<'.($terms{$vf->class_SO_term} || $vf->class_SO_term).'>';
return [
$vf->{chr} || $vf->seq_region_name,
$vf->start,
$vf->variation_name,
'.',
$alt,
'.', '.', '.'
];
}
}
# tries to map a VF to the LRG coordinate system
sub add_lrg_mappings {
my $config = shift;
my $vfs = shift;
my @new_vfs;
foreach my $vf(@$vfs) {
# add the unmapped VF to the array
push @new_vfs, $vf;
# make sure the VF has an attached slice
$vf->{slice} ||= get_slice($config, $vf->{chr}, undef, 1);
next unless defined($vf->{slice});
# transform LRG <-> chromosome
my $new_vf = $vf->transform($vf->{slice}->coord_system->name eq 'lrg' ? 'chromosome' : 'lrg');
# add it to the array if transformation worked
if(defined($new_vf)) {
# update new VF's chr entry
$new_vf->{chr} = $new_vf->seq_region_name;
push @new_vfs, $new_vf;
}
}
return \@new_vfs;
}
# wrapper for whole_genome_fetch and vf_to_consequences
# takes config and a listref of VFs, returns listref of line hashes for printing
# wrapper for whole_genome_fetch and vf_to_consequences
# takes config and a listref of VFs, returns listref of line hashes for printing
#
# When --fork is enabled, the input is split into batches, each processed
# in a child process; results are serialised (Storable freeze + base64)
# and streamed back over a socketpair, tagged with the child PID so the
# parent can reassemble output in submission order. Plugin caches and
# per-run stats are merged back into the parent the same way.
#
# Fix: the parent-side plugin merge previously used grep {ref($line) eq
# $plugin}, which compared the ref of the *text line* (always '') and so
# never found the parent plugin — child plugin caches were silently
# dropped. It must test ref($_) against each registered plugin object.
sub get_all_consequences {
    my $config = shift;
    my $listref = shift;

    if ($config->{extra}) {
        eval "use Plugin qw($config);"
    }

    # check we can use MIME::Base64 (needed to serialise fork output)
    if(defined($config->{fork})) {
        eval q{ use MIME::Base64; };

        if($@) {
            debug("WARNING: Unable to load MIME::Base64, forking disabled") unless defined($config->{quiet});
            delete $config->{fork};
        }
    }

    my (@temp_array, @return, %by_pid, @pids);
    my $active_forks = 0;

    if(defined($config->{fork})) {
        my $total_size = scalar @$listref;
        my $done_vars = 0;

        # this variable stores child process references
        my $sel = IO::Select->new;

        debug("Calculating consequences") unless defined($config->{quiet});
        progress($config, 0, 1);

        # loop while variants in $listref or forks running
        while (scalar @$listref or $active_forks ) {

            # only spawn new forks if we have space
            if ($active_forks <= $config->{fork} ) {

                # batch size scales with remaining work, clamped to [min, max]
                my $delta = 0.5;
                my $minForkSize = 5;
                my $maxForkSize = 200;
                my $numLines = scalar @$listref;
                my $forkSize = int($numLines / ($config->{fork} + $delta*$config->{fork}) + $minForkSize ) + 1;

                $forkSize = $maxForkSize if $forkSize > $maxForkSize;

                while($active_forks <= $config->{fork} && scalar @$listref) {
                    my $tmp_vf = shift @$listref;

                    push @temp_array, $tmp_vf;

                    # fork once a full batch is collected (or input exhausted)
                    if(scalar @temp_array >= $forkSize || scalar @$listref == 0) {
                        my ($child,$parent);
                        socketpair($child, $parent, AF_UNIX, SOCK_STREAM, PF_UNSPEC) or die "ERROR: Failed to open socketpair: $!";

                        $child->autoflush(1);
                        $parent->autoflush(1);

                        $sel->add($child);

                        my $pid = fork;
                        if(!defined($pid)) {
                            die("WARNING: Failed to fork -") unless defined($config->{quiet});
                            last;
                        }
                        elsif($pid) {
                            # parent: remember child PID, recycle the batch buffer
                            push @pids, $pid;
                            $active_forks++;
                            @temp_array = ();
                        }
                        elsif($pid == 0) {
                            # child: process the batch and stream results back
                            $config->{forked} = $$;
                            $config->{quiet} = 1;
                            $config->{stats} = {};

                            *PARENT = $parent;
                            *STDERR = *PARENT;

                            #die("TEST DEATH\n") if rand() < 0.1;

                            my $cons = vf_list_to_cons($config, \@temp_array);

                            # what we're doing here is sending a serialised hash of the
                            # results through to the parent process through the socket.
                            # This is then thawed by the parent process.
                            # $$, or the PID, is added so that the input can be sorted
                            # back into the correct order for output
                            print PARENT $$." ".encode_base64(freeze($_), "\t")."\n" for @$cons;

                            # some plugins may cache stuff, check for this and try and
                            # reconstitute it into parent's plugin cache
                            foreach my $plugin(@{$config->{plugins}}) {
                                next unless defined($plugin->{has_cache});

                                # delete unnecessary stuff and stuff that can't be serialised
                                delete $plugin->{$_} for qw(config feature_types variant_feature_types version feature_types_wanted variant_feature_types_wanted params);
                                print PARENT $$." PLUGIN ".ref($plugin)." ".encode_base64(freeze($plugin), "\t")."\n";
                            }

                            # tell parent about stats
                            print PARENT $$." STATS ".encode_base64(freeze($config->{stats}), "\t")."\n" if defined($config->{stats});

                            # we need to tell the parent this child is finished
                            # otherwise it keeps listening
                            print PARENT "DONE $$\n";

                            exit(0);
                        }
                    }
                }
            }

            # read child input
            while(my @ready = $sel->can_read()) {
                my $no_read = 1;

                foreach my $fh(@ready) {
                    $no_read++;

                    my $line = $fh->getline();
                    next unless defined($line) && $line;

                    $no_read = 0;

                    # child finished
                    if($line =~ /^DONE/) {
                        $sel->remove($fh);
                        $fh->close;
                        $active_forks--;
                        last;
                    }

                    # variant finished / progress indicator
                    elsif($line =~ /^BUMP/) {
                        $line =~ m/BUMP ?(\d*)/;
                        $done_vars += $1 || 1;
                        progress($config, $done_vars, $total_size);
                    }

                    # output
                    elsif($line =~ /^\-?\d+ /) {

                        # plugin cache coming back from a child
                        if($line =~ /^\-?\d+ PLUGIN/) {

                            $line =~ m/^(\-?\d+) PLUGIN (\w+) /;
                            my ($pid, $plugin) = ($1, $2);

                            # remove the PID
                            $line =~ s/^\-?\d+ PLUGIN \w+ //;
                            chomp $line;

                            my $tmp = thaw(decode_base64($line));

                            next unless defined($plugin);

                            # copy data to parent plugin
                            # (was: grep {ref($line) eq $plugin}, which never matched)
                            my ($parent_plugin) = grep {ref($_) eq $plugin} @{$config->{plugins}};

                            next unless defined($parent_plugin);

                            merge_hashes($parent_plugin, $tmp);
                        }

                        # filtered count / per-run stats
                        elsif($line =~ /^\-?\d+ STATS/) {
                            $line =~ s/^\-?\d+\sSTATS\s//;
                            my $tmp = thaw(decode_base64($line));
                            $config->{stats} ||= {};

                            # special case chr lengths
                            my %chr_lengths;

                            if(defined($config->{stats}->{chr_lengths})) {
                                merge_hashes($config->{stats}->{chr_lengths}, $tmp->{chr_lengths});
                                %chr_lengths = %{$config->{stats}->{chr_lengths}};
                            }

                            merge_hashes($config->{stats}, $tmp, 1);

                            $config->{stats}->{chr_lengths} = \%chr_lengths;
                        }

                        else {
                            # grab the PID
                            $line =~ m/^(\-?\d+)\s/;
                            my $pid = $1;
                            die "ERROR: Could not parse forked PID from line $line" unless defined($pid);

                            # remove the PID
                            $line =~ s/^\-?\d+\s//;
                            chomp $line;

                            # decode and thaw "output" from forked process
                            push @{$by_pid{$pid}}, thaw(decode_base64($line));
                        }
                    }

                    # something's wrong (child wrote to its redirected STDERR)
                    else {
                        print STDERR "\n$line\n";
                    }
                }

                # read-through detected, DIE
                die("\nERROR: Forked process(es) died\n") if $no_read;

                last if $active_forks < $config->{fork};
            }
        }

        end_progress($config);

        debug("Writing output") unless defined($config->{quiet});

        waitpid($_, 0) for @pids;

        # add the sorted data to the return array, in child-spawn order
        push @return, @{$by_pid{$_} || []} for @pids;
    }

    # no forking
    else {
        push @return, @{vf_list_to_cons($config, $listref)};
    }

    if(defined($config->{debug})) {
        eval q{use Devel::Size qw(total_size)};
        my $mem = memory();
        my $tot;
        $tot += $_ for @$mem;

        if($tot > 1000000) {
            $tot = sprintf("%.2fGB", $tot / (1024 * 1024));
        }

        elsif($tot > 1000) {
            $tot = sprintf("%.2fMB", $tot / 1024);
        }

        my $mem_diff = mem_diff($config);
        debug(
            "LINES ", $config->{line_number},
            "\tMEMORY $tot ", (join " ", @$mem),
            "\tDIFF ", (join " ", @$mem_diff),
            "\tCONFIG ", total_size($config)
        );
        #exit(0) if grep {$_ < 0} @$mem_diff;
    }

    return \@return;
}
# takes a listref of parsed VariationFeatures and returns a listref of finished
# output lines (scalar refs for string-based formats such as VCF/GVF/Solr,
# line hashrefs for the default output), running consequence calculation,
# existing-variant checks and frequency filtering along the way
sub vf_list_to_cons {
    my $config = shift;
    my $listref = shift;

    # initialize caches
    $config->{$_.'_cache'} ||= {} for qw(tr rf slice);

    # build hash keyed on chromosome, then chunk index, then start position
    my %vf_hash;
    push @{$vf_hash{$_->{chr}}{int($_->{start} / $config->{chunk_size})}{$_->{start}}}, $_ for @$listref;

    # get unique, naturally-sorted chromosome list
    my @chrs = sort {natural_sort($a,$b)} keys %{{map {$_->{chr} => 1} @$listref}};

    # get non-variants - these bypass consequence calculation but are re-merged below
    my @non_variants = grep {$_->{non_variant}} @$listref;

    # check existing (co-located) known variants
    if(defined($config->{'cache_var_type'}) && $config->{'cache_var_type'} eq 'tabix') {
        check_existing_tabix($config, $listref) if defined($config->{check_existing});
    }
    else {
        check_existing_hash($config, \%vf_hash) if defined($config->{check_existing});
    }

    my $new_listref = [];

    # skip any based on frequency checks?
    if(defined($config->{check_frequency})) {
        foreach my $vf(@$listref) {
            if(defined($vf->{existing}) && scalar @{$vf->{existing}}) {
                # a VF passes only if every co-located existing variant passes
                my @passed = grep {$_} map {check_frequencies($config, $_)} reverse @{$vf->{existing}};
                push @$new_listref, $vf if scalar @passed == scalar @{$vf->{existing}};
                $vf->{freqs} = $config->{filtered_freqs};
            }
            else {
                push @$new_listref, $vf;
            }
        }
    }
    else {
        $new_listref = $listref;
    }

    # if using consequence filter, we're not interested in how many remain yet
    $config->{stats}->{filter_count} += scalar @$new_listref unless defined($config->{filter});

    # get overlapping SVs (dropped obsolete &-sigil call syntax)
    check_svs_hash($config, \%vf_hash) if defined($config->{check_svs});

    # remake hash without non-variants
    %vf_hash = ();
    push @{$vf_hash{$_->{chr}}{int($_->{start} / $config->{chunk_size})}{$_->{start}}}, $_ for grep {!defined($_->{non_variant})} @$new_listref;

    # get regions
    # BUGFIX: the "&reg" of "&regions_from_hash" had been HTML-entity-decoded
    # into the registered-trademark character, which broke compilation
    my $regions = regions_from_hash($config, \%vf_hash);
    my $trim_regions = $regions;

    # prune caches (forked children share the parent's caches, so skip there)
    if(!defined($config->{forked})) {
        prune_cache($config, $config->{tr_cache}, $regions, $config->{loaded_tr});
        prune_cache($config, $config->{rf_cache}, $regions, $config->{loaded_rf});
    }

    # fetch transcripts and (optionally) regulatory features for the regions
    my $fetched_tr_count = 0;
    $fetched_tr_count = fetch_transcripts($config, $regions, $trim_regions)
        unless defined($config->{no_consequences});

    my $fetched_rf_count = 0;
    $fetched_rf_count = fetch_regfeats($config, $regions, $trim_regions)
        if defined($config->{regulatory})
        && !defined($config->{no_consequences});

    my @return;

    foreach my $chr(@chrs) {
        my $finished_vfs = whole_genome_fetch($config, $chr, \%vf_hash);

        # non-variants?
        if(scalar @non_variants) {
            push @$finished_vfs, grep {$_->{chr} eq $chr} @non_variants;

            # need to re-sort
            @$finished_vfs = sort {$a->{start} <=> $b->{start} || $a->{end} <=> $b->{end}} @$finished_vfs;
        }

        debug("Calculating consequences") unless defined($config->{quiet});

        my $vf_count = scalar @$finished_vfs;
        my $vf_counter = 0;

        while(my $vf = shift @$finished_vfs) {
            progress($config, $vf_counter++, $vf_count) unless $vf_count == 1;

            my $filter_ok = 1;

            # filtered output
            if(defined($config->{filter})) {
                $filter_ok = filter_by_consequence($config, $vf);
                $config->{stats}->{filter_count} += $filter_ok;
            }

            # skip filtered lines
            next unless $filter_ok;

            # original output
            if(defined($config->{original})) {
                push @return, \$vf->{_line};
            }

            # GVF output
            elsif(defined($config->{gvf})) {
                $vf->source("User");

                $config->{gvf_id} ||= 1;

                # get custom annotation
                my $custom_annotation = defined($config->{custom}) ? get_custom_annotation($config, $vf) : {};
                $custom_annotation->{ID} = $config->{gvf_id}++;

                my $tmp = $vf->to_gvf(
                    include_consequences => defined($config->{no_consequences}) ? 0 : 1,
                    extra_attrs => $custom_annotation,
                );
                push @return, \$tmp;
            }

            # VCF output
            elsif(defined($config->{vcf})) {

                # convert to VCF, otherwise get line
                my $line = $config->{format} eq 'vcf' ? [split /\s+/, $vf->{_line}] : convert_to_vcf($config, $vf);

                # normalise an absent/placeholder INFO field
                if(!defined($line->[7]) || $line->[7] eq '.') {
                    $line->[7] = '';
                }

                # nuke existing CSQ field
                if($line->[7] =~ /CSQ\=/ && !defined($config->{keep_csq})) {
                    $line->[7] =~ s/CSQ\=\S+?\;?(\s|$)/$1/;
                }

                # get all the lines the normal way
                # and process them into VCF-compatible string
                my $string = 'CSQ=';

                # renamed from $line to stop it shadowing the VCF field arrayref above
                foreach my $con_line(@{vf_to_consequences($config, $vf)}) {

                    # use the field list (can be user-defined by setting --fields)
                    for my $col(@{$config->{fields}}) {

                        # skip fields already represented in the VCF
                        next if $col eq 'Uploaded_variation' or $col eq 'Location' or $col eq 'Extra';

                        # search for data in main line hash as well as extra field
                        my $data = defined $con_line->{$col} ? $con_line->{$col} : $con_line->{Extra}->{$col};
                        reverse_comp(\$data) if $vf->strand < 0 and $col eq 'Allele' and $config->{format} ne 'vcf';

                        # "-" means null for everything except the Allele field (confusing...)
                        $data = undef if defined($data) and $data eq '-' and $col ne 'Allele';

                        # commas delimit consequence blocks in the CSQ string, so escape them
                        $data =~ s/\,/\&/g if defined $data;
                        $string .= defined($data) ? $data : '';
                        $string .= '|';
                    }

                    $string =~ s/\|$//;
                    $string .= ',';
                }

                $string =~ s/\,$//;

                if(!defined($config->{no_consequences}) && $string ne 'CSQ=') {
                    $line->[7] .= ($line->[7] ? ';' : '').$string;
                }

                # get custom annotation
                if(defined($config->{custom}) && scalar @{$config->{custom}}) {
                    my $custom_annotation = get_custom_annotation($config, $vf);
                    foreach my $key(keys %{$custom_annotation}) {
                        $line->[7] .= ($line->[7] ? ';' : '').$key.'='.$custom_annotation->{$key};
                    }
                }

                # empty VCF fields are represented as "."
                $_ ||= '.' for @$line;

                my $tmp = join "\t", @$line;
                push @return, \$tmp;
            }

            # no consequence output from vep input
            elsif(defined($config->{no_consequences}) && $config->{format} eq 'vep') {
                my $line = [split /\s+/, $vf->{_line}];

                if($line->[13] eq '-') {
                    $line->[13] = '';
                }

                # get custom annotation
                if(defined($config->{custom})) {
                    my $custom_annotation = get_custom_annotation($config, $vf);
                    foreach my $key(keys %{$custom_annotation}) {
                        $line->[13] .= ($line->[13] ? ';' : '').$key.'='.$custom_annotation->{$key};
                    }
                }

                my $tmp = join "\t", @$line;
                push @return, \$tmp;
            }

            # XML output for Solr
            elsif(defined($config->{solr})) {
                # CGI::escape is only needed for this output mode, so load lazily
                eval q{
                    use CGI qw(escape);
                };

                foreach my $con(@{vf_to_consequences($config, $vf)}) {
                    my $line = "<doc>\n";

                    # create unique ID
                    $line .= sprintf(qq{ <field name="id">%s_%i_%i_%s_%s</field>\n}, $vf->{chr}, $vf->{start}, $vf->{end}, $con->{Allele} || '-', $con->{Feature} || '-');

                    # add proper location fields that can be indexed
                    $line .= sprintf(qq{ <field name="chr">%s</field>\n}, $vf->{chr});
                    $line .= sprintf(qq{ <field name="start">%s</field>\n}, $vf->{start});
                    $line .= sprintf(qq{ <field name="end">%s</field>\n}, $vf->{end});

                    foreach my $col(@{$config->{fields}}) {

                        # search for data in main line hash as well as extra field
                        my $val = defined $con->{$col} ? $con->{$col} : $con->{Extra}->{$col};
                        next unless defined($val) && $val ne '-';

                        # some have multiple values
                        foreach my $data(split(',', $val)) {

                            # split SIFT and PolyPhen into prediction and score fields
                            if($col eq 'SIFT' || $col eq 'PolyPhen') {
                                if($data =~ m/([a-z\_]+)?\(?([\d\.]+)?\)?/i) {
                                    my ($pred, $score) = ($1, $2);
                                    $line .= sprintf(qq{ <field name="%s">%s</field>\n}, $col.'_pred', $pred) if $pred;
                                    $line .= sprintf(qq{ <field name="%s">%s</field>\n}, $col.'_score', $score) if defined($score);
                                }
                            }

                            # GMAF - index the numeric frequency only
                            elsif($col eq 'GMAF') {
                                if($data =~ m/([\d\.]+)/) {
                                    $line .= sprintf(qq{ <field name="%s">%s</field>\n}, $col, $1) if defined($1);
                                }
                            }

                            else {
                                $line .= sprintf(qq{ <field name="%s">%s</field>\n}, $col, escape($data)) if defined($data);
                            }
                        }
                    }

                    $line .= "</doc>\n";

                    push @return, \$line;
                }
            }

            # normal output
            else {
                push @return, @{vf_to_consequences($config, $vf)};
            }

            # signal progress to the parent when running forked
            print PARENT "BUMP\n" if defined($config->{forked}) && !defined($config->{no_progress});
        }

        end_progress($config) unless scalar @$listref == 1;
    }

    return \@return;
}
# comparator for chromosome-style names: numeric comparison when both
# operands are purely numeric (so "10" sorts after "2"), string comparison
# otherwise; returns -1/0/1 like <=> and cmp
sub natural_sort {
    my ($left, $right) = @_;
    return ($left =~ /^[0-9]+$/ && $right =~ /^[0-9]+$/)
        ? $left <=> $right
        : $left cmp $right;
}
# takes a variation feature and returns ready to print consequence information
# as a listref of line hashes (one per allele/feature overlap); structural
# variants are dispatched to svf_to_consequences instead
sub vf_to_consequences {
my $config = shift;
my $vf = shift;
# force empty hash into object's transcript_variations if undefined from whole_genome_fetch
# this will stop the API trying to go off and fill it again
if(defined $config->{whole_genome}) {
$vf->{transcript_variations} ||= {};
$vf->{regulation_variations}->{$_} ||= [] for (@REG_FEAT_TYPES, 'ExternalFeature');
}
# pos stats: bin variant positions per chromosome into 1Mb windows
$config->{stats}->{chr}->{$vf->{chr}}->{1e6 * int($vf->start / 1e6)}++;
$config->{stats}->{var_cons}->{$vf->display_consequence}++;
# use a different method for SVs
return svf_to_consequences($config, $vf) if $vf->isa('Bio::EnsEMBL::Variation::StructuralVariationFeature');
my @return = ();
# get allele nums: number each allele in the allele string from 0 (reference) upwards
if(defined($config->{allele_number})) {
my @alleles = split /\//, $vf->allele_string || '';
%{$vf->{_allele_nums}} = map {$alleles[$_] => $_} (0..$#alleles);
}
# method name stub for getting *VariationAlleles
# (include reference-homozygous alleles only when --process_ref_homs is set)
my $allele_method = defined($config->{process_ref_homs}) ? 'get_all_' : 'get_all_alternate_';
# get stats: variant class, and allele changes for SNVs
my $so_term = SO_variation_class($vf->allele_string, 1);
if(defined($so_term)) {
$config->{stats}->{classes}->{$so_term}++;
$config->{stats}->{allele_changes}->{$vf->allele_string}++ if $so_term eq 'SNV';
}
# stats: count variants that are co-located with known ones
$config->{stats}->{existing}++ if defined($vf->{existing}) && scalar @{$vf->{existing}};
# prefetch intergenic variation
# pass a true argument to get_IntergenicVariation to stop it doing a reference allele check
# (to stay consistent with the rest of the VEP)
$vf->get_IntergenicVariation(1);
# only most severe or summary?
if(defined($config->{most_severe}) || defined($config->{summary})) {
my $line = init_line($config, $vf);
if(defined($config->{summary})) {
# summary mode: all consequence types, comma-joined
$line->{Consequence} = join ",", @{$vf->consequence_type($config->{terms}) || $vf->consequence_type};
}
else {
# most-severe mode: a single consequence term
$line->{Consequence} = $vf->display_consequence($config->{terms}) || $vf->display_consequence;
}
$config->{stats}->{consequences}->{$_}++ for split(',', $line->{Consequence});
push @return, $line;
}
# otherwise do normal consequence processing
else {
my $vfos;
my $method = $allele_method.'VariationFeatureOverlapAlleles';
# include regulatory stuff?
if(!defined $config->{coding_only} && defined $config->{regulatory}) {
$vfos = $vf->get_all_VariationFeatureOverlaps;
}
# otherwise just get transcript & intergenic ones
else {
@$vfos = grep {defined($_)} (
@{$vf->get_all_TranscriptVariations},
$vf->get_IntergenicVariation
);
}
# grep out non-coding?
@$vfos = grep {$_->can('affects_cds') && $_->affects_cds} @$vfos if defined($config->{coding_only});
# get alleles for each overlap via the method name built above
my @vfoas = map {@{$_->$method}} @{$vfos};
# pick worst?
@vfoas = (pick_worst_vfoa($config, \@vfoas)) if defined($config->{pick});
# pick per gene?
@vfoas = @{pick_vfoa_per_gene($config, \@vfoas)} if defined($config->{per_gene});
# process remaining: convert each allele to a line hash, dropping undefs
push @return, map {vfoa_to_line($config, $_)} grep {defined($_)} @vfoas;
}
return \@return;
}
# picks the worst of a list of VariationFeatureOverlapAlleles
# VFOAs are ordered by a hierarchy:
# 1: canonical
# 2: biotype (protein coding favoured)
# 3: consequence rank
# 4: transcript length
# returns a single VFOA, or undef when no candidate has overlap consequences
sub pick_worst_vfoa {
my $config = shift;
my $vfoas = shift;
my @vfoa_info;
# rank lookup keyed on SO term, from the API's consequence constants
my %ranks = map {$_->SO_term => $_->rank} values %Bio::EnsEMBL::Variation::Utils::Constants::OVERLAP_CONSEQUENCES;
# short-circuit: a single candidate needs no comparison
return $vfoas->[0] if scalar @$vfoas == 1;
foreach my $vfoa(@$vfoas) {
# most severe (lowest rank number) consequence first
my @ocs = sort {$a->rank <=> $b->rank} @{$vfoa->get_all_OverlapConsequences};
next unless scalar @ocs;
# create a hash of info for this VFOA that will be used to rank it
my $info = {
vfoa => $vfoa,
rank => $ranks{$ocs[0]->SO_term},
# these will only be used by transcript types, default to 0 for others
# to avoid writing an else clause below
canonical => 0,
length => 0,
biotype => 0
};
if($vfoa->isa('Bio::EnsEMBL::Variation::TranscriptVariationAllele')) {
my $tr = $vfoa->feature;
$info->{canonical} = $tr->is_canonical ? 1 : 0;
$info->{length} = $tr->length();
$info->{biotype} = $tr->biotype eq 'protein_coding' ? 1 : 0;
}
push @vfoa_info, $info;
}
if(scalar @vfoa_info) {
# sort by the hierarchy documented above; the first element wins
my $picked = (sort {
$b->{canonical} <=> $a->{canonical} ||
$b->{biotype} <=> $a->{biotype} ||
$a->{rank} <=> $b->{rank} ||
$b->{length} <=> $a->{length}
} @vfoa_info)[0]->{vfoa};
return $picked;
}
return undef;
}
# pick one vfoa per gene
# allow non-transcript types to pass through
sub pick_vfoa_per_gene {
    my ($config, $vfoas) = @_;

    # partition: transcript alleles get grouped by gene, everything else
    # passes straight through to the result
    my @tvas   = grep {  $_->isa('Bio::EnsEMBL::Variation::TranscriptVariationAllele') } @$vfoas;
    my @return = grep { !$_->isa('Bio::EnsEMBL::Variation::TranscriptVariationAllele') } @$vfoas;

    # bucket the transcript alleles by their gene stable ID, preferring the
    # ID cached on the transcript over a gene-adaptor lookup
    my %by_gene;
    foreach my $tva (@tvas) {
        my $gene_id = $tva->transcript->{_gene_stable_id}
            || $config->{ga}->fetch_by_transcript_stable_id($tva->transcript->stable_id)->stable_id;
        push @{$by_gene{$gene_id}}, $tva;
    }

    # keep only the worst-ranked allele for each gene
    foreach my $gene_id (keys %by_gene) {
        push @return, grep { defined($_) } pick_worst_vfoa($config, $by_gene{$gene_id});
    }

    return \@return;
}
# get consequences for a structural variation feature
# returns a listref of output line hashes, one per allele/feature overlap
sub svf_to_consequences {
my $config = shift;
my $svf = shift;
my @return = ();
# stats: count variant classes
$config->{stats}->{classes}->{$svf->{class_SO_term}}++;
# method name for consequence terms, built from the configured term set
my $term_method = $config->{terms}.'_term';
# pre-seed caches so the API does not attempt to fill them in whole-genome mode
if(defined $config->{whole_genome}) {
$svf->{transcript_structural_variations} ||= [];
$svf->{regulation_structural_variations}->{$_} ||= [] for @REG_FEAT_TYPES;
}
# intergenic SVs get one output line per alternate allele
if ((my $iv = $svf->get_IntergenicStructuralVariation(1)) && !defined($config->{no_intergenic})) {
for my $iva (@{ $iv->get_all_alternate_IntergenicStructuralVariationAlleles }) {
my $line = init_line($config, $svf);
$line->{Allele} = '-';
my $cons = $iva->get_all_OverlapConsequences->[0];
$line->{Consequence} = $cons->$term_method || $cons->SO_term;
$config->{stats}->{consequences}->{$cons->$term_method || $cons->SO_term}++;
$line = run_plugins($iva, $line, $config);
push @return, $line;
}
}
foreach my $svo(@{$svf->get_all_StructuralVariationOverlaps}) {
# intergenic overlaps were already handled above
next if $svo->isa('Bio::EnsEMBL::Variation::IntergenicStructuralVariation');
my $feature = $svo->feature;
# get feature type from the last component of the overlapped object's class name
my $feature_type = (split '::', ref($feature))[-1];
my $base_line = {
Feature_type => $feature_type,
Feature => $feature->stable_id,
Allele => $svf->class_SO_term,
};
# transcript overlaps additionally report cDNA/CDS/protein coordinates,
# with a "/total_length" suffix when --total_length is set
if($svo->isa('Bio::EnsEMBL::Variation::BaseTranscriptVariation')) {
$base_line->{cDNA_position} = format_coords($svo->cdna_start, $svo->cdna_end).
(defined($config->{total_length}) ? '/'.$feature->length : '');
$base_line->{CDS_position} = format_coords($svo->cds_start, $svo->cds_end).
(defined($config->{total_length}) && $feature->{_variation_effect_feature_cache}->{translateable_seq} ?
'/'.length($feature->{_variation_effect_feature_cache}->{translateable_seq}) : ''
);
$base_line->{Protein_position} = format_coords($svo->translation_start, $svo->translation_end).
(defined($config->{total_length}) && $feature->{_variation_effect_feature_cache}->{peptide} ?
'/'.length($feature->{_variation_effect_feature_cache}->{peptide}) : ''
);
}
foreach my $svoa(@{$svo->get_all_StructuralVariationOverlapAlleles}) {
my $line = init_line($config, $svf, $base_line);
$line->{Consequence} = join ",",
#map {s/feature/$feature_type/e; $_}
map {$_->$term_method}
sort {$a->rank <=> $b->rank}
@{$svoa->get_all_OverlapConsequences};
# NOTE(review): map in void context below - a for loop would be clearer
map {$config->{stats}->{consequences}->{$_->$term_method}++} @{$svoa->get_all_OverlapConsequences};
# work out overlap amounts (1-based inclusive coordinates)
my $overlap_start = (sort {$a <=> $b} ($svf->start, $feature->start))[-1];
my $overlap_end = (sort {$a <=> $b} ($svf->end, $feature->end))[0];
my $overlap_length = ($overlap_end - $overlap_start) + 1;
my $overlap_pc = 100 * ($overlap_length / (($feature->end - $feature->start) + 1));
$line->{Extra}->{OverlapBP} = $overlap_length if $overlap_length > 0;
$line->{Extra}->{OverlapPC} = sprintf("%.2f", $overlap_pc) if $overlap_pc > 0;
add_extra_fields($config, $line, $svoa);
push @return, $line;
}
}
return \@return;
}
# run all of the configured plugins on a VariationFeatureOverlapAllele instance
# and store any results in the provided line hash; returns the line hash, or
# undef when a plugin asks for the line to be filtered out
sub run_plugins {
    my ($bvfoa, $line_hash, $config) = @_;

    my $skip_line = 0;

    PLUGIN: for my $plugin (@{ $config->{plugins} || [] }) {

        # skip plugins not interested in this type of variation feature
        next PLUGIN unless $plugin->check_variant_feature_type(ref $bvfoa->base_variation_feature);

        # skip plugins not interested in this type of feature
        next PLUGIN unless $plugin->check_feature_type(ref $bvfoa->feature || 'Intergenic');

        eval {
            my $plugin_results = $plugin->run($bvfoa, $line_hash);

            if (!defined $plugin_results) {
                # a plugin returning undef means it wants to filter out this line
                $skip_line = 1;
            }
            elsif (ref $plugin_results eq 'HASH') {
                # merge the plugin's key/value results into the Extra column
                $line_hash->{Extra}->{$_} = $plugin_results->{$_} for keys %$plugin_results;
            }
            else {
                warn "Plugin '".(ref $plugin)."' did not return a hashref, output ignored!\n";
            }
        };
        warn "Plugin '".(ref $plugin)."' went wrong: $@" if $@;

        # there's no point running any other plugins if we're filtering this line,
        # because the first plugin to skip the line wins
        last PLUGIN if $skip_line;
    }

    return $skip_line ? undef : $line_hash;
}
# turn a generic VariationFeatureOverlapAllele into a line hash
# dispatches to the type-specific *_to_line converter, then adds the shared
# extra fields and runs any configured plugins; returns undef for unknown
# allele types or when a plugin filters the line out
sub vfoa_to_line {
    my $config = shift;
    my $vfoa = shift;

    # stats: count each observed consequence term
    # (was a map in void context; a for loop expresses the side effect directly)
    my $term_method = $config->{terms}.'_term';
    $config->{stats}->{consequences}->{$_->$term_method || $_->SO_term}++
        for @{$vfoa->get_all_OverlapConsequences};

    my $line;

    if($vfoa->isa('Bio::EnsEMBL::Variation::TranscriptVariationAllele')) {
        $line = tva_to_line($config, $vfoa);
    }
    elsif($vfoa->isa('Bio::EnsEMBL::Variation::RegulatoryFeatureVariationAllele')) {
        $line = rfva_to_line($config, $vfoa);
    }
    elsif($vfoa->isa('Bio::EnsEMBL::Variation::MotifFeatureVariationAllele')) {
        $line = mfva_to_line($config, $vfoa);
    }
    elsif($vfoa->isa('Bio::EnsEMBL::Variation::IntergenicVariationAllele')) {
        $line = iva_to_line($config, $vfoa);
    }
    else {
        # unknown allele type - nothing to output
        return undef;
    }

    # add extra fields
    $line = add_extra_fields($config, $line, $vfoa);

    # run plugins (may return undef to filter the line out)
    $line = run_plugins($vfoa, $line, $config);

    return $line;
}
# process IntergenicVariationAllele into an output line hash
sub iva_to_line {
    my ($config, $iva) = @_;

    # method name for consequence terms, built from the configured term set
    my $term_method = $config->{terms}.'_term';

    my $line = init_line($config, $iva->variation_feature);
    $line->{Allele} = $iva->variation_feature_seq;

    # intergenic alleles carry a single overlap consequence
    my $cons = $iva->get_all_OverlapConsequences->[0];
    $line->{Consequence} = $cons->$term_method || $cons->SO_term;

    return $line;
}
# process TranscriptVariationAllele into an output line hash, including
# cDNA/CDS/protein coordinates, amino acid and codon changes, HGVS notation
# and SIFT/PolyPhen predictions and/or scores as configured
sub tva_to_line {
    my $config = shift;
    my $tva = shift;

    my $tv = $tva->transcript_variation;
    my $t  = $tv->transcript;

    # method name for consequence terms
    my $term_method = $config->{terms}.'_term';

    my $base_line = {
        Feature_type     => 'Transcript',
        Feature          => (defined $t ? $t->stable_id : undef),

        # positions gain a "/total_length" suffix when --total_length is set
        cDNA_position    => format_coords($tv->cdna_start, $tv->cdna_end).
            (defined($config->{total_length}) ? '/'.$t->length : ''),
        CDS_position     => format_coords($tv->cds_start, $tv->cds_end).
            (defined($config->{total_length}) && $t->{_variation_effect_feature_cache}->{translateable_seq} ?
                '/'.length($t->{_variation_effect_feature_cache}->{translateable_seq}) : ''
            ),
        Protein_position => format_coords($tv->translation_start, $tv->translation_end).
            (defined($config->{total_length}) && $t->{_variation_effect_feature_cache}->{peptide} ?
                '/'.length($t->{_variation_effect_feature_cache}->{peptide}) : ''
            ),
        Allele           => $tva->variation_feature_seq,
        Amino_acids      => $tva->pep_allele_string,
        Codons           => $tva->display_codon_allele_string,
        Consequence      => join ",", map {$_->$term_method || $_->SO_term} sort {$a->rank <=> $b->rank} @{$tva->get_all_OverlapConsequences},
    };

    # stats: bin protein position into deciles of the peptide length
    if(defined($tv->translation_start)) {
        $config->{stats}->{protein_pos}->{int(10 * ($tv->translation_start / ($t->{_variation_effect_feature_cache}->{peptide} ? length($t->{_variation_effect_feature_cache}->{peptide}) : $t->translation->length)))}++;
    }

    my $line = init_line($config, $tva->variation_feature, $base_line);

    # HGVS
    if(defined $config->{hgvs}) {
        my $hgvs_t = $tva->hgvs_transcript;
        my $hgvs_p = $tva->hgvs_protein;

        # URI encode "="
        $hgvs_p =~ s/\=/\%3D/g if $hgvs_p && !defined($config->{no_escape});

        $line->{Extra}->{HGVSc} = $hgvs_t if $hgvs_t;
        $line->{Extra}->{HGVSp} = $hgvs_p if $hgvs_p;
    }

    foreach my $tool (qw(SIFT PolyPhen)) {
        my $lc_tool = lc($tool);

        if (my $opt = $config->{$lc_tool}) {
            # option value selects prediction, score or both (p/s/b prefix)
            my $want_pred  = $opt =~ /^p/i;
            my $want_score = $opt =~ /^s/i;
            my $want_both  = $opt =~ /^b/i;

            if ($want_both) {
                $want_pred  = 1;
                $want_score = 1;
            }

            next unless $want_pred || $want_score;

            my $pred_meth  = $lc_tool.'_prediction';
            my $score_meth = $lc_tool.'_score';

            # BUGFIX: was "my $analysis = ... if $lc_tool eq 'polyphen';" -
            # the "my EXPR if COND" construct has undefined behaviour in Perl
            # (the lexical may retain a stale value across iterations); use an
            # explicit conditional instead
            my $analysis = $lc_tool eq 'polyphen' ? $config->{polyphen_analysis} : undef;

            my $pred = $tva->$pred_meth($analysis);

            if($pred) {

                if ($want_pred) {
                    $pred =~ s/\s+/\_/;
                    $line->{Extra}->{$tool} = $pred;
                }

                if ($want_score) {
                    my $score = $tva->$score_meth($analysis);

                    if(defined $score) {
                        if($want_pred) {
                            # append score in parentheses after the prediction
                            $line->{Extra}->{$tool} .= "($score)";
                        }
                        else {
                            $line->{Extra}->{$tool} = $score;
                        }
                    }
                }
            }

            # update stats
            $config->{stats}->{$tool}->{$tva->$pred_meth}++ if $tva->$pred_meth;
        }
    }

    return $line;
}
# process RegulatoryFeatureVariationAllele into an output line hash
# (removed unused locals $allele_method/$method - they were built but never used)
sub rfva_to_line {
    my $config = shift;
    my $rfva = shift;

    # method name for consequence terms
    my $term_method = $config->{terms}.'_term';

    my $rf = $rfva->regulatory_feature;

    my $base_line = {
        Feature_type => 'RegulatoryFeature',
        Feature      => $rf->stable_id,
    };

    # report cell-type specific activity if requested
    if(defined($config->{cell_type}) && scalar(@{$config->{cell_type}})) {
        $base_line->{Extra}->{CELL_TYPE} = join ",",
            map {$_.':'.$rf->{cell_types}->{$_}}
            grep {$rf->{cell_types}->{$_}}
            @{$config->{cell_type}};

        $base_line->{Extra}->{CELL_TYPE} =~ s/\s+/\_/g;
    }

    # this currently always returns 'RegulatoryFeature', so we ignore it for now
    #$base_line->{Extra}->{REG_FEAT_TYPE} = $rf->feature_type->name;

    my $line = init_line($config, $rfva->variation_feature, $base_line);

    $line->{Allele} = $rfva->variation_feature_seq;
    $line->{Consequence} = join ',',
        map { $_->$term_method || $_->SO_term }
        @{ $rfva->get_all_OverlapConsequences };

    # NOTE(review): plugins are also run by the caller (vfoa_to_line), so they
    # may execute twice for regulatory features - confirm whether this extra
    # invocation is intentional before removing it
    $line = run_plugins($rfva, $line, $config);

    return $line;
}
# process MotifFeatureVariationAllele into an output line hash, reporting
# motif name/position, high-information-position flag and score change
# (removed unused locals $allele_method/$method - built but never used)
sub mfva_to_line {
    my $config = shift;
    my $mfva = shift;

    # method name for consequence terms
    my $term_method = $config->{terms}.'_term';

    my $mf = $mfva->motif_feature;

    # check that the motif has a binding matrix, if not there's not
    # much we can do so don't return anything
    return undef unless defined $mf->binding_matrix;

    my $matrix = $mf->binding_matrix->description.' '.$mf->display_label;
    $matrix =~ s/\s+/\_/g;

    my $base_line = {
        Feature_type => 'MotifFeature',
        Feature      => $mf->binding_matrix->name,
        Extra        => {
            MOTIF_NAME => $matrix,
        }
    };

    # report cell-type specific activity if requested
    if(defined($config->{cell_type}) && scalar(@{$config->{cell_type}})) {
        $base_line->{Extra}->{CELL_TYPE} = join ",",
            map {$_.':'.$mf->{cell_types}->{$_}}
            grep {$mf->{cell_types}->{$_}}
            @{$config->{cell_type}};

        $base_line->{Extra}->{CELL_TYPE} =~ s/\s+/\_/g;
    }

    my $line = init_line($config, $mfva->variation_feature, $base_line);

    $line->{Extra}->{MOTIF_POS} = $mfva->motif_start if defined $mfva->motif_start;
    $line->{Extra}->{HIGH_INF_POS} = ($mfva->in_informative_position ? 'Y' : 'N');

    # BUGFIX: was "my $delta = ... if ...;" - the "my EXPR if COND" construct
    # has undefined behaviour in Perl; declare first, then assign conditionally.
    # Only pure ACGT alleles can be scored against the matrix.
    my $delta;
    $delta = $mfva->motif_score_delta if $mfva->variation_feature_seq =~ /^[ACGT]+$/;
    $line->{Extra}->{MOTIF_SCORE_CHANGE} = sprintf("%.3f", $delta) if defined $delta;

    $line->{Allele} = $mfva->variation_feature_seq;
    $line->{Consequence} = join ',',
        map { $_->$term_method || $_->SO_term }
        @{ $mfva->get_all_OverlapConsequences };

    return $line;
}
# add fields shared by every allele type (overlapping SVs, allele number,
# strand), then delegate transcript-specific fields; also updates per-gene
# and per-feature stats; returns the (modified) line hash
sub add_extra_fields {
    my $config = shift;
    my $line = shift;
    my $bvfoa = shift;

    # overlapping SVs
    if(defined $config->{check_svs} && defined $bvfoa->base_variation_feature->{overlapping_svs}) {
        $line->{Extra}->{SV} = $bvfoa->base_variation_feature->{overlapping_svs};
    }

    # allele number
    if(defined($config->{allele_number})) {
        if(my $allele_nums = $bvfoa->variation_feature->{_allele_nums}) {
            # BUGFIX: was "... || '?'", which collapsed allele number 0 (the
            # reference allele under --process_ref_homs) to '?' because 0 is
            # falsy; test defined-ness instead
            my $num = $allele_nums->{$bvfoa->variation_feature_seq};
            $line->{Extra}->{ALLELE_NUM} = defined($num) ? $num : '?';
        }
    }

    # strand
    if(my $f = $bvfoa->feature) {
        my $strand = $f->seq_region_strand;

        # regfeats have an undefined strand (0); recommended not to report this
        $line->{Extra}->{STRAND} = $strand if $strand;
    }

    # add transcript-specific fields
    $line = add_extra_fields_transcript($config, $line, $bvfoa) if $bvfoa->isa('Bio::EnsEMBL::Variation::BaseTranscriptVariationAllele');

    # stats
    $config->{stats}->{gene}->{$line->{Gene}}++ if defined($line->{Gene});
    $config->{stats}->{lc($line->{Feature_type})}->{$line->{Feature}}++ if defined($line->{Feature_type}) && defined($line->{Feature});

    return $line;
}
# add transcript-specific Extra fields to a line hash: gene ID, exon/intron
# numbers, protein domains, distance, gene symbol, CCDS, RefSeq xref,
# protein ID, canonical flag and biotype, each gated on its config option
sub add_extra_fields_transcript {
my $config = shift;
my $line = shift;
my $tva = shift;
my $tv = $tva->base_variation_feature_overlap;
my $tr = $tva->transcript;
# get gene - prefer the stable ID cached on the transcript, otherwise fall
# back to a gene adaptor lookup; $gene is kept for possible reuse below
my $gene;
$line->{Gene} = $tr->{_gene_stable_id};
if(!defined($line->{Gene})) {
$gene = $config->{ga}->fetch_by_transcript_stable_id($tr->stable_id);
$line->{Gene} = $gene ? $gene->stable_id : '-';
}
# exon/intron numbers
if ($config->{numbers}) {
$line->{Extra}->{EXON} = $tv->exon_number if defined $tv->exon_number;
$line->{Extra}->{INTRON} = $tv->intron_number if defined $tv->intron_number;
}
# overlapped protein domains, reported as "analysis:hit_name"
if ($config->{domains}) {
my $feats = $tv->get_overlapping_ProteinFeatures;
my @strings;
for my $feat (@$feats) {
my $label = $feat->analysis->display_label.':'.$feat->hseqname;
# replace any special characters
$label =~ s/[\s;=]/_/g;
push @strings, $label;
}
$line->{Extra}->{DOMAINS} = join ',', @strings if @strings;
}
# distance to transcript, only relevant for up/downstream consequences
if($line->{Consequence} =~ /(up|down)stream/i) {
$line->{Extra}->{DISTANCE} = $tv->distance_to_transcript;
}
# gene symbol
if(defined $config->{symbol}) {
my ($symbol, $source);
# prefer values cached on the transcript
$symbol = $tr->{_gene_symbol} || $tr->{_gene_hgnc};
$source = $tr->{_gene_symbol_source};
# fall back to a database xref lookup when running with DB access
if(!defined($symbol) && defined($config->{database})) {
if(!defined($gene)) {
$gene = $config->{ga}->fetch_by_transcript_stable_id($tr->stable_id);
}
if(my $xref = $gene->display_xref) {
$symbol = $xref->display_id;
$source = $xref->dbname;
}
else {
my ($entry) = @{$gene->get_all_DBEntries('RefSeq_gene_name')};
$symbol = $entry->display_id if $entry;
}
}
# "-" is treated as absent
$symbol = undef if defined($symbol) && $symbol eq '-';
$source = undef if defined($source) && $source eq '-';
$line->{Extra}->{SYMBOL} = $symbol if defined($symbol);
$line->{Extra}->{SYMBOL_SOURCE} = $source if defined($source);
}
# CCDS
if(defined($config->{ccds})) {
my $ccds = $tr->{_ccds};
if(!defined($ccds)) {
my @entries = grep {$_->database eq 'CCDS'} @{$tr->get_all_DBEntries};
$ccds = $entries[0]->display_id if scalar @entries;
}
$ccds = undef if defined($ccds) && $ccds eq '-';
$line->{Extra}->{CCDS} = $ccds if defined($ccds);
}
# refseq xref
if(defined($config->{xref_refseq})) {
my $refseq = $tr->{_refseq};
if(!defined($refseq)) {
my @entries = grep {$_->database eq 'RefSeq_mRNA'} @{$tr->get_all_DBEntries};
if(scalar @entries) {
$refseq = join ",", map {$_->display_id."-".$_->database} @entries;
}
}
$refseq = undef if defined($refseq) && $refseq eq '-';
$line->{Extra}->{RefSeq} = $refseq if defined($refseq);
}
# protein ID
if(defined $config->{protein}) {
my $protein = $tr->{_protein};
if(!defined($protein)) {
$protein = $tr->translation->stable_id if defined($tr->translation);
}
$protein = undef if defined($protein) && $protein eq '-';
$line->{Extra}->{ENSP} = $protein if defined($protein);
}
# canonical transcript
if(defined $config->{canonical}) {
$line->{Extra}->{CANONICAL} = 'YES' if $tr->is_canonical;
}
# biotype
if(defined $config->{biotype}) {
$line->{Extra}->{BIOTYPE} = $tr->biotype;
}
return $line;
}
# initialize a line hash
# builds the base output line for a variation feature: the standard columns
# plus custom annotation, individual/zygosity, frequency, GMAF, population
# MAF, clinical significance and PubMed fields; entries from $base_line
# (optional) are merged in last
sub init_line {
my $config = shift;
my $vf = shift;
my $base_line = shift;
my $line = {
Uploaded_variation => $vf->variation_name,
Location => ($vf->{chr} || $vf->seq_region_name).':'.format_coords($vf->start, $vf->end),
Existing_variation => defined $vf->{existing} && scalar @{$vf->{existing}} ? join ",", map {$_->{variation_name}} @{$vf->{existing}} : '-',
Extra => {},
};
# add custom info
if(defined($config->{custom}) && scalar @{$config->{custom}}) {
# merge the custom hash with the extra hash
my $custom = get_custom_annotation($config, $vf);
for my $key (keys %$custom) {
$line->{Extra}->{$key} = $custom->{$key};
}
}
# individual?
if(defined($vf->{individual})) {
$line->{Extra}->{IND} = $vf->{individual};
# zygosity: more than one distinct genotype allele means heterozygous
if(defined($vf->{genotype})) {
my %unique = map {$_ => 1} @{$vf->{genotype}};
$line->{Extra}->{ZYG} = (scalar keys %unique > 1 ? 'HET' : 'HOM').(defined($vf->{hom_ref}) ? 'REF' : '');
}
}
# frequencies?
$line->{Extra}->{FREQS} = join ",", @{$vf->{freqs}} if defined($vf->{freqs});
# gmaf? report minor allele and its frequency for co-located existing variants
if(defined($config->{gmaf}) && defined($vf->{existing}) && scalar @{$vf->{existing}}) {
my @gmafs =
map {$_->{minor_allele}.':'.$_->{minor_allele_freq}}
grep {defined($_->{minor_allele}) && $_->{minor_allele_freq} =~ /\d/}
@{$vf->{existing}};
$line->{Extra}->{GMAF} = join ",", @gmafs if scalar @gmafs;
}
# existing var stuff
if(defined($vf->{existing}) && scalar @{$vf->{existing}}) {
# 1KG MAFs?
if(defined($config->{maf_1kg})) {
my @pops = qw(AFR AMR ASN EUR);
foreach my $var(@{$vf->{existing}}) {
foreach my $pop(grep {defined($var->{$_})} @pops) {
my $freq = $var->{$pop};
$freq = '-' unless defined($freq);
# append comma-separated when multiple existing variants carry a value
$line->{Extra}->{$pop.'_MAF'} =
exists($line->{Extra}->{$pop.'_MAF'}) ?
$line->{Extra}->{$pop.'_MAF'}.','.$freq :
$freq;
}
}
}
# ESP MAFs?
if(defined($config->{maf_esp})) {
my @pops = qw(AA EA);
foreach my $var(@{$vf->{existing}}) {
foreach my $pop(grep {defined($var->{$_})} @pops) {
my $freq = $var->{$pop};
$freq = '-' unless defined($freq);
# same comma-append pattern as the 1KG block above
$line->{Extra}->{$pop.'_MAF'} =
exists($line->{Extra}->{$pop.'_MAF'}) ?
$line->{Extra}->{$pop.'_MAF'}.','.$freq :
$freq;
}
}
}
# clin sig and pubmed?
foreach my $var(@{$vf->{existing}}) {
if(defined($var->{clin_sig}) && $var->{clin_sig}) {
$line->{Extra}->{CLIN_SIG} =
exists($line->{Extra}->{CLIN_SIG}) ?
$line->{Extra}->{CLIN_SIG}.','.$var->{clin_sig} :
$var->{clin_sig};
}
if(defined($config->{pubmed}) && defined($var->{pubmed}) && $var->{pubmed}) {
$line->{Extra}->{PUBMED} =
exists($line->{Extra}->{PUBMED}) ?
$line->{Extra}->{PUBMED}.','.$var->{pubmed} :
$var->{pubmed};
}
}
}
# copy entries from base_line
merge_hashes($line, $base_line) if defined($base_line);
return $line;
}
# Get custom annotation for a single VF.
# Returns a hashref mapping each configured custom track name (and
# "name_field" keys for extra fields) to annotation value strings.
# If no pre-built $cache is supplied, a one-off cache covering just this
# VF's position is built via cache_custom_annotation().
sub get_custom_annotation {
my $config = shift;
my $vf = shift;
my $cache = shift;
# reuse annotation already attached to the VF
return $vf->{custom} if defined($vf->{custom});
my $annotation = {};
my $chr = $vf->{chr};
if(!defined($cache)) {
# spoof regions
my $regions;
$regions->{$chr} = [$vf->{start}.'-'.$vf->{end}];
$cache = cache_custom_annotation($config, $regions, $chr);
}
foreach my $custom(@{$config->{custom}}) {
next unless defined($cache->{$chr}->{$custom->{name}});
# exact type must match coords of variant exactly
if($custom->{type} eq 'exact') {
foreach my $feature(values %{$cache->{$chr}->{$custom->{name}}->{$vf->{start}}}) {
next unless
$feature->{chr} eq $chr &&
$feature->{start} eq $vf->{start} &&
$feature->{end} eq $vf->{end};
# comma-append; trailing comma trimmed below
$annotation->{$custom->{name}} .= $feature->{name}.',';
foreach my $field(@{$custom->{fields}}) {
$annotation->{$custom->{name}."_".$field} .= $feature->{$field}.',' if defined($feature->{$field});
}
}
}
# overlap type only needs to overlap, but we need to search the whole range
elsif($custom->{type} eq 'overlap') {
foreach my $pos(keys %{$cache->{$chr}->{$custom->{name}}}) {
foreach my $feature(values %{$cache->{$chr}->{$custom->{name}}->{$pos}}) {
next unless
$feature->{chr} eq $chr &&
$feature->{end} >= $vf->{start} &&
$feature->{start} <= $vf->{end};
$annotation->{$custom->{name}} .= $feature->{name}.',';
# NOTE(review): fields are assigned (=) here but appended (.=) in the
# 'exact' branch above; with multiple overlapping features only the
# last feature's field values survive - confirm this is intended
foreach my $field(@{$custom->{fields}}) {
$annotation->{$custom->{name}."_".$field} = $feature->{$field} if defined($feature->{$field});
}
}
}
}
# trim off trailing commas
$annotation->{$custom->{name}} =~ s/\,$//g if defined($annotation->{$custom->{name}});
foreach my $field(@{$custom->{fields}}) {
$annotation->{$custom->{name}."_".$field} =~ s/\,$//g if defined($annotation->{$custom->{name}."_".$field});
}
}
return $annotation;
}
# Decides whether to print a VF based on user-defined consequence filters.
# $config->{filter} maps consequence terms (and the special key 'coding')
# to 1 (include) or 0 (exclude). Returns 1 if the VF passes:
# - when only exclusions are configured, pass unless an excluded term hit;
# - otherwise, pass only if an included term hit and no excluded term hit.
sub filter_by_consequence {
my $config = shift;
my $vf = shift;
my $filters = $config->{filter};
# find it if we only have "no"s
# NOTE(review): with an empty %$filters, (sort ...)[-1] is undef and the
# == comparison warns; presumably callers guarantee a non-empty filter hash
my $only_nos = 0;
$only_nos = 1 if (sort {$a <=> $b} values %$filters)[-1] == 0;
my ($yes, $no) = (0, 0);
# get all consequences across all term types
my @types = ('SO', 'display');
my @cons;
push @cons, @{$vf->consequence_type($_)} for @types;
# structural VFs use the *Structural* accessor method names
my $method_mod = $vf->isa('Bio::EnsEMBL::Variation::StructuralVariationFeature') ? 'Structural' : '';
# add regulatory consequences
if(defined($config->{regulatory})) {
foreach my $term_type(@types) {
my $term_method = $term_type.'_term';
my $m1 = 'get_all_RegulatoryFeature'.$method_mod.'Variations';
my $m2 = 'get_all_RegulatoryFeature'.$method_mod.'VariationAlleles';
for my $rfv (@{ $vf->$m1 }) {
for my $rfva(@{$rfv->$m2}) {
push @cons, map {$_->$term_method} @{ $rfva->get_all_OverlapConsequences };
}
}
$m1 = 'get_all_MotifFeature'.$method_mod.'Variations';
$m2 = 'get_all_MotifFeature'.$method_mod.'VariationAlleles';
for my $mfv (@{ $vf->$m1 }) {
for my $mfva(@{$mfv->$m2}) {
push @cons, map {$_->$term_method} @{ $mfva->get_all_OverlapConsequences };
}
}
}
}
# tally include (1) and exclude (0) hits over terms present in the filter
foreach my $con(grep {defined($_) && defined($filters->{$_})} @cons) {
if($filters->{$con} == 1) {
$yes = 1;
}
else {
$no = 1;
}
}
# check special case, coding
if(defined($filters->{coding})) {
my $method = 'get_all_Transcript'.$method_mod.'Variations';
if(grep {$_->affects_cds} @{$vf->$method}) {
if($filters->{coding} == 1) {
$yes = 1;
}
else {
$no = 1;
}
}
}
my $ok = 0;
if($only_nos) {
$ok = 1 if !$no;
}
else {
$ok = 1 if $yes && !$no;
}
return $ok;
}
# Takes VFs created from input, fixes and checks various things.
# Normalises chromosome name and strand, validates coordinates and allele
# strings, flags unbalanced (indel) substitutions, and optionally checks
# the stated reference allele against the Ensembl reference sequence.
# Returns 1 if the VF is valid, 0 otherwise (warning unless quiet mode).
sub validate_vf {
my $config = shift;
my $vf = shift;
# user specified chr skip list
return 0 if defined($config->{chr}) && !$config->{chr}->{$vf->{chr}};
# fix inputs: strip "chr" prefix (but keep literal "chromosome"), M -> MT
$vf->{chr} =~ s/^chr//ig unless $vf->{chr} =~ /^chromosome$/i;
$vf->{chr} = 'MT' if $vf->{chr} eq 'M';
# normalise strand to "1" / "-1"
$vf->{strand} ||= 1;
$vf->{strand} = ($vf->{strand} =~ /\-/ ? "-1" : "1");
# sanity checks
unless($vf->{start} =~ /^\d+$/ && $vf->{end} =~ /^\d+$/) {
warn("WARNING: Start ".$vf->{start}." or end ".$vf->{end}." coordinate invalid on line ".$config->{line_number}."\n") unless defined $config->{quiet};
return 0;
}
# structural variation?
return validate_svf($config, $vf) if $vf->isa('Bio::EnsEMBL::Variation::StructuralVariationFeature');
# uppercase allele string
# (the [ ] in the tr sets are literal chars mapped to themselves - harmless)
$vf->{allele_string} =~ tr/[a-z]/[A-Z]/;
# NOTE(review): this pattern is unanchored, so any string containing a
# single ACGT- character passes; confirm whether stricter anchoring
# (^...$) was intended
unless($vf->{allele_string} =~ /([ACGT-]+\/*)+/) {
warn("WARNING: Invalid allele string ".$vf->{allele_string}." on line ".$config->{line_number}." or possible parsing error\n") unless defined $config->{quiet};
return 0;
}
# insertion should have start = end + 1
if($vf->{allele_string} =~ /^\-\// && $vf->{start} != $vf->{end} + 1) {
warn(
"WARNING: Alleles look like an insertion (".
$vf->{allele_string}.
") but coordinates are not start = end + 1 (START=".
$vf->{start}.", END=".$vf->{end}.
") on line ".$config->{line_number}."\n"
) unless defined($config->{quiet});
return 0;
}
# check start <= end + 1
if($vf->{start} > $vf->{end} + 1) {
warn(
"WARNING: start > end+1 : (START=".$vf->{start}.
", END=".$vf->{end}.
") on line ".$config->{line_number}."\n"
) unless defined($config->{quiet});
return 0;
}
# check length of reference matches seq length spanned
my @alleles = split /\//, $vf->{allele_string};
my $ref_allele = shift @alleles;
my $tmp_ref_allele = $ref_allele;
$tmp_ref_allele =~ s/\-//g;
#if(($vf->{end} - $vf->{start}) + 1 != length($tmp_ref_allele)) {
# warn(
# "WARNING: Length of reference allele (".$ref_allele.
# " length ".length($tmp_ref_allele).") does not match co-ordinates ".$vf->{start}."-".$vf->{end}.
# " on line ".$config->{line_number}
# ) unless defined($config->{quiet});
# return 0;
#}
# flag as unbalanced (any alt allele with length != ref length)
foreach my $allele(@alleles) {
$allele =~ s/\-//g;
$vf->{indel} = 1 unless length($allele) == length($tmp_ref_allele);
}
# check reference allele if requested
if(defined $config->{check_ref}) {
my $ok = 0;
my $slice_ref_allele;
# insertion, therefore no ref allele to check
if($ref_allele eq '-') {
$ok = 1;
}
else {
my $slice_ref = $vf->{slice}->sub_Slice($vf->{start}, $vf->{end}, $vf->{strand});
if(!defined($slice_ref)) {
warn "WARNING: Could not fetch sub-slice from ".$vf->{start}."\-".$vf->{end}."\(".$vf->{strand}."\) on line ".$config->{line_number} unless defined $config->{quiet};
}
else {
$slice_ref_allele = $slice_ref->seq;
$ok = ($slice_ref_allele eq $ref_allele ? 1 : 0);
}
}
if(!$ok) {
warn
"WARNING: Specified reference allele $ref_allele ",
"does not match Ensembl reference allele",
($slice_ref_allele ? " $slice_ref_allele" : ""),
" on line ".$config->{line_number} unless defined $config->{quiet};
return 0;
}
}
return 1;
}
# Validate a structural variation feature.
# Currently a stub: every SVF is accepted unconditionally.
sub validate_svf {
    my ($config, $svf) = @_;
    return 1;
}
# Takes a hash of VFs and fetches consequences by pre-fetching overlapping
# transcripts from database and/or cache for one chromosome.
# Dispatches to the custom/transcript/regulatory/SV sub-fetchers, then
# flattens $vf_hash into a sorted array of finished VFs which is returned
# as an arrayref. The chromosome's entry in $vf_hash is deleted afterwards.
sub whole_genome_fetch {
    my $config = shift;
    my $chr = shift;
    my $vf_hash = shift;

    my (%vf_done, @finished_vfs, %seen_rfs);

    # offline mode with no cache for this chromosome: return VFs unannotated
    if(defined($config->{offline}) && !-e $config->{dir}.'/'.$chr) {
        # BUGFIX: corrected "chromsome" typo in debug message
        debug("No cache found for chromosome $chr") unless defined($config->{quiet});
        foreach my $chunk(keys %{$vf_hash->{$chr}}) {
            foreach my $pos(keys %{$vf_hash->{$chr}{$chunk}}) {
                push @finished_vfs, @{$vf_hash->{$chr}{$chunk}{$pos}};
            }
        }
        return \@finished_vfs;
    }

    # ensure we have a slice for this chr, from either transcript or reg caches
    my $slice_cache = $config->{slice_cache};
    $slice_cache = build_slice_cache($config, $config->{tr_cache}) unless defined($slice_cache->{$chr});
    $slice_cache = build_slice_cache($config, $config->{rf_cache}) unless defined($slice_cache->{$chr});

    debug("Analyzing chromosome $chr") unless defined($config->{quiet});

    # custom annotations
    whole_genome_fetch_custom($config, $vf_hash, $chr) if defined($config->{custom});

    # split up normal variations from SVs
    my ($tmp_vf_hash, @svfs);
    foreach my $chunk(keys %{$vf_hash->{$chr}}) {
        foreach my $pos(keys %{$vf_hash->{$chr}{$chunk}}) {
            foreach my $vf(@{$vf_hash->{$chr}{$chunk}{$pos}}) {
                if($vf->isa('Bio::EnsEMBL::Variation::StructuralVariationFeature')) {
                    push @svfs, $vf;
                }
                else {
                    push @{$tmp_vf_hash->{$chr}{$chunk}{$pos}}, $vf;
                }
            }
        }
    }
    $vf_hash = $tmp_vf_hash;

    # transcript annotations
    whole_genome_fetch_transcript($config, $vf_hash, $chr)
        unless defined($config->{no_consequences});

    # regulatory annotations
    whole_genome_fetch_reg($config, $vf_hash, $chr)
        if defined($config->{regulatory})
        && !defined($config->{no_consequences});

    # structural variations
    @finished_vfs = @{whole_genome_fetch_sv($config, \@svfs, $chr)}
        if scalar @svfs;

    # sort results into @finished_vfs array
    foreach my $chunk(keys %{$vf_hash->{$chr}}) {
        foreach my $pos(keys %{$vf_hash->{$chr}{$chunk}}) {
            # pinch slice from slice cache if we don't already have it
            $_->{slice} ||= $slice_cache->{$chr} for @{$vf_hash->{$chr}{$chunk}{$pos}};

            # make sure annotation containers exist even when empty
            if(defined($config->{regulatory})) {
                foreach my $type(@REG_FEAT_TYPES) {
                    $_->{regulation_variations}->{$type} ||= [] for @{$vf_hash->{$chr}{$chunk}{$pos}};
                }
            }
            if(defined($config->{custom})) {
                $_->{custom} ||= {} for @{$vf_hash->{$chr}{$chunk}{$pos}};
            }
            $_->{transcript_variations} ||= {} for @{$vf_hash->{$chr}{$chunk}{$pos}};

            # add to final array
            push @finished_vfs, @{$vf_hash->{$chr}{$chunk}{$pos}};
        }
    }

    # sort by input line number, then coordinates
    @finished_vfs = sort {
        ($a->{_line_number} || 1) <=> ($b->{_line_number} || 1) ||
        $a->{start} <=> $b->{start} ||
        $a->{end} <=> $b->{end}
    } @finished_vfs;

    # clean hash
    delete $vf_hash->{$chr};

    return \@finished_vfs;
}
# Fetch and attach custom-track annotations for all VFs on one chromosome.
# Builds per-VF regions (padded by 1bp either side), caches the custom
# annotations for those regions, then stores the per-VF annotation hash
# in each VF's {custom} slot.
sub whole_genome_fetch_custom {
my $config = shift;
my $vf_hash = shift;
my $chr = shift;
return unless scalar @{$config->{custom}};
# create regions based on VFs instead of chunks
my $tmp_regions;
foreach my $chunk(keys %{$vf_hash->{$chr}}) {
foreach my $pos(keys %{$vf_hash->{$chr}{$chunk}}) {
foreach my $vf(@{$vf_hash->{$chr}{$chunk}{$pos}}) {
push @{$tmp_regions->{$chr}}, ($vf->{start}-1).'-'.($vf->{end}+1);
}
}
}
return unless defined($tmp_regions->{$chr});
# cache annotations
my $annotation_cache = cache_custom_annotation($config, $tmp_regions, $chr);
# count and report
my $total_annotations = 0;
$total_annotations += scalar keys %{$annotation_cache->{$chr}->{$_}} for keys %{$annotation_cache->{$chr}};
debug("Retrieved $total_annotations custom annotations (", (join ", ", map {(scalar keys %{$annotation_cache->{$chr}->{$_}}).' '.$_} keys %{$annotation_cache->{$chr}}), ")") unless defined($config->{quiet});
# compare annotations to variations in hash
debug("Analyzing custom annotations") unless defined($config->{quiet});
my $total = scalar keys %{$vf_hash->{$chr}};
my $i = 0;
foreach my $chunk(keys %{$vf_hash->{$chr}}) {
progress($config, $i++, $total);
foreach my $pos(keys %{$vf_hash->{$chr}{$chunk}}) {
foreach my $vf(@{$vf_hash->{$chr}{$chunk}{$pos}}) {
$vf->{custom} = get_custom_annotation($config, $vf, $annotation_cache);
}
}
}
end_progress($config);
}
# Create TranscriptVariation objects for every transcript/VF overlap on
# one chromosome. For each cached transcript, finds VFs within the
# up/downstream window and attaches a prefetched TranscriptVariation to
# each. Also tracks indels on the transcript and, in individual mode,
# flags the last VF analysed per transcript for plugin use.
sub whole_genome_fetch_transcript {
my $config = shift;
my $vf_hash = shift;
my $chr = shift;
my $tr_cache = $config->{tr_cache};
my $slice_cache = $config->{slice_cache};
my $up_down_size = MAX_DISTANCE_FROM_TRANSCRIPT;
# check we have defined regions
return unless defined($vf_hash->{$chr}) && defined($tr_cache->{$chr});
# copy slice from transcript to slice cache
$slice_cache = build_slice_cache($config, $tr_cache) unless defined($slice_cache->{$chr});
debug("Analyzing variants") unless defined($config->{quiet});
my $tr_counter = 0;
my $tr_count = scalar @{$tr_cache->{$chr}};
while($tr_counter < $tr_count) {
progress($config, $tr_counter, $tr_count);
my $tr = $tr_cache->{$chr}->[$tr_counter++];
# do each overlapping VF
my $s = $tr->start - $up_down_size;
my $e = $tr->end + $up_down_size;
# get the chunks this transcript overlaps
my %chunks;
$chunks{$_} = 1 for (int($s/$config->{chunk_size})..int($e/$config->{chunk_size}));
map {delete $chunks{$_} unless defined($vf_hash->{$chr}{$_})} keys %chunks;
# pointer to previous VF
# used to tell plugins this is the last variant analysed in this transcript
my $previous_vf;
foreach my $chunk(keys %chunks) {
foreach my $vf(
grep {$_->{start} <= $e && $_->{end} >= $s}
map {@{$vf_hash->{$chr}{$chunk}{$_}}}
keys %{$vf_hash->{$chr}{$chunk}}
) {
# pinch slice from slice cache if we don't already have it
$vf->{slice} ||= $slice_cache->{$chr};
# replace a spoofed FASTA-backed slice with the real one
$vf->{slice} = $slice_cache->{$chr} if defined($vf->{slice}->{is_fake});
my $tv = Bio::EnsEMBL::Variation::TranscriptVariation->new(
-transcript => $tr,
-variation_feature => $vf,
-adaptor => $config->{tva},
-no_ref_check => 1,
-no_transfer => 1
);
# prefetching stuff here prevents doing loads at the
# end and makes progress reporting more useful
$tv->_prefetch_for_vep;
$vf->add_TranscriptVariation($tv);
# cache VF on the transcript if it is an unbalanced sub
push @{$tr->{indels}}, $vf if defined($vf->{indel});
if(defined($config->{individual})) {
# store VF on transcript, weaken reference to avoid circularity
push @{$tr->{vfs}->{$vf->{individual}}}, $vf;
weaken($tr->{vfs}->{$vf->{individual}}->[-1]);
# only the most recent VF keeps the last_in_transcript flag
delete $previous_vf->{last_in_transcript}->{$tr->stable_id};
$vf->{last_in_transcript}->{$tr->stable_id} = 1;
}
$previous_vf = $vf;
}
}
}
end_progress($config);
}
# Create regulatory-feature variation objects for every RegFeat/VF overlap
# on one chromosome. For each cached regulatory feature type, constructs
# the matching Bio::EnsEMBL::Variation::<type>Variation object for each
# overlapping VF and pushes it onto the VF's {regulation_variations} list.
sub whole_genome_fetch_reg {
my $config = shift;
my $vf_hash = shift;
my $chr = shift;
my $rf_cache = $config->{rf_cache};
my $slice_cache = $config->{slice_cache};
foreach my $type(keys %{$rf_cache->{$chr}}) {
debug("Analyzing ".$type."s") unless defined($config->{quiet});
# class name is derived from the feature type, e.g. RegulatoryFeatureVariation
my $constructor = 'Bio::EnsEMBL::Variation::'.$type.'Variation';
my $rf_counter = 0;
my $rf_count = scalar @{$rf_cache->{$chr}->{$type}};
while($rf_counter < $rf_count) {
progress($config, $rf_counter, $rf_count);
my $rf = $rf_cache->{$chr}->{$type}->[$rf_counter++];
# do each overlapping VF
my $s = $rf->{start};
my $e = $rf->{end};
# get the chunks this transcript overlaps
my %chunks;
$chunks{$_} = 1 for (int($s/$config->{chunk_size})..int($e/$config->{chunk_size}));
map {delete $chunks{$_} unless defined($vf_hash->{$chr}{$_})} keys %chunks;
foreach my $chunk(keys %chunks) {
foreach my $vf(
grep {$_->{start} <= $e && $_->{end} >= $s}
map {@{$vf_hash->{$chr}{$chunk}{$_}}}
keys %{$vf_hash->{$chr}{$chunk}}
) {
push @{$vf->{regulation_variations}->{$type}}, $constructor->new(
-variation_feature => $vf,
-feature => $rf,
-no_ref_check => 1,
-no_transfer => 1
);
}
}
}
end_progress($config);
}
}
# Annotate structural variation features (SVFs) on one chromosome.
# Attaches TranscriptStructuralVariation objects for transcripts within
# the up/downstream window, and StructuralVariationOverlap objects for
# overlapping regulatory features. Returns an arrayref of the processed
# SVFs in input order.
sub whole_genome_fetch_sv {
    my $config = shift;
    my $svfs = shift;
    my $chr = shift;

    my $tr_cache = $config->{tr_cache};
    my $rf_cache = $config->{rf_cache};
    my $slice_cache = $config->{slice_cache};

    debug("Analyzing structural variations") unless defined($config->{quiet});

    my($i, $total) = (0, scalar @$svfs);
    my @finished_vfs;

    foreach my $svf(@$svfs) {
        progress($config, $i++, $total);
        my %done_genes = ();

        # transcripts within the padded up/downstream window
        if(defined($tr_cache->{$chr})) {
            foreach my $tr(grep {overlap($_->{start} - MAX_DISTANCE_FROM_TRANSCRIPT, $_->{end} + MAX_DISTANCE_FROM_TRANSCRIPT, $svf->{start}, $svf->{end})} @{$tr_cache->{$chr}}) {
                my $svo = Bio::EnsEMBL::Variation::TranscriptStructuralVariation->new(
                    -transcript => $tr,
                    -structural_variation_feature => $svf,
                    -no_transfer => 1
                );
                $svf->add_TranscriptStructuralVariation($svo);
            }
        }

        $svf->{transcript_structural_variations} ||= {};

        # do regulatory features
        if(defined($config->{regulatory}) && defined($rf_cache->{$chr})) {
            foreach my $rf_type(qw/RegulatoryFeature/) {#keys %{$rf_cache->{$chr}}) {
                # BUGFIX: the overlap test previously required the feature to
                # span the SV's end ($_->end >= $svf->{end}), missing features
                # that only overlap the SV's start; a true interval overlap is
                # start <= svf_end && end >= svf_start (hash access used
                # consistently, matching the transcript branch above)
                foreach my $rf(grep {$_->{start} <= $svf->{end} && $_->{end} >= $svf->{start}} @{$rf_cache->{$chr}->{$rf_type}}) {
                    my $svo = Bio::EnsEMBL::Variation::StructuralVariationOverlap->new(
                        -feature => $rf,
                        -structural_variation_feature => $svf,
                        -no_transfer => 1
                    );
                    push @{$svf->{regulation_structural_variations}->{$rf_type}}, $svo;
                }
                $svf->{regulation_structural_variations}->{$rf_type} ||= [];
            }
        }

        # sort them
        #$svf->_sort_svos;
        push @finished_vfs, $svf;
    }

    end_progress($config);

    return \@finished_vfs;
}
# Retrieves transcripts given a region list ({chr => ["start-end", ...]}),
# loading from the in-memory cache, on-disk cache or database as available,
# de-duplicating by transcript dbID, and appending to $config->{tr_cache}.
# Returns the total number of transcripts now cached.
sub fetch_transcripts {
    my $config = shift;
    my $regions = shift;
    my $trim_regions = shift;

    my $tr_cache = $config->{tr_cache};
    my $slice_cache = $config->{slice_cache};

    my ($count_from_mem, $count_from_db, $count_from_cache, $count_duplicates, $count_trimmed) = (0, 0, 0, 0, 0);
    my %seen_trs;

    # tally what is already in memory and how many regions we will process
    $count_from_mem = 0;
    my $region_count = 0;
    foreach my $chr(keys %{$regions}) {
        $count_from_mem += scalar @{$tr_cache->{$chr}} if defined($tr_cache->{$chr}) && ref($tr_cache->{$chr}) eq 'ARRAY';
        $region_count += scalar @{$regions->{$chr}};
    }

    my $counter;

    debug("Reading transcript data from cache and/or database") unless defined($config->{quiet});

    foreach my $chr(keys %{$regions}) {
        # BUGFIX: comparator previously compared $a's start against $b's END
        # ((split)[0] <=> (split)[1]), producing an inconsistent sort order;
        # regions are sorted by numeric start coordinate
        foreach my $region(sort {(split /\-/, $a)[0] <=> (split /\-/, $b)[0]} @{$regions->{$chr}}) {
            progress($config, $counter++, $region_count);

            # skip regions beyond the end of the chr
            next if defined($slice_cache->{$chr}) && (split /\-/, $region)[0] > $slice_cache->{$chr}->length;
            next if defined($config->{loaded_tr}->{$chr}->{$region});

            # force quiet so other methods don't mess up the progress bar
            my $quiet = $config->{quiet};
            $config->{quiet} = 1;

            # try and load cache from disk if using cache
            my $tmp_cache;
            if(defined($config->{cache})) {
                #$tmp_cache = (
                # defined($config->{cache_tr_type}) && $config->{cache_tr_type} eq 'tabix' ?
                # load_dumped_transcript_cache_tabix($config, $chr, $region) :
                # load_dumped_transcript_cache($config, $chr, $region)
                #);
                $tmp_cache = load_dumped_transcript_cache($config, $chr, $region);
                $count_from_cache += scalar @{$tmp_cache->{$chr}} if defined($tmp_cache->{$chr});
                $config->{loaded_tr}->{$chr}->{$region} = 1;
            }

            # no cache found on disk or not using cache
            if(!defined($tmp_cache->{$chr})) {
                unless(defined($config->{write_cache}) || defined($config->{database})) {
                    # restore quiet status
                    $config->{quiet} = $quiet;
                    debug("WARNING: Could not find cache for $chr\:$region") unless defined($config->{quiet});
                    next;
                }

                # spoof temporary region hash
                my $tmp_hash;
                push @{$tmp_hash->{$chr}}, $region;
                $tmp_cache = cache_transcripts($config, $tmp_hash);

                # make it an empty arrayref that gets cached
                # so we don't get confused and reload next time round
                $tmp_cache->{$chr} ||= [];
                $count_from_db += scalar @{$tmp_cache->{$chr}};

                # dump to disk if writing to cache
                (defined($config->{tabix}) ? dump_transcript_cache_tabix($config, $tmp_cache, $chr, $region) : dump_transcript_cache($config, $tmp_cache, $chr, $region)) if defined($config->{write_cache});
                $config->{loaded_tr}->{$chr}->{$region} = 1;
            }

            # add loaded transcripts to main cache
            if(defined($tmp_cache->{$chr})) {
                while(my $tr = shift @{$tmp_cache->{$chr}}) {
                    # track already added transcripts by dbID
                    my $dbID = $tr->dbID;
                    if($seen_trs{$dbID}) {
                        $count_duplicates++;
                        next;
                    }
                    # trim out?
                    #if(defined($trim_regions) && defined($trim_regions->{$chr})) {
                    # my $tmp_count = scalar grep {
                    # overlap(
                    # (split /\-/, $_)[0], (split /\-/, $_)[1],
                    # $tr->{start}, $tr->{end}
                    # )
                    # } @{$trim_regions->{$chr}};
                    #
                    # if(!$tmp_count) {
                    # $count_trimmed++;
                    # next;
                    # }
                    #}
                    $seen_trs{$dbID} = 1;
                    push @{$tr_cache->{$chr}}, $tr;
                }
            }

            $tr_cache->{$chr} ||= [];
            undef $tmp_cache;

            # restore quiet status
            $config->{quiet} = $quiet;

            # build slice cache
            $slice_cache = build_slice_cache($config, $tr_cache) unless defined($slice_cache->{$chr});
        }
    }

    end_progress($config);

    my $tr_count = 0;
    $tr_count += scalar @{$tr_cache->{$_}} for keys %$tr_cache;

    debug("Retrieved $tr_count transcripts ($count_from_mem mem, $count_from_cache cached, $count_from_db DB, $count_duplicates duplicates)") unless defined($config->{quiet});

    return $tr_count;
}
# Retrieves regulatory features given a region list, loading from the
# in-memory cache, on-disk cache or database as available, filtering on
# cell type when configured, de-duplicating by dbID, and appending to
# $config->{rf_cache} (keyed chr -> feature type -> list).
# Returns the total number of regulatory features now cached.
sub fetch_regfeats {
    my $config = shift;
    my $regions = shift;
    my $trim_regions = shift;

    my $rf_cache = $config->{rf_cache};
    my $slice_cache = $config->{slice_cache};

    my ($count_from_mem, $count_from_db, $count_from_cache, $count_duplicates, $count_trimmed) = (0, 0, 0, 0, 0);
    my %seen_rfs;

    # tally what is already in memory and how many regions we will process
    $count_from_mem = 0;
    my $region_count = 0;
    foreach my $chr(keys %$regions) {
        if(defined($rf_cache->{$chr}) && ref($rf_cache->{$chr}) eq 'HASH') {
            $count_from_mem += scalar @{$rf_cache->{$chr}->{$_}} for keys %{$rf_cache->{$chr}};
        }
        $region_count += scalar @{$regions->{$chr}};
    }

    my $counter = 0;

    debug("Reading regulatory data from cache and/or database") unless defined($config->{quiet});

    foreach my $chr(keys %$regions) {
        # BUGFIX: comparator previously string-compared (cmp) $a's start
        # against $b's END ((split)[0] cmp (split)[1]); regions are numeric
        # start-end pairs, so sort numerically by start, consistent with
        # fetch_transcripts
        foreach my $region(sort {(split /\-/, $a)[0] <=> (split /\-/, $b)[0]} @{$regions->{$chr}}) {
            progress($config, $counter++, $region_count);

            next if defined($config->{loaded_rf}->{$chr}->{$region});

            # skip regions beyond the end of the chr
            next if defined($slice_cache->{$chr}) && (split /\-/, $region)[0] > $slice_cache->{$chr}->length;

            # force quiet so other methods don't mess up the progress bar
            my $quiet = $config->{quiet};
            $config->{quiet} = 1;

            # try and load cache from disk if using cache
            my $tmp_cache;
            if(defined($config->{cache})) {
                $tmp_cache = load_dumped_reg_feat_cache($config, $chr, $region);
                #$tmp_cache =
                # defined($config->{tabix}) ?
                # load_dumped_reg_feat_cache_tabix($config, $chr, $region, $trim_regions) :
                # load_dumped_reg_feat_cache($config, $chr, $region);
                if(defined($tmp_cache->{$chr})) {
                    $count_from_cache += scalar @{$tmp_cache->{$chr}->{$_}} for keys %{$tmp_cache->{$chr}};
                }
                # flag as loaded
                $config->{loaded_rf}->{$chr}->{$region} = 1;
            }

            # no cache found on disk or not using cache
            if(!defined($tmp_cache->{$chr})) {
                unless(defined($config->{write_cache}) || defined($config->{database})) {
                    # restore quiet status
                    $config->{quiet} = $quiet;
                    debug("WARNING: Could not find cache for $chr\:$region") unless defined($config->{quiet});
                    next;
                }

                # spoof temporary region hash
                my $tmp_hash;
                push @{$tmp_hash->{$chr}}, $region;
                $tmp_cache = cache_reg_feats($config, $tmp_hash);

                # make it an empty arrayref that gets cached
                # so we don't get confused and reload next time round
                $tmp_cache->{$chr} ||= {};
                $count_from_db += scalar @{$tmp_cache->{$chr}->{$_}} for keys %{$tmp_cache->{$chr}};

                # dump to disk if writing to cache
                #dump_reg_feat_cache($config, $tmp_cache, $chr, $region) if defined($config->{write_cache});
                (defined($config->{tabix}) ? dump_reg_feat_cache_tabix($config, $tmp_cache, $chr, $region) : dump_reg_feat_cache($config, $tmp_cache, $chr, $region)) if defined($config->{write_cache});

                # restore deleted coord_system adaptor
                foreach my $type(keys %{$tmp_cache->{$chr}}) {
                    $_->{slice}->{coord_system}->{adaptor} = $config->{csa} for @{$tmp_cache->{$chr}->{$type}};
                }

                # flag as loaded
                $config->{loaded_rf}->{$chr}->{$region} = 1;
            }

            # add loaded reg_feats to main cache
            if(defined($tmp_cache->{$chr})) {
                foreach my $type(keys %{$tmp_cache->{$chr}}) {
                    while(my $rf = shift @{$tmp_cache->{$chr}->{$type}}) {

                        # filter on cell type
                        if(defined($config->{cell_type}) && scalar(@{$config->{cell_type}})) {
                            next unless grep {$rf->{cell_types}->{$_}} @{$config->{cell_type}};
                        }

                        # trim out?
                        #if(defined($trim_regions) && defined($trim_regions->{$chr})) {
                        # my $tmp_count = scalar grep {
                        # overlap(
                        # (split /\-/, $_)[0], (split /\-/, $_)[1],
                        # $rf->{start}, $rf->{end}
                        # )
                        # } @{$trim_regions->{$chr}};
                        #
                        # if(!$tmp_count) {
                        # $count_trimmed++;
                        # next;
                        # }
                        #}

                        # track already added reg_feats by dbID
                        my $dbID = $rf->{dbID};
                        if($seen_rfs{$dbID}) {
                            $count_duplicates++;
                            next;
                        }
                        $seen_rfs{$dbID} = 1;

                        push @{$rf_cache->{$chr}->{$type}}, $rf;
                    }
                }
            }

            undef $tmp_cache;

            # restore quiet status
            $config->{quiet} = $quiet;
        }
    }

    end_progress($config);

    my $rf_count = 0;
    foreach my $chr(keys %$rf_cache) {
        foreach my $type(keys %{$rf_cache->{$chr}}) {
            $rf_count += scalar @{$rf_cache->{$chr}->{$type}};
        }
    }

    debug("Retrieved $rf_count regulatory features ($count_from_mem mem, $count_from_cache cached, $count_from_db DB, $count_duplicates duplicates)") unless defined($config->{quiet});

    return $rf_count;
}
# Gets existing (known) variants for a vf_hash, populating each VF's
# {existing} arrayref with co-located variants that pass the configured
# failed-variant threshold. Sources variants from the on-disk variation
# cache (falling back to DB) when caching is enabled, otherwise directly
# from the database over the minimal covered region.
sub check_existing_hash {
my $config = shift;
my $vf_hash = shift;
my $variation_cache;
# we only care about non-SVs here
# (rebinding the local $vf_hash; the caller's hash is not modified)
my %new_hash;
foreach my $chr(keys %{$vf_hash}) {
foreach my $chunk(keys %{$vf_hash->{$chr}}) {
foreach my $pos(keys %{$vf_hash->{$chr}->{$chunk}}) {
foreach my $var(grep {$_->isa('Bio::EnsEMBL::Variation::VariationFeature')} @{$vf_hash->{$chr}->{$chunk}->{$pos}}) {
push @{$new_hash{$chr}->{$chunk}->{$pos}}, $var;
}
}
}
}
$vf_hash = \%new_hash;
debug("Checking for existing variations") unless defined($config->{quiet});
my ($chunk_count, $counter);
$chunk_count += scalar keys %{$vf_hash->{$_}} for keys %{$vf_hash};
foreach my $chr(keys %{$vf_hash}) {
my %loaded_regions;
foreach my $chunk(keys %{$vf_hash->{$chr}}) {
progress($config, $counter++, $chunk_count);
# get the VFs for this chunk
my ($start, $end);
# work out start and end using chunk_size
$start = $config->{chunk_size} * $chunk;
$end = $config->{chunk_size} * ($chunk + 1);
# using cache?
if(defined($config->{cache})) {
my $tmp_regions;
push @{$tmp_regions->{$chr}}, $start.'-'.$end;
my $converted_regions = convert_regions($config, $tmp_regions);
foreach my $region(@{$converted_regions->{$chr}}) {
unless($loaded_regions{$region}) {
my $tmp_cache = load_dumped_variation_cache($config, $chr, $region);
# load from DB if not found in cache
if(!defined($tmp_cache->{$chr})) {
unless(defined($config->{write_cache}) || defined($config->{database})) {
debug("WARNING: Could not find variation cache for $chr\:$region") unless defined($config->{quiet});
next;
}
$tmp_cache->{$chr} = get_variations_in_region($config, $chr, $region);
dump_variation_cache($config, $tmp_cache, $chr, $region) if defined($config->{write_cache});
}
# merge tmp_cache with the main cache
foreach my $key(keys %{$tmp_cache->{$chr}}) {
$variation_cache->{$chr}->{$key} = $tmp_cache->{$chr}->{$key};
delete $tmp_cache->{$chr}->{$key};
}
# clear memory
undef $tmp_cache;
# record this region as fetched
$loaded_regions{$region} = 1;
}
}
}
# no cache, get all variations in region from DB
else {
my ($min, $max);
# we can fetch smaller region when using DB
foreach my $pos(keys %{$vf_hash->{$chr}->{$chunk}}) {
foreach my $var(@{$vf_hash->{$chr}->{$chunk}->{$pos}}) {
foreach my $coord(qw(start end)) {
$min = $var->{$coord} if !defined($min) || $var->{$coord} < $min;
$max = $var->{$coord} if !defined($max) || $var->{$coord} > $max;
}
}
}
$variation_cache->{$chr} = get_variations_in_region($config, $chr, $min.'-'.$max);
}
# now compare retrieved vars with vf_hash
foreach my $pos(keys %{$vf_hash->{$chr}->{$chunk}}) {
foreach my $var(@{$vf_hash->{$chr}->{$chunk}->{$pos}}) {
my @found;
if(defined($variation_cache->{$chr})) {
if(my $existing_vars = $variation_cache->{$chr}->{$pos}) {
# keep existing vars within the failed threshold that match alleles
foreach my $existing_var(grep {$_->{failed} <= $config->{failed}} @$existing_vars) {
unless(is_var_novel($config, $existing_var, $var)) {
push @found, $existing_var;
}
}
}
}
$var->{existing} = \@found;
$var->{existing} ||= [];
}
}
}
# free this chromosome's cache before moving on
delete $variation_cache->{$chr};
}
end_progress($config);
}
# Gets existing (known) variants for a list of VFs using tabix-indexed
# per-chromosome cache files. Queries tabix in batches of up to 200
# position-sorted VFs, walking the sorted tabix output in step with the
# sorted batch, and pushes matching existing variants onto each VF's
# {existing} arrayref (always left at least as an empty arrayref).
sub check_existing_tabix {
    my $config = shift;
    my $listref = shift;

    debug("Checking for existing variations") unless defined($config->{quiet});

    # we only care about non-SVs here
    my %by_chr;
    push @{$by_chr{$_->{chr}}}, $_ for grep {$_->isa('Bio::EnsEMBL::Variation::VariationFeature')} @$listref;

    my $max = 200;
    my $total = scalar @$listref;
    my $i = 0;

    foreach my $chr(keys %by_chr) {
        my $list = $by_chr{$chr};

        while(scalar @$list) {
            my @tmp_list = sort {$a->{start} <=> $b->{start}} splice @$list, 0, $max;
            progress($config, $i, $total);
            $i += scalar @tmp_list;

            # region strings always start-ascending, even for insertions
            my $region_string = join " ", map {$_->{chr}.':'.($_->{start} > $_->{end} ? $_->{end}.'-'.$_->{start} : $_->{start}.'-'.$_->{end})} @tmp_list;

            my $file = get_dump_file_name($config, $chr, "all", "vars");
            next unless -e $file;
            #die("ERROR: Could not read from file $file\n") unless -e $file;

            # IDIOM FIX: lexical filehandle instead of the package-global
            # bareword VARS (safe against clobbering elsewhere in the file)
            open my $vars_fh, "tabix $file $region_string 2>&1 |"
                or die "\nERROR: Could not open tabix pipe for $file\n";

            # index into the sorted batch, advanced as tabix output passes
            # each input's start (renamed from a shadowing inner $i)
            my $list_idx = 0;

            VAR: while(<$vars_fh>) {
                chomp;
                my $existing = parse_variation($config, $_);
                #print STDERR "EX ".$existing->{variation_name}." ".$existing->{start}."\n";

                # compare to current indexed var
                my $input = $tmp_list[$list_idx];
                last if !$input;

                while($existing->{start} >= $input->{start}) {
                    #print STDERR "IN ".$input->{variation_name}." ".$input->{start}."\n";
                    if($existing->{start} == $input->{start} && $existing->{failed} <= $config->{failed}) {
                        push @{$input->{existing}}, $existing unless is_var_novel($config, $existing, $input);
                        next VAR;
                    }
                    else {
                        $list_idx++;
                        $input = $tmp_list[$list_idx];
                        last if !$input;
                    }
                }
            }
            close $vars_fh;

            $_->{existing} ||= [] for @tmp_list;
        }
    }

    end_progress($config);
}
# Gets overlapping structural variations for a vf_hash.
# For each chunk, fetches SVs on the corresponding slice from the DB and
# stores a comma-joined string of overlapping SV names in each VF's
# {overlapping_svs} slot. Requires a slice adaptor in $config->{sa}.
sub check_svs_hash {
my $config = shift;
my $vf_hash = shift;
debug("Checking for overlapping structural variations") unless defined($config->{quiet});
my ($chunk_count, $counter);
$chunk_count += scalar keys %{$vf_hash->{$_}} for keys %{$vf_hash};
foreach my $chr(keys %{$vf_hash}) {
foreach my $chunk(keys %{$vf_hash->{$chr}}) {
progress($config, $counter++, $chunk_count);
# work out start and end using chunk_size
my ($start, $end);
$start = $config->{chunk_size} * $chunk;
$end = $config->{chunk_size} * ($chunk + 1);
# check for structural variations
if(defined($config->{sa})) {
my $slice = $config->{sa}->fetch_by_region(undef, $chr, $start, $end);
if(defined($slice)) {
my $svs = $config->{svfa}->fetch_all_by_Slice($slice);
foreach my $pos(keys %{$vf_hash->{$chr}->{$chunk}}) {
foreach my $var(@{$vf_hash->{$chr}->{$chunk}->{$pos}}) {
# join names of all SVs overlapping this variant
my $string = join ",",
map {$_->variation_name}
grep {$_->seq_region_start <= $var->end && $_->seq_region_end >= $var->start}
@$svs;
$var->{overlapping_svs} = $string if $string;
}
}
}
}
}
}
end_progress($config);
}
# Gets a slice for a chromosome, preferring (in order): the slice cache,
# a spoofed slice backed by a FASTA DB (unless $use_db forces the DB),
# then a fetch via the slice adaptor (optionally the "otherfeatures" one).
# May return undef when no adaptor is available or the fetch fails.
sub get_slice {
my $config = shift;
my $chr = shift;
my $otherfeatures = shift;
my $use_db = shift;
$otherfeatures ||= '';
return $config->{slice_cache}->{$chr} if defined($config->{slice_cache}) && defined($config->{slice_cache}->{$chr});
my $slice;
# with a FASTA DB we can just spoof slices
if(defined($config->{fasta_db}) && !defined($use_db)) {
my $length = $config->{fasta_db}->length($chr) || 1;
$slice = Bio::EnsEMBL::Slice->new(
-COORD_SYSTEM => $config->{coord_system},
-START => 1,
-END => $length,
-SEQ_REGION_NAME => $chr,
-SEQ_REGION_LENGTH => $length
);
# flag so downstream code can swap it for a real slice later
$slice->{is_fake} = 1;
return $slice;
}
return undef unless defined($config->{sa}) && defined($chr);
# first try to get a chromosome
# (errors deliberately swallowed; undef $slice signals failure to caller)
eval { $slice = $config->{$otherfeatures.'sa'}->fetch_by_region(undef, $chr); };
return $slice;
}
# METHODS THAT DEAL WITH "REGIONS"
##################################
# Gets regions from a VF hash, returning {chr => ["start-end", ...]}.
# In cache mode, regions are aligned to cache_region_size boundaries
# (since that is the granularity data is cached at); otherwise the minimal
# padded regions covering the variants are built and merged.
sub regions_from_hash {
my $config = shift;
my $vf_hash = shift;
my %include_regions;
# if using cache we just want the regions of cache_region_size
# since that's what we'll get from the cache (or DB if no cache found)
if(defined($config->{cache})) {
my $region_size = $config->{cache_region_size};
foreach my $chr(keys %$vf_hash) {
$include_regions{$chr} = [];
my %temp_regions;
foreach my $chunk(keys %{$vf_hash->{$chr}}) {
foreach my $pos(keys %{$vf_hash->{$chr}{$chunk}}) {
# pad the position span by the transcript search distance,
# then mark every region-size bucket it touches
my @tmp = sort {$a <=> $b} map {($_->{start}, $_->{end})} @{$vf_hash->{$chr}{$chunk}{$pos}};
my ($s, $e) = ($tmp[0] - MAX_DISTANCE_FROM_TRANSCRIPT, $tmp[-1] + MAX_DISTANCE_FROM_TRANSCRIPT);
my $low = int ($s / $region_size);
my $high = int ($e / $region_size) + 1;
for my $i($low..($high - 1)) {
$temp_regions{(($i * $region_size) + 1).'-'.(($i + 1) * $region_size)} = 1;
}
}
}
@{$include_regions{$chr}} = keys %temp_regions;
}
}
# if no cache we don't want to fetch more than is necessary, so find the
# minimum covered region of the variations in the hash
else {
foreach my $chr(keys %$vf_hash) {
$include_regions{$chr} = [];
foreach my $chunk(keys %{$vf_hash->{$chr}}) {
foreach my $pos(keys %{$vf_hash->{$chr}{$chunk}}) {
add_region($_->start, $_->end, $include_regions{$chr}) for @{$vf_hash->{$chr}{$chunk}{$pos}};
}
}
}
# merge regions
merge_regions(\%include_regions, $config);
}
return \%include_regions;
}
# Add the span [$start,$end] to a list of "start-end" region strings,
# widening every overlapping entry in place; if nothing overlaps, a new
# MAX_DISTANCE_FROM_TRANSCRIPT-padded region is appended.
sub add_region {
    my ($start, $end, $region_list) = @_;

    # insertions can have end < start; normalise
    $end = $start if $end < $start;

    my $touched = 0;

    for my $idx(0..$#{$region_list}) {
        my ($r_start, $r_end) = split /\-/, $region_list->[$idx];

        next unless $start <= $r_end && $end >= $r_start;

        # pad the incoming span before widening the existing region
        my $pad_start = ($start < $end ? $start : $end) - MAX_DISTANCE_FROM_TRANSCRIPT;
        my $pad_end   = ($start > $end ? $start : $end) + MAX_DISTANCE_FROM_TRANSCRIPT;
        $pad_start = 1 if $pad_start < 1;

        $r_start = $pad_start if $pad_start < $r_start;
        $r_end   = $pad_end   if $pad_end   > $r_end;

        $region_list->[$idx] = $r_start.'-'.$r_end;
        $touched = 1;
    }

    unless($touched) {
        my $s = $start - MAX_DISTANCE_FROM_TRANSCRIPT;
        $s = 1 if $s < 1;
        push @{$region_list}, $s.'-'.($end + MAX_DISTANCE_FROM_TRANSCRIPT);
    }
}
# Merge overlapping (optionally within $consecutive bp of each other)
# "start-end" regions per chromosome. Rewrites $include_regions in place,
# adds the resulting counts to $config->{region_count}, and returns
# $include_regions.
sub merge_regions {
    my ($include_regions, $config, $consecutive) = @_;
    $consecutive ||= 0;

    for my $chr(keys %$include_regions) {
        my $last = $#{$include_regions->{$chr}};
        my (@merged, %consumed);

        for my $i(0..$last) {
            next if $consumed{$i};

            my ($s, $e) = split /\-/, $include_regions->{$chr}[$i];

            # absorb every later region that overlaps this growing span
            for my $j(($i+1)..$last) {
                next if $consumed{$j};

                my ($ns, $ne) = split /\-/, $include_regions->{$chr}[$j];

                if($s <= ($ne + $consecutive) && $e >= ($ns - $consecutive)) {
                    $s = $ns if $ns < $s;
                    $e = $ne if $ne > $e;
                    $consumed{$j} = 1;
                }
            }

            push @merged, $s.'-'.$e;
        }

        # replace original list and log the count for progress reporting
        $include_regions->{$chr} = \@merged;
        $config->{region_count} += scalar @merged;
    }

    return $include_regions;
}
# Re-express arbitrary "start-end" regions (as found by scan_file) as
# cache_region_size-aligned bins loadable from the cache. Returns a fresh
# chr => [bins] hashref, or undef when $regions is undef.
sub convert_regions {
    my ($config, $regions) = @_;

    return undef unless defined $regions;

    my $size = $config->{cache_region_size};
    my %converted;

    for my $chr(keys %$regions) {
        my %bins;

        for my $region(@{$regions->{$chr}}) {
            my ($s, $e) = split /\-/, $region;

            # mark every cache bin the region touches
            for my $bin(int($s / $size) .. int($e / $size)) {
                $bins{(($bin * $size) + 1).'-'.(($bin + 1) * $size)} = 1;
            }
        }

        $converted{$chr} = [keys %bins];
    }

    return \%converted;
}
# CACHE METHODS
###############
# prunes a cache to get rid of features not in regions in use
#
# Works on either cache shape: transcript caches (chr => [features]) or
# regfeat caches (chr => type => [features]). Chromosomes with no active
# regions are dropped entirely; remaining feature lists are trimmed to the
# overall min/max span of that chromosome's regions via prune_min_max().
# Entries in $loaded (chr => region => flag) for regions no longer in use
# are also removed, in place. Returns the total number of features kept.
sub prune_cache {
    my $config = shift;
    my $cache = shift;
    my $regions = shift;
    my $loaded = shift;

    # delete no longer in use chroms
    foreach my $chr(keys %$cache) {
        delete $cache->{$chr} unless defined $regions->{$chr} && scalar @{$regions->{$chr}};
    }

    my $new_count = 0;

    foreach my $chr(keys %$cache) {
        # get total area spanned by regions for this chromosome
        my ($min, $max);
        foreach my $region(@{$regions->{$chr}}) {
            my ($s, $e) = split /\-/, $region;
            $min = $s if !defined($min) or $s < $min;
            $max = $e if !defined($max) or $e > $max;
        }

        # transcript cache: flat list of features
        if(ref($cache->{$chr}) eq 'ARRAY') {
            $cache->{$chr} = prune_min_max($cache->{$chr}, $min, $max);
            $new_count += scalar @{$cache->{$chr}};
        }
        # regfeat cache: keyed by feature type
        elsif(ref($cache->{$chr}) eq 'HASH') {
            for(keys %{$cache->{$chr}}) {
                $cache->{$chr}->{$_} = prune_min_max($cache->{$chr}->{$_}, $min, $max);
                $new_count += scalar @{$cache->{$chr}->{$_}};
            }
        }

        # update loaded regions: forget regions that are no longer wanted
        my %have_regions = map {$_ => 1} @{$regions->{$chr}};

        foreach my $region(keys %{$loaded->{$chr}}) {
            delete $loaded->{$chr}->{$region} unless defined $have_regions{$region};
        }
    }

    return $new_count;
}
# Keep only features overlapping the open interval bounded by $min/$max,
# returning them in a new arrayref. Discarded features that carry a
# translation get their circular transcript<->translation link broken so
# the memory can be reclaimed.
sub prune_min_max {
    my ($features, $min, $max) = @_;

    my @kept;

    for my $feat(@{$features}) {
        # strict-overlap test: feature must start before $max and end after $min
        if($max - $feat->start() > 0 && $feat->end - $min > 0) {
            push @kept, $feat;
        }
        # do some cleaning for pruned transcripts
        elsif(defined $feat->{translation}) {
            delete $feat->{translation}->{transcript};
            delete $feat->{translation};
        }
    }

    undef $features;
    return \@kept;
}
# get transcripts for slices
#
# For every region of every chromosome in $include_regions, fetches genes
# from the core DB, transfers them to the chromosome slice, annotates each
# transcript (gene link, canonical flag, optional prefetch/CCDS), optionally
# strips them for cache writing, and shares SIFT/PolyPhen predictions
# between transcripts with identical peptides. Returns chr => [transcripts].
sub cache_transcripts {
    my $config = shift;
    my $include_regions = shift;

    my $tr_cache;
    my $i;

    debug("Caching transcripts") unless defined($config->{quiet});

    foreach my $chr(keys %$include_regions) {
        my $slice = get_slice($config, $chr, undef, 1);

        next unless defined $slice;

        # prefetch some things
        $slice->is_circular;

        # trim bumf off the slice
        delete $slice->{coord_system}->{adaptor} if defined($config->{write_cache});

        # no regions? Spoof bins spanning the whole slice
        if(!scalar @{$include_regions->{$chr}}) {
            my $start = 1;
            my $end = $config->{cache_region_size};

            while($start < $slice->end) {
                push @{$include_regions->{$chr}}, $start.'-'.$end;
                $start += $config->{cache_region_size};
                $end += $config->{cache_region_size};
            }
        }

        my $region_count;

        # single-chromosome runs get a per-chromosome progress total
        if(scalar keys %$include_regions == 1) {
            my ($chr) = keys %$include_regions;
            $region_count = scalar @{$include_regions->{$chr}};
            debug("Caching transcripts for chromosome $chr") unless defined($config->{quiet});
        }

        foreach my $region(@{$include_regions->{$chr}}) {
            progress($config, $i++, $region_count || $config->{region_count});

            my ($s, $e) = split /\-/, $region;

            # sanity check start and end
            $s = 1 if $s < 1;
            $e = $slice->end if $e > $slice->end;

            # get sub-slice
            my $sub_slice = $slice->sub_Slice($s, $e);

            # add transcripts to the cache, via a transfer to the chrom's slice
            if(defined($sub_slice)) {

                # for some reason unless seq is called here the sequence becomes Ns later
                $sub_slice->seq;

                foreach my $gene(map {$_->transfer($slice)} @{$sub_slice->get_all_Genes(undef, undef, 1)}) {
                    my $gene_stable_id = $gene->stable_id;
                    my $canonical_tr_id = $gene->{canonical_transcript_id};

                    my @trs;

                    foreach my $tr(@{$gene->get_all_Transcripts}) {
                        # link transcript back to its gene
                        $tr->{_gene_stable_id} = $gene_stable_id;
                        $tr->{_gene} = $gene;

                        # indicate if canonical
                        $tr->{is_canonical} = 1 if defined $canonical_tr_id and $tr->dbID eq $canonical_tr_id;

                        if(defined($config->{prefetch})) {
                            prefetch_transcript_data($config, $tr);
                        }

                        # CCDS cross-reference only (when not prefetching everything)
                        elsif(defined($config->{ccds})) {
                            my @entries = grep {$_->database eq 'CCDS'} @{$tr->get_all_DBEntries};
                            $tr->{_ccds} = $entries[0]->display_id if scalar @entries;
                        }

                        # strip some unnecessary data from the transcript object
                        clean_transcript($tr) if defined($config->{write_cache});

                        push @trs, $tr;
                    }

                    # sort the transcripts by translation so we can share sift/polyphen stuff
                    # between transcripts and save cache space
                    if(defined($config->{write_cache}) && (defined($config->{sift}) || defined($config->{polyphen}))) {
                        my $prev_tr;

                        # sort them by peptide sequence as transcripts with identical peptides
                        # will have identical SIFT/PolyPhen prediction strings
                        foreach my $tr(sort {$a->{_variation_effect_feature_cache}->{peptide} cmp $b->{_variation_effect_feature_cache}->{peptide}} grep {$_->{_variation_effect_feature_cache}->{peptide}} @trs) {
                            if(
                                defined($prev_tr) &&
                                $prev_tr->{_variation_effect_feature_cache}->{peptide}
                                    eq $tr->{_variation_effect_feature_cache}->{peptide}
                            ) {
                                # identical peptide: share the prediction objects
                                foreach my $analysis(qw(sift polyphen)) {
                                    next unless defined($config->{$analysis});
                                    $tr->{_variation_effect_feature_cache}->{protein_function_predictions}->{$analysis} = $prev_tr->{_variation_effect_feature_cache}->{protein_function_predictions}->{$analysis};
                                }
                            }

                            $prev_tr = $tr;
                        }
                    }

                    # clean the gene
                    clean_gene($gene);

                    push @{$tr_cache->{$chr}}, @trs;
                }
            }
        }
    }

    end_progress($config);

    return $tr_cache;
}
# Strip db-linked and otherwise unneeded fields from a transcript before it
# is written to the cache. Keeps only miRNA attributes, and re-points the
# translation's back-link at this transcript (weakened to avoid a leak).
sub clean_transcript {
    my $tr = shift;

    # fields we never need again once cached
    delete $tr->{$_} for grep {defined($tr->{$_})} qw(
        display_xref external_db external_display_name external_name
        external_status created_date status description edits_enabled
        modified_date dbentries is_current analysis transcript_mapper
    );

    # clean all attributes but miRNA
    if(defined($tr->{attributes})) {
        $tr->{attributes} = [grep {$_->{code} eq 'miRNA'} @{$tr->{attributes}}];
    }

    # clean the translation
    if(defined($tr->translation)) {

        # sometimes the translation points to a different transcript?
        $tr->{translation}->{transcript} = $tr;
        weaken($tr->{translation}->{transcript});

        delete $tr->translation->{$_} for qw(attributes protein_features created_date modified_date);
    }
}
# Reduce a gene object to the bare minimum -- coordinates, strand and stable
# ID. Genes are only used for their locations when looking at structural
# variations, so everything else can go.
sub clean_gene {
    my $gene = shift;

    my %keep = map {$_ => 1} qw(start end strand stable_id);

    # drop every other field
    for my $field(keys %{$gene}) {
        delete $gene->{$field} unless $keep{$field};
    }
}
# build slice cache from transcript cache
#
# Derives a chr => slice map from features already in a cache: works for
# both flat transcript caches (chr => [features]) and regfeat caches
# (chr => type => [features]). Empty chromosome entries fall back to
# get_slice(). Reattaches the coord system adaptor and records chromosome
# lengths for the stats. Returns a hashref.
sub build_slice_cache {
    my $config = shift;
    my $tr_cache = shift;

    my %slice_cache;

    foreach my $chr(keys %$tr_cache) {
        my $tmp = $tr_cache->{$chr};

        # regfeat cache shape: keyed by feature type
        if(ref($tmp) eq 'HASH') {
            foreach my $type(keys %$tmp) {
                # take the slice from the first cached feature, else fetch fresh
                $slice_cache{$chr} ||= scalar @{$tmp->{$type}} ? $tmp->{$type}->[0]->slice : &get_slice($config, $chr);
            }
        }

        # transcript cache shape: flat array
        else {
            $slice_cache{$chr} ||= scalar @$tmp ? $tmp->[0]->slice : &get_slice($config, $chr);
        }

        if(!defined($slice_cache{$chr})) {
            delete $slice_cache{$chr}
        }
        else {
            # reattach adaptor to the coord system
            $slice_cache{$chr}->{coord_system}->{adaptor} ||= $config->{csa};

            # log length for stats
            $config->{stats}->{chr_lengths}->{$chr} ||= $slice_cache{$chr}->end;
        }
    }

    return \%slice_cache;
}
# pre-fetches per-transcript data
#
# Populates $tr->{_variation_effect_feature_cache} (introns, translateable
# sequence, mapper, peptide, protein features, codon table, SIFT/PolyPhen
# predictions) plus convenience fields (_gene, _gene_symbol, _ccds, _refseq,
# _protein) so later consequence calculation needs no DB access.
# Mutates and returns $tr.
sub prefetch_transcript_data {
    my $config = shift;
    my $tr = shift;

    # introns (stripped of db-linked fields before caching)
    my $introns = $tr->get_all_Introns;

    if(defined($introns)) {
        foreach my $intron(@$introns) {
            foreach my $key(qw(adaptor analysis dbID next prev seqname)) {
                delete $intron->{$key};
            }
        }
    }

    $tr->{_variation_effect_feature_cache}->{introns} ||= $introns;

    # translateable_seq, mapper
    $tr->{_variation_effect_feature_cache}->{translateable_seq} ||= $tr->translateable_seq;
    $tr->{_variation_effect_feature_cache}->{mapper} ||= $tr->get_TranscriptMapper;

    # peptide (undef for non-coding transcripts)
    unless ($tr->{_variation_effect_feature_cache}->{peptide}) {
        my $translation = $tr->translate;
        $tr->{_variation_effect_feature_cache}->{peptide} = $translation ? $translation->seq : undef;
    }

    # protein features
    if(defined($config->{domains}) || defined($config->{write_cache})) {
        my $pfs = $tr->translation ? $tr->translation->get_all_ProteinFeatures : [];

        # clean them to save cache space
        foreach my $pf(@$pfs) {

            # remove everything but the coord, analysis and ID fields
            foreach my $key(keys %$pf) {
                delete $pf->{$key} unless
                    $key eq 'start' ||
                    $key eq 'end' ||
                    $key eq 'analysis' ||
                    $key eq 'hseqname';
            }

            # remove everything from the analysis but the display label
            foreach my $key(keys %{$pf->{analysis}}) {
                delete $pf->{analysis}->{$key} unless $key eq '_display_label';
            }
        }

        $tr->{_variation_effect_feature_cache}->{protein_features} = $pfs;
    }

    # codon table
    unless ($tr->{_variation_effect_feature_cache}->{codon_table}) {
        # for mitochondrial DNA we need to use a different codon table;
        # default is the standard table (1)
        my $attrib = $tr->slice->get_all_Attributes('codon_table')->[0];
        $tr->{_variation_effect_feature_cache}->{codon_table} = $attrib ? $attrib->value : 1;
    }

    # sift/polyphen predictions, keyed on peptide md5
    if(defined($config->{pfpma}) && defined($tr->{_variation_effect_feature_cache}->{peptide})) {
        my @a = qw(sift);

        # full build wants both polyphen scores
        if(defined($config->{build})) {
            push @a, ('polyphen_humvar', 'polyphen_humdiv');
        }
        # otherwise just fetch requested
        else {
            push @a, 'polyphen_'.$config->{polyphen_analysis};
        }

        foreach my $a(@a) {
            # only fetch analyses the user actually enabled
            next unless defined($config->{(split "_", $a)[0]});
            $tr->{_variation_effect_feature_cache}->{protein_function_predictions}->{$a} ||= $config->{pfpma}->fetch_by_analysis_translation_md5($a, md5_hex($tr->{_variation_effect_feature_cache}->{peptide}))
        }
    }

    # gene
    $tr->{_gene} ||= $config->{ga}->fetch_by_transcript_stable_id($tr->stable_id);

    # gene symbol
    if(defined $config->{symbol}) {

        # get from gene cache if found already
        if(defined($tr->{_gene}->{_symbol})) {
            $tr->{_gene_symbol} = $tr->{_gene}->{_symbol};
            $tr->{_gene_symbol_source} = $tr->{_gene}->{_symbol_source}
        }
        else {
            $tr->{_gene_symbol} ||= undef;
            $tr->{_gene_symbol_source} ||= undef;

            # prefer the display xref; fall back to RefSeq gene name
            if(my $xref = $tr->{_gene}->display_xref) {
                $tr->{_gene_symbol} = $xref->display_id;
                $tr->{_gene_symbol_source} = $xref->dbname;
            }
            else {
                my ($entry) = @{$tr->{_gene}->get_all_DBEntries('RefSeq_gene_name')};
                $tr->{_gene_symbol} = $entry->display_id if $entry;
            }

            # cache it on the gene object too
            $tr->{_gene}->{_symbol} = $tr->{_gene_symbol};
            $tr->{_gene}->{_symbol_source} = $tr->{_gene_symbol_source};
        }
    }

    # CCDS ('-' when absent)
    my @entries = grep {$_->database eq 'CCDS'} @{$tr->get_all_DBEntries};
    $tr->{_ccds} = $entries[0]->display_id if scalar @entries;
    $tr->{_ccds} ||= '-';

    # refseq mRNA IDs, comma-joined ('-' when absent)
    @entries = grep {$_->database eq 'RefSeq_mRNA'} @{$tr->get_all_DBEntries};
    if(scalar @entries) {
        $tr->{_refseq} = join ",", map {$_->display_id} @entries;
    }
    else {
        $tr->{_refseq} = '-';
    }

    # protein stable ID ('-' for non-coding)
    $tr->{_protein} = $tr->translation ? $tr->translation->stable_id : '-';

    return $tr;
}
# Compose the path of a per-region cache dump file:
# <dir>/<chr>/<region>[_<type>].gz. $type defaults to 'transcript', which
# gets no suffix. Creates the chromosome directory when writing the cache.
sub get_dump_file_name {
    my ($config, $chr, $region, $type) = @_;

    $type ||= 'transcript';

    # transcript dumps carry no suffix; everything else gets "_<type>"
    my $suffix = $type eq 'transcript' ? '' : '_'.$type;

    my $dir = $config->{dir}.'/'.$chr;
    my $dump_file = $dir.'/'.$region.$suffix.'.gz';

    # make directory if it doesn't exist (write mode only)
    mkpath($dir) if !(-e $dir) && defined($config->{write_cache});

    return $dump_file;
}
# dumps out transcript cache to file
#
# Serialises the transcript cache for one chromosome region to a gzipped
# Storable file on disk, stripping adaptors and live DB handles first.
#
# Fix: the close of the gzip output pipe is now checked -- buffered write
# errors and gzip failures previously went unnoticed, leaving a truncated
# cache file behind silently.
sub dump_transcript_cache {
    my $config = shift;
    my $tr_cache = shift;
    my $chr = shift;
    my $region = shift;

    debug("Dumping cached transcript data") unless defined($config->{quiet});

    # clean the slice adaptor before storing
    clean_slice_adaptor($config);

    strip_transcript_cache($config, $tr_cache);

    # drop live DB handles so they are not serialised
    $config->{reg}->disconnect_all;
    delete $config->{sa}->{dbc}->{_sql_helper};

    my $dump_file = get_dump_file_name($config, $chr, $region, 'transcript');

    debug("Writing to $dump_file") unless defined($config->{quiet});

    # storable, piped through gzip; close must be checked so that buffered
    # write or gzip failures are reported rather than silently ignored
    open my $fh, "| gzip -9 -c > ".$dump_file or die "ERROR: Could not write to dump file $dump_file: $!";
    nstore_fd($tr_cache, $fh);
    close $fh or die "ERROR: Failed writing dump file $dump_file: $!";
}
#sub dump_transcript_cache_tabix {
# my $config = shift;
# my $tr_cache = shift;
# my $chr = shift;
# my $region = shift;
#
# debug("Dumping cached transcript data") unless defined($config->{quiet});
#
# # clean the slice adaptor before storing
# clean_slice_adaptor($config);
#
# strip_transcript_cache($config, $tr_cache);
#
# $config->{reg}->disconnect_all;
#
# my $dir = $config->{dir}.'/'.$chr;
# my $dump_file = $dir.'/'.($region || "dump").'_tabix.gz';
#
# # make directory if it doesn't exist
# if(!(-e $dir)) {
# mkpath($dir);
# }
#
# debug("Writing to $dump_file") unless defined($config->{quiet});
#
# use Storable qw(nfreeze);
# use MIME::Base64 qw(encode_base64);
# #open NEW, "| bgzip -c > ".$dump_file or die "ERROR: Could not write to dump file $dump_file";
# #
# #foreach my $tr(sort {$a->start <=> $b->start} @{$tr_cache->{$chr}}) {
# # print NEW join "\t", (
# # $chr,
# # $tr->start,
# # $tr->end,
# # encode_base64(freeze($tr), "")
# # );
# # print NEW "\n";
# #}
# #close NEW;
# #
# ## tabix it
# #my $output = `tabix -s 1 -b 2 -e 3 -f $dump_file 2>&1`;
# #die("ERROR: Failed during tabix indexing\n$output\n") if $output;
# open NEW, "| gzip -9 -c > ".$dump_file or die "ERROR: Could not write to dump file $dump_file";
#
# foreach my $tr(sort {$a->start <=> $b->start} @{$tr_cache->{$chr}}) {
# print NEW join "\t", (
# $chr,
# $tr->start,
# $tr->end,
# encode_base64(freeze($tr), "")
# );
# print NEW "\n";
# }
# close NEW;
#}
# loads in dumped transcript cache to memory
#
# Reads the gzipped Storable dump for one chromosome region and reattaches
# runtime-only state that was stripped before serialisation: translation
# adaptor, the (weakened) translation->transcript back-link, slice adaptors
# and per-exon slices. Returns undef when the dump file is missing or
# cannot be opened.
sub load_dumped_transcript_cache {
    my $config = shift;
    my $chr = shift;
    my $region = shift;

    my $dump_file = get_dump_file_name($config, $chr, $region, 'transcript');

    return undef unless -e $dump_file;

    debug("Reading cached transcript data for chromosome $chr".(defined $region ? "\:$region" : "")." from dumped file") unless defined($config->{quiet});

    open my $fh, $config->{compress}." ".$dump_file." |" or return undef;
    my $tr_cache;
    $tr_cache = fd_retrieve($fh);
    close $fh;

    # reattach adaptors
    foreach my $t(@{$tr_cache->{$chr}}) {
        if(defined($t->{translation})) {
            $t->{translation}->{adaptor} = $config->{tra} if defined $config->{tra};

            # restore circular link, weakened to avoid a reference leak
            $t->{translation}->{transcript} = $t;
            weaken($t->{translation}->{transcript});
        }

        $t->{slice}->{adaptor} = $config->{sa};

        # exons lost their slice during stripping; share the transcript's
        $_->{slice} ||= $t->{slice} for @{$t->{_trans_exon_array}};
    }

    return $tr_cache;
}
#sub load_dumped_transcript_cache_tabix {
# my $config = shift;
# my $chr = shift;
# my $region = shift;
#
# my $dir = $config->{dir}.'/'.$chr;
# my $dump_file = $dir.'/all_trs.gz';
#
# #print STDERR "Reading from $dump_file\n";
#
# return undef unless -e $dump_file;
#
# debug("Reading cached transcript data for chromosome $chr".(defined $region ? "\:$region" : "")." from dumped file") unless defined($config->{quiet});
#
# my $tr_cache;
#
# use MIME::Base64 qw(decode_base64);
# use Storable qw(thaw);
#
# $DB::single = 1;
#
# my ($s, $e) = split /\-/, $region;
# #my @regions = grep {overlap($s, $e, (split /\-/, $_))} @{$trim_regions->{$chr}};
# my $regions = "";
# $regions .= " $chr\:$region";
#
# #print STDERR "tabix $dump_file $regions |\n";
# open IN, "tabix $dump_file $regions |";
# #open IN, "gzip -dc $dump_file |";
# while(<IN>) {
# chomp;
# my ($chr, $start, $end, $blob) = split /\t/, $_;
# #next unless grep {overlap($start, $end, (split /\-/, $_))} @regions;
# my $tr = thaw(decode_base64($blob));
# push @{$tr_cache->{$chr}}, $tr;
# }
# close IN;
#
# # reattach adaptors
# foreach my $t(@{$tr_cache->{$chr}}) {
# if(defined($t->{translation})) {
# $t->{translation}->{adaptor} = $config->{tra} if defined $t->{translation}->{adaptor};
# $t->{translation}->{transcript} = $t;
# weaken($t->{translation}->{transcript});
# }
#
# $t->{slice}->{adaptor} = $config->{sa};
# }
#
# # add empty array ref so code doesn't try and fetch from DB too
# $tr_cache->{$chr} ||= [];
#
# return $tr_cache;
#}
# Remove adaptors and other db-tied baggage from every transcript (and its
# exons) in the cache so the structure can be serialised with Storable.
sub strip_transcript_cache {
    my ($config, $cache) = @_;

    for my $chr(keys %$cache) {
        for my $tr(@{$cache->{$chr}}) {

            # exons: drop the slice adaptor, then the db-linked fields
            # (including the slice itself -- restored on load)
            for my $exon(@{$tr->{_trans_exon_array}}) {
                delete $exon->{slice}->{adaptor};
                delete $exon->{$_} for qw(adaptor created_date modified_date is_current version is_constitutive _seq_cache dbID slice);
            }

            delete $tr->{adaptor};
            delete $tr->{slice}->{adaptor};
            delete $tr->{translation}->{adaptor} if defined($tr->{translation});
        }
    }
}
# Empty the slice adaptor's internal caches before it is serialised into
# the on-disk cache.
sub clean_slice_adaptor{
    my $config = shift;

    my $sa = $config->{sa};

    # clear the adaptor-level caches
    delete $sa->{asm_exc_cache};
    $sa->{sr_name_cache} = {};
    $sa->{sr_id_cache} = {};

    # and the db-level ones
    delete $sa->{db}->{seq_region_cache};
    delete $sa->{db}->{name_cache};
}
# dump adaptors to cache
#
# Serialises the adaptor-bearing $config to <dir>/adaptors.gz so later runs
# can restore DB adaptors without connecting to the database.
#
# Fix: the close of the gzip output pipe is now checked -- buffered write
# errors and gzip failures previously went unnoticed.
sub dump_adaptor_cache {
    my $config = shift;

    # drop live DB handles so they are not serialised
    $config->{reg}->disconnect_all;
    delete $config->{sa}->{dbc}->{_sql_helper};

    my $dir = $config->{dir};
    my $dump_file = $dir.'/adaptors.gz';

    # make directory if it doesn't exist
    if(!(-e $dir)) {
        mkpath($dir);
    }

    open my $fh, "| gzip -9 -c > ".$dump_file or die "ERROR: Could not write to dump file $dump_file: $!";
    nstore_fd($config, $fh);
    close $fh or die "ERROR: Failed writing dump file $dump_file: $!";
}
# load dumped adaptors
#
# Restores the DB adaptors serialised by dump_adaptor_cache() into the live
# $config. Returns 1 on success, undef when the dump is missing or
# unreadable.
sub load_dumped_adaptor_cache {
    my $config = shift;

    my $dir = $config->{dir};
    my $dump_file = $dir.'/adaptors.gz';

    return undef unless -e $dump_file;

    debug("Reading cached adaptor data") unless defined($config->{quiet});

    open my $fh, $config->{compress}." ".$dump_file." |" or return undef;
    my $cached_config;
    $cached_config = fd_retrieve($fh);
    close $fh;

    # copy just the adaptor slots across to the live config
    $config->{$_} = $cached_config->{$_} for qw(sa ga ta vfa svfa tva pfpma mca csa RegulatoryFeature_adaptor MotifFeature_adaptor);

    return 1;
}
# dumps cached variations to disk
#
# Writes the per-position variation cache for one chromosome region as a
# gzipped space-delimited text file. Empty strings encode the defaults
# (not failed, end == start, forward strand) that parse_variation()
# restores; the column set must stay in sync with get_variation_columns().
#
# Fix: replaced the bareword global filehandle DUMP with a lexical handle
# and a checked close, consistent with the other cache dumpers in this file.
sub dump_variation_cache {
    my $config = shift;
    my $v_cache = shift;
    my $chr = shift;
    my $region = shift;

    my $dump_file = get_dump_file_name($config, $chr, $region, 'var');

    open my $fh, "| gzip -9 -c > ".$dump_file or die "ERROR: Could not write to adaptor dump file $dump_file";

    foreach my $pos(keys %{$v_cache->{$chr}}) {
        foreach my $v(@{$v_cache->{$chr}->{$pos}}) {
            # empty string encodes the common default for each column
            my @tmp = (
                $v->{variation_name},
                $v->{failed} == 0 ? '' : $v->{failed},
                $v->{start},
                $v->{end} == $v->{start} ? '' : $v->{end},
                $v->{allele_string},
                $v->{strand} == 1 ? '' : $v->{strand},
                $v->{minor_allele} || '',
                defined($v->{minor_allele_freq}) ? sprintf("%.4f", $v->{minor_allele_freq}) : '',
            );

            # optional columns, mirrored in get_variation_columns()
            if(have_clin_sig($config) && defined($config->{clin_sig})) {
                push @tmp, $config->{clin_sig}->{$v->{variation_name}} || '';
            }

            if(have_pubmed($config) && defined($config->{pubmed})) {
                push @tmp, $config->{pubmed}->{$v->{variation_name}} || '';
            }

            if(defined($config->{freqs})) {
                push @tmp, $config->{'freqs'}->{$v->{variation_name}} || '';
            }

            print $fh join(" ", @tmp);
            print $fh "\n";
        }
    }

    close $fh or die "ERROR: Failed writing dump file $dump_file";
}
# loads dumped variation cache
#
# Reads a gzipped variation cache dump back into a chr => start => [vars]
# structure via parse_variation(). Returns undef when the dump file is
# missing or cannot be opened.
#
# Fix: replaced the bareword global filehandle DUMP with a lexical handle,
# consistent with the other cache readers in this file (the global handle
# was shared process-wide and clobberable by any other DUMP user).
sub load_dumped_variation_cache {
    my $config = shift;
    my $chr = shift;
    my $region = shift;

    my $dump_file = get_dump_file_name($config, $chr, $region, 'var');

    return undef unless -e $dump_file;

    open my $fh, $config->{compress}." ".$dump_file." |" or return undef;

    my $v_cache;

    while(<$fh>) {
        chomp;
        my $v = parse_variation($config, $_);
        push @{$v_cache->{$chr}->{$v->{start}}}, $v;
    }

    close $fh;

    return $v_cache;
}
# Turn one line of a variation cache dump back into a hashref, mapping the
# columns from get_variation_columns() and restoring the defaults that
# dump_variation_cache() squeezed out as empty strings.
sub parse_variation {
    my ($config, $line) = @_;

    my @cols = @{get_variation_columns($config)};
    my @data = split / |\t/, $line;

    # assumption fix for old cache files carrying extra frequency columns
    push @cols, qw(AFR AMR ASN EUR) if scalar @data > scalar @cols;

    # '.' encodes undef
    my %v;
    for my $idx(0..$#data) {
        $v{$cols[$idx]} = $data[$idx] eq '.' ? undef : $data[$idx];
    }

    # restore squeezed-out defaults
    $v{failed} ||= 0;
    $v{end} ||= $v{start};
    $v{strand} ||= 1;

    # hack for odd frequency data
    for my $pop(qw(AFR AMR ASN EUR)) {
        $v{$pop} = 1 - $v{$pop} if defined($v{$pop}) && $v{$pop} =~ /\d+/ && $v{$pop} > 0.5;
    }

    return \%v;
}
# gets variation cache columns
#
# Lazily builds (and memoises on $config) the ordered column list used by
# the variation cache dump format: the base @VAR_CACHE_COLS plus optional
# clin_sig, pubmed and frequency-population columns.
#
# Fix: the original stored \@VAR_CACHE_COLS and then pushed onto it, which
# mutated the shared package global -- repeated calls (or other consumers
# of @VAR_CACHE_COLS) would see the extra columns appended permanently.
# The global is now copied before the optional columns are added.
sub get_variation_columns {
    my $config = shift;

    if(!defined($config->{cache_variation_cols})) {
        my @cols = @VAR_CACHE_COLS;
        push @cols, 'clin_sig' if have_clin_sig($config) && defined($config->{clin_sig});
        push @cols, 'pubmed' if have_pubmed($config) && defined($config->{pubmed});
        push @cols, @{$config->{freq_file_pops}} if defined($config->{freq_file_pops});
        $config->{cache_variation_cols} = \@cols;
    }

    return $config->{cache_variation_cols};
}
# caches regulatory features
#
# For every region of every chromosome in $include_regions, fetches each
# @REG_FEAT_TYPES feature type, optionally annotates cell types, transfers
# features onto the chromosome slice and strips them via clean_reg_feat().
# Returns a chr => type => [features] cache hashref.
#
# Fix: the per-chromosome debug message said "Caching transcripts" -- a
# copy-paste from cache_transcripts(); it now correctly reports regulatory
# features.
sub cache_reg_feats {
    my $config = shift;
    my $include_regions = shift;

    my $rf_cache;
    my $i;

    debug("Caching regulatory features") unless defined($config->{quiet});

    foreach my $chr(keys %$include_regions) {
        my $slice = get_slice($config, $chr, undef, 1);

        next unless defined $slice;

        # prefetch some things
        $slice->is_circular;

        # no regions? Spoof bins spanning the whole slice
        if(!scalar @{$include_regions->{$chr}}) {
            my $start = 1;
            my $end = $config->{cache_region_size};

            while($start < $slice->end) {
                push @{$include_regions->{$chr}}, $start.'-'.$end;
                $start += $config->{cache_region_size};
                $end += $config->{cache_region_size};
            }
        }

        my $region_count;

        if(scalar keys %$include_regions == 1) {
            my ($chr) = keys %$include_regions;
            $region_count = scalar @{$include_regions->{$chr}};
            debug("Caching regulatory features for chromosome $chr") unless defined($config->{quiet});
        }

        foreach my $region(@{$include_regions->{$chr}}) {
            progress($config, $i++, $region_count || $config->{region_count});

            my ($s, $e) = split /\-/, $region;

            # sanity check start and end
            $s = 1 if $s < 1;
            $e = $slice->end if $e > $slice->end;

            # get sub-slice
            my $sub_slice = $slice->sub_Slice($s, $e);
            next unless defined($sub_slice);

            $sub_slice->{coord_system}->{adaptor} = $config->{csa};

            foreach my $type(@REG_FEAT_TYPES) {
                my $features = $config->{$type.'_adaptor'}->fetch_all_by_Slice($sub_slice);
                next unless defined($features);

                # cell types
                if(defined($config->{cell_type}) && scalar(@{$config->{cell_type}})) {
                    foreach my $rf(@$features) {
                        my %cl;

                        # get cell type by fetching all from stable ID
                        if($type eq 'RegulatoryFeature') {
                            %cl = map {
                                $_->feature_set->cell_type->name => $_->feature_type->name
                            } @{$rf->adaptor->fetch_all_by_stable_ID($rf->stable_id)};
                        }

                        # get cell type by fetching regfeats that contain this MotifFeature
                        elsif($type eq 'MotifFeature') {
                            %cl = map {
                                $_->feature_set->cell_type->name => $_->feature_type->name
                            } @{$config->{'RegulatoryFeature_adaptor'}->fetch_all_by_attribute_feature($rf)};
                        }

                        $rf->{cell_types} = \%cl;
                    }
                }

                # strip and transfer features onto the chromosome slice
                push @{$rf_cache->{$chr}->{$type}},
                    map { clean_reg_feat($_) }
                    map { $_->transfer($slice) }
                    @{$features};
            }
        }

        # delete reference to slice adaptor before we write to cache
        delete $slice->{adaptor};
    }

    end_progress($config);

    return $rf_cache;
}
# Slim a regulatory/motif feature down for caching: drop db-linked fields,
# and for features with a binding matrix pre-compute the sequence while the
# feature can still reach its slice. Returns the (mutated) feature.
sub clean_reg_feat {
    my $rf = shift;

    delete $rf->{$_} for qw(adaptor binary_string bound_start bound_end attribute_cache feature_type feature_set analysis set);

    if(defined($rf->{binding_matrix})) {
        # cache the seq before the supporting objects are stripped
        $rf->{_variation_effect_feature_cache}->{seq} = $rf->seq;

        delete $rf->{binding_matrix}->{$_} for qw(adaptor feature_type analysis dbID);
    }

    return $rf;
}
# dumps out reg feat cache to file
#
# Serialises a reg feat cache for one chromosome region to a gzipped
# Storable file, detaching DB handles and coord system adaptors first.
#
# Fix: the close of the gzip output pipe is now checked -- buffered write
# errors and gzip failures previously went unnoticed.
sub dump_reg_feat_cache {
    my $config = shift;
    my $rf_cache = shift;
    my $chr = shift;
    my $region = shift;

    debug("Dumping cached reg feat data for $chr:$region") unless defined($config->{quiet});

    # clean the slice adaptor before storing
    clean_slice_adaptor($config);

    $config->{reg}->disconnect_all;
    delete $config->{sa}->{dbc}->{_sql_helper};

    # coord system adaptors must not be serialised
    foreach my $chr(keys %{$rf_cache}) {
        foreach my $type(keys %{$rf_cache->{$chr}}) {
            delete $_->{slice}->{coord_system}->{adaptor} for @{$rf_cache->{$chr}->{$type}};
        }
    }

    my $dump_file = get_dump_file_name($config, $chr, $region, 'reg');

    debug("Writing to $dump_file") unless defined($config->{quiet});

    # storable
    open my $fh, "| gzip -9 -c > ".$dump_file or die "ERROR: Could not write to dump file $dump_file: $!";
    nstore_fd($rf_cache, $fh);
    close $fh or die "ERROR: Failed writing dump file $dump_file: $!";
}
#sub dump_reg_feat_cache_tabix {
# my $config = shift;
# my $rf_cache = shift;
# my $chr = shift;
# my $region = shift;
#
# debug("Dumping cached reg feat data") unless defined($config->{quiet});
#
# # clean the slice adaptor before storing
# clean_slice_adaptor($config);
#
# $config->{reg}->disconnect_all;
# delete $config->{sa}->{dbc}->{_sql_helper};
#
# $config->{reg}->disconnect_all;
#
# my $dump_file = get_dump_file_name($config, $chr, $region, 'reg');
#
# debug("Writing to $dump_file") unless defined($config->{quiet});
#
# use Storable qw(nfreeze);
# use MIME::Base64 qw(encode_base64);
# open NEW, "| gzip -9 -c > ".$dump_file or die "ERROR: Could not write to dump file $dump_file";
#
# foreach my $type(keys %{$rf_cache->{$chr}}) {
# foreach my $rf(sort {$a->start <=> $b->start} @{$rf_cache->{$chr}->{$type}}) {
# print NEW join "\t", (
# $chr,
# $rf->start,
# $rf->end,
# $type,
# encode_base64(freeze($rf), "")
# );
# print NEW "\n";
# }
# }
# close NEW;
#}
# Load a gzipped Storable reg feat dump for one chromosome region back into
# memory. Returns undef when the dump file is absent or cannot be opened.
sub load_dumped_reg_feat_cache {
    my ($config, $chr, $region) = @_;

    my $dump_file = get_dump_file_name($config, $chr, $region, 'reg');

    return undef unless -e $dump_file;

    debug("Reading cached reg feat data for chromosome $chr".(defined $region ? "\:$region" : "")." from dumped file") unless defined($config->{quiet});

    open my $fh, $config->{compress}." ".$dump_file." |" or return undef;
    my $rf_cache = fd_retrieve($fh);
    close $fh;

    return $rf_cache;
}
#sub load_dumped_reg_feat_cache_tabix {
# my $config = shift;
# my $chr = shift;
# my $region = shift;
# my $trim_regions = shift;
#
# my $dump_file = get_dump_file_name($config, $chr, $region, 'reg');
#
# #print STDERR "Reading from $dump_file\n";
#
# return undef unless -e $dump_file;
#
# debug("Reading cached reg feat data for chromosome $chr".(defined $region ? "\:$region" : "")." from dumped file") unless defined($config->{quiet});
#
# my $rf_cache;
#
# use MIME::Base64 qw(decode_base64);
# use Storable qw(thaw);
#
# my ($s, $e) = split /\-/, $region;
# my @regions = grep {overlap($s, $e, (split /\-/, $_))} @{$trim_regions->{$chr}};
# my $regions = "";
# $regions .= " $chr\:$_" for @regions;
#
# #print STDERR "tabix $dump_file $regions |\n";
# #open IN, "tabix $dump_file $regions |";
# open IN, "gzip -dc $dump_file |";
# while(<IN>) {
# my ($chr, $start, $end, $type, $blob) = split /\t/, $_;
# next unless grep {overlap($start, $end, (split /\-/, $_))} @regions;
# my $rf = thaw(decode_base64($blob));
# push @{$rf_cache->{$chr}->{$type}}, $rf;
# }
# close IN;
#
# $rf_cache->{$chr}->{$_} ||= [] for @REG_FEAT_TYPES;
#
# return $rf_cache;
#}
# get custom annotation for a region
#
# For each configured custom annotation source, fetches features covering
# the chromosome's regions (via tabix, or bigWigToWig for bigwig files),
# parses them according to the source format (bed/vcf/gff/gtf/bigwig), and
# returns chr => source_name => start => feature_name => feature.
# Regions are batched ($max_regions_per_tabix at a time) and retried with a
# "chr"-prefixed chromosome name when the plain name yields nothing.
#
# NOTE(review): region strings and file paths are interpolated into shell
# commands (tabix / bigWigToWig / cat) unescaped -- presumably safe because
# regions are internally generated, but worth confirming for user-supplied
# --custom paths.
sub cache_custom_annotation {
    my $config = shift;
    my $include_regions = shift;
    my $chr = shift;

    #$include_regions = merge_regions($include_regions, $config, 1);

    my $annotation = {};

    # progress total: every source is scanned over every region
    my $total = scalar @{$config->{custom}} * scalar @{$include_regions->{$chr}};
    my $counter = 0;

    my $max_regions_per_tabix = 1000;

    debug("Caching custom annotations") unless defined($config->{quiet});

    foreach my $custom(@{$config->{custom}}) {

        my @regions = @{$include_regions->{$chr}};

        # process regions in batches
        while(scalar @regions) {
            my $got_features = 0;

            my @tmp_regions = splice @regions, 0, $max_regions_per_tabix;

            progress($config, $counter, $total);
            $counter += scalar @tmp_regions;

            # some files may have e.g. chr10 instead of 10
            for my $tmp_chr($chr, 'chr'.$chr) {

                # bigwig needs to use bigWigToWig utility
                if($custom->{format} eq 'bigwig') {
                    my @tmp_files;

                    die "\nERROR: Could not find temporary directory ".$config->{tmpdir}." - use --tmpdir [dir] to define an existing directory\n" unless -d $config->{tmpdir};

                    # extract each region to its own temporary WIG file
                    foreach my $region(@tmp_regions) {
                        my ($s, $e) = split /\-/, $region;
                        my $tmp_file = $config->{tmpdir}.'/vep_tmp_'.$$.'_'.$tmp_chr.'_'.$s.'_'.$e;
                        push @tmp_files, $tmp_file;
                        my $bigwig_file = $custom->{file};
                        my $bigwig_output = `bigWigToWig -chrom=$tmp_chr -start=$s -end=$e $bigwig_file $tmp_file 2>&1`;

                        die "\nERROR: Problem using bigwig file $bigwig_file\n$bigwig_output" if $bigwig_output;
                    }

                    # concatenate all the files together
                    my $string = join(" ", @tmp_files);
                    my $tmp_file = $config->{tmpdir}.'/vep_tmp_'.$$;
                    `cat $string > $tmp_file`;
                    open CUSTOM, $tmp_file
                        or die "\nERROR: Could not read from temporary WIG file $tmp_file\n";

                    # unlink smaller files
                    unlink($_) for @tmp_files;
                }

                # otherwise use tabix
                else {
                    # tabix can fetch multiple regions, so construct a string
                    my $region_string = join " ", map {$tmp_chr.':'.$_} @tmp_regions;

                    open CUSTOM, "tabix ".$custom->{file}." $region_string 2>&1 |"
                        or die "\nERROR: Could not open tabix pipe for ".$custom->{file}."\n";
                }

                # set an error flag so we don't have to check every line
                my $error_flag = 1;

                # create a hash for storing temporary params (used by bigWig)
                my %tmp_params = ();

                while(<CUSTOM>) {
                    chomp;

                    # check for errors (tabix writes them to the merged stream)
                    if($error_flag) {
                        die "\nERROR: Problem using annotation file ".$custom->{file}."\n$_\n" if /invalid pointer|tabix|get_intv/;
                        $error_flag = 0;
                    }

                    my @data = split /\t/, $_;

                    my $feature;

                    if($custom->{format} eq 'bed') {
                        $feature = {
                            chr   => $chr,
                            start => $data[1],
                            end   => $data[2],
                            name  => $data[3],
                        };
                    }

                    elsif($custom->{format} eq 'vcf') {
                        # reuse the main VCF parser, then pull requested INFO fields
                        my $tmp_vf = parse_vcf($config, $_)->[0];

                        $feature = {
                            chr   => $chr,
                            start => $tmp_vf->{start},
                            end   => $tmp_vf->{end},
                            name  => $tmp_vf->{variation_name},
                        };

                        foreach my $field(@{$custom->{fields}}) {
                            if(m/$field\=(.+?)(\;|\s|$)/) {
                                $feature->{$field} = $1;
                            }
                        }
                    }

                    elsif($custom->{format} eq 'gff' || $custom->{format} eq 'gtf') {

                        my $name;

                        # try and get a feature name from the attributes column
                        foreach my $attrib(split /\s*\;\s*/, $data[8]) {
                            my ($key, $value) = split /\=/, $attrib;
                            $name = $value if $key eq 'ID';
                        }

                        # fall back to type_seq:start-end
                        $name ||= $data[2]."_".$data[0].":".$data[3]."-".$data[4];

                        $feature = {
                            chr   => $chr,
                            start => $data[3],
                            end   => $data[4],
                            name  => $name,
                        };
                    }

                    elsif($custom->{format} eq 'bigwig') {

                        # header line from wiggle file: remember step params
                        if(/^(fixed|variable)Step|^\#bedGraph/i) {
                            my @split = split /\s+/;
                            $tmp_params{type} = shift @split;
                            $tmp_params{type} =~ s/^\#//g;

                            foreach my $pair(@split) {
                                my ($key, $value) = split /\=/, $pair;
                                $tmp_params{$key} = $value;
                            }

                            # default to span of 1
                            $tmp_params{span} ||= 1;
                        }

                        elsif(defined($tmp_params{type})) {
                            if($tmp_params{type} eq 'fixedStep') {
                                $feature = {
                                    chr   => $chr,
                                    start => $tmp_params{start},
                                    end   => ($tmp_params{start} + $tmp_params{span}) - 1,
                                    name  => $data[0],
                                };

                                # advance implicit position for the next line
                                $tmp_params{start} += $tmp_params{step};
                            }
                            elsif($tmp_params{type} eq 'variableStep') {
                                $feature = {
                                    chr   => $chr,
                                    start => $data[0],
                                    end   => ($data[0] + $tmp_params{span}) - 1,
                                    name  => $data[1]
                                };
                            }
                            elsif($tmp_params{type} eq 'bedGraph') {
                                # bedGraph coords are 0-based half-open
                                $feature = {
                                    chr   => $chr,
                                    start => $data[1] + 1,
                                    end   => $data[2],
                                    name  => $data[3]
                                };
                            }
                        }

                        else {
                            die("ERROR: Cannot parse line from bigWigtoWig output: \n$_\n");
                        }
                    }

                    if(defined($feature)) {
                        $got_features = 1;

                        # fall back to coordinate-style names when requested or missing
                        if(!defined($feature->{name}) || $custom->{coords}) {
                            $feature->{name} = $feature->{chr}.":".$feature->{start}."-".$feature->{end};
                        }

                        # add the feature to the cache
                        $annotation->{$chr}->{$custom->{name}}->{$feature->{start}}->{$feature->{name}} = $feature;
                    }
                }
                close CUSTOM;

                # unlink temporary wig files
                unlink($config->{tmpdir}.'/vep_tmp_'.$$) if $custom->{format} eq 'bigwig';

                # no need to fetch e.g. "chr21" features if just "21" worked
                last if $got_features;
            }
        }
    }

    end_progress($config);

    return $annotation;
}
# builds a full cache for this species
# Builds a complete on-disk VEP cache for the configured species.
# Iterates over the requested slices in cache_region_size chunks and dumps
# transcript (t), regulatory (r) and variation (v) data per chunk, as
# selected by $config->{build_parts}.
sub build_full_cache {
my $config = shift;
my @slices;
# Slice list: either every toplevel slice (plus assembly-exception
# alternates and, optionally, LRGs), or only the chromosomes named in the
# --build spec (comma-separated, with dash ranges, e.g. "1-5,X").
if($config->{build} =~ /all/i) {
@slices = @{$config->{sa}->fetch_all('toplevel')};
push @slices, map {$_->alternate_slice} map {@{$_->get_all_AssemblyExceptionFeatures}} @slices;
push @slices, @{$config->{sa}->fetch_all('lrg', undef, 1, undef, 1)} if defined($config->{lrg});
}
else {
foreach my $val(split /\,/, $config->{build}) {
my @nnn = split /\-/, $val;
# NOTE(review): "$nnn[0]..$nnn[-1]" uses Perl's magic range, which only
# behaves for numeric/incrementable names — confirm for exotic chr names.
foreach my $chr($nnn[0]..$nnn[-1]) {
my $slice = get_slice($config, $chr, undef, 1);
push @slices, $slice if defined($slice);
}
}
}
# check and load clin_sig
$config->{clin_sig} = get_clin_sig($config) if have_clin_sig($config);
# check and load pubmed
$config->{pubmed} = get_pubmed($config) if have_pubmed($config);
foreach my $slice(@slices) {
my $chr = $slice->seq_region_name;
# check for features, we don't want a load of effectively empty dirs
my $dbc = $config->{sa}->db->dbc;
my $sth = $dbc->prepare("SELECT COUNT(*) FROM transcript WHERE seq_region_id = ?");
$sth->execute($slice->get_seq_region_id);
my $count;
$sth->bind_columns(\$count);
$sth->fetch;
$sth->finish;
next unless $count > 0;
my $regions;
# for progress
my $region_count = int($slice->end / $config->{cache_region_size}) + 1;
my $counter = 0;
# initial region: aligned down to a cache_region_size boundary
my $start = 1 + ($config->{cache_region_size} * int($slice->start / $config->{cache_region_size}));
my $end = ($start - 1) + $config->{cache_region_size};
debug((defined($config->{rebuild}) ? "Rebuild" : "Creat")."ing cache for chromosome $chr") unless defined($config->{quiet});
while($start < $slice->end) {
progress($config, $counter++, $region_count);
# store quiet status so the per-region dumpers stay silent
my $quiet = $config->{quiet};
$config->{quiet} = 1;
# spoof regions
$regions->{$chr} = [$start.'-'.$end];
# store transcripts (either re-serialise an existing dump, or fetch fresh)
if($config->{build_parts} =~ /t/) {
my $tmp_cache = (defined($config->{rebuild}) ? load_dumped_transcript_cache($config, $chr, $start.'-'.$end) : cache_transcripts($config, $regions));
$tmp_cache->{$chr} ||= [];
#(defined($config->{tabix}) ? dump_transcript_cache_tabix($config, $tmp_cache, $chr, $start.'-'.$end) : dump_transcript_cache($config, $tmp_cache, $chr, $start.'-'.$end));
dump_transcript_cache($config, $tmp_cache, $chr, $start.'-'.$end);
undef $tmp_cache;
}
# store reg feats
if($config->{build_parts} =~ /r/ && defined($config->{regulatory})) {
my $rf_cache = cache_reg_feats($config, $regions);
$rf_cache->{$chr} ||= {};
dump_reg_feat_cache($config, $rf_cache, $chr, $start.'-'.$end);
#(defined($config->{tabix}) ? dump_reg_feat_cache_tabix($config, $rf_cache, $chr, $start.'-'.$end) : dump_reg_feat_cache($config, $rf_cache, $chr, $start.'-'.$end));
undef $rf_cache;
# this gets cleaned off but needs to be there for the next loop
$slice->{coord_system}->{adaptor} = $config->{csa};
}
# store variations
if($config->{build_parts} =~ /v/) {
my $variation_cache;
$variation_cache->{$chr} = get_variations_in_region($config, $chr, $start.'-'.$end);
$variation_cache->{$chr} ||= {};
dump_variation_cache($config, $variation_cache, $chr, $start.'-'.$end);
undef $variation_cache;
}
# restore quiet status
$config->{quiet} = $quiet;
# increment by cache_region_size to get next region
$start += $config->{cache_region_size};
$end += $config->{cache_region_size};
}
end_progress($config);
undef $regions;
}
# finally record what was built in <dir>/info.txt
write_cache_info($config);
}
# write an info file that defines what is in the cache
# Appends a description of what was just built to <cache dir>/info.txt:
# build parameters, cell types, SIFT/PolyPhen versions and the variation
# cache column order. Called once at the end of build_full_cache().
sub write_cache_info {
    my $config = shift;
    my $info_file = $config->{dir}.'/info.txt';

    # Three-arg open with a lexical filehandle (the original used a bareword
    # handle and interpolated the path into the mode string).
    open my $out, '>>', $info_file
        or die "ERROR: Could not write to cache info file $info_file\n";

    print $out "# CACHE UPDATED ".get_time()."\n";

    # Basic connection/build parameters; '-' marks an unset value.
    foreach my $param (qw(host port user build regulatory sift polyphen)) {
        print $out "$param\t".(defined $config->{$param} ? $config->{$param} : '-')."\n";
    }

    # cell types
    if(defined($config->{cell_type}) && scalar(@{$config->{cell_type}})) {
        my $cta = $config->{RegulatoryFeature_adaptor}->db->get_CellTypeAdaptor();
        print $out "cell_types\t".(join ",", map {$_->name} @{$cta->fetch_all});
        print $out "\n";
    }

    # sift/polyphen versions, read from the variation meta container
    foreach my $tool (qw(sift polyphen)) {
        if(defined($config->{$tool})) {
            my $var_mca = $config->{reg}->get_adaptor($config->{species}, 'variation', 'metacontainer');
            if(defined($var_mca)) {
                my $version = $var_mca->list_value_by_key($tool.'_version');
                print $out "$tool\_version\t".$version->[0]."\n" if defined($version) and scalar @$version;
            }
        }
    }

    # variation columns
    print $out "variation_cols\t".(join ",", @{get_variation_columns($config)});
    print $out "\n";

    # Check close so buffered write errors are not silently lost.
    close $out
        or die "ERROR: Could not close cache info file $info_file\n";
}
# reads in cache info file
# Reads <cache dir>/info.txt (as written by write_cache_info) into the
# config hash as cache_<param> entries. variation_cols values are split
# into an arrayref; a value of '-' means "unset" and is skipped.
# Returns 1 on success, 0 if the file cannot be opened.
sub read_cache_info {
    my $config = shift;
    my $info_file = $config->{dir}.'/info.txt';

    # Three-arg open with a lexical filehandle; the original two-arg
    # `open IN, $info_file` would honour a leading <, > or | in the path.
    open my $in, '<', $info_file or return 0;

    while (my $line = <$in>) {
        next if $line =~ /^#/;   # skip comment/header lines
        chomp $line;
        my ($param, $value) = split /\t/, $line;

        if($param =~ /variation_col/) {
            $config->{'cache_'.$param} = [split /\,/, $value];
        }
        else {
            # '-' is the write-side placeholder for "not set"
            $config->{'cache_'.$param} = $value unless defined $value && $value eq '-';
        }
    }

    close $in;
    return 1;
}
# format coords for printing
# Formats a (start, end) pair for display:
#   both defined, equal      -> "start"
#   both defined, ordered    -> "start-end" (swapped if start > end)
#   only one defined         -> "start-?" or "?-end"
#   neither defined          -> "-"
sub format_coords {
    my ($start, $end) = @_;

    # Guard clauses for the partially-defined cases.
    return '-'          unless defined $start || defined $end;
    return '?-' . $end  unless defined $start;
    return $start . '-?' unless defined $end;

    # Both defined: collapse equal coords, normalise ordering otherwise.
    return $start                 if $start == $end;
    return $end . '-' . $start    if $start > $end;
    return $start . '-' . $end;
}
# METHODS TO FIND CO-LOCATED / EXISTING VARIATIONS
##################################################
# compare a new vf to one from the cache / DB
# Compares a new VariationFeature against an existing (cached/DB) variant.
# Returns 1 if the new variant is novel, 0 if it matches the existing one.
# A coordinate match makes it non-novel; with --check_alleles set, any
# allele of the new variant that the existing one lacks makes it novel again.
sub is_var_novel {
    my ($config, $existing_var, $new_var) = @_;

    my $novel = 1;
    $novel = 0 if $existing_var->{start} == $new_var->start
               && $existing_var->{end}   == $new_var->end;

    if (defined $config->{check_alleles}) {
        # Index the existing variant's alleles for O(1) lookup.
        my %known_allele = map { $_ => 1 } split /\//, $existing_var->{allele_string};

        my $has_unseen = 0;
        foreach my $allele (split /\//, ($new_var->allele_string || "")) {
            # Bring the allele onto the existing variant's strand first.
            reverse_comp(\$allele) if $new_var->strand ne $existing_var->{strand};
            $has_unseen = 1 unless defined $known_allele{$allele};
        }
        $novel = 1 if $has_unseen;
    }

    return $novel;
}
# check frequencies of existing var against requested params
# Applies the user's frequency filter to an existing variant.
# Returns 1 to keep the variant, 0 to filter it out, according to
# --freq_pop / --freq_freq / --freq_gt_lt and --freq_filter
# (exclude: drop variants that pass; include: keep only those that pass).
# Side effect: populates $config->{filtered_freqs} with "pop:freq" strings.
sub check_frequencies {
my $config = shift;
my $var = shift;
my $var_name = $var->{variation_name};
my $freq_pop = $config->{freq_pop};
my $freq_freq = $config->{freq_freq};
my $freq_gt_lt = $config->{freq_gt_lt};
my $pass = 0;
my $checked_cache = 0;
delete $config->{filtered_freqs};
# if we can, check using cached frequencies as this is way quicker than
# going to the DB
if($freq_pop =~ /1kg|esp/i) {
my $freq;
# e.g. "1KG_EUR" -> sub-population "EUR"; "ALL" uses the overall MAF
my $sub_pop = uc((split /\_/, $freq_pop)[-1]);
$freq = $var->{minor_allele_freq} if $sub_pop =~ /all/i;
if(!defined($freq)) {
$freq = $var->{$sub_pop} if defined($var->{$sub_pop});
}
if(defined($freq) && $freq =~ /\d/) {
$pass = 1 if $freq >= $freq_freq and $freq_gt_lt eq 'gt';
$pass = 1 if $freq <= $freq_freq and $freq_gt_lt eq 'lt';
push @{$config->{filtered_freqs}}, $freq_pop.':'.$freq;
}
$checked_cache = 1;
}
# Slow path: consult the variation database.
if(defined($config->{va}) && $checked_cache == 0) {
# NOTE(review): fetch_by_name can return undef for unknown names, which
# would make the get_all_Alleles call below die — confirm inputs.
my $v = $config->{va}->fetch_by_name($var_name);
my $freq_pop_name = (split /\_/, $freq_pop)[-1];
$freq_pop_name = undef if $freq_pop_name =~ /1kg|hap|any/;
foreach my $a(@{$v->get_all_Alleles}) {
next unless defined $a->{population} || defined $a->{'_population_id'};
next unless defined $a->frequency;
# only minor-allele frequencies (<= 0.5) are considered
next if $a->frequency > 0.5;
my $pop_name = $a->population->name;
if($freq_pop =~ /1kg/) { next unless $pop_name =~ /^1000.+(low|phase).+/i; }
if($freq_pop =~ /hap/) { next unless $pop_name =~ /^CSHL-HAPMAP/i; }
if($freq_pop =~ /any/) { next unless $pop_name =~ /^(CSHL-HAPMAP)|(1000.+(low|phase).+)/i; }
if(defined $freq_pop_name) { next unless $pop_name =~ /$freq_pop_name/i; }
$pass = 1 if $a->frequency >= $freq_freq and $freq_gt_lt eq 'gt';
$pass = 1 if $a->frequency <= $freq_freq and $freq_gt_lt eq 'lt';
# colons are field separators in the output, so replace them
$pop_name =~ s/\:/\_/g;
push @{$config->{filtered_freqs}}, $pop_name.':'.$a->frequency;
#warn "Comparing allele ", $a->allele, " ", $a->frequency, " for $var_name in population ", $a->population->name, " PASS $pass";
}
}
return 0 if $config->{freq_filter} eq 'exclude' and $pass == 1;
return 0 if $config->{freq_filter} eq 'include' and $pass == 0;
return 1;
}
# gets all variations in a region
# Fetches all variation_features whose start lies within $region
# ("start-end") on chromosome $chr.
# Returns a hashref: seq_region_start => arrayref of variant hashes.
# Returns an empty hashref when no DB is available or the chromosome has
# no seq_region_id.
sub get_variations_in_region {
my $config = shift;
my $chr = shift;
my $region = shift;
my ($start, $end) = split /\-/, $region;
my %variations;
if(defined($config->{vfa}->db)) {
# Lazily build and memoise the seq_region name -> dbID lookup.
my $sr_cache = $config->{seq_region_cache};
if(!defined($sr_cache)) {
$sr_cache = cache_seq_region_ids($config);
$config->{seq_region_cache} = $sr_cache;
}
# no seq_region_id?
return {} unless defined($sr_cache) && defined($sr_cache->{$chr});
# Old schemas lack the MAF columns; select NULLs so the column count
# (and thus the positional binds below) stays constant.
my $maf_cols = have_maf_cols($config) ? 'vf.minor_allele, vf.minor_allele_freq' : 'NULL, NULL';
# NOTE(review): columns 2.. of this SELECT must stay in sync with
# @VAR_CACHE_COLS (defined elsewhere in this file) — bind_col maps them
# positionally below.
my $sth = $config->{vfa}->db->dbc->prepare(qq{
SELECT vf.variation_id, vf.variation_name, IF(fv.variation_id IS NULL, 0, 1), vf.seq_region_start, vf.seq_region_end, vf.allele_string, vf.seq_region_strand, $maf_cols
FROM variation_feature vf
LEFT JOIN failed_variation fv ON fv.variation_id = vf.variation_id
WHERE vf.seq_region_id = ?
AND vf.seq_region_start >= ?
AND vf.seq_region_start <= ?
});
$sth->execute($sr_cache->{$chr}, $start, $end);
my %v;
$v{$_} = undef for @VAR_CACHE_COLS;
my ($var_id, %vars_by_id);
# column 1 is the variation dbID; the rest fill the shared %v row buffer
$sth->bind_col(1, \$var_id);
if(have_maf_cols($config)) {
$sth->bind_col($_+2, \$v{$VAR_CACHE_COLS[$_]}) for (0..$#VAR_CACHE_COLS);
}
else {
# without MAF columns only the first five cache columns are bound
$sth->bind_col($_+2, \$v{$VAR_CACHE_COLS[$_]}) for (0..4);
}
while($sth->fetch) {
# snapshot the bound row buffer — the binds overwrite %v on each fetch
my %v_copy = %v;
# NOTE(review): allele_string could be NULL in the DB, which would make
# this substitution warn — confirm upstream guarantees a value.
$v_copy{allele_string} =~ s/\s+/\_/g;
push @{$variations{$v{start}}}, \%v_copy;
# store by var_id too to get stuff from variation table
$vars_by_id{$var_id} = \%v_copy;
}
$sth->finish();
# now get stuff from variation table
#if(scalar keys %vars_by_id) {
# my $max_size = 200;
# my @id_list = keys %vars_by_id;
#
# while(@id_list) {
# my @ids;
# if(@id_list > $max_size) {
# @ids = splice(@id_list, 0, $max_size);
# }
# else {
# @ids = splice(@id_list, 0);
# }
#
# my $id_str;
# if(@ids > 1) {
# $id_str = " IN (" . join(',', @ids). ")";
# }
# else {
# $id_str = " = ".$ids[0];
# }
#
# $sth = $config->{vfa}->db->dbc->prepare(qq{
# SELECT variation_id, ancestral_allele
# FROM variation
# WHERE variation_id $id_str
# });
#
# my $ancestral_allele;
# $sth->execute();
# $sth->bind_columns(\$var_id, \$ancestral_allele);
#
# while($sth->fetch) {
# $vars_by_id{$var_id}->{ancestral_allele} = $ancestral_allele;
# }
#
# $sth->finish();
# }
#}
}
return \%variations;
}
# Builds a seq_region name -> seq_region_id lookup for the whole
# variation database. Returns a hashref.
sub cache_seq_region_ids {
    my $config = shift;

    my %name_to_id;
    my $sth = $config->{vfa}->db->dbc->prepare(qq{
SELECT seq_region_id, name FROM seq_region
});
    $sth->execute();

    my ($sr_id, $sr_name);
    $sth->bind_columns(\$sr_id, \$sr_name);
    while ($sth->fetch()) {
        $name_to_id{$sr_name} = $sr_id;
    }
    $sth->finish;

    return \%name_to_id;
}
# Determines (once, then memoised in $config->{have_maf_cols}) whether the
# variation_feature table carries the minor-allele columns. Returns 1/0.
sub have_maf_cols {
    my $config = shift;

    unless (defined $config->{have_maf_cols}) {
        if (defined($config->{vfa}) && defined($config->{vfa}->db)) {
            my $sth = $config->{vfa}->db->dbc->prepare(qq{
DESCRIBE variation_feature
});
            $sth->execute();
            my @column_names = map { $_->[0] } @{ $sth->fetchall_arrayref };
            $sth->finish();
            # present iff a 'minor_allele' column exists
            $config->{have_maf_cols} = (grep { $_ eq 'minor_allele' } @column_names) ? 1 : 0;
        }
        else {
            # no DB connection -> assume no MAF columns
            $config->{have_maf_cols} = 0;
        }
    }

    return $config->{have_maf_cols};
}
# Returns the number of variations carrying a clinical_significance value
# (memoised in $config->{have_clin_sig}; truthy == data available).
# Always 0 during build tests or without a DB connection.
sub have_clin_sig {
    my $config = shift;

    return 0 if defined($config->{build_test});

    unless (defined $config->{have_clin_sig}) {
        if (defined($config->{vfa}) && defined($config->{vfa}->db)) {
            my $sth = $config->{vfa}->db->dbc->prepare(qq{
SELECT COUNT(*) FROM variation
WHERE clinical_significance IS NOT NULL
});
            $sth->execute;
            my $clin_sig_count;
            $sth->bind_columns(\$clin_sig_count);
            $sth->fetch();
            $sth->finish();
            $config->{have_clin_sig} = $clin_sig_count;
        }
        else {
            $config->{have_clin_sig} = 0;
        }
    }

    return $config->{have_clin_sig};
}
# Loads a variation-name -> clinical_significance lookup for every
# variation that has one. Returns a hashref.
sub get_clin_sig {
    my $config = shift;

    my $sth = $config->{vfa}->db->dbc->prepare(qq{
SELECT name, clinical_significance
FROM variation
WHERE clinical_significance IS NOT NULL
});
    $sth->execute;

    my ($var_name, $significance, %clin_sig_of);
    $sth->bind_columns(\$var_name, \$significance);
    while ($sth->fetch()) {
        $clin_sig_of{$var_name} = $significance;
    }
    $sth->finish();

    return \%clin_sig_of;
}
# Returns the number of rows in variation_citation (memoised in
# $config->{have_pubmed}; truthy == PubMed data available).
# Always 0 during build tests or without a DB connection.
sub have_pubmed {
    my $config = shift;

    return 0 if defined($config->{build_test});

    unless (defined $config->{have_pubmed}) {
        if (defined($config->{vfa}) && defined($config->{vfa}->db)) {
            my $sth = $config->{vfa}->db->dbc->prepare(qq{
SELECT COUNT(*) FROM variation_citation
});
            $sth->execute;
            my $citation_count;
            $sth->bind_columns(\$citation_count);
            $sth->fetch();
            $sth->finish();
            $config->{have_pubmed} = $citation_count;
        }
        else {
            $config->{have_pubmed} = 0;
        }
    }

    return $config->{have_pubmed};
}
# Loads a variation-name -> comma-separated PubMed ID lookup, aggregated
# per variation via GROUP_CONCAT. Returns a hashref.
sub get_pubmed {
    my $config = shift;

    my $sth = $config->{vfa}->db->dbc->prepare(qq{
SELECT v.name, GROUP_CONCAT(p.pmid)
FROM variation v, variation_citation c, publication p
WHERE v.variation_id = c.variation_id
AND c.publication_id = p.publication_id
AND p.pmid IS NOT NULL
GROUP BY v.variation_id
});
    $sth->execute;

    my ($var_name, $pmid_list, %pubmed_of);
    $sth->bind_columns(\$var_name, \$pmid_list);
    while ($sth->fetch()) {
        $pubmed_of{$var_name} = $pmid_list;
    }
    $sth->finish();

    return \%pubmed_of;
}
# Recursively folds %$source into %$target and returns $target.
# Array values are unioned (via merge_arrays), hash values merged
# recursively; for scalars, $source wins — unless $add is true and both
# sides look numeric, in which case they are summed.
sub merge_hashes {
    my ($target, $source, $add) = @_;

    foreach my $key (keys %$source) {
        # Key absent on the target side: just copy the reference/value over.
        if (!defined($target->{$key})) {
            $target->{$key} = $source->{$key};
            next;
        }

        my $ref_type = ref($target->{$key});
        if ($ref_type eq 'ARRAY') {
            $target->{$key} = merge_arrays($target->{$key}, $source->{$key});
        }
        elsif ($ref_type eq 'HASH') {
            $target->{$key} = merge_hashes($target->{$key}, $source->{$key}, $add);
        }
        elsif ($add && $target->{$key} =~ /^[0-9\.]+$/ && $source->{$key} =~ /^[0-9\.]+$/) {
            # numeric accumulate mode
            $target->{$key} = $target->{$key} + $source->{$key};
        }
        else {
            # plain overwrite
            $target->{$key} = $source->{$key};
        }
    }

    return $target;
}
# Returns an arrayref holding the union of the two input lists.
# Element order is unspecified (hash-key order), matching the original.
sub merge_arrays {
    my ($first, $second) = @_;

    my %union;
    @union{ @$first, @$second } = ();

    return [ keys %union ];
}
# DEBUG AND STATUS METHODS
##########################
# gets time
# Returns the current local time as "YYYY-MM-DD HH:MM:SS".
# Note: the original declared an empty prototype (sub get_time()), which is
# a misuse of prototypes (they alter parsing, not validation) — removed.
# sprintf replaces the manual leading-zero loop; output is identical.
sub get_time {
    my @t = localtime(time());
    return sprintf(
        '%04d-%02d-%02d %02d:%02d:%02d',
        $t[5] + 1900,   # year is offset from 1900
        $t[4] + 1,      # month is 0-based (Jan = 0)
        $t[3],          # day of month
        $t[2], $t[1], $t[0],   # hour, minute, second
    );
}
# prints debug output with time
# Prints a timestamped debug message to STDOUT. Joins all arguments into
# one string (or "No message" if none given) and guarantees exactly one
# trailing newline.
sub debug {
    my $message = @_ ? join('', @_) : "No message";
    my $suffix  = $message =~ /\n$/ ? "" : "\n";
    print get_time() . " - " . $message . $suffix;
}
# finds out memory usage
# Returns [rss, vsz] (in kB, as reported by ps) for the current process.
sub memory {
    my @mem;

    # List-form pipe open runs ps without a shell; the original two-arg
    # `open IN, "ps -o rss,vsz $$ |"` used a bareword handle, went through
    # the shell and never checked for failure.
    open my $ps, '-|', 'ps', '-o', 'rss,vsz', $$
        or die "ERROR: Could not run ps: $!\n";

    while (my $line = <$ps>) {
        next if $line =~ /rss/i;   # skip the header row
        chomp $line;
        @mem = split ' ', $line;   # awk-style whitespace split
    }
    close $ps;

    return \@mem;
}
# Returns the change in [rss, vsz] since the previous call (stored in
# $config->{memory}); on the first call, returns the absolute values.
# Side effect: updates $config->{memory} with the current reading.
sub mem_diff {
    my $config = shift;

    my $current = memory();
    my @deltas;

    if (defined $config->{memory}) {
        my $previous = $config->{memory};
        @deltas = map { $current->[$_] - $previous->[$_] } 0 .. $#{$previous};
    }
    else {
        # first sample: report absolute usage
        @deltas = @$current;
    }

    $config->{memory} = $current;
    return \@deltas;
}
# update or initiate progress bar
# Draws/refreshes an in-place terminal progress bar for step $i of $total.
# Suppressed under --quiet/--no_progress; redraws are throttled so the
# terminal is only written when the rendered bar would actually change.
sub progress {
my ($config, $i, $total) = @_;
return if defined($config->{quiet}) || defined($config->{no_progress});
# clamp so the bar never overshoots 100%
$i = $total if $i > $total;
my $width = $config->{terminal_width} || 60;
my $percent = int(($i/$total) * 100);
# -2 leaves room for the '[' and the '>'/'=' bar head
my $numblobs = int((($i/$total) * $width) - 2);
# this ensures we're not writing to the terminal too much
return if(defined($config->{prev_prog})) && $numblobs.'-'.$percent eq $config->{prev_prog};
$config->{prev_prog} = $numblobs.'-'.$percent;
#printf("\r%s of %s", $i, $total);
# \r rewinds to line start; bar is left-justified to $width, then ']' and
# a right-justified "[ NN% ]" field
printf("\r% -${width}s% 1s% 10s", '['.('=' x $numblobs).($numblobs == $width - 2 ? '=' : '>'), ']', "[ " . $percent . "% ]");
}
# end progress bar
# Completes the progress bar: forces it to 100%, moves to a fresh line and
# clears the throttling state. No-op under --quiet/--no_progress.
sub end_progress {
    my ($config) = @_;

    unless (defined $config->{quiet} or defined $config->{no_progress}) {
        progress($config, 1, 1);
        print "\n";
        delete $config->{prev_prog};
    }
    return;
}
1;
| dbolser-ebi/ensembl-variation | modules/Bio/EnsEMBL/Variation/Utils/VEP.pm | Perl | apache-2.0 | 191,590 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
package EnsEMBL::Web::TextSequence::Markup::Codons;
use strict;
use warnings;
use parent qw(EnsEMBL::Web::TextSequence::Markup);
# Applies codon markup to each rendered sequence: appends the codon's CSS
# class (default 'co') and, when titles are enabled, its label to the
# affected positions, and records which codon classes / the UTR key were
# used in $config->{key} for the legend.
sub markup {
    my ($self, $sequence, $markup, $config) = @_;

    # title_display is an invariant of the whole call
    my $titles_on = ($config->{'title_display'} || 'off') ne 'off';

    for my $idx (0 .. $#$markup) {
        my $data = $markup->[$idx];
        my $seq  = $sequence->[$idx]->legacy;

        for my $pos (sort { $a <=> $b } keys %{$data->{'codons'}}) {
            my $class = $data->{'codons'}{$pos}{'class'} || 'co';

            $seq->[$pos]{'class'} .= "$class ";
            if ($titles_on) {
                $seq->[$pos]{'title'} .= ($seq->[$pos]{'title'} ? "\n" : '')
                                       . $data->{'codons'}{$pos}{'label'};
            }

            # legend bookkeeping: 'cu' marks UTR, anything else a codon class
            if ($class eq 'cu') {
                $config->{'key'}{'other'}{'utr'} = 1;
            }
            else {
                $config->{'key'}{'codons'}{$class} = 1;
            }
        }
    }
    return;
}
1;
| Ensembl/ensembl-webcode | modules/EnsEMBL/Web/TextSequence/Markup/Codons.pm | Perl | apache-2.0 | 1,507 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
Bio::EnsEMBL::ProteinFeature
=head1 SYNOPSIS
my $feature = Bio::EnsEMBL::ProteinFeature->new(
-start => $start,
-end => $end,
-hstart => $hit_start,
-hend => $hit_end,
-hseqname => $hit_name
);
=head1 DESCRIPTION
ProteinFeature objects represent domains or other features of interest
on a peptide sequence.
=head1 METHODS
=cut
package Bio::EnsEMBL::ProteinFeature;
use strict;
use Bio::EnsEMBL::FeaturePair;
use Bio::EnsEMBL::BaseAlignFeature;
use Bio::EnsEMBL::Utils::Argument qw(rearrange);
use parent qw(Bio::EnsEMBL::BaseAlignFeature);
=head2 new

  Arg [IDESC]          : (optional) string An interpro description
  Arg [ILABEL]         : (optional) string An interpro label
  Arg [INTERPRO_AC]    : (optional) string An interpro accession
  Arg [TRANSLATION_ID] : (optional) integer A translation dbID
  Arg [EXTERNAL_DATA]  : (optional) string Externally attached data
  Arg [HDESCRIPTION]   : (optional) string A hit description
  Arg [CIGAR_STRING]   : (optional) string Alignment string
  Arg [ALIGN_TYPE]     : (optional) string Alignment string type (e.g. mdtag)
  Arg [...]            : named arguments to FeaturePair superclass
  Example    :
    $pf =
      Bio::EnsEMBL::ProteinFeature->new( -IDESC       => $idesc,
                                         -INTERPRO_AC => $iac,
                                         @fp_args );
  Description: Instantiates a Bio::EnsEMBL::ProteinFeature.
               When both CIGAR_STRING and ALIGN_TYPE are supplied the
               BaseAlignFeature constructor is used (it validates the
               alignment); otherwise construction is delegated straight to
               the FeaturePair grandparent, which skips that validation.
  Returntype : Bio::EnsEMBL::FeaturePair
  Exceptions : none
  Caller     : general
  Status     : Stable

=cut
sub new {
my ($proto, @args) = @_;
my $class = ref($proto) || $proto;
my $self;
my ($idesc, $ilabel, $interpro_ac, $translation_id, $external_data, $hit_description, $cigar_string, $align_type, $slice) = rearrange(['IDESC', 'ILABEL', 'INTERPRO_AC', 'TRANSLATION_ID', 'EXTERNAL_DATA', 'HDESCRIPTION', 'CIGAR_STRING', 'ALIGN_TYPE', 'SLICE'], @args);
# BaseAlignFeature expects cigar_line or features
if($cigar_string && $align_type){
$self = $class->SUPER::new(@args);
}else{
#call the grand parent directly (bypasses BaseAlignFeature validation)
$self = $class->Bio::EnsEMBL::FeaturePair::new(@args);
}
# the strand of protein features is always 0 (peptides have no strand)
$self->{'strand'} = 0;
$self->{'idesc'} = $idesc || '';
$self->{'ilabel'} = $ilabel || '';
$self->{'interpro_ac'} = $interpro_ac || '';
$self->{'translation_id'} = $translation_id || '';
$self->{'external_data'} = $external_data || '';
$self->{'hit_description'} = $hit_description || '';
$self->{'cigar_string'} = $cigar_string || '';
$self->{'align_type'} = $align_type;
return $self;
}
=head2 strand

  Arg [1]    : Ignored
  Description: Overrides Bio::EnsEMBL::Feature->strand so the strand can
               never be set; protein features always have strand 0.
  Returntype : int
  Status     : Stable

=cut

# read-only: any argument passed in is deliberately discarded
sub strand {
    my ($self) = @_;
    return $self->{'strand'};
}
=head2 idesc

  Arg [1]    : (optional) string The interpro description
  Example    : print $protein_feature->idesc();
  Description: Getter/Setter for the interpro description of this protein
               feature.
  Returntype : string
  Exceptions : none
  Caller     : general
  Status     : Stable

=cut

sub idesc {
    my ($self, @args) = @_;
    $self->{'idesc'} = $args[0] if @args;
    return $self->{'idesc'};
}
=head2 ilabel

  Arg [1]    : (optional) string The interpro label
  Example    : print $protein_feature->ilabel();
  Description: Getter/Setter for the interpro label of this protein
               feature.
  Returntype : string
  Exceptions : none
  Caller     : general
  Status     : Stable

=cut

sub ilabel {
    my ($self, @args) = @_;
    $self->{'ilabel'} = $args[0] if @args;
    return $self->{'ilabel'};
}
=head2 interpro_ac

  Arg [1]    : (optional) string The interpro accession
  Example    : print $protein_feature->interpro_ac();
  Description: Getter/Setter for the interpro accession of this protein
               feature.
  Returntype : string
  Exceptions : none
  Caller     : general
  Status     : Stable

=cut

sub interpro_ac {
    my ($self, @args) = @_;
    $self->{'interpro_ac'} = $args[0] if @args;
    return $self->{'interpro_ac'};
}
=head2 translation_id

  Arg [1]    : (optional) integer The dbID of the translation
  Example    : print $protein_feature->translation_id();
  Description: Getter/Setter for the translation dbID of this protein
               feature.
  Returntype : string
  Exceptions : none
  Caller     : general
  Status     : Stable

=cut

sub translation_id {
    my ($self, @args) = @_;
    $self->{'translation_id'} = $args[0] if @args;
    return $self->{'translation_id'};
}
# Getter/Setter for arbitrary externally attached data on this feature.
sub external_data {
    my ($self, @args) = @_;
    $self->{'external_data'} = $args[0] if @args;
    return $self->{'external_data'};
}
=head2 summary_as_hash

  Example     : $protein_feature_summary = $protein_feature->summary_as_hash();
  Description : Retrieves a textual summary of this Protein feature.
                Not inherited from Feature.
  Returns     : hashref of descriptive strings
  Status      : Intended for internal use

=cut

sub summary_as_hash {
    my $self = shift;

    # Assemble the summary directly as an anonymous hashref.
    return {
        'type'           => $self->analysis->db,
        'id'             => $self->display_id,
        'start'          => $self->start,
        'end'            => $self->end,
        'interpro'       => $self->interpro_ac,
        'description'    => $self->idesc,
        'hit_start'      => $self->hstart,
        'hit_end'        => $self->hend,
        'cigar_string'   => $self->cigar_string,
        'align_type'     => $self->align_type,
        'hseqname'       => $self->hseqname,
        'translation_id' => $self->translation_id,
    };
}
=head2 alignment_strings

  Arg [1]    : list of string $flags
  Example    : $pf->alignment_strings
  Description: Allows to rebuild the alignment string of both the query and target sequence
               using the sequence from translation object and
               MD Z String for mismatching positions. Regex : [0-9]+(([A-Z]|\^[A-Z]+)[0-9]+)* (Refer: SAM/BAM specification)
               eg: MD:Z:96^RHKTDSFVGLMGKRALNS0V14
  Returntype : array reference containing 2 strings
               the first corresponds to seq
               the second corresponds to hseq
  Exceptions : throws if align_type is not 'mdtag'
  Caller     : general
  Status     : Stable

=cut

sub alignment_strings {
    my $self = shift;

    # Fetch the peptide sequence for this feature's translation.
    my $transl_adaptor = $self->adaptor->db->get_TranslationAdaptor();
    my $transl_object  = $transl_adaptor->fetch_by_dbID($self->translation_id);
    my $seq;
    if (defined $transl_object && $transl_object->isa('Bio::EnsEMBL::Translation')) {
        $seq = $transl_object->transcript()->translate()->seq();
    }

    if ($self->align_type eq 'mdtag') {
        if (defined $seq && defined $self->cigar_string) {
            return $self->_mdz_alignment_string($seq, $self->cigar_string);
        }
        else {
            warn "sequence or cigar_line not found for " . $self->translation_id;
        }
    }
    else {
        # BUGFIX: throw() was called as a bare function, but
        # Bio::EnsEMBL::Utils::Exception is not imported in this file, so the
        # call would die with "Undefined subroutine". Use the inherited method
        # form, consistent with transform() below.
        $self->throw("alignment_strings method not implemented for " . $self->align_type);
    }
    return;
}
# Protein features live in peptide coordinates, so transforming them to a
# genomic coordinate system directly is not supported: always throws.
sub transform {
    my $self = shift;
    $self->throw("ProteinFeature cant be transformed directly as they are not on EnsEMBL coord system");
    return;
}
=head2 _hit_unit

  Arg [1]    : none
  Description: PRIVATE implementation of abstract superclass method. Returns
               3 as the 'unit' used for the hit sequence.
               NOTE(review): the original documentation claimed this returns
               1 while the code returns 3 — confirm which unit is intended
               for peptide-space hits.
  Returntype : int
  Exceptions : none
  Caller     : Bio::EnsEMBL::BaseAlignFeature
  Status     : Stable

=cut
sub _hit_unit {
return 3;
}
=head2 _query_unit

  Arg [1]    : none
  Description: PRIVATE implementation of abstract superclass method. Returns
               3 as the 'unit' used for the query sequence.
  Returntype : int
  Exceptions : none
  Caller     : Bio::EnsEMBL::BaseAlignFeature
  Status     : Stable

=cut
sub _query_unit {
return 3;
}
1;
| muffato/ensembl | modules/Bio/EnsEMBL/ProteinFeature.pm | Perl | apache-2.0 | 8,535 |
#! /usr/bin/env perl
# Copyright 2015-2016 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the OpenSSL license (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
use strict;
use warnings;
# Recognise VERBOSE and V which is common on other projects.
BEGIN {
$ENV{HARNESS_VERBOSE} = "yes" if $ENV{VERBOSE} || $ENV{V};
}
use File::Spec::Functions qw/catdir catfile curdir abs2rel rel2abs/;
use File::Basename;
use FindBin;
use lib "$FindBin::Bin/../util/perl";
use OpenSSL::Glob;
use Test::Harness qw/runtests $switches/;
# Resolve source/build trees from the environment (SRCTOP/BLDTOP, with TOP
# as the legacy fallback) and locate the recipes and helper libraries.
my $srctop = $ENV{SRCTOP} || $ENV{TOP};
my $bldtop = $ENV{BLDTOP} || $ENV{TOP};
my $recipesdir = catdir($srctop, "test", "recipes");
my $testlib = catdir($srctop, "test", "testlib");
my $utillib = catdir($srctop, "util");
# It seems that $switches is getting interpreted with 'eval' or something
# like that, and that we need to take care of backslashes or they will
# disappear along the way.
$testlib =~ s|\\|\\\\|g if $^O eq "MSWin32";
$utillib =~ s|\\|\\\\|g if $^O eq "MSWin32";
# Test::Harness provides the variable $switches to give it
# switches to be used when it calls our recipes.
$switches = "-w \"-I$testlib\" \"-I$utillib\"";
# With no arguments, run everything.
my @tests = ( "alltests" );
if (@ARGV) {
@tests = @ARGV;
}
# "list" anywhere in the arguments switches to printing test names only.
my $list_mode = scalar(grep /^list$/, @tests) != 0;
if (grep /^(alltests|list)$/, @tests) {
# Collect every recipe matching the NN-name.t convention.
@tests = grep {
basename($_) =~ /^[0-9][0-9]-[^\.]*\.t$/
} glob(catfile($recipesdir,"*.t"));
} else {
# Expand each requested bare name into its NN-name.t recipe file(s).
my @t = ();
foreach (@tests) {
push @t, grep {
basename($_) =~ /^[0-9][0-9]-[^\.]*\.t$/
} glob(catfile($recipesdir,"*-$_.t"));
}
@tests = @t;
}
if ($list_mode) {
# Strip the numeric prefix and .t suffix to recover the bare test names.
@tests = map { $_ = basename($_); $_ =~ s/^[0-9][0-9]-//; $_ =~ s/\.t$//;
$_ } @tests;
print join("\n", @tests), "\n";
} else {
# Run with paths relative to the current directory to keep output short.
@tests = map { abs2rel($_, rel2abs(curdir())); } @tests;
runtests(sort @tests);
}
| openweave/openweave-core | third_party/openssl/openssl/test/run_tests.pl | Perl | apache-2.0 | 2,047 |
#*******************************************************************************
# Copyright (c) 2006-2010 eBay Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#*******************************************************************************
use strict;
use warnings;

# Filters a JAXB-generated Java source file (named on the command line):
# strips @XmlType propOrder / @XmlElements annotation blocks, the first six
# header lines, javax.xml.bind imports and assorted generated-doc noise,
# printing the remainder to STDOUT.

my $file = shift @ARGV;

# Three-arg open with a lexical filehandle; the original two-arg
# `open(FILE, $file)` would treat a leading <, > or | in the name as a mode.
open my $fh, '<', $file or die "Can't open file: $file\n";

my $skip = 0;
while (<$fh>) {
    # Start skipping at the opening of an annotation block...
    if (/\@XmlType.*propOrder = {/) {
        $skip = 1;
        next;
    }
    if (/\@XmlElements.*{/) {
        $skip = 1;
        next;
    }
    # ...and keep skipping until its "})" terminator.
    if ($skip) {
        $skip = 0 if /}\)/;
        next;
    }
    next if $. <= 6;                    # drop the first six (header) lines
    next if /\* <p>The following/;      # generated javadoc boilerplate
    next if /<pre>/;
    next if /<\/pre>/;
    next if /import javax.xml.bind/;    # JAXB imports are no longer needed
    next if /\*\s+</;                   # javadoc lines with embedded markup
    next if /^\s*@/;                    # any remaining annotations
    print;
}
close $fh;
| vthangathurai/SOA-Runtime | soa-client/src/main/schemas/dejaxb.pl | Perl | apache-2.0 | 926 |
#!/usr/bin/env perl
=head1 NAME
list_input_ids.pl - generate region restriction info for rulemanager.pl
=head1 SYNOPSIS
list_input_ids.pl -dataset human [ -purpose <label> ] -set chr14-04,chr15-06
list_input_ids.pl -dataset human [ -purpose <label> ] chr14-04 chr15-06
=head1 DESCRIPTION
This script lists the input_ids of the seq_region name provided.
Multiple seq_regions may be listed with commas or separate words, or
by calling the script for each one piped through C<sort -u>.
=head2 Duplicates
Contigs are listed at most once per input seq_region.
=head1 OPTIONS
-dataset the species or dataset to connect to
-set|name the seq_region name(s) you want to prime
-cs the coordinate system associated with the seq_region name (default: chromosome)
-cs_version the version of the coord system you want (default: Otter)
-target_cs the target coordinate system you want slices in (default: contig)
-target_cs_version the version of the target coord system you want (optional)
-purpose Write input_ids to the input_id_purpose table.
If used, you must supply the purpose field.
Switches taking no argument,
-add-target-cs include in the output an extra column showing the target_cs
-verbose (unused so far)
-help Displays script documentation with PERLDOC
=head1 CONTACT
Michael Gray B<email> mg13@sanger.ac.uk
=cut
use strict;
use warnings;
use Bio::Otter::Lace::Defaults;
use Bio::EnsEMBL::Pipeline::Analysis;
use Bio::EnsEMBL::Pipeline::DBSQL::Finished::DBAdaptor;
use Bio::EnsEMBL::Pipeline::Utils::InputIDFactory;
use Bio::EnsEMBL::Pipeline::DBSQL::StateInfoContainer;
use Bio::EnsEMBL::Utils::Exception qw(throw warning);
# Entry point: parses command-line options, resolves the dataset via the
# Otter client, and lists input_ids for each requested seq_region.
# Returns a bitwise-OR of the per-slice failure codes (0 on full success).
sub main {
    my $dataset_name;
    my $cs = 'chromosome';
    my $cs_version = 'Otter';
    my $target_cs = 'contig';
    my $target_cs_version;
    my $add_target_cs;
    my $seqreg_name;
    my $verbose;
    my $purpose;
    my $help = 0;
    Bio::Otter::Lace::Defaults::do_getopt(
        'dataset=s'           => \$dataset_name,
        'set|name:s'          => \$seqreg_name,
        'cs:s'                => \$cs,
        'cs_version:s'        => \$cs_version,
        'target_cs:s'         => \$target_cs,
        'target_cs_version:s' => \$target_cs_version,
        'purpose=s'           => \$purpose,
        'add-target-cs!'      => \$add_target_cs,
        'verbose!'            => \$verbose,
        'h|help'              => \$help
    );
    if ($help) {
        # replace this process with perldoc on ourselves
        exec( 'perldoc', $0 );
    }
    if ( !$dataset_name ) {
        # BUGFIX: error message was missing its closing parenthesis
        throw("You must specify a dataset name (-dataset option)");
    }
    # seq_regions may come from trailing arguments and/or a comma-separated -set
    my @seqregion = @ARGV;
    push @seqregion, split ',', $seqreg_name if defined $seqreg_name;
    if (!@seqregion) {
        throw("You must specify a seq_region name (-set option or trailing arguments)");
    }
    # Client communicates with otter HTTP server
    my $cl = Bio::Otter::Lace::Defaults::make_Client();
    # DataSet interacts directly with an otter database
    my $ds = $cl->get_DataSet_by_name($dataset_name);
    my $pipe_dba = $ds->get_pipeline_DBAdaptor(1);
    my $exit = 0; # set bits on fail, per slice
    foreach my $seqreg_name (@seqregion) {
        $exit |= do_slice
          ($dataset_name, $pipe_dba,
           $cs, $cs_version, $seqreg_name,
           $purpose,
           $add_target_cs, $target_cs, $target_cs_version);
    }
    return $exit;
}
# Projects one named seq_region onto the target coordinate system and
# prints each distinct target slice name (optionally tagged with the
# target coord system, optionally recorded in input_id_purpose).
# Returns 0 on success, 4 if the seq_region could not be fetched.
sub do_slice {
my ($dataset_name, $pipe_dba,
$cs, $cs_version, $seqreg_name,
$purpose,
$add_target_cs, $target_cs, $target_cs_version) = @_;
my $slice_a = $pipe_dba->get_SliceAdaptor;
# This table is experimental, but proved itself useful to me.
# CREATE TABLE `input_id_purpose` (
# `iip_id` int(10) unsigned NOT NULL AUTO_INCREMENT COMMENT 'useful for taking range slices of some work',
# `input_id` varchar(100) NOT NULL,
# `purpose` varchar(20) NOT NULL COMMENT 'some programmer-given label',
# PRIMARY KEY (`iip_id`),
# KEY `purp_iid` (`purpose`,`input_id`)
# ) COMMENT='bring input_id list in, to ease mauling of jobs by SQL'
# NOTE(review): this statement is prepared even when -purpose is not given;
# it will fail on databases lacking the experimental table — confirm.
my $purph = $pipe_dba->prepare
(q{INSERT INTO input_id_purpose (input_id, purpose) VALUES (?,?)});
my $slice = $slice_a->fetch_by_region( $cs, $seqreg_name, undef, undef, undef, $cs_version );
if ( !$slice ) {
warn "No seq_region [$seqreg_name] found in dataset [$dataset_name] ".
"for coord_system [$cs] and cs_version [$cs_version] - skipping\n";
return 4;
}
my %seen; # key = slice name
my $target_projection = $slice->project($target_cs);
foreach my $ct (@$target_projection) {
my $target_slice = $ct->to_Slice();
# re-fetch so the printed name reflects the requested target_cs_version
my $target =
$slice_a->fetch_by_region( $target_cs,
$target_slice->seq_region_name,
undef, undef, undef, $target_cs_version );
# once per chromosome
next if $seen{ $target->name } ++;
if (defined $purpose) {
$purph->execute($target->name, $purpose);
}
if ($add_target_cs) {
print STDOUT $target->name(), "\t", uc $target_cs, "\n";
} else {
print STDOUT $target->name(), "\n";
}
}
return 0;
}
exit main();
| Ensembl/ensembl-pipeline | scripts/Finished/list_input_ids.pl | Perl | apache-2.0 | 5,484 |
#!/usr/bin/env perl
=head1 LICENSE
Copyright (c) 1999-2011 The European Bioinformatics Institute and
Genome Research Limited. All rights reserved.
This software is distributed under a modified Apache license.
For license details, please see
http://www.ensembl.org/info/about/code_licence.html
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <ensembl-dev@ebi.ac.uk>.
Questions may also be sent to the Ensembl help desk at
<helpdesk@ensembl.org>.
=head1 NAME
ensembl-efg convert_htilist_to_features.pl
=head1 SYNOPSIS
convert_hitlist_to_features.pl [options]
Options:
Mandatory
Optional
=head1 OPTIONS
=over 8
=item B<-name|n>
Mandatory: Instance name for the data set, this is the directory where the native data files are located
=item B<-format|f>
Mandatory: The format of the data files e.g. nimblegen
=over 8
=item B<-group|g>
Mandatory: The name of the experimental group
=over 8
=item B<-data_root>
The root data dir containing native data and pipeline data, default = $ENV{'EFG_DATA'}
=over 8
=item B<-fasta>
Flag to turn on dumping of all probe_features in fasta format for the remapping pipeline
=item B<-norm>
Normalisation method, deafult is the Bioconductor vsn package which performs generalised log ratio transformations
=item B<-species|s>
Species name for the array.
=item B<-debug>
Turns on and defines the verbosity of debugging output, 1-3, default = 0 = off
=over 8
=item B<-help>
Print a brief help message and exits.
=item B<-man>
Prints the manual page and exits.
=back
=head1 DESCRIPTION
B<This program> takes an input redundant probe name fasta file and generates an NR probe dbID fasta file.
=cut
#add @INC stuff here, or leave to .bashrc/.efg?
# Abort at compile time unless the EFG environment has been configured.
# NOTE(review): `system(". file")` sources the file in a CHILD shell, so
# any environment it sets does not propagate to this perl process; also,
# `-f "~/..."` does not expand `~` in Perl -- TODO confirm intent.
BEGIN{
if (! defined $ENV{'EFG_DATA'}) {
if (-f "~/src/ensembl-functgenomics/scripts/.efg") {
system (". ~/src/ensembl-functgenomics/scripts/.efg");
} else {
die ("This script requires the .efg file available from ensembl-functgenomics\n".
"Please source it before running this script\n");
}
}
}
#use Bio::EnsEMBL::Root; #Only used for rearrange see pdocs
#Roll own Root object to handle debug levels, logging, dumps etc.
use strict;
### MODULES ###
use Getopt::Long;
#use Carp;#For dev only? cluck not exported by default Remove this and implement in Helper
use Pod::Usage;
#POSIX? File stuff
use File::Path;
use Data::Dumper;
#use Bio::EnsEMBL::Registry;
use Bio::EnsEMBL::Utils::Exception qw( throw warning );
use Bio::EnsEMBL::Funcgen::Utils::EFGUtils qw (open_file run_system_cmd backup_file);
use Bio::EnsEMBL::Funcgen::DBSQL::DBAdaptor;
use Bio::EnsEMBL::Funcgen::FeatureType;
use Bio::EnsEMBL::Funcgen::FeatureSet;
use Bio::EnsEMBL::Funcgen::DataSet;
use Bio::EnsEMBL::Funcgen::PredictedFeature;
use Bio::EnsEMBL::Funcgen::Utils::Encode qw(get_encode_regions);
use Bio::EnsEMBL::Analysis;
use Bio::EnsEMBL::DBSQL::DBAdaptor;
use strict;
$| = 1; #autoflush
# Command-line state.  Note: $ftname and $file are declared but never
# used in this script.
my ($pass, $dbname, $help, $man, $ftname, $file, $species, @focus_set_names, @union_set_names);
my ($clobber, $write_features, $dump_features, $data_version, $host, $multiplex, $chr_name);
my (%focus_sets, %focus_names, %union_sets, %union_names, $slice_name, $no_focus, @slices);
#my $reg = "Bio::EnsEMBL::Registry";
my $out_dir ='.';
#my $data_dir = $ENV{'EFG_DATA'};
my $user = "ensadmin";
#my $host = 'localhost';
my $port = '3306';
#Definitely need some sort of Defs modules for each array?
$main::_debug_level = 0;
$main::_tee = 0;
#Use some sort of DBDefs for now, but need to integrate with Register, and have put SQL into (E)FGAdaptor?
#Use ArrayDefs.pm module for some of these, class, vendor, format?
#ArrayDefs would also contain paths to data and vendor specific parse methods?
GetOptions (
"pass|p=s" => \$pass,
"port=s" => \$port,
"host|h=s" => \$host,
"user|u=s" => \$user,
"dbname|d=s" => \$dbname,
"species=s" => \$species,
"help|?" => \$help,
"man|m" => \$man,
"focus_sets=s" => \@focus_set_names,
"no_focus" => \$no_focus,
"union_sets=s" => \@union_set_names,
"write_features" => \$write_features,
"dump_features" => \$dump_features,
"clobber" => \$clobber,
"data_version=s" => \$data_version,
"out_dir|o=s" => \$out_dir,
"multiplex" => \$multiplex,
"slice_name=s" => \$slice_name,
"chr_name=s" => \$chr_name,
);
# Allow comma-separated values and/or repeated options for the set lists.
@union_set_names = split/,/, join(',', @union_set_names);
@focus_set_names = split/,/, join(',', @focus_set_names);
pod2usage(1) if $help;
pod2usage(-exitstatus => 0, -verbose => 2) if $man;
# Validate mandatory parameters and option combinations.
if (! ($pass && $host && $dbname && $data_version && $species)) {
throw("Some mandatory parameters are not set, you must specify:\n".
"-pass\t-port\t-host\t-dbname\t-data_version\t-species");
}
run_system_cmd("mkdir -p $out_dir") if(! -d $out_dir);
if (! $no_focus) {
# Fixed message typo ("specificy" -> "specify").
throw('You must specify some focus feature sets to build on') if(! @focus_set_names);
} elsif (@focus_set_names) {
throw('You cannot specify -no_focus and -focus_sets');
}
# Bug fix: this message was single-quoted, so $data_version was printed
# literally instead of being interpolated; it also lacked a newline.
print "No union sets specified, using all feature set on $data_version\n" if (! @union_set_names);
if (! ($write_features || $dump_features)) {
print "No output type specified turning on dump_features\n";
$dump_features = 1;
}
# Connect read-only to the core (dna) database on the hard-coded
# 'ens-livemirror' host, then to the target funcgen database using the
# command-line credentials, attaching the core db as dnadb.
my $cdb = Bio::EnsEMBL::DBSQL::DBAdaptor->new(
#-host => 'ensembldb.ensembl.org',
-host => 'ens-livemirror',
-user => 'ensro',
-dbname => $species."_core_".$data_version,
-species => $species,
);
my $db = Bio::EnsEMBL::Funcgen::DBSQL::DBAdaptor->new(
-dbname => $dbname,
-port => $port,
-pass => $pass,
-host => $host,
-user => $user,
-dnadb => $cdb,
);
# Adaptors used throughout the build.
my $fset_a = $db->get_FeatureSetAdaptor();
my $dset_a = $db->get_DataSetAdaptor();
my $anal_a = $db->get_AnalysisAdaptor();
my $ft_adaptor = $db->get_FeatureTypeAdaptor();
my $slice_a = $db->get_SliceAdaptor();
my $pfa = $db->get_PredictedFeatureAdaptor();
# Analysis record under which all co-occurrence features are stored.
# NOTE(review): the literal string 'NULL' (not undef) is passed for the
# unused fields -- confirm the Analysis adaptor treats that as intended.
my $anal = Bio::EnsEMBL::Analysis->new(
-logic_name => 'Co-occurrence',
-db => 'NULL',
-db_version => 'NULL',
-db_file => 'NULL',
-program => 'NULL',
-program_version => 'NULL',
-program_file => 'NULL',
-gff_source => 'NULL',
-gff_feature => 'NULL',
-module => 'NULL',
-module_version => 'NULL',
-parameters => 'NULL',
-created => 'NULL',
-description => 'Co-occurrence of FeatureTypes',
-display_label => 'Co-occurrence',
-displayable => 1,
);
$anal_a->store($anal);
print "Running in multiplex mode\n" if $multiplex;
# Resolve the focus FeatureSets: either every named set must exist, or
# with -no_focus all FeatureSets in the database become focus sets.
# %focus_names maps feature_set dbID -> name for lookups during the sweep.
if (! $no_focus) {
foreach my $name (@focus_set_names) {
if (! ($focus_sets{$name} = $fset_a->fetch_by_name($name))) {
throw("One of your sepcified focus FeatureSets does not exist:\t$name");
} else {
$focus_names{$focus_sets{$name}->dbID()} = $name;
}
}
} else {
print "No focus, using all available feature sets\n";
foreach my $fset (@{$fset_a->fetch_all()}) {
push @focus_set_names, $fset->name();
$focus_sets{$fset->name()} = $fset;
$focus_names{$fset->dbID()} = $fset->name();
}
}
print "Focus FeatureSets are:\t".join("\t", sort values %focus_names)."\n";
# Resolve the union FeatureSets; the focus sets are always merged in.
if ($no_focus) {
print "Setting union sets to focus sets";
%union_sets = %focus_sets;
%union_names = %focus_names;
@union_set_names = @focus_set_names;
} else {
if (! @union_set_names) {
print "No union FeatureSets specified, using all FeatureSets\n";
foreach my $fset (@{$fset_a->fetch_all()}) {
#next if(grep($fset->name(), @focus_set_names));
push @union_set_names, $fset->name();
$union_sets{$fset->name()} = $fset;
$union_names{$fset->dbID()} = $fset->name();
}
} else {
foreach my $name (@union_set_names) {
if (! ($union_sets{$name} = $fset_a->fetch_by_name($name))){
throw("One of your sepcified union FeatureSets does not exist:\t$name");
} else {
$union_names{$union_sets{$name}->dbID()} = $name;
}
}
#print "now add focus sets to union sets\n";
foreach my $name (@focus_set_names) {
if (! exists $union_sets{$name}){
$union_names{$focus_sets{$name}->dbID()} = $name;
$union_sets{$name} = $focus_sets{$name};
}
}
}
}
print "Union FeatureSets are:\t".join("\t", sort values %union_names)."\n";
# Decide which slices to build on: the ENCODE region set, one named
# slice, one chromosome, or all toplevel slices.
throw("Cannot specifiy a slice name and a chr name:\t$slice_name\t$chr_name") if($slice_name && $chr_name);
if ($slice_name) {
if ($slice_name eq 'ENCODE') {
my $encode_regions = &get_encode_regions($cdb);
my @encode_region_names = sort keys %{$encode_regions};
map {push @slices, $slice_a->fetch_by_name($encode_regions->{$_});} @encode_region_names;
#push @slices, $slice_a->fetch_by_name($encode_regions->{ENr333});
#print scalar(@slices), "\n";
} else {
@slices = $slice_a->fetch_by_name($slice_name);
}
if (! @slices) {
throw("-slice name did not retrieve a valid slice:\t$slice_name\n");
}
} elsif ($chr_name) {
@slices = $slice_a->fetch_by_region('chromosome', $chr_name);
if (! @slices) {
throw("-chr_name did not retrieve a valid slice:\t$chr_name\n");
}
} else {
@slices = @{$slice_a->fetch_all('toplevel')};
}
print "Building co-occurrence features on slices:\n".join("\t", map $_->seq_region_name(), @slices)."\n";
# Sweep state: last seen start/end per feature-set dbID, the union
# features built for the current overlap cluster, plus FeatureSet/type
# caches, per-set dump file handles and counts.
my (%starts, %ends, %current_unions, %union_fsets, %union_ftypes, %file_handles, %union_cnts);
my (@start_ids, @names);
#my $current_pos = 1;
# Main build loop: sweep each slice's PredictedFeatures and, whenever a
# feature starts beyond the earliest currently-open end (%ends), emit
# co-occurrence ("union") features for every overlap between the expiring
# feature and the still-open ones.  %starts/%ends are keyed by feature_set
# dbID and hold the most recently seen feature per set.
# NOTE(review): the sweep assumes features are returned sorted by start --
# TODO confirm fetch_all_by_Slice guarantees this ordering.
foreach my $slice (@slices) {
my $transfer=0;
my $transfer_slice;
# get slice union features need to be transfered onto
if( $slice->start != 1 || $slice->strand != 1) {
$transfer=1;
$transfer_slice = $slice_a->fetch_by_region
(
$slice->coord_system->name(),
$slice->seq_region_name(),
undef, #start
undef, #end
undef, #strand
$slice->coord_system->version()
);
#print Dumper $transfer_slice;
}
foreach my $feature (@{$pfa->fetch_all_by_Slice($slice)}) {
#skip non union sets
next if(! exists $union_sets{$feature->feature_set->name()});
# Largest open end value, i.e. the first feature to expire.
my ($first_end) = sort{$b <=> $a} values %ends;
print join("\t",$feature->start(), $feature->end(),$feature->feature_set->dbID(),
$feature->display_label()), "\n";
if ((defined $first_end) && ($feature->start() > $first_end)) {
#build co-occurence feature for feature with first_end and
#flush start end values appropriately
print "***first_end ($first_end) < feature_start (".$feature->start().")\n";
#warn "first end is $first_end and next start is ".$feature->start()." ".$feature->feature_set->name();
foreach my $end_id (keys %ends) {
print "\tend_id:\t$end_id\t$starts{$end_id}\t$ends{$end_id}\t$union_names{$end_id}\n";
if ($ends{$end_id} == $first_end) { #find currently ending/ed features
#print "\tend{$end_id} $ends{$end_id} == first_end $first_end\n";
#now compare other start end vals of focus sets or all
#if end_id is an focus set or no_focus sets specified
if(scalar (keys %ends) > 1) {
#got over lap
#warn "got overlap with ended feature ".$union_names{$end_id}." $first_end";
@start_ids = sort { $starts{$a} <=> $starts{$b} } keys %starts;
#print "\tstart_ids by start: @start_ids \n";
if (exists $focus_names{$end_id}) {
# Expiring feature is a FOCUS set: pair it against every
# still-open union set it overlaps.
print "\t*Focus feature $focus_names{$end_id}\n";
print "\tend_id:\t\t$end_id\t$starts{$end_id}\t$ends{$end_id}\t$union_names{$end_id}\n";
#compare vs all union sets next if end_id is same
#we need to check whether we're duplicating last feature for a given co-occurence set
#will this not be solved by cleaning the start ends as we go move along the features?
#features should be named by sorting feature type names of co-occuring features
#need to build current co-occur feature hash to stop duplicates being formed
#we need to build these in order of starts to get multiplex co-occurences
@names = ($focus_names{$end_id});
foreach my $start_id (@start_ids) {
next if $start_id == $end_id;
next if $starts{$end_id} > $ends{$start_id};
print "\t start_id:\t$start_id\t$starts{$start_id}\t$ends{$start_id}\t$union_names{$start_id}\n";
if ($multiplex) {
push @names, $union_names{$start_id};
} else {
@names = ($focus_names{$end_id}, $union_names{$start_id});
}
print "\tFound union of @names\n";
@names = sort @names;
#shift names to move thro' all possible co-occurences
while (scalar(@names) > 1) {
my $union_set_name = join(':', @names);
# The union spans the middle two of the four sorted coords,
# i.e. the overlap of the two features.
my ($start,$end) = (sort {$a<=>$b} ($starts{$start_id},
$ends{$start_id},
$starts{$end_id},
$ends{$end_id}))[1,2];
my $union_feature = Bio::EnsEMBL::Funcgen::PredictedFeature->new
(
-slice => $slice,
-start => $start,
-end => $end,
-strand => 0,
-feature_set => &get_union_FeatureSet($union_set_name),
);
if ($transfer) {
#warn("original uf:\t", join("\t", $union_feature->start, $union_feature->end), "\n");
$union_feature = $union_feature->transfer($transfer_slice);
#warn("transfered uf:\t", join("\t", $union_feature->start, $union_feature->end), "\n");
}
shift @names;
if (! exists $current_unions{$union_set_name}) {
$current_unions{$union_set_name} = $union_feature;
} else {
#this should not happen as we're deleting the A start end values after AB,
#before we have a chance to build BA
warn "Found duplicate union sets for $union_set_name at:\t".$slice->name;
}
}
}
} else {
# Expiring feature is a non-focus union set: pair it only
# against still-open FOCUS features.
print "\t*Union feature $union_names{$end_id}\n";
print "\tend_id:\t\t$end_id\t$starts{$end_id}\t$ends{$end_id}\t$union_names{$end_id}\n";
#just compare vs focus_sets
#no need to next as we know it's not present in focus_names
@names = ($union_names{$end_id});
#warn "Non focus feature @names";
foreach my $start_id (@start_ids) {
#next if $start_id == $end_id;
if ($focus_names{$start_id} && $ends{$start_id} > $starts{$end_id}) {
print "\t start_id:\t$start_id\t$starts{$start_id}\t$ends{$start_id}\t$union_names{$start_id}\n";
#print "focus_name $focus_names{$start_id} exists\n";
if ($multiplex) {
push @names, $focus_names{$start_id};
} else {
@names = ($union_names{$end_id}, $focus_names{$start_id});
}
@names = sort @names;
print "\tFound union of @names\n";
#warn "Found union of @names";
#exit;
#shift names to move thro' all possible co-occurences
while (scalar(@names) > 1) {
my $union_set_name = join(':', @names);
#print $union_set_name, "\n";
my ($start,$end) = (sort {$a<=>$b} ($starts{$start_id},
$ends{$start_id},
$starts{$end_id},
$ends{$end_id}))[1,2];
# NOTE(review): this branch passes the \@names array ref where
# the focus branch above passes the joined string
# $union_set_name; the ref will stringify as "ARRAY(0x...)"
# inside get_union_FeatureSet -- TODO confirm intended.
my $union_feature = Bio::EnsEMBL::Funcgen::PredictedFeature->new
(
-slice => $slice,
-start => $start,
-end => $end,
-strand => 0,
-feature_set => &get_union_FeatureSet(\@names),
);
if ($transfer) {
#warn("original uf:\t", join("\t", $union_feature->start, $union_feature->end), "\n");
$union_feature = $union_feature->transfer($transfer_slice);
#warn("transfered uf:\t", join("\t", $union_feature->start, $union_feature->end), "\n");
}
shift @names;
if (! exists $current_unions{$union_set_name}) {
$current_unions{$union_set_name} = $union_feature;
} else {
#this should not happen as we're deleting the A start end values after AB,
#before we have a chance to build BA
warn "Found duplicate union sets for $union_set_name at:\t".$slice->name;
}
}
}
}
}
#end of if exists focus_names
}
#remove feature start ends for expired feature
delete $starts{$end_id};
delete $ends{$end_id};
}
}
#print and load here as required
#we could cache here by pusing the hash
foreach my $uset_name (keys %current_unions) {
$union_cnts{$uset_name} ||= 0;
$union_cnts{$uset_name} ++;
$pfa->store($current_unions{$uset_name}) if $write_features;
if ($dump_features) {
#warn "dumping $uset_name features";
#we should test for file and clobber here too
$file_handles{$uset_name} ||= open_file($out_dir."/${uset_name}.hitlist", '>');
my $fh = $file_handles{$uset_name};
print $fh 'chr'.$current_unions{$uset_name}->slice->seq_region_name()."\t".
$current_unions{$uset_name}->start()."\t".$current_unions{$uset_name}->end()."\t\n";
print "chr".$current_unions{$uset_name}->slice->seq_region_name()."\t".
$current_unions{$uset_name}->start()."\t".$current_unions{$uset_name}->end()."\t$uset_name\n";
#score field empty
}
}
%current_unions = ();
}
# Record this feature as the currently-open one for its set.
$starts{$feature->feature_set->dbID()} = $feature->start();
$ends{$feature->feature_set->dbID()} = $feature->end();
}
}
# Report per-union-set feature counts and close any dump file handles.
foreach my $uset_name(keys %union_cnts){
print 'Found '.$union_cnts{$uset_name}." union features of type $uset_name\n";
}
foreach my $fh (values %file_handles) {
close($fh);
}
# Fetch-or-create the FeatureSet for a named co-occurrence ("union") set,
# caching the result in the file-level %union_fsets hash.
#
# Arguments: $set_name - union set name (colon-joined FeatureType names)
# Returns  : a Bio::EnsEMBL::Funcgen::FeatureSet for $set_name
# Throws   : if a FeatureSet of that name already exists, -write_features
#            is on and -clobber was not supplied; also if the SQL rollback
#            of pre-existing features fails.
# Side effects (only with -write_features): may DELETE pre-existing
# predicted_feature rows, and may store a new FeatureType, FeatureSet and
# DataSet via their adaptors.
sub get_union_FeatureSet{
my $set_name = shift;
if (! exists $union_fsets{$set_name}) {
$union_fsets{$set_name} = $fset_a->fetch_by_name($set_name);
if ($union_fsets{$set_name}) {
# The set already exists: roll its features back if -clobber was
# given, otherwise refuse to overwrite.
if ($clobber && $write_features) {
my $cs_id = $db->get_FGCoordSystemAdaptor->fetch_by_name('chromosome')->dbID();
my $sql = 'DELETE from predicted_feature where feature_set_id='.
$union_fsets{$set_name}->dbID().' and coord_system_id='.$cs_id;
$db->dbc->do($sql)
or throw('Failed to roll back predicted_features for feature_set_id'.
$union_fsets{$set_name}->dbID());
}
elsif ($write_features) {
# Fixed message typos ("Their is" -> "There is",
# "clobber is you want" -> "clobber if you want").
throw("There is a pre-existing FeatureSet with the name '$set_name'\n".
'You must specify clobber if you want to delete and overwrite all'.
' pre-existing PredictedFeatures');
}
} else { #generate new fset
#get ftype first
if (! exists $union_ftypes{$set_name}) {
$union_ftypes{$set_name} = $ft_adaptor->fetch_by_name($set_name);
if (! $union_ftypes{$set_name}) {
$union_ftypes{$set_name} = Bio::EnsEMBL::Funcgen::FeatureType->new
(
-name => $set_name,
-description => "Co-occurence of features $set_name",
);
($union_ftypes{$set_name}) = @{$ft_adaptor->store($union_ftypes{$set_name})}
if $write_features;
}
}
$union_fsets{$set_name} = Bio::EnsEMBL::Funcgen::FeatureSet->new
(
-analysis => $anal,
-feature_type => $union_ftypes{$set_name},
-name => $set_name,
-feature_class=> 'annotated',
);
if ($write_features) {
($union_fsets{$set_name}) = @{$fset_a->store($union_fsets{$set_name})};
#generate data_set here too
my $dset = Bio::EnsEMBL::Funcgen::DataSet->new
(
-feature_set => $union_fsets{$set_name},
-name => $set_name,
);
$dset_a->store($dset);
}
}
}
return $union_fsets{$set_name};
}
1;
| adamsardar/perl-libs-custom | EnsemblAPI/ensembl-functgenomics/scripts/regulatory_build/build_union_features.pl | Perl | apache-2.0 | 26,119 |
#!/usr/bin/perl
#
# Public domain
#
# Search $UMANPATH for a user-installed manual page and write it to
# the standard output (via groff).
#
# Links can also be specified (uman will search BSDBuild-generated
# .manlinks.mk files for them).
#
# Bail out early when the search path is not configured.
if (!defined($ENV{'UMANPATH'})) {
print "The \$UMANPATH environment variable is not set; see uman(1).\n";
exit(1);
}
# Search these directories for manual page source.
# NOTE(review): the script does not `use strict`; @SRC_DIRS and $NROFF
# are package globals used by the subs below.
@SRC_DIRS = split(':', $ENV{'UMANPATH'});
# Formatting engine
#$NROFF = 'nroff -Tascii -mandoc';
$NROFF = 'groff -S -P-h -Wall -mtty-char -man -Tascii -P-c -mandoc';
if (@ARGV < 1) { die "Usage: uman [manpage]"; }
# Page to look for, and the pager to display it with ($PAGER or less).
my $query = $ARGV[0];
my $pager = 'less';
if (exists($ENV{'PAGER'})) { $pager = $ENV{'PAGER'}; }
# Format a manual page source file with $NROFF and display it in $pager.
#
# Arguments: $page - path to the man page source file
#
# Fix: the original ran `cat $page | $NROFF $page | $pager`, piping the
# file into groff while ALSO passing it as a file operand; groff reads
# the named file and ignores stdin, so the cat was a redundant second
# read and has been dropped.
# NOTE(review): $page is interpolated into a shell command unquoted, so
# paths containing spaces or shell metacharacters will misbehave -- TODO
# consider quoting.
sub ReadPage ($)
{
	my $page = shift;

	system("$NROFF $page |$pager");
}
# Search a BSDBuild-generated .manlinks.mk file for a manual page link.
#
# Arguments: $path - path to the .manlinks.mk file
#            $q    - lower-cased query (page name without section)
# Returns  : the source file name (e.g. "foo.1") from the first
#            "MANLINKS+=from.N:to.M" entry whose "to" name matches $q,
#            or undef when the file cannot be read or nothing matches.
#
# Fixes: use a lexical filehandle with 3-arg open (the original bareword
# ML was a shared global), close the handle before the early return on a
# match (it used to leak), and read line-by-line instead of slurping the
# whole file into a list.
sub SearchManlinksMK ($$)
{
	my $path = shift;
	my $q = shift;
	my $ml;

	unless (open($ml, '<', $path)) {
		print STDERR "$path: $!; ignoring\n";
		return;
	}
	while (my $line = <$ml>) {
		if ($line =~ /^MANLINKS\+=([\w\-]+)\.(\d):([\w\-]+)\.(\d)$/) {
			my $from = $1.'.'.$2;
			if (lc($3) eq $q) {
				close($ml);
				return ($from);
			}
		}
	}
	close($ml);
	return (undef);
}
# Recursively search a directory (down to $maxDepth levels) for a manual
# page matching $q; on a hit the page is displayed via ReadPage() and the
# process exits with status 0.
#
# Arguments: $dir      - directory to search
#            $q        - query (lower-cased here before comparison)
#            $depth    - current recursion depth (1-based)
#            $maxDepth - maximum recursion depth
# Returns  : nothing useful; exits the process when a page is found.
#
# Fix: use a lexical dirhandle.  The original bareword DIR was a single
# global glob, so every recursive call clobbered (and closedir'd) its
# parent's handle, leaking directory handles up the recursion.
sub SearchDir ($$$$)
{
	my $dir = shift;
	my $q = lc(shift);
	my $depth = shift;
	my $maxDepth = shift;
	my $dh;

	unless (opendir($dh, $dir)) {
		return;
	}
	foreach my $ent (readdir($dh)) {
		my $path = $dir.'/'.$ent;

		# A link file may map the query to a differently named page.
		# (Checked before the dotfile skip: .manlinks.mk starts with '.')
		if ($ent eq '.manlinks.mk') {
			if ((my $rv = SearchManlinksMK($path, $q))) {
				ReadPage($dir.'/'.$rv);
				exit(0);
			}
			next;
		}
		if ($ent =~ /^\./) { next; }
		# Match either "name" or "name.section".
		if ($ent =~ /^([\w\-\.]+)\.(\d)$/) {
			if ($q eq lc($1) ||
			    $q eq lc($1.'.'.$2)) {
				ReadPage($path);
				exit(0);
			}
		}
		if (-d $path && $depth+1 <= $maxDepth) {
			SearchDir($path, $q, $depth+1, $maxDepth);
		}
	}
	closedir($dh);
}
# Search each $UMANPATH component; a hit displays the page and exits
# inside SearchDir(), so falling out of this loop means nothing was found.
foreach my $dir (@SRC_DIRS) {
SearchDir($dir, $query, 1, 10);
}
| stqism/ToxBuild | uman.pl | Perl | bsd-2-clause | 1,929 |
#!/usr/bin/perl
# Copyright (c) 2006-2010 by cisco Systems, Inc.
# All rights reserved.
# Print a one-line usage summary for cm_rpcgen.pl to STDERR.
sub Usage
{
	my $usage = "cm_rpcgen.pl [-in <in-file>] [-common <file>] [-server <file>] [-client <file>] [-marker <marker>] [-module <name>] \n";
	print {*STDERR} $usage;
}
# Output-selection flags and file names, defaulting to module name "cm"
# and the "RPC" prototype marker.
my $do_common = 0;
my $do_server = 0;
my $do_client = 0;
my $module_name = "cm";
my $in_file = "";
my $common_file = "";
my $server_file = "";
my $client_file = "";
my @markers;
$markers[0] = "RPC";
# Hand-rolled option parsing.
# NOTE(review): $arg is an undeclared package global (no `use strict`).
while ($#ARGV >= 0) {
$arg = shift @ARGV;
if ($arg eq "-common") {
$do_common = 1;
$common_file = shift @ARGV;
} elsif ($arg eq "-in") {
$in_file = shift @ARGV;
} elsif ($arg eq "-server") {
$do_server = 1;
$server_file = shift @ARGV;
} elsif ($arg eq "-client") {
$do_client = 1;
$client_file = shift @ARGV;
} elsif ($arg eq "-marker") {
# -marker may be given multiple times; each one is appended.
$markers[$#markers + 1] = shift @ARGV;
} elsif ($arg eq "-module") {
if ($#ARGV >= 0) {
$module_name = shift @ARGV;
} else {
# NOTE(review): prints usage but neither dies nor exits here,
# unlike the unknown-option branch below -- confirm intent.
&Usage;
}
} else {
&Usage;
die "Bad option $arg\n";
}
}
# Open the input prototype file and the requested output files.
# Bug fix: the original read `open IN,'<',$in_file || die;` -- `||` binds
# tighter than the comma, so it attached to $in_file and an open failure
# was never detected.  Use low-precedence `or` with a message instead.
open(IN, '<', $in_file) or die "cannot open $in_file\n";
if ($do_common) {
open(COMMON,'>',$common_file) || die "cannot open $common_file\n";
}
if ($do_client) {
open(CLIENT, '>',$client_file) || die;
}
if ($do_server) {
open(SERVER, '>',$server_file) || die;
}
# strip comments
# Slurp the whole input, then strip C /* */ block comments while leaving
# string and character literals intact.
$/ = undef;
$_ = <IN>;
s#/\*[^*]*\*+([^/*][^*]*\*+)*/|([^/"']*("[^"\\]*(\\[\d\D][^"\\]*)*"[^/"']*|'[^'\\]*(\\[\d\D][^'\\]*)*'[^/"']*|/+[^*/][^/"']*)*)#$2#g;
# strip preprocessor lines
s/^#.*//mg;
#
# Parse all RPCs into the @rpc array.
# Each prototype starting with one of the @markers keywords and running to
# the next ';' is decomposed into return type, function name and a list of
# argument records {CLASS, TYPE, NAME, BOUND}; the direction classes
# (INV/INR/OUT/INOUT/array macros) later drive the generated marshalling.
my $remaining = $_;
my $num_rpcs = 0;
my @rpcs;
# Build an alternation regex "(?:RPC|...)" over all configured markers.
my $marker_re = '(?:';
my $marker_num;
for ($marker_num=0;
$marker_num <= $#markers;
$marker_num++) {
if ($marker_num > 0) {
$marker_re .= '|';
}
$marker_re .= $markers[$marker_num];
}
$marker_re .= ')';
print $marker_re."\n";
while ($remaining =~ /((?:$marker_re)[ \t\n][^;]*;)(.*)/s) {
# split off current function prototype from those remaining in file
# "current" holds the current prototype being worked on
# "remaining" holds the remaining prototypes not yet processed
$remaining = $2;
$current = $1;
# search the current prototype for an OUTARRAY() macro and expand it
# to its arguments, if found
#
# NOTE: this only handles a single OUTARRAY macro per function prototype
$_ = $1;
if (/OUTARRAY[\(](([^\)]*))/) {
$fields = $1;
@split_fields = split /[ \n\t]*,[ \n\t]*/s,$fields;
if ($#split_fields ne 1) {
die "Bad macro input";
}
$array_args = "RPC_OUTARRAY_PTR<". $split_fields[1]. "> ". $split_fields[0]. " *_array, RPC_OUTARRAY_COUNT uint32_t _count, RPC_OUTARRAY_USED uint32_t *_used";
s/OUTARRAY[\(](([^\)]*\)))/$array_args/g;
$current = $_;
}
# search the current prototype for an INARRAY() macro and expand it
# to its arguments, if found
#
# NOTE: this only handles a single INARRAY macro per function prototype
if (/INARRAY[\(](([^\)]*))/) {
$fields = $1;
@split_fields = split /[ \n\t]*,[ \n\t]*/s,$fields;
if ($#split_fields ne 1) {
die "Bad macro input";
}
$array_args = "RPC_INARRAY_PTR<". $split_fields[1]. "> ". $split_fields[0]. " *__array, RPC_INARRAY_COUNT uint32_t __count";
s/INARRAY[\(](([^\)]*\)))/$array_args/g;
$current = $_;
}
# decompose the expanded function prototype into its parts and process...
($pre_tokens, $arg_list) = split /[\(\)]/s,$current;
@split_pretokens = split /[ \n\t][ \n\t]*/s,$pre_tokens;
shift @split_pretokens;
$function_name = $split_pretokens[$#split_pretokens];
$return_type = $split_pretokens[0];
for ($i=1; $i<$#split_pretokens; $i++) {
$return_type .= " $split_pretokens[$i]";
}
@split_args = split /[ \n\t]*,[ \n\t]*/s, $arg_list;
# A lone "void" argument list means no arguments.
if ($#split_args == 0 && ($split_args[0] =~ /[ \t\n]*void[ \t\n]*/s)) {
$#split_args = -1;
}
$args = [];
$arg_dir = 0;
for ($i=0; $i <= $#split_args; $i++) {
$split_args[$i] =~ s/\*/ \* /g;
@aw = split /[ \n\t][ \n\t]*/s,"x ".$split_args[$i];
shift @aw;
$arg_bound = 0;
$arg_name = $aw[$#aw];
# Classify the argument's direction; $arg_dir accumulates 0x1 (in)
# and/or 0x2 (out) across all arguments of the prototype.
if ($aw[0] =~ /(RPC_OUTARRAY_PTR<.*>)/s) {
$arg_dir = $arg_dir | 0x2;
$arg_class = shift(@aw);
$arg_bound = $arg_class;
$arg_bound =~ s/.*<//;
$arg_bound =~ s/>.*//;
$arg_class =~ s/<.*>//;
} elsif ($aw[0] =~ /(RPC_INARRAY_PTR<.*>)/s) {
$arg_dir = $arg_dir | 0x1;
$arg_class = shift(@aw);
$arg_bound = $arg_class;
$arg_bound =~ s/.*<//;
$arg_bound =~ s/>.*//;
$arg_class =~ s/<.*>//;
} elsif ($aw[0] =~ /(INV|INR|INR_OPT|RPC_INARRAY_COUNT|RPC_INARRAY_USED)/s) {
$arg_dir = $arg_dir | 0x1;
$arg_class = shift(@aw);
} elsif ($aw[0] =~ /(OUT|OUT_OPT|RPC_OUTARRAY_COUNT|RPC_OUTARRAY_USED)/s) {
$arg_dir = $arg_dir | 0x2;
$arg_class = shift(@aw);
} elsif ($aw[0] =~ /(INOUT)/s) {
$arg_dir = $arg_dir | 0x3;
$arg_class = shift(@aw);
} else {
$arg_class = "NONE";
}
# Remaining words (minus the trailing name) form the C type.
$arg_type = $aw[0];
while ($#aw > 1) {
shift @aw;
$arg_type .= $aw[0];
}
$args->[$i] = { CLASS => $arg_class, TYPE => $arg_type, NAME => $arg_name, BOUND => $arg_bound };
}
$rpcs[$num_rpcs] = { RET => $return_type, NAME => $function_name, , DIR => $arg_dir, ARGS => $args, LASTARG => $#split_args};
$num_rpcs++;
}
# Dump out the enum for the different RPCs
# Build the upper-cased enum constant for an RPC function,
# e.g. mk_rpc_id("foo") -> "__CM_RPC_ID_FOO" (for module "cm").
sub mk_rpc_id
{
	my ($fcn) = @_;
	return uc("__${module_name}_RPC_ID_${fcn}");
}
# Name of the generated RPC-id enum tag (callers append "t" for the
# typedef name).
sub mk_rpc_enum_name
{
	return sprintf('__%s_rpc_id_', $module_name);
}
# Request-struct tag name for an RPC function (callers append "t").
sub mk_req_struct_name
{
	my ($fcn) = @_;
	return "__${module_name}_${fcn}_req_";
}
# Response-struct tag name for an RPC function (callers append "t").
sub mk_rsp_struct_name
{
	my ($fcn) = @_;
	return "__${module_name}_${fcn}_rsp_";
}
# Emit the shared header: IPC buffer constants, the RPC id enum, the name
# string table, per-RPC request/response structs, the req/rsp unions and
# the server entry-point prototype.
if ($do_common) {
print COMMON "/* THIS IS GENERATED CODE. DO NOT EDIT. */\n\n";
print COMMON "#define VQEC_DEV_IPC_BUF_LEN 4096\n";
print COMMON "#define VQEC_DEV_IPC_BUF_REQ_OFFSET 0\n";
print COMMON "#define VQEC_DEV_IPC_BUF_RSP_OFFSET VQEC_DEV_IPC_BUF_LEN\n";
print COMMON "#define VQEC_DEV_IPC_IOCTL_TYPE 0xCA\n";
print COMMON "#define VQEC_DEV_IPC_IOCTL_WRITE 0x1\n";
print COMMON "#define VQEC_DEV_IPC_IOCTL_READ 0x2\n";
# Enum of RPC ids: INVALID = 0, one id per RPC, then a MAX sentinel.
print COMMON "typedef enum " . &mk_rpc_enum_name . " {\n";
my $name = &mk_rpc_id("INVALID");
print COMMON " $name = 0,\n";
for ($i = 0; $i <= $#rpcs; $i++) {
$name = &mk_rpc_id($rpcs[$i]->{'NAME'});
print COMMON " $name";
if ($i < $#rpcs) {
print COMMON ",\n";
} else {
print COMMON ", " . &mk_rpc_id("MAX") . "\n";
}
}
print COMMON "} " . &mk_rpc_enum_name . "t;\n\n";
# Macro expanding to a string table parallel to the enum above.
print COMMON "#define " . &mk_rpc_enum_name . "strings__ \\\n \"INVALID\", \\\n";
for ($i = 0; $i <= $#rpcs; $i++) {
$name = $rpcs[$i]->{'NAME'};
print COMMON " \"$name\"";
if ($i < $#rpcs) {
print COMMON ", \\\n";
} else {
print COMMON "\n\n";
}
}
# print COMMON out request and response structure defs
for ($i=0; $i<$num_rpcs; $i++) {
my $ret_type = $rpcs[$i]->{'RET'};
my $fcn_name = $rpcs[$i]->{'NAME'};
my $num_args = $rpcs[$i]->{'LASTARG'} + 1;
my $args_ref = $rpcs[$i]->{'ARGS'};
my @args = @{$args_ref};
my $req_struct_name = &mk_req_struct_name($fcn_name);
my $rsp_struct_name = &mk_rsp_struct_name($fcn_name);
# the request structure
print COMMON "typedef struct $req_struct_name {\n";
print COMMON " int32_t __rpc_req_len;\n";
print COMMON " " . &mk_rpc_enum_name . "t __rpc_fcn_num;\n";
print COMMON " uint32_t __rpc_ver_num;\n";
for ($j=0; $j< $num_args; $j++) {
my $class = $args[$j]->{'CLASS'};
my $name = $args[$j]->{'NAME'};
my $type = $args[$j]->{'TYPE'};
my $maxbound = $args[$j]->{'BOUND'};
if ($class eq "INV") {
print COMMON " $type $name ;\n";
} elsif ($class eq "RPC_INARRAY_COUNT") {
print COMMON " $type $name ;\n";
} elsif ($class eq "RPC_OUTARRAY_COUNT") {
print COMMON " $type $name ;\n";
} elsif ($class eq "INR" || $class eq "INOUT") {
print COMMON " __typeof__ (*(($type)0)) $name ;\n";
} elsif ($class eq "INR_OPT") {
print COMMON " boolean $name" . "_valid;\n";
print COMMON " __typeof__ (*(($type)0)) $name ;\n";
} elsif ($class eq "RPC_INARRAY_PTR") {
print COMMON " __typeof__ (*(($type)0)) $name\[$maxbound\];\n";
}
}
print COMMON "} " . $req_struct_name . "t;\n";
# the resp structure
print COMMON "typedef struct $rsp_struct_name {\n";
print COMMON " int32_t __rpc_rsp_len;\n";
print COMMON " " . &mk_rpc_enum_name . "t __rpc_fcn_num;\n";
print COMMON " uint32_t __rpc_ver_num;\n";
print COMMON " $ret_type __ret_val;\n";
for ($j=0; $j< $num_args; $j++) {
my $class = $args[$j]->{'CLASS'};
my $name = $args[$j]->{'NAME'};
my $type = $args[$j]->{'TYPE'};
my $maxbound = $args[$j]->{'BOUND'};
if ($class eq "OUT" || $class eq "OUT_OPT" || $class eq "INOUT") {
print COMMON " __typeof__ (*(($type)0)) $name ;\n";
} elsif ($class eq "RPC_OUTARRAY_PTR") {
print COMMON " __typeof__ (*(($type)0)) $name\[$maxbound\];\n";
} elsif ($class eq "RPC_OUTARRAY_USED") {
print COMMON " __typeof__ (*(($type)0)) $name ;\n";
}
}
print COMMON "} " . $rsp_struct_name . "t;\n";
}
# Unions over all request / response structs, used for server dispatch;
# the "ol" overlay exposes the common header fields of every message.
print COMMON "\ntypedef union __" . $module_name . "_rpc_all_req_ {\n";
print COMMON " struct {\n";
print COMMON " int32_t len;\n";
print COMMON " " . &mk_rpc_enum_name . "t fcn_num;\n";
print COMMON " uint32_t vqec_api_ver;\n";
print COMMON "} ol;\n";
for ($i=0; $i<$num_rpcs; $i++) {
my $fcn_name = $rpcs[$i]->{'NAME'};
my $req_struct_name = &mk_req_struct_name($fcn_name);
print COMMON $req_struct_name . "t $fcn_name" . "_req;\n";
}
print COMMON "} __" . $module_name . "_rpc_all_req_t;\n\n";
print COMMON "\ntypedef union __" . $module_name . "_rpc_all_rsp_ {\n";
print COMMON " struct {\n";
print COMMON " int32_t len;\n";
print COMMON " " . &mk_rpc_enum_name . "t fcn_num;\n";
print COMMON " uint32_t vqec_api_ver;\n";
print COMMON "} ol;\n";
for ($i=0; $i<$num_rpcs; $i++) {
my $fcn_name = $rpcs[$i]->{'NAME'};
my $rsp_struct_name = &mk_rsp_struct_name($fcn_name);
print COMMON $rsp_struct_name . "t $fcn_name" . "_rsp;\n";
}
print COMMON "} __" . $module_name . "_rpc_all_rsp_t;\n\n";
print COMMON "int32_t $module_name" . "_rpc_server (__" . $module_name . "_rpc_id_t __id, uint32_t req_size, __" . $module_name .
"_rpc_all_req_t *req, __" . $module_name . "_rpc_all_rsp_t *rsp, uint32_t *rsp_size);\n";
}
# Emit the client stubs: for each RPC, a C function with the original
# prototype that marshals its arguments into the shared-memory request
# buffer, performs the IPC round trip under vqec_ipc_lock, and unmarshals
# the response (zeroing out-arguments on failure).
if ($do_client) {
# Produce the client stubs
print CLIENT "/* THIS IS GENERATED CODE. DO NOT EDIT. */\n\n";
for ($i=0; $i<$num_rpcs; $i++) {
my $ret_type = $rpcs[$i]->{'RET'};
my $fcn_name = $rpcs[$i]->{'NAME'};
my $num_args = $rpcs[$i]->{'LASTARG'} + 1;
my $args_ref = $rpcs[$i]->{'ARGS'};
my $dir = $rpcs[$i]->{'DIR'};
my @args = @{$args_ref};
my $req_struct_name = &mk_req_struct_name($fcn_name);
my $rsp_struct_name = &mk_rsp_struct_name($fcn_name);
my $fcn_num = &mk_rpc_id($fcn_name);
# Function signature, reproducing the parsed prototype.
print CLIENT "\n$ret_type $fcn_name(\n";
if ($num_args == 0) {
print CLIENT "void";
} else {
for ($j=0; $j< $num_args; $j++) {
my $name = $args[$j]->{'NAME'};
my $type = $args[$j]->{'TYPE'};
print CLIENT " $type $name";
if ($j < ($num_args-1)) {
print CLIENT ",\n";
}
}
}
print CLIENT ")\n{\n";
print CLIENT " int32_t rsp_size, result;\n";
print CLIENT " volatile ".$req_struct_name . "t *req = VQEC_RPC_CLIENT_SHM_REQBUF_PTR;\n";
print CLIENT " volatile ".$rsp_struct_name . "t *rsp = VQEC_RPC_CLIENT_SHM_RSPBUF_PTR;\n";
print CLIENT " if ((req == (void *)-1) || (rsp == (void *)-1)) {\n";
print CLIENT " return VQEC_DP_ERR_INTERNAL;\n";
print CLIENT " }\n";
# Null-checks for required input pointer arguments.
for ($j=0; $j< $num_args; $j++) {
my $class = $args[$j]->{'CLASS'};
my $name = $args[$j]->{'NAME'};
my $type = $args[$j]->{'TYPE'};
if ($class eq "INR" || $class eq "INOUT") {
print CLIENT " if (! $name) {\n";
print CLIENT " return VQEC_DP_ERR_INVALIDARGS;\n";
print CLIENT " }\n";
}
}
# Null-checks for output and array pointer arguments.
for ($j=0; $j< $num_args; $j++) {
my $class = $args[$j]->{'CLASS'};
my $name = $args[$j]->{'NAME'};
my $type = $args[$j]->{'TYPE'};
if ($class eq "OUT" ||
$class eq "RPC_OUTARRAY_PTR" ||
$class eq "RPC_OUTARRAY_USED" ||
$class eq "RPC_INARRAY_PTR") {
print CLIENT " if (! $name) {\n";
print CLIENT " return VQEC_DP_ERR_INVALIDARGS;\n";
print CLIENT " }\n";
}
}
# Fill in the request header, then marshal scalar/in arguments.
print CLIENT " vqec_lock_lock(vqec_ipc_lock);\n";
print CLIENT " req->__rpc_fcn_num = $fcn_num;\n";
print CLIENT " req->__rpc_req_len = sizeof(*req);\n";
print CLIENT " VQEC_ASSERT(req->__rpc_req_len <= VQEC_DEV_IPC_BUF_LEN);\n";
print CLIENT " req->__rpc_ver_num = VQEC_DP_API_VERSION;\n";
$inarray_cnt_name = "";
for ($j=0; $j< $num_args; $j++) {
my $class = $args[$j]->{'CLASS'};
my $name = $args[$j]->{'NAME'};
my $type = $args[$j]->{'TYPE'};
if ($class eq "INV") {
print CLIENT " req->$name = $name;\n";
} elsif ($class eq "INR" || $class eq "INOUT") {
print CLIENT " req->$name = *($name);\n";
} elsif ($class eq "INR_OPT") {
print CLIENT " if ($name != NULL) {\n";
print CLIENT " req->$name = *($name);\n";
print CLIENT " req->$name" . "_valid = TRUE;\n";
print CLIENT " } else {\n";
print CLIENT " req->$name" . "_valid = FALSE;\n";
print CLIENT " }\n";
} elsif ($class eq "RPC_OUTARRAY_COUNT") {
print CLIENT " req->$name = $name;\n";
} elsif ($class eq "RPC_INARRAY_COUNT") {
print CLIENT " req->$name = $name;\n";
$inarray_cnt_name = $name;
}
}
# Marshal in-arrays, bounded by the RPC_INARRAY_COUNT argument above.
for ($j=0; $j< $num_args; $j++) {
my $class = $args[$j]->{'CLASS'};
my $name = $args[$j]->{'NAME'};
my $type = $args[$j]->{'TYPE'};
if ($class eq "RPC_INARRAY_PTR") {
print CLIENT " if ((sizeof (*$name) * $inarray_cnt_name) > VQEC_DEV_IPC_BUF_LEN) {\n";
print CLIENT " vqec_lock_unlock(vqec_ipc_lock);\n";
print CLIENT " return (VQEC_DP_ERR_INVALIDARGS);\n";
print CLIENT " } else {\n";
print CLIENT " memcpy(&req->$name, $name, sizeof (*$name) * $inarray_cnt_name);\n";
print CLIENT " }\n";
}
}
# Perform the IPC round trip and validate the response header.
print CLIENT " rsp_size = sizeof(*rsp);\n";
print CLIENT " result = RPC_SND_RCV(req,sizeof(*req),rsp,&rsp_size,$dir);\n";
print CLIENT " if (result || rsp->__rpc_rsp_len != sizeof(*rsp) || rsp_size != sizeof(*rsp) || rsp->__rpc_fcn_num != req->__rpc_fcn_num || rsp->__rpc_ver_num != VQEC_DP_API_VERSION) {\n";
print CLIENT " RPC_BAD_RSP(result,rsp->__rpc_rsp_len, sizeof(*rsp), rsp->__rpc_fcn_num, req->__rpc_fcn_num, rsp->__rpc_ver_num);\n";
print CLIENT " }\n";
print CLIENT " if (!result) {\n";
# Unmarshal out-arguments on success...
for ($j=0; $j< $num_args; $j++) {
my $class = $args[$j]->{'CLASS'};
my $name = $args[$j]->{'NAME'};
my $type = $args[$j]->{'TYPE'};
if ($class eq "OUT" || $class eq "INOUT") {
print CLIENT " *$name = rsp->$name;\n";
} elsif ($class eq "OUT_OPT") {
print CLIENT " if ($name) {\n";
print CLIENT " *$name = rsp->$name;\n";
print CLIENT " }\n";
} elsif ($class eq "RPC_OUTARRAY_PTR") {
$array_name = $name;
$array_type = $type;
} elsif ($class eq "RPC_OUTARRAY_COUNT") {
$array_cnt_name = $name;
} elsif ($class eq "RPC_OUTARRAY_USED") {
print CLIENT " VQEC_ASSERT(rsp->$name <= $array_cnt_name);\n";
print CLIENT " *$name = rsp->$name;\n";
print CLIENT " memcpy($array_name, &rsp->$array_name, sizeof(*$array_name) * rsp->$name);\n";
}
}
print CLIENT " vqec_lock_unlock(vqec_ipc_lock);\n";
print CLIENT " return rsp->__ret_val;\n";
print CLIENT " } else {\n";
# ...or zero them out on failure.
for ($j=0; $j< $num_args; $j++) {
my $class = $args[$j]->{'CLASS'};
my $name = $args[$j]->{'NAME'};
my $type = $args[$j]->{'TYPE'};
if ($class eq "OUT" || $class eq "INOUT") {
print CLIENT " memset($name, 0, sizeof(*$name));\n";
} elsif ($class eq "OUT_OPT") {
print CLIENT " if ($name) {\n";
print CLIENT " memset($name, 0, sizeof(*$name));\n";
print CLIENT " }\n";
} elsif ($class eq "RPC_OUTARRAY_USED") {
print CLIENT " *$name = 0;\n";
}
}
print CLIENT " vqec_lock_unlock(vqec_ipc_lock);\n";
print CLIENT " return VQEC_DP_ERR_INTERNAL;\n";
print CLIENT " }\n";
print CLIENT "}\n";
}
}
# Emit the server-side RPC dispatcher as C source on the SERVER filehandle.
# For every RPC described in @rpcs this generates one switch case that
# validates the request header, unmarshals arguments from the request
# struct, invokes the real function under g_vqec_dp_lock, and marshals
# the results into the response struct.
if ($do_server) {
# Generate the server side
print SERVER "/* THIS IS GENERATED CODE. DO NOT EDIT. */\n\n";
print SERVER "static const char * __rpc_fcn_name_strings[] = { " . &mk_rpc_enum_name . "strings__ };\n";
print SERVER "int32_t $module_name" . "_rpc_server (__" . $module_name . "_rpc_id_t __id, uint32_t req_size, __" . $module_name .
"_rpc_all_req_t *req, __" . $module_name . "_rpc_all_rsp_t *rsp, uint32_t *rsp_size)\n";
print SERVER "{\n";
print SERVER " ASSERT(sizeof(*req) <= VQEC_DEV_IPC_BUF_LEN);\n";
print SERVER " ASSERT(sizeof(*rsp) <= VQEC_DEV_IPC_BUF_LEN);\n";
print SERVER " *rsp_size = 0;\n";
# Header sanity checks: size, length field, function id range, API version,
# and agreement between the channel id and the embedded function number.
print SERVER " if (req_size < sizeof(req->ol) || req_size != req->ol.len || req->ol.fcn_num <= 0 || req->ol.fcn_num >= ".&mk_rpc_id("MAX")." ||
req->ol.vqec_api_ver != VQEC_DP_API_VERSION || __id != req->ol.fcn_num) {\n";
print SERVER " RPC_REQ_ERROR(__id, req_size, req->ol.len, req->ol.fcn_num, req->ol.vqec_api_ver);\n";
print SERVER " return (-EINVAL);\n";
print SERVER " }\n\n";
print SERVER " RPC_TRACE(__rpc_fcn_name_strings[req->ol.fcn_num]);\n";
print SERVER " switch (req->ol.fcn_num) {\n";
# One switch case per RPC.
for ($i=0; $i<$num_rpcs; $i++) {
my $ret_type = $rpcs[$i]->{'RET'};
my $fcn_name = $rpcs[$i]->{'NAME'};
my $num_args = $rpcs[$i]->{'LASTARG'} + 1;
my $args_ref = $rpcs[$i]->{'ARGS'};
my @args = @{$args_ref};
my $req_struct_name = &mk_req_struct_name($fcn_name);
my $rsp_struct_name = &mk_rsp_struct_name($fcn_name);
my $fcn_num = &mk_rpc_id($fcn_name);
print SERVER " case $fcn_num:\n";
print SERVER " {\n";
print SERVER " $req_struct_name" . "t * req_p = (struct $req_struct_name *) req;\n";
print SERVER " $rsp_struct_name" . "t * rsp_p = (struct $rsp_struct_name *) rsp;\n";
print SERVER " $ret_type ret_val = 0;\n";
# Declare a local for each argument: by-value classes get the plain
# type; by-reference classes get a pointer derived via __typeof__.
for ($j=0; $j<$num_args; $j++) {
my $class = $args[$j]->{'CLASS'};
my $name = $args[$j]->{'NAME'};
my $type = $args[$j]->{'TYPE'};
if ($class eq "INV" || $class eq "RPC_INARRAY_COUNT" || $class eq "RPC_OUTARRAY_COUNT") {
print SERVER " $type $name;\n";
} elsif ($class eq "INR" || $class eq "OUT" || $class eq "INR_OPT"
|| $class eq "OUT_OPT" || $class eq "RPC_OUTARRAY_PTR"
|| $class eq "RPC_INARRAY_PTR" || $class eq "RPC_OUTARRAY_USED"
|| $class eq "INOUT") {
print SERVER " __typeof__ ((($type)0)) $name;\n";
}
}
# Per-RPC length check against the concrete request struct.
print SERVER " if (req->ol.len != sizeof(*req_p)) {\n";
print SERVER " RPC_REQ_ERROR(__id, req_size, req->ol.len, req->ol.fcn_num, req->ol.vqec_api_ver);\n";
print SERVER " return (-EINVAL);\n";
print SERVER " }\n";
# Unmarshal: point locals into the request struct for inputs and into
# the response struct for outputs; INOUT copies the input value across
# first so the callee sees it through the response-side pointer.
for ($j=0; $j<$num_args; $j++) {
my $class = $args[$j]->{'CLASS'};
my $name = $args[$j]->{'NAME'};
my $type = $args[$j]->{'TYPE'};
if ($class eq "INR" || $class eq "INR_OPT") {
print SERVER " $name = &req_p->$name;\n";
} elsif ($class eq "RPC_INARRAY_PTR") {
print SERVER " $name = req_p->$name;\n";
} elsif ($class eq "INV" || $class eq "RPC_INARRAY_COUNT" || $class eq "RPC_OUTARRAY_COUNT") {
print SERVER " $name = req_p->$name;\n";
} elsif ($class eq "OUT" || $class eq "OUT_OPT" || $class eq "RPC_OUTARRAY_USED") {
print SERVER " $name = &rsp_p->$name;\n";
} elsif ($class eq "RPC_OUTARRAY_PTR") {
print SERVER " $name = rsp_p->$name;\n";
} elsif ($class eq "INOUT") {
print SERVER " rsp_p->$name = req_p->$name;\n";
print SERVER " $name = &rsp_p->$name;\n";
}
}
# Zero all plain output buffers before calling the implementation.
for ($j=0; $j<$num_args; $j++) {
my $class = $args[$j]->{'CLASS'};
my $name = $args[$j]->{'NAME'};
my $type = $args[$j]->{'TYPE'};
if ($class eq "OUT" || $class eq "OUT_OPT") {
print SERVER " memset($name, 0, sizeof(*$name));\n";
}
}
# Invoke the real function under the dataplane lock; INR_OPT arguments
# are passed as NULL when their _valid flag is clear.
print SERVER " vqec_lock_lock(g_vqec_dp_lock);\n";
print SERVER " ret_val = $fcn_name(";
for ($j=0; $j<$num_args; $j++) {
my $class = $args[$j]->{'CLASS'};
my $name = $args[$j]->{'NAME'};
my $type = $args[$j]->{'TYPE'};
if ($class eq "INV" || $class eq "RPC_INARRAY_COUNT" || $class eq "RPC_OUTARRAY_COUNT") {
print SERVER $name;
} elsif ($class eq "INR" || $class eq "OUT" || $class eq "OUT_OPT" ||
$class eq "RPC_OUTARRAY_PTR" || $class eq "RPC_INARRAY_PTR" ||
$class eq "RPC_OUTARRAY_USED" || $class eq "INOUT") {
print SERVER "$name";
} elsif ($class eq "INR_OPT") {
print SERVER "((req_p->$name"."_valid) ? &req_p->$name : NULL)";
}
if ($j < ($num_args - 1)) {
print SERVER ",";
}
}
print SERVER ");\n";
print SERVER " vqec_lock_unlock(g_vqec_dp_lock);\n";
# Marshal the reply: return value plus response header fields.
print SERVER " rsp_p->__ret_val = ret_val;\n";
print SERVER " rsp_p->__rpc_rsp_len = sizeof(*rsp_p);\n";
print SERVER " rsp_p->__rpc_fcn_num = req_p->__rpc_fcn_num;\n";
print SERVER " rsp_p->__rpc_ver_num = VQEC_DP_API_VERSION;\n";
print SERVER " *rsp_size = sizeof(*rsp_p);\n";
print SERVER " } break;\n\n";
}
# Unknown function numbers are rejected the same way as malformed headers.
print SERVER " default:\n";
print SERVER " {\n";
print SERVER " RPC_REQ_ERROR(__id, req_size, req->ol.len, req->ol.fcn_num, req->ol.vqec_api_ver);\n";
print SERVER " return (-EINVAL);\n";
print SERVER " } break;\n\n";
print SERVER " }\n";
print SERVER " return (0);\n";
print SERVER "}\n";
}
#for ($i=0; $i<$num_rpcs; $i++) {
# print "$rpcs[$i]->{'RET'} xxx $rpcs[$i]->{'NAME'}\n";
# $max = ${$rpcs[$i]}{'LASTARG'};
# for ($j=0;
# $j <= $max;
# $j++) {
# %arg = %{$rpcs[$i]->{"ARGS"}->[$j]};
# print "\t$arg{'CLASS'} xxx $arg{'TYPE'} xxx $arg{'NAME'}\n";
# }
#}
| wmanley/cisco-vqe-client | eva/vqec_rpcgen.pl | Perl | bsd-3-clause | 25,872 |
%%% The following code has been produced by the CHR compiler
% Generated ECLiPSe Prolog: a boolean constraint solver compiled from
% CHR rules.  Do not edit by hand; regenerate from the CHR source.
:- ( current_module(chr) -> true ; use_module(library(chr)) ).
:- get_flag(variable_names, Val), setval(variable_names_flag, Val), set_flag(variable_names, off).
% card(L, U, Bools): at least L and at most U of the booleans in Bools
% are 1.  Validates 0 =< L =< U and L =< length(Bools), then posts the
% card/4 constraint handled by the CHR machinery.
card(_11752, _11751, _11750) :- length(_11750, _11749), _11752 =< _11751, 0 =< _11751, _11752 =< _11749, card(_11752, _11751, _11750, _11749).
% set_to_ones(L) / set_to_zeros(L): unify every element of L with 1 / 0.
set_to_ones([]).
set_to_ones([1|_12539]) :- set_to_ones(_12539).
set_to_zeros([]).
set_to_zeros([0|_12565]) :- set_to_zeros(_12565).
% Surface-syntax operators: ~~ is boolean negation, # is exclusive or.
:- op(100, fy, ~~).
:- op(100, xfy, #).
% solve_bool(Expr, Result): translate a boolean expression tree built
% from * (and), + (or), # (xor), ~~ (not), -> (implication) and =
% (equivalence) into the corresponding and/3, or/3, exor/3, neg/2 and
% imp/2 constraints.  Variables and atoms unify directly with Result;
% -> and = constrain Result to 1.  The ?- heads are matching clauses
% (head unification may not bind caller variables), so clause order and
% the cuts are significant.
solve_bool(_12634, _12633) :- var(_12634), !, _12634 = _12633.
solve_bool(_12660, _12659) :- atomic(_12660), !, _12660 = _12659.
solve_bool(_12699 * _12695, _12689) ?- !, solve_bool(_12699, _12691), solve_bool(_12695, _12690), and(_12691, _12690, _12689).
solve_bool(_12739 + _12735, _12729) ?- !, solve_bool(_12739, _12731), solve_bool(_12735, _12730), or(_12731, _12730, _12729).
solve_bool(#(_12779, _12775), _12769) ?- !, solve_bool(_12779, _12771), solve_bool(_12775, _12770), exor(_12771, _12770, _12769).
solve_bool(~~(_12810), _12805) ?- !, solve_bool(_12810, _12806), neg(_12806, _12805).
solve_bool(_12854 -> _12850, _12841) ?- !, solve_bool(_12854, _12846), solve_bool(_12850, _12845), imp(_12846, _12845), _12841 = 1.
solve_bool(_12899 = _12895, _12886) ?- !, solve_bool(_12899, _12891), solve_bool(_12895, _12890), _12891 = _12890, _12886 = 1.
% bool_portray(Constraint, Pretty): map internal constraint terms back
% to surface syntax for printing (and(X,Y,Z) prints as X*Y = Z, etc.).
% Registered as a write macro for all compound terms below.
bool_portray(and(_12930, _12929, _12925), _12931) ?- !, _12931 = (_12930 * _12929 = _12925).
bool_portray(or(_12963, _12962, _12958), _12964) ?- !, _12964 = (_12963 + _12962 = _12958).
bool_portray(exor(_12996, _12995, _12991), _12997) ?- !, _12997 = (#(_12996, _12995) = _12991).
bool_portray(neg(_13027, _13026), _13028) ?- !, _13028 = (_13027 = ~~(_13026)).
bool_portray(imp(_13055, _13054), _13056) ?- !, _13056 = (_13055 -> _13054).
% The 4th card argument (the list length) is dropped when printing.
bool_portray(card(_13085, _13084, _13083, _13095), _13086) ?- !, _13086 = card(_13085, _13084, _13083).
:- define_macro(type(compound), bool_portray / 2, [write]).
% label_bool(Bits): enumerate 0/1 assignments for a list of boolean
% variables.  Each element is tried as 0 first, then 1, leaving choice
% points so backtracking explores every combination.
label_bool([]).
label_bool([Bit|Rest]) :-
    ( Bit = 0
    ; Bit = 1
    ),
    label_bool(Rest).
% 'CHRlabel_with'(Constraint, Goal, DebugId): labeling hooks produced
% from "label_with" declarations.  For each suspended constraint they
% hand back the clause_* goal that enumerates its solutions; coca/1
% calls are debugger/tracing instrumentation.
%%% Callables for boolean / 1
'CHRlabel_with'(boolean(A), B, C) ?-
coca(try_clause(C, boolean(A), boolean(D), true)),
coca(clause_fired(C)),
'CHR='(B, clause_boolean(A)).
%%% Callables for and / 3
'CHRlabel_with'(and(A, B, C), D, E) ?-
coca(try_clause(E, and(A, B, C), and(F, G, H), true)),
coca(clause_fired(E)),
'CHR='(D, clause_and(A, B, C)).
%%% Callables for or / 3
'CHRlabel_with'(or(A, B, C), D, E) ?-
coca(try_clause(E, or(A, B, C), or(F, G, H), true)),
coca(clause_fired(E)),
'CHR='(D, clause_or(A, B, C)).
% NOTE(review): the or/3 labeling clause below is an exact duplicate of
% the one above - presumably a CHR-compiler artifact; confirm against
% the CHR source before deduplicating (it yields repeated solutions on
% backtracking).
'CHRlabel_with'(or(A, B, C), D, E) ?-
coca(try_clause(E, or(A, B, C), or(F, G, H), true)),
coca(clause_fired(E)),
'CHR='(D, clause_or(A, B, C)).
%%% Callables for neg / 2
'CHRlabel_with'(neg(A, B), C, D) ?-
coca(try_clause(D, neg(A, B), neg(E, F), true)),
coca(clause_fired(D)),
'CHR='(C, clause_neg(A, B)).
%%% Callables for imp / 2
'CHRlabel_with'(imp(A, B), C, D) ?-
coca(try_clause(D, imp(A, B), imp(E, F), true)),
coca(clause_fired(D)),
'CHR='(C, clause_imp(A, B)).
%%% Callables for card / 4
'CHRlabel_with'(card(A, B, C, D), E, F) ?-
coca(try_clause(F, card(A, B, C, D), card(G, H, I, J), true)),
coca(clause_fired(F)),
'CHR='(E, clause_card(A, B, C, D)).
%%% Prolog clauses for boolean / 1
% Labeling clauses: a boolean is either 0 or 1.
clause_boolean(0).
clause_boolean(1).
:- current_macro(clause_boolean / 1, _14192, _14193, _14194) -> true ; define_macro(clause_boolean / 1, tr_chr / 2, [write]).
% boolean(X): user entry point.  Allocates a constraint number and runs
% the compiled rule set for boolean/1.
boolean(A) :-
'CHRgen_num'(B),
coca(add_one_constraint(B, boolean(A))),
'CHRboolean_1'(boolean(A), C, D, B).
%%% Rules handling for boolean / 1
% First clause: succeed immediately if this occurrence was already
% killed ('CHRnonvar') or an identical constraint is already suspended.
'CHRboolean_1'(boolean(A), B, C, D) :-
(
'CHRnonvar'(B)
;
'CHRalready_in'('CHRboolean_1'(boolean(A), B, C, D)),
coca(already_in)
),
!.
% Ground simplifications: boolean(0) and boolean(1) are trivially true
% and the constraint is removed.
'CHRboolean_1'(boolean(0), A, B, C) ?-
coca(try_rule(C, boolean(0), anonymous("0"), boolean(0), replacement, true, true)),
!,
'CHRkill'(A),
coca(fired_rule(anonymous("0"))).
'CHRboolean_1'(boolean(1), A, B, C) ?-
coca(try_rule(C, boolean(1), anonymous("1"), boolean(1), replacement, true, true)),
!,
'CHRkill'(A),
coca(fired_rule(anonymous("1"))).
% No rule applies: fall through to the suspension chain.
'CHRboolean_1'(boolean(A), B, C, D) :-
'CHRboolean_1__60'(boolean(A), B, C, D).
:- set_flag('CHRboolean_1' / 4, leash, notrace).
:- current_macro('CHRboolean_1' / 4, _14790, _14791, _14792) -> true ; define_macro('CHRboolean_1' / 4, tr_chr / 2, [write]).
'CHRboolean_1__60'(A, B, C, D) :-
'CHRboolean_1__61'(A, B, C, D).
:- set_flag('CHRboolean_1__60' / 4, leash, notrace).
% Suspend the still-live constraint on its variable so it is re-run on
% instantiation.
'CHRboolean_1__61'(boolean(A), B, C, D) :-
(
'CHRvar'(B)
->
'CHRdelay'([B, boolean(A)], 'CHRboolean_1'(boolean(A), B, C, D))
;
true
).
:- set_flag('CHRboolean_1__61' / 4, leash, notrace).
%%% Prolog clauses for and / 3
% and(X, Y, Z): constraint Z = X AND Y.  Labeling clauses: either
% X = 0 (then Z = 0) or X = 1 (then Z = Y).
clause_and(0, A, 0).
clause_and(1, A, A).
:- current_macro(clause_and / 3, _15139, _15140, _15141) -> true ; define_macro(clause_and / 3, tr_chr / 2, [write]).
% User entry point: allocate a constraint number and run the rule set.
and(A, B, C) :-
'CHRgen_num'(D),
coca(add_one_constraint(D, and(A, B, C))),
'CHRand_3'(and(A, B, C), E, F, D).
%%% Rules handling for and / 3
% Generated CHR rule machinery - clause order and cuts are significant;
% do not reorder by hand.  The clauses below implement, in order:
% already-in check, ground simplifications (rules "2".."7"),
% interactions with neg/2 partners (rules "34".."39"), and duplicate
% merging against suspended and/3 constraints (rules "8"/"9").
'CHRand_3'(and(A, B, C), D, E, F) :-
(
'CHRnonvar'(D)
;
'CHRalready_in'('CHRand_3'(and(A, B, C), D, E, F)),
coca(already_in)
),
!.
'CHRand_3'(and(0, A, B), C, D, E) ?-
coca(try_rule(E, and(0, A, B), anonymous("2"), and(0, F, G), replacement, true, G = 0)),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("2"))),
B = 0.
'CHRand_3'(and(A, 0, B), C, D, E) ?-
coca(try_rule(E, and(A, 0, B), anonymous("3"), and(F, 0, G), replacement, true, G = 0)),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("3"))),
B = 0.
'CHRand_3'(and(1, A, B), C, D, E) ?-
coca(try_rule(E, and(1, A, B), anonymous("4"), and(1, F, G), replacement, true, G = F)),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("4"))),
B = A.
'CHRand_3'(and(A, 1, B), C, D, E) ?-
coca(try_rule(E, and(A, 1, B), anonymous("5"), and(F, 1, G), replacement, true, G = F)),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("5"))),
B = A.
'CHRand_3'(and(A, B, 1), C, D, E) ?-
coca(try_rule(E, and(A, B, 1), anonymous("6"), and(F, G, 1), replacement, true, (F = 1, G = 1))),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("6"))),
[A, B] = [1, 1].
'CHRand_3'(and(A, A, B), C, D, E) ?-
coca(try_rule(E, and(A, A, B), anonymous("7"), and(F, F, G), replacement, true, F = G)),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("7"))),
A = B.
% Two-headed rules: scan the delayed goals for a neg/2 partner sharing
% the indicated variables, then simplify both constraints.
'CHRand_3'(and(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(C, G),
'CHRand_3__63'(G, [C, A], [], H),
coca(try_double(F, and(A, B, C), H, neg(A, C), and(I, J, K), neg(I, K), replacement, true, (I = 1, J = 0, K = 0), anonymous("36"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("36"))),
[A, [B, C]] = [1, [0, 0]].
'CHRand_3'(and(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(C, G),
'CHRand_3__64'(G, [C, A], [], H),
coca(try_double(F, and(A, B, C), H, neg(C, A), and(I, J, K), neg(K, I), replacement, true, (I = 1, J = 0, K = 0), anonymous("37"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("37"))),
[A, [B, C]] = [1, [0, 0]].
'CHRand_3'(and(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(C, G),
'CHRand_3__65'(G, [C, B], [], H),
coca(try_double(F, and(A, B, C), H, neg(B, C), and(I, J, K), neg(J, K), replacement, true, (I = 0, J = 1, K = 0), anonymous("38"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("38"))),
[A, [B, C]] = [0, [1, 0]].
'CHRand_3'(and(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(C, G),
'CHRand_3__66'(G, [C, B], [], H),
coca(try_double(F, and(A, B, C), H, neg(C, B), and(I, J, K), neg(K, J), replacement, true, (I = 0, J = 1, K = 0), anonymous("39"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("39"))),
[A, [B, C]] = [0, [1, 0]].
% Duplicate and/3 with the same first two arguments: keep the older
% suspended copy and unify the result variables (rules "8"/"9").
'CHRand_3'(and(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(B, G),
'CHRand_3__67'(G, [B, A], [H], I),
coca(try_double(F, and(A, B, C), I, and(A, B, H), and(J, K, L), and(J, K, M), keep_second, true, M = L, anonymous("8"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("8"))),
H = C.
'CHRand_3'(and(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(B, G),
'CHRand_3__68'(G, [B, A], [H], I),
coca(try_double(F, and(A, B, C), I, and(B, A, H), and(J, K, L), and(K, J, M), keep_second, true, M = L, anonymous("9"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("9"))),
H = C.
% neg(A,B) alongside and(A,B,C) forces C = 0 (rules "34"/"35"); the
% neg/2 partner is kept alive.
'CHRand_3'(and(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(B, G),
'CHRand_3__69'(G, [B, A], [], H),
coca(try_double(F, and(A, B, C), H, neg(A, B), and(I, J, K), neg(I, J), keep_second, true, K = 0, anonymous("34"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("34"))),
C = 0.
'CHRand_3'(and(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(B, G),
'CHRand_3__70'(G, [B, A], [], H),
coca(try_double(F, and(A, B, C), H, neg(B, A), and(I, J, K), neg(J, I), keep_second, true, K = 0, anonymous("35"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("35"))),
C = 0.
% No rule applied: continue with the propagation/suspension chain.
'CHRand_3'(and(A, B, C), D, E, F) :-
'CHRand_3__62'(and(A, B, C), D, E, F).
% '__63'..'__70': partner scanners over the delayed-goal list.  The
% __63..__68 variants kill the matched partner (replacement / result
% merge); __69/__70 leave it alive (keep_second rules).
'CHRand_3__63'(['CHRneg_2'(neg(A, B), C, D, E)|F], [B, A], [], G) ?-
'CHRvar'(C),
'CHRkill'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRand_3__63'([A|B], C, D, E) :-
'CHRand_3__63'(B, C, D, E).
:- set_flag('CHRand_3__63' / 4, leash, notrace).
'CHRand_3__64'(['CHRneg_2'(neg(A, B), C, D, E)|F], [A, B], [], G) ?-
'CHRvar'(C),
'CHRkill'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRand_3__64'([A|B], C, D, E) :-
'CHRand_3__64'(B, C, D, E).
:- set_flag('CHRand_3__64' / 4, leash, notrace).
'CHRand_3__65'(['CHRneg_2'(neg(A, B), C, D, E)|F], [B, A], [], G) ?-
'CHRvar'(C),
'CHRkill'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRand_3__65'([A|B], C, D, E) :-
'CHRand_3__65'(B, C, D, E).
:- set_flag('CHRand_3__65' / 4, leash, notrace).
'CHRand_3__66'(['CHRneg_2'(neg(A, B), C, D, E)|F], [A, B], [], G) ?-
'CHRvar'(C),
'CHRkill'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRand_3__66'([A|B], C, D, E) :-
'CHRand_3__66'(B, C, D, E).
:- set_flag('CHRand_3__66' / 4, leash, notrace).
'CHRand_3__67'(['CHRand_3'(and(A, B, C), D, E, F)|G], [B, A], [H], I) ?-
'CHRvar'(D),
'CHR='([C], [H]),
'CHR='(F, I).
'CHRand_3__67'([A|B], C, D, E) :-
'CHRand_3__67'(B, C, D, E).
:- set_flag('CHRand_3__67' / 4, leash, notrace).
'CHRand_3__68'(['CHRand_3'(and(A, B, C), D, E, F)|G], [A, B], [H], I) ?-
'CHRvar'(D),
'CHR='([C], [H]),
'CHR='(F, I).
'CHRand_3__68'([A|B], C, D, E) :-
'CHRand_3__68'(B, C, D, E).
:- set_flag('CHRand_3__68' / 4, leash, notrace).
'CHRand_3__69'(['CHRneg_2'(neg(A, B), C, D, E)|F], [B, A], [], G) ?-
'CHRvar'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRand_3__69'([A|B], C, D, E) :-
'CHRand_3__69'(B, C, D, E).
:- set_flag('CHRand_3__69' / 4, leash, notrace).
'CHRand_3__70'(['CHRneg_2'(neg(A, B), C, D, E)|F], [A, B], [], G) ?-
'CHRvar'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRand_3__70'([A|B], C, D, E) :-
'CHRand_3__70'(B, C, D, E).
:- set_flag('CHRand_3__70' / 4, leash, notrace).
:- set_flag('CHRand_3' / 4, leash, notrace).
:- current_macro('CHRand_3' / 4, _20401, _20402, _20403) -> true ; define_macro('CHRand_3' / 4, tr_chr / 2, [write]).
% Continuation chain: apply the keep_first duplicate-merge rules
% against already-suspended and/3 constraints, then suspend this one
% on its variables via 'CHRdelay'.
'CHRand_3__62'(A, B, C, D) :-
'CHRand_3__71'(A, B, C, D).
:- set_flag('CHRand_3__62' / 4, leash, notrace).
'CHRand_3__71'(and(A, B, C), D, E, F) ?-
'CHRvar'(D),
!,
'CHRget_delayed_goals'(B, G),
'CHRand_3__71__72'(G, D, and(A, B, C), E, F).
'CHRand_3__71'(and(A, B, C), D, E, F) :-
'CHRand_3__71__73'(and(A, B, C), D, E, F).
:- set_flag('CHRand_3__71' / 4, leash, notrace).
'CHRand_3__71__72'(['CHRand_3'(and(A, B, C), D, E, F)|G], H, and(A, B, I), J, K) ?-
'CHRvar'(D),
coca(try_double(K, and(A, B, I), F, and(A, B, C), and(L, M, N), and(L, M, O), keep_first, true, N = O, anonymous("8"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("8"))),
'CHRand_3__71__72'(G, H, and(A, B, I), J, K),
I = C.
'CHRand_3__71__72'([A|B], C, D, E, F) :-
'CHRand_3__71__72'(B, C, D, E, F).
'CHRand_3__71__72'([], A, B, C, D) :-
'CHRand_3__71__73'(B, A, C, D).
:- set_flag('CHRand_3__71__72' / 5, leash, notrace).
'CHRand_3__71__73'(and(A, B, C), D, E, F) ?-
'CHRvar'(D),
!,
'CHRget_delayed_goals'(B, G),
'CHRand_3__71__73__74'(G, D, and(A, B, C), E, F).
'CHRand_3__71__73'(and(A, B, C), D, E, F) :-
'CHRand_3__71__73__75'(and(A, B, C), D, E, F).
:- set_flag('CHRand_3__71__73' / 4, leash, notrace).
'CHRand_3__71__73__74'(['CHRand_3'(and(A, B, C), D, E, F)|G], H, and(B, A, I), J, K) ?-
'CHRvar'(D),
coca(try_double(K, and(B, A, I), F, and(A, B, C), and(L, M, N), and(M, L, O), keep_first, true, N = O, anonymous("9"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("9"))),
'CHRand_3__71__73__74'(G, H, and(B, A, I), J, K),
I = C.
'CHRand_3__71__73__74'([A|B], C, D, E, F) :-
'CHRand_3__71__73__74'(B, C, D, E, F).
'CHRand_3__71__73__74'([], A, B, C, D) :-
'CHRand_3__71__73__75'(B, A, C, D).
:- set_flag('CHRand_3__71__73__74' / 5, leash, notrace).
'CHRand_3__71__73__75'(and(A, B, C), D, E, F) :-
(
'CHRvar'(D)
->
'CHRdelay'([D, and(A, B, C)], 'CHRand_3'(and(A, B, C), D, E, F))
;
true
).
:- set_flag('CHRand_3__71__73__75' / 4, leash, notrace).
%%% Prolog clauses for or / 3
% or(X, Y, Z): constraint Z = X OR Y.  Labeling clauses: either X = 0
% (then Z = Y) or X = 1 (then Z = 1).
clause_or(0, A, A).
clause_or(1, A, 1).
:- current_macro(clause_or / 3, _21932, _21933, _21934) -> true ; define_macro(clause_or / 3, tr_chr / 2, [write]).
% User entry point: allocate a constraint number and run the rule set.
or(A, B, C) :-
'CHRgen_num'(D),
coca(add_one_constraint(D, or(A, B, C))),
'CHRor_3'(or(A, B, C), E, F, D).
%%% Rules handling for or / 3
% Generated CHR rule machinery, dual to the and/3 family: already-in
% check, ground simplifications (rules "10".."15"), neg/2 interactions
% (rules "40".."45"), and duplicate merging (rules "16"/"17").  Clause
% order and cuts are significant; do not reorder by hand.
'CHRor_3'(or(A, B, C), D, E, F) :-
(
'CHRnonvar'(D)
;
'CHRalready_in'('CHRor_3'(or(A, B, C), D, E, F)),
coca(already_in)
),
!.
'CHRor_3'(or(0, A, B), C, D, E) ?-
coca(try_rule(E, or(0, A, B), anonymous("10"), or(0, F, G), replacement, true, G = F)),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("10"))),
B = A.
'CHRor_3'(or(A, 0, B), C, D, E) ?-
coca(try_rule(E, or(A, 0, B), anonymous("11"), or(F, 0, G), replacement, true, G = F)),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("11"))),
B = A.
'CHRor_3'(or(A, B, 0), C, D, E) ?-
coca(try_rule(E, or(A, B, 0), anonymous("12"), or(F, G, 0), replacement, true, (F = 0, G = 0))),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("12"))),
[A, B] = [0, 0].
'CHRor_3'(or(1, A, B), C, D, E) ?-
coca(try_rule(E, or(1, A, B), anonymous("13"), or(1, F, G), replacement, true, G = 1)),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("13"))),
B = 1.
'CHRor_3'(or(A, 1, B), C, D, E) ?-
coca(try_rule(E, or(A, 1, B), anonymous("14"), or(F, 1, G), replacement, true, G = 1)),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("14"))),
B = 1.
'CHRor_3'(or(A, A, B), C, D, E) ?-
coca(try_rule(E, or(A, A, B), anonymous("15"), or(F, F, G), replacement, true, F = G)),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("15"))),
A = B.
% neg/2 partner rules: a negation between an operand and the result
% pins the remaining values (rules "42".."45").
'CHRor_3'(or(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(C, G),
'CHRor_3__77'(G, [C, A], [], H),
coca(try_double(F, or(A, B, C), H, neg(A, C), or(I, J, K), neg(I, K), replacement, true, (I = 0, J = 1, K = 1), anonymous("42"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("42"))),
[A, [B, C]] = [0, [1, 1]].
'CHRor_3'(or(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(C, G),
'CHRor_3__78'(G, [C, A], [], H),
coca(try_double(F, or(A, B, C), H, neg(C, A), or(I, J, K), neg(K, I), replacement, true, (I = 0, J = 1, K = 1), anonymous("43"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("43"))),
[A, [B, C]] = [0, [1, 1]].
'CHRor_3'(or(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(C, G),
'CHRor_3__79'(G, [C, B], [], H),
coca(try_double(F, or(A, B, C), H, neg(B, C), or(I, J, K), neg(J, K), replacement, true, (I = 1, J = 0, K = 1), anonymous("44"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("44"))),
[A, [B, C]] = [1, [0, 1]].
'CHRor_3'(or(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(C, G),
'CHRor_3__80'(G, [C, B], [], H),
coca(try_double(F, or(A, B, C), H, neg(C, B), or(I, J, K), neg(K, J), replacement, true, (I = 1, J = 0, K = 1), anonymous("45"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("45"))),
[A, [B, C]] = [1, [0, 1]].
% Duplicate or/3 with the same operands: keep the suspended copy and
% unify the result variables (rules "16"/"17").
'CHRor_3'(or(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(B, G),
'CHRor_3__81'(G, [B, A], [H], I),
coca(try_double(F, or(A, B, C), I, or(A, B, H), or(J, K, L), or(J, K, M), keep_second, true, M = L, anonymous("16"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("16"))),
H = C.
'CHRor_3'(or(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(B, G),
'CHRor_3__82'(G, [B, A], [H], I),
coca(try_double(F, or(A, B, C), I, or(B, A, H), or(J, K, L), or(K, J, M), keep_second, true, M = L, anonymous("17"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("17"))),
H = C.
% neg(A,B) alongside or(A,B,C) forces C = 1 (rules "40"/"41"); the
% neg/2 partner is kept alive.
'CHRor_3'(or(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(B, G),
'CHRor_3__83'(G, [B, A], [], H),
coca(try_double(F, or(A, B, C), H, neg(A, B), or(I, J, K), neg(I, J), keep_second, true, K = 1, anonymous("40"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("40"))),
C = 1.
'CHRor_3'(or(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(B, G),
'CHRor_3__84'(G, [B, A], [], H),
coca(try_double(F, or(A, B, C), H, neg(B, A), or(I, J, K), neg(J, I), keep_second, true, K = 1, anonymous("41"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("41"))),
C = 1.
% No rule applied: continue with the propagation/suspension chain.
'CHRor_3'(or(A, B, C), D, E, F) :-
'CHRor_3__76'(or(A, B, C), D, E, F).
% '__77'..'__84': partner scanners over the delayed-goal list; the
% __77..__82 variants kill the matched partner, __83/__84 keep it.
'CHRor_3__77'(['CHRneg_2'(neg(A, B), C, D, E)|F], [B, A], [], G) ?-
'CHRvar'(C),
'CHRkill'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRor_3__77'([A|B], C, D, E) :-
'CHRor_3__77'(B, C, D, E).
:- set_flag('CHRor_3__77' / 4, leash, notrace).
'CHRor_3__78'(['CHRneg_2'(neg(A, B), C, D, E)|F], [A, B], [], G) ?-
'CHRvar'(C),
'CHRkill'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRor_3__78'([A|B], C, D, E) :-
'CHRor_3__78'(B, C, D, E).
:- set_flag('CHRor_3__78' / 4, leash, notrace).
'CHRor_3__79'(['CHRneg_2'(neg(A, B), C, D, E)|F], [B, A], [], G) ?-
'CHRvar'(C),
'CHRkill'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRor_3__79'([A|B], C, D, E) :-
'CHRor_3__79'(B, C, D, E).
:- set_flag('CHRor_3__79' / 4, leash, notrace).
'CHRor_3__80'(['CHRneg_2'(neg(A, B), C, D, E)|F], [A, B], [], G) ?-
'CHRvar'(C),
'CHRkill'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRor_3__80'([A|B], C, D, E) :-
'CHRor_3__80'(B, C, D, E).
:- set_flag('CHRor_3__80' / 4, leash, notrace).
'CHRor_3__81'(['CHRor_3'(or(A, B, C), D, E, F)|G], [B, A], [H], I) ?-
'CHRvar'(D),
'CHR='([C], [H]),
'CHR='(F, I).
'CHRor_3__81'([A|B], C, D, E) :-
'CHRor_3__81'(B, C, D, E).
:- set_flag('CHRor_3__81' / 4, leash, notrace).
'CHRor_3__82'(['CHRor_3'(or(A, B, C), D, E, F)|G], [A, B], [H], I) ?-
'CHRvar'(D),
'CHR='([C], [H]),
'CHR='(F, I).
'CHRor_3__82'([A|B], C, D, E) :-
'CHRor_3__82'(B, C, D, E).
:- set_flag('CHRor_3__82' / 4, leash, notrace).
'CHRor_3__83'(['CHRneg_2'(neg(A, B), C, D, E)|F], [B, A], [], G) ?-
'CHRvar'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRor_3__83'([A|B], C, D, E) :-
'CHRor_3__83'(B, C, D, E).
:- set_flag('CHRor_3__83' / 4, leash, notrace).
'CHRor_3__84'(['CHRneg_2'(neg(A, B), C, D, E)|F], [A, B], [], G) ?-
'CHRvar'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRor_3__84'([A|B], C, D, E) :-
'CHRor_3__84'(B, C, D, E).
:- set_flag('CHRor_3__84' / 4, leash, notrace).
:- set_flag('CHRor_3' / 4, leash, notrace).
:- current_macro('CHRor_3' / 4, _27194, _27195, _27196) -> true ; define_macro('CHRor_3' / 4, tr_chr / 2, [write]).
% Continuation chain: keep_first duplicate merging against suspended
% or/3 constraints, then suspension via 'CHRdelay'.
'CHRor_3__76'(A, B, C, D) :-
'CHRor_3__85'(A, B, C, D).
:- set_flag('CHRor_3__76' / 4, leash, notrace).
'CHRor_3__85'(or(A, B, C), D, E, F) ?-
'CHRvar'(D),
!,
'CHRget_delayed_goals'(B, G),
'CHRor_3__85__86'(G, D, or(A, B, C), E, F).
'CHRor_3__85'(or(A, B, C), D, E, F) :-
'CHRor_3__85__87'(or(A, B, C), D, E, F).
:- set_flag('CHRor_3__85' / 4, leash, notrace).
'CHRor_3__85__86'(['CHRor_3'(or(A, B, C), D, E, F)|G], H, or(A, B, I), J, K) ?-
'CHRvar'(D),
coca(try_double(K, or(A, B, I), F, or(A, B, C), or(L, M, N), or(L, M, O), keep_first, true, N = O, anonymous("16"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("16"))),
'CHRor_3__85__86'(G, H, or(A, B, I), J, K),
I = C.
'CHRor_3__85__86'([A|B], C, D, E, F) :-
'CHRor_3__85__86'(B, C, D, E, F).
'CHRor_3__85__86'([], A, B, C, D) :-
'CHRor_3__85__87'(B, A, C, D).
:- set_flag('CHRor_3__85__86' / 5, leash, notrace).
'CHRor_3__85__87'(or(A, B, C), D, E, F) ?-
'CHRvar'(D),
!,
'CHRget_delayed_goals'(B, G),
'CHRor_3__85__87__88'(G, D, or(A, B, C), E, F).
'CHRor_3__85__87'(or(A, B, C), D, E, F) :-
'CHRor_3__85__87__89'(or(A, B, C), D, E, F).
:- set_flag('CHRor_3__85__87' / 4, leash, notrace).
'CHRor_3__85__87__88'(['CHRor_3'(or(A, B, C), D, E, F)|G], H, or(B, A, I), J, K) ?-
'CHRvar'(D),
coca(try_double(K, or(B, A, I), F, or(A, B, C), or(L, M, N), or(M, L, O), keep_first, true, N = O, anonymous("17"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("17"))),
'CHRor_3__85__87__88'(G, H, or(B, A, I), J, K),
I = C.
'CHRor_3__85__87__88'([A|B], C, D, E, F) :-
'CHRor_3__85__87__88'(B, C, D, E, F).
'CHRor_3__85__87__88'([], A, B, C, D) :-
'CHRor_3__85__87__89'(B, A, C, D).
:- set_flag('CHRor_3__85__87__88' / 5, leash, notrace).
'CHRor_3__85__87__89'(or(A, B, C), D, E, F) :-
(
'CHRvar'(D)
->
'CHRdelay'([D, or(A, B, C)], 'CHRor_3'(or(A, B, C), D, E, F))
;
true
).
:- set_flag('CHRor_3__85__87__89' / 4, leash, notrace).
%%% Prolog clauses for exor / 3
% exor(X, Y, Z): constraint Z = X XOR Y.  Labeling clauses: X = 0 gives
% Z = Y; X = 1 posts neg(Y, Z).
clause_exor(0, A, A).
clause_exor(1, A, B) :-
neg(A, B).
:- current_macro(clause_exor / 3, _28726, _28727, _28728) -> true ; define_macro(clause_exor / 3, tr_chr / 2, [write]).
% User entry point: allocate a constraint number and run the rule set.
exor(A, B, C) :-
'CHRgen_num'(D),
coca(add_one_constraint(D, exor(A, B, C))),
'CHRexor_3'(exor(A, B, C), E, F, D).
%%% Rules handling for exor / 3
% Generated CHR rule machinery: already-in check, ground
% simplifications (rules "18".."26"), duplicate merging (rules
% "27"/"28") and neg/2 interactions (rules "46".."51").  Clause order
% and cuts are significant; do not reorder by hand.
'CHRexor_3'(exor(A, B, C), D, E, F) :-
(
'CHRnonvar'(D)
;
'CHRalready_in'('CHRexor_3'(exor(A, B, C), D, E, F)),
coca(already_in)
),
!.
'CHRexor_3'(exor(0, A, B), C, D, E) ?-
coca(try_rule(E, exor(0, A, B), anonymous("18"), exor(0, F, G), replacement, true, F = G)),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("18"))),
A = B.
'CHRexor_3'(exor(A, 0, B), C, D, E) ?-
coca(try_rule(E, exor(A, 0, B), anonymous("19"), exor(F, 0, G), replacement, true, F = G)),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("19"))),
A = B.
'CHRexor_3'(exor(A, B, 0), C, D, E) ?-
coca(try_rule(E, exor(A, B, 0), anonymous("20"), exor(F, G, 0), replacement, true, F = G)),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("20"))),
A = B.
% A 1 in any position reduces the constraint to a negation of the
% remaining pair (rules "21".."23").
'CHRexor_3'(exor(1, A, B), C, D, E) ?-
coca(try_rule(E, exor(1, A, B), anonymous("21"), exor(1, F, G), replacement, true, neg(F, G))),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("21"))),
neg(A, B).
'CHRexor_3'(exor(A, 1, B), C, D, E) ?-
coca(try_rule(E, exor(A, 1, B), anonymous("22"), exor(F, 1, G), replacement, true, neg(F, G))),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("22"))),
neg(A, B).
'CHRexor_3'(exor(A, B, 1), C, D, E) ?-
coca(try_rule(E, exor(A, B, 1), anonymous("23"), exor(F, G, 1), replacement, true, neg(F, G))),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("23"))),
neg(A, B).
% Equal arguments in any two positions pin the third (rules "24".."26").
'CHRexor_3'(exor(A, A, B), C, D, E) ?-
coca(try_rule(E, exor(A, A, B), anonymous("24"), exor(F, F, G), replacement, true, G = 0)),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("24"))),
B = 0.
'CHRexor_3'(exor(A, B, A), C, D, E) ?-
coca(try_rule(E, exor(A, B, A), anonymous("25"), exor(F, G, F), replacement, true, G = 0)),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("25"))),
B = 0.
'CHRexor_3'(exor(A, B, B), C, D, E) ?-
coca(try_rule(E, exor(A, B, B), anonymous("26"), exor(F, G, G), replacement, true, F = 0)),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("26"))),
A = 0.
% Duplicate exor/3 with the same operands: keep the suspended copy and
% unify the result variables (rules "27"/"28").
'CHRexor_3'(exor(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(B, G),
'CHRexor_3__91'(G, [B, A], [H], I),
coca(try_double(F, exor(A, B, C), I, exor(A, B, H), exor(J, K, L), exor(J, K, M), keep_second, true, M = L, anonymous("27"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("27"))),
H = C.
'CHRexor_3'(exor(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(B, G),
'CHRexor_3__92'(G, [B, A], [H], I),
coca(try_double(F, exor(A, B, C), I, exor(B, A, H), exor(J, K, L), exor(K, J, M), keep_second, true, M = L, anonymous("28"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("28"))),
H = C.
% neg/2 partner rules: a negation between two of the three positions
% pins the remaining variable to 1 (rules "46".."51").
'CHRexor_3'(exor(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(B, G),
'CHRexor_3__93'(G, [B, A], [], H),
coca(try_double(F, exor(A, B, C), H, neg(A, B), exor(I, J, K), neg(I, J), keep_second, true, K = 1, anonymous("46"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("46"))),
C = 1.
'CHRexor_3'(exor(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(B, G),
'CHRexor_3__94'(G, [B, A], [], H),
coca(try_double(F, exor(A, B, C), H, neg(B, A), exor(I, J, K), neg(J, I), keep_second, true, K = 1, anonymous("47"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("47"))),
C = 1.
'CHRexor_3'(exor(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(C, G),
'CHRexor_3__95'(G, [C, A], [], H),
coca(try_double(F, exor(A, B, C), H, neg(A, C), exor(I, J, K), neg(I, K), keep_second, true, J = 1, anonymous("48"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("48"))),
B = 1.
'CHRexor_3'(exor(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(C, G),
'CHRexor_3__96'(G, [C, A], [], H),
coca(try_double(F, exor(A, B, C), H, neg(C, A), exor(I, J, K), neg(K, I), keep_second, true, J = 1, anonymous("49"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("49"))),
B = 1.
'CHRexor_3'(exor(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(C, G),
'CHRexor_3__97'(G, [C, B], [], H),
coca(try_double(F, exor(A, B, C), H, neg(B, C), exor(I, J, K), neg(J, K), keep_second, true, I = 1, anonymous("50"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("50"))),
A = 1.
'CHRexor_3'(exor(A, B, C), D, E, F) ?-
'CHRget_delayed_goals'(C, G),
'CHRexor_3__98'(G, [C, B], [], H),
coca(try_double(F, exor(A, B, C), H, neg(C, B), exor(I, J, K), neg(K, J), keep_second, true, I = 1, anonymous("51"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("51"))),
A = 1.
% No rule applied: continue with the propagation/suspension chain.
'CHRexor_3'(exor(A, B, C), D, E, F) :-
'CHRexor_3__90'(exor(A, B, C), D, E, F).
% '__91'..'__98': partner scanners over the delayed-goal list; the
% neg/2 partners matched by __93..__98 are kept alive (keep_second).
'CHRexor_3__91'(['CHRexor_3'(exor(A, B, C), D, E, F)|G], [B, A], [H], I) ?-
'CHRvar'(D),
'CHR='([C], [H]),
'CHR='(F, I).
'CHRexor_3__91'([A|B], C, D, E) :-
'CHRexor_3__91'(B, C, D, E).
:- set_flag('CHRexor_3__91' / 4, leash, notrace).
'CHRexor_3__92'(['CHRexor_3'(exor(A, B, C), D, E, F)|G], [A, B], [H], I) ?-
'CHRvar'(D),
'CHR='([C], [H]),
'CHR='(F, I).
'CHRexor_3__92'([A|B], C, D, E) :-
'CHRexor_3__92'(B, C, D, E).
:- set_flag('CHRexor_3__92' / 4, leash, notrace).
'CHRexor_3__93'(['CHRneg_2'(neg(A, B), C, D, E)|F], [B, A], [], G) ?-
'CHRvar'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRexor_3__93'([A|B], C, D, E) :-
'CHRexor_3__93'(B, C, D, E).
:- set_flag('CHRexor_3__93' / 4, leash, notrace).
'CHRexor_3__94'(['CHRneg_2'(neg(A, B), C, D, E)|F], [A, B], [], G) ?-
'CHRvar'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRexor_3__94'([A|B], C, D, E) :-
'CHRexor_3__94'(B, C, D, E).
:- set_flag('CHRexor_3__94' / 4, leash, notrace).
'CHRexor_3__95'(['CHRneg_2'(neg(A, B), C, D, E)|F], [B, A], [], G) ?-
'CHRvar'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRexor_3__95'([A|B], C, D, E) :-
'CHRexor_3__95'(B, C, D, E).
:- set_flag('CHRexor_3__95' / 4, leash, notrace).
'CHRexor_3__96'(['CHRneg_2'(neg(A, B), C, D, E)|F], [A, B], [], G) ?-
'CHRvar'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRexor_3__96'([A|B], C, D, E) :-
'CHRexor_3__96'(B, C, D, E).
:- set_flag('CHRexor_3__96' / 4, leash, notrace).
'CHRexor_3__97'(['CHRneg_2'(neg(A, B), C, D, E)|F], [B, A], [], G) ?-
'CHRvar'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRexor_3__97'([A|B], C, D, E) :-
'CHRexor_3__97'(B, C, D, E).
:- set_flag('CHRexor_3__97' / 4, leash, notrace).
'CHRexor_3__98'(['CHRneg_2'(neg(A, B), C, D, E)|F], [A, B], [], G) ?-
'CHRvar'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRexor_3__98'([A|B], C, D, E) :-
'CHRexor_3__98'(B, C, D, E).
:- set_flag('CHRexor_3__98' / 4, leash, notrace).
:- set_flag('CHRexor_3' / 4, leash, notrace).
:- current_macro('CHRexor_3' / 4, _34041, _34042, _34043) -> true ; define_macro('CHRexor_3' / 4, tr_chr / 2, [write]).
% Continuation chain: keep_first duplicate merging against suspended
% exor/3 constraints, then suspension.  The '__102'/'__103'
% continuations referenced below follow the same pattern as the
% corresponding and/or predicates.
'CHRexor_3__90'(A, B, C, D) :-
'CHRexor_3__99'(A, B, C, D).
:- set_flag('CHRexor_3__90' / 4, leash, notrace).
'CHRexor_3__99'(exor(A, B, C), D, E, F) ?-
'CHRvar'(D),
!,
'CHRget_delayed_goals'(B, G),
'CHRexor_3__99__100'(G, D, exor(A, B, C), E, F).
'CHRexor_3__99'(exor(A, B, C), D, E, F) :-
'CHRexor_3__99__101'(exor(A, B, C), D, E, F).
:- set_flag('CHRexor_3__99' / 4, leash, notrace).
'CHRexor_3__99__100'(['CHRexor_3'(exor(A, B, C), D, E, F)|G], H, exor(A, B, I), J, K) ?-
'CHRvar'(D),
coca(try_double(K, exor(A, B, I), F, exor(A, B, C), exor(L, M, N), exor(L, M, O), keep_first, true, N = O, anonymous("27"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("27"))),
'CHRexor_3__99__100'(G, H, exor(A, B, I), J, K),
I = C.
'CHRexor_3__99__100'([A|B], C, D, E, F) :-
'CHRexor_3__99__100'(B, C, D, E, F).
'CHRexor_3__99__100'([], A, B, C, D) :-
'CHRexor_3__99__101'(B, A, C, D).
:- set_flag('CHRexor_3__99__100' / 5, leash, notrace).
'CHRexor_3__99__101'(exor(A, B, C), D, E, F) ?-
'CHRvar'(D),
!,
'CHRget_delayed_goals'(B, G),
'CHRexor_3__99__101__102'(G, D, exor(A, B, C), E, F).
'CHRexor_3__99__101'(exor(A, B, C), D, E, F) :-
'CHRexor_3__99__101__103'(exor(A, B, C), D, E, F).
:- set_flag('CHRexor_3__99__101' / 4, leash, notrace).
'CHRexor_3__99__101__102'(['CHRexor_3'(exor(A, B, C), D, E, F)|G], H, exor(B, A, I), J, K) ?-
'CHRvar'(D),
coca(try_double(K, exor(B, A, I), F, exor(A, B, C), exor(L, M, N), exor(M, L, O), keep_first, true, N = O, anonymous("28"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("28"))),
'CHRexor_3__99__101__102'(G, H, exor(B, A, I), J, K),
I = C.
'CHRexor_3__99__101__102'([A|B], C, D, E, F) :-
'CHRexor_3__99__101__102'(B, C, D, E, F).
'CHRexor_3__99__101__102'([], A, B, C, D) :-
'CHRexor_3__99__101__103'(B, A, C, D).
:- set_flag('CHRexor_3__99__101__102' / 5, leash, notrace).
'CHRexor_3__99__101__103'(exor(A, B, C), D, E, F) :-
(
'CHRvar'(D)
->
'CHRdelay'([D, exor(A, B, C)], 'CHRexor_3'(exor(A, B, C), D, E, F))
;
true
).
:- set_flag('CHRexor_3__99__101__103' / 4, leash, notrace).
%%% Prolog clauses for neg / 2
% Truth table used for labeling neg/2: negation has exactly two
% solutions, neg(0,1) and neg(1,0).
clause_neg(0, 1).
clause_neg(1, 0).
:- current_macro(clause_neg / 2, _35573, _35574, _35575) -> true ; define_macro(clause_neg / 2, tr_chr / 2, [write]).
% neg(A, B): post the boolean constraint "A is the negation of B".
% Allocates a fresh constraint identifier, records the addition for the
% CHR debugger (coca/1), then hands the constraint to the compiled
% rule-dispatch predicate 'CHRneg_2'/4 with fresh suspension slots D, E.
neg(A, B) :-
'CHRgen_num'(C),
coca(add_one_constraint(C, neg(A, B))),
'CHRneg_2'(neg(A, B), D, E, C).
%%% Rules handling for neg / 2
'CHRneg_2'(neg(A, B), C, D, E) :-
(
'CHRnonvar'(C)
;
'CHRalready_in'('CHRneg_2'(neg(A, B), C, D, E)),
coca(already_in)
),
!.
'CHRneg_2'(neg(0, A), B, C, D) ?-
coca(try_rule(D, neg(0, A), anonymous("29"), neg(0, E), replacement, true, E = 1)),
!,
'CHRkill'(B),
coca(fired_rule(anonymous("29"))),
A = 1.
'CHRneg_2'(neg(A, 0), B, C, D) ?-
coca(try_rule(D, neg(A, 0), anonymous("30"), neg(E, 0), replacement, true, E = 1)),
!,
'CHRkill'(B),
coca(fired_rule(anonymous("30"))),
A = 1.
'CHRneg_2'(neg(1, A), B, C, D) ?-
coca(try_rule(D, neg(1, A), anonymous("31"), neg(1, E), replacement, true, E = 0)),
!,
'CHRkill'(B),
coca(fired_rule(anonymous("31"))),
A = 0.
'CHRneg_2'(neg(A, 1), B, C, D) ?-
coca(try_rule(D, neg(A, 1), anonymous("32"), neg(E, 1), replacement, true, E = 0)),
!,
'CHRkill'(B),
coca(fired_rule(anonymous("32"))),
A = 0.
'CHRneg_2'(neg(A, A), B, C, D) ?-
coca(try_rule(D, neg(A, A), anonymous("33"), neg(E, E), replacement, true, fail)),
!,
'CHRkill'(B),
coca(fired_rule(anonymous("33"))),
fail.
'CHRneg_2'(neg(A, B), C, D, E) ?-
'CHRget_delayed_goals'(B, F),
'CHRneg_2__105'(F, [B, A], [G], H),
coca(try_double(E, neg(A, B), H, and(A, G, B), neg(I, J), and(I, K, J), replacement, true, (I = 1, K = 0, J = 0), anonymous("36"))),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("36"))),
[A, [G, B]] = [1, [0, 0]].
'CHRneg_2'(neg(A, B), C, D, E) ?-
'CHRget_delayed_goals'(B, F),
'CHRneg_2__106'(F, [B, A], [G], H),
coca(try_double(E, neg(A, B), H, and(B, G, A), neg(I, J), and(J, K, I), replacement, true, (J = 1, K = 0, I = 0), anonymous("37"))),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("37"))),
[B, [G, A]] = [1, [0, 0]].
'CHRneg_2'(neg(A, B), C, D, E) ?-
'CHRget_delayed_goals'(B, F),
'CHRneg_2__107'(F, [B, A], [G], H),
coca(try_double(E, neg(A, B), H, and(G, A, B), neg(I, J), and(K, I, J), replacement, true, (K = 0, I = 1, J = 0), anonymous("38"))),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("38"))),
[G, [A, B]] = [0, [1, 0]].
'CHRneg_2'(neg(A, B), C, D, E) ?-
'CHRget_delayed_goals'(B, F),
'CHRneg_2__108'(F, [B, A], [G], H),
coca(try_double(E, neg(A, B), H, and(G, B, A), neg(I, J), and(K, J, I), replacement, true, (K = 0, J = 1, I = 0), anonymous("39"))),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("39"))),
[G, [B, A]] = [0, [1, 0]].
'CHRneg_2'(neg(A, B), C, D, E) ?-
'CHRget_delayed_goals'(B, F),
'CHRneg_2__109'(F, [B, A], [G], H),
coca(try_double(E, neg(A, B), H, or(A, G, B), neg(I, J), or(I, K, J), replacement, true, (I = 0, K = 1, J = 1), anonymous("42"))),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("42"))),
[A, [G, B]] = [0, [1, 1]].
'CHRneg_2'(neg(A, B), C, D, E) ?-
'CHRget_delayed_goals'(B, F),
'CHRneg_2__110'(F, [B, A], [G], H),
coca(try_double(E, neg(A, B), H, or(B, G, A), neg(I, J), or(J, K, I), replacement, true, (J = 0, K = 1, I = 1), anonymous("43"))),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("43"))),
[B, [G, A]] = [0, [1, 1]].
'CHRneg_2'(neg(A, B), C, D, E) ?-
'CHRget_delayed_goals'(B, F),
'CHRneg_2__111'(F, [B, A], [G], H),
coca(try_double(E, neg(A, B), H, or(G, A, B), neg(I, J), or(K, I, J), replacement, true, (K = 1, I = 0, J = 1), anonymous("44"))),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("44"))),
[G, [A, B]] = [1, [0, 1]].
'CHRneg_2'(neg(A, B), C, D, E) ?-
'CHRget_delayed_goals'(B, F),
'CHRneg_2__112'(F, [B, A], [G], H),
coca(try_double(E, neg(A, B), H, or(G, B, A), neg(I, J), or(K, J, I), replacement, true, (K = 1, J = 0, I = 1), anonymous("45"))),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("45"))),
[G, [B, A]] = [1, [0, 1]].
'CHRneg_2'(neg(A, B), C, D, E) ?-
'CHRget_delayed_goals'(B, F),
'CHRneg_2__113'(F, [B, A], [], G),
coca(try_double(E, neg(A, B), G, imp(A, B), neg(H, I), imp(H, I), replacement, true, (H = 0, I = 1), anonymous("52"))),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("52"))),
[A, B] = [0, 1].
'CHRneg_2'(neg(A, B), C, D, E) ?-
'CHRget_delayed_goals'(B, F),
'CHRneg_2__114'(F, [B, A], [], G),
coca(try_double(E, neg(A, B), G, imp(B, A), neg(H, I), imp(I, H), replacement, true, (I = 0, H = 1), anonymous("53"))),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("53"))),
[B, A] = [0, 1].
'CHRneg_2'(neg(A, B), C, D, E) ?-
'CHRget_delayed_goals'(A, F),
'CHRneg_2__115'(F, [A], [G], H),
coca(try_double(E, neg(A, B), H, neg(G, A), neg(I, J), neg(K, I), keep_second, true, K = J, neg_neg)),
!,
'CHRkill'(C),
coca(fired_rule(neg_neg)),
G = B.
'CHRneg_2'(neg(A, B), C, D, E) ?-
'CHRget_delayed_goals'(B, F),
'CHRneg_2__116'(F, [B], [G], H),
coca(try_double(E, neg(A, B), H, neg(G, B), neg(I, J), neg(K, J), keep_second, true, K = I, neg_neg)),
!,
'CHRkill'(C),
coca(fired_rule(neg_neg)),
G = A.
'CHRneg_2'(neg(A, B), C, D, E) ?-
'CHRget_delayed_goals'(A, F),
'CHRneg_2__117'(F, [A], [G], H),
coca(try_double(E, neg(A, B), H, neg(A, G), neg(I, J), neg(I, K), keep_second, true, K = J, neg_neg)),
!,
'CHRkill'(C),
coca(fired_rule(neg_neg)),
G = B.
'CHRneg_2'(neg(A, B), C, D, E) :-
'CHRneg_2__104'(neg(A, B), C, D, E).
'CHRneg_2__105'(['CHRand_3'(and(A, B, C), D, E, F)|G], [C, A], [H], I) ?-
'CHRvar'(D),
'CHRkill'(D),
'CHR='([B], [H]),
'CHR='(F, I).
'CHRneg_2__105'([A|B], C, D, E) :-
'CHRneg_2__105'(B, C, D, E).
:- set_flag('CHRneg_2__105' / 4, leash, notrace).
'CHRneg_2__106'(['CHRand_3'(and(A, B, C), D, E, F)|G], [A, C], [H], I) ?-
'CHRvar'(D),
'CHRkill'(D),
'CHR='([B], [H]),
'CHR='(F, I).
'CHRneg_2__106'([A|B], C, D, E) :-
'CHRneg_2__106'(B, C, D, E).
:- set_flag('CHRneg_2__106' / 4, leash, notrace).
'CHRneg_2__107'(['CHRand_3'(and(A, B, C), D, E, F)|G], [C, B], [H], I) ?-
'CHRvar'(D),
'CHRkill'(D),
'CHR='([A], [H]),
'CHR='(F, I).
'CHRneg_2__107'([A|B], C, D, E) :-
'CHRneg_2__107'(B, C, D, E).
:- set_flag('CHRneg_2__107' / 4, leash, notrace).
'CHRneg_2__108'(['CHRand_3'(and(A, B, C), D, E, F)|G], [B, C], [H], I) ?-
'CHRvar'(D),
'CHRkill'(D),
'CHR='([A], [H]),
'CHR='(F, I).
'CHRneg_2__108'([A|B], C, D, E) :-
'CHRneg_2__108'(B, C, D, E).
:- set_flag('CHRneg_2__108' / 4, leash, notrace).
'CHRneg_2__109'(['CHRor_3'(or(A, B, C), D, E, F)|G], [C, A], [H], I) ?-
'CHRvar'(D),
'CHRkill'(D),
'CHR='([B], [H]),
'CHR='(F, I).
'CHRneg_2__109'([A|B], C, D, E) :-
'CHRneg_2__109'(B, C, D, E).
:- set_flag('CHRneg_2__109' / 4, leash, notrace).
'CHRneg_2__110'(['CHRor_3'(or(A, B, C), D, E, F)|G], [A, C], [H], I) ?-
'CHRvar'(D),
'CHRkill'(D),
'CHR='([B], [H]),
'CHR='(F, I).
'CHRneg_2__110'([A|B], C, D, E) :-
'CHRneg_2__110'(B, C, D, E).
:- set_flag('CHRneg_2__110' / 4, leash, notrace).
'CHRneg_2__111'(['CHRor_3'(or(A, B, C), D, E, F)|G], [C, B], [H], I) ?-
'CHRvar'(D),
'CHRkill'(D),
'CHR='([A], [H]),
'CHR='(F, I).
'CHRneg_2__111'([A|B], C, D, E) :-
'CHRneg_2__111'(B, C, D, E).
:- set_flag('CHRneg_2__111' / 4, leash, notrace).
'CHRneg_2__112'(['CHRor_3'(or(A, B, C), D, E, F)|G], [B, C], [H], I) ?-
'CHRvar'(D),
'CHRkill'(D),
'CHR='([A], [H]),
'CHR='(F, I).
'CHRneg_2__112'([A|B], C, D, E) :-
'CHRneg_2__112'(B, C, D, E).
:- set_flag('CHRneg_2__112' / 4, leash, notrace).
'CHRneg_2__113'(['CHRimp_2'(imp(A, B), C, D, E)|F], [B, A], [], G) ?-
'CHRvar'(C),
'CHRkill'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRneg_2__113'([A|B], C, D, E) :-
'CHRneg_2__113'(B, C, D, E).
:- set_flag('CHRneg_2__113' / 4, leash, notrace).
'CHRneg_2__114'(['CHRimp_2'(imp(A, B), C, D, E)|F], [A, B], [], G) ?-
'CHRvar'(C),
'CHRkill'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRneg_2__114'([A|B], C, D, E) :-
'CHRneg_2__114'(B, C, D, E).
:- set_flag('CHRneg_2__114' / 4, leash, notrace).
'CHRneg_2__115'(['CHRneg_2'(neg(A, B), C, D, E)|F], [B], [G], H) ?-
'CHRvar'(C),
'CHR='([A], [G]),
'CHR='(E, H).
'CHRneg_2__115'([A|B], C, D, E) :-
'CHRneg_2__115'(B, C, D, E).
:- set_flag('CHRneg_2__115' / 4, leash, notrace).
'CHRneg_2__116'(['CHRneg_2'(neg(A, B), C, D, E)|F], [B], [G], H) ?-
'CHRvar'(C),
'CHR='([A], [G]),
'CHR='(E, H).
'CHRneg_2__116'([A|B], C, D, E) :-
'CHRneg_2__116'(B, C, D, E).
:- set_flag('CHRneg_2__116' / 4, leash, notrace).
'CHRneg_2__117'(['CHRneg_2'(neg(A, B), C, D, E)|F], [A], [G], H) ?-
'CHRvar'(C),
'CHR='([B], [G]),
'CHR='(E, H).
'CHRneg_2__117'([A|B], C, D, E) :-
'CHRneg_2__117'(B, C, D, E).
:- set_flag('CHRneg_2__117' / 4, leash, notrace).
:- set_flag('CHRneg_2' / 4, leash, notrace).
:- current_macro('CHRneg_2' / 4, _43373, _43374, _43375) -> true ; define_macro('CHRneg_2' / 4, tr_chr / 2, [write]).
'CHRneg_2__104'(A, B, C, D) :-
'CHRneg_2__118'(A, B, C, D).
:- set_flag('CHRneg_2__104' / 4, leash, notrace).
'CHRneg_2__118'(neg(A, B), C, D, E) ?-
'CHRvar'(C),
!,
'CHRget_delayed_goals'(B, F),
'CHRneg_2__118__119'(F, C, neg(A, B), D, E).
'CHRneg_2__118'(neg(A, B), C, D, E) :-
'CHRneg_2__118__120'(neg(A, B), C, D, E).
:- set_flag('CHRneg_2__118' / 4, leash, notrace).
'CHRneg_2__118__119'(['CHRneg_2'(neg(A, B), C, D, E)|F], G, neg(H, A), I, J) ?-
'CHRvar'(C),
coca(try_double(J, neg(H, A), E, neg(A, B), neg(K, L), neg(L, M), keep_first, true, K = M, neg_neg)),
!,
'CHRkill'(C),
coca(fired_rule(neg_neg)),
'CHRneg_2__118__119'(F, G, neg(H, A), I, J),
H = B.
'CHRneg_2__118__119'([A|B], C, D, E, F) :-
'CHRneg_2__118__119'(B, C, D, E, F).
'CHRneg_2__118__119'([], A, B, C, D) :-
'CHRneg_2__118__120'(B, A, C, D).
:- set_flag('CHRneg_2__118__119' / 5, leash, notrace).
'CHRneg_2__118__120'(neg(A, B), C, D, E) ?-
'CHRvar'(C),
!,
'CHRget_delayed_goals'(B, F),
'CHRneg_2__118__120__121'(F, C, neg(A, B), D, E).
'CHRneg_2__118__120'(neg(A, B), C, D, E) :-
'CHRneg_2__118__120__122'(neg(A, B), C, D, E).
:- set_flag('CHRneg_2__118__120' / 4, leash, notrace).
'CHRneg_2__118__120__121'(['CHRneg_2'(neg(A, B), C, D, E)|F], G, neg(H, B), I, J) ?-
'CHRvar'(C),
coca(try_double(J, neg(H, B), E, neg(A, B), neg(K, L), neg(M, L), keep_first, true, K = M, neg_neg)),
!,
'CHRkill'(C),
coca(fired_rule(neg_neg)),
'CHRneg_2__118__120__121'(F, G, neg(H, B), I, J),
H = A.
'CHRneg_2__118__120__121'([A|B], C, D, E, F) :-
'CHRneg_2__118__120__121'(B, C, D, E, F).
'CHRneg_2__118__120__121'([], A, B, C, D) :-
'CHRneg_2__118__120__122'(B, A, C, D).
:- set_flag('CHRneg_2__118__120__121' / 5, leash, notrace).
'CHRneg_2__118__120__122'(neg(A, B), C, D, E) ?-
'CHRvar'(C),
!,
'CHRget_delayed_goals'(A, F),
'CHRneg_2__118__120__122__123'(F, C, neg(A, B), D, E).
'CHRneg_2__118__120__122'(neg(A, B), C, D, E) :-
'CHRneg_2__118__120__122__124'(neg(A, B), C, D, E).
:- set_flag('CHRneg_2__118__120__122' / 4, leash, notrace).
'CHRneg_2__118__120__122__123'(['CHRneg_2'(neg(A, B), C, D, E)|F], G, neg(A, H), I, J) ?-
'CHRvar'(C),
coca(try_double(J, neg(A, H), E, neg(A, B), neg(K, L), neg(K, M), keep_first, true, L = M, neg_neg)),
!,
'CHRkill'(C),
coca(fired_rule(neg_neg)),
'CHRneg_2__118__120__122__123'(F, G, neg(A, H), I, J),
H = B.
'CHRneg_2__118__120__122__123'([A|B], C, D, E, F) :-
'CHRneg_2__118__120__122__123'(B, C, D, E, F).
'CHRneg_2__118__120__122__123'([], A, B, C, D) :-
'CHRneg_2__118__120__122__124'(B, A, C, D).
:- set_flag('CHRneg_2__118__120__122__123' / 5, leash, notrace).
'CHRneg_2__118__120__122__124'(neg(A, B), C, D, E) ?-
'CHRvar'(C),
!,
'CHRget_delayed_goals'(B, F),
'CHRneg_2__118__120__122__124__125'(F, C, neg(A, B), D, E).
'CHRneg_2__118__120__122__124'(neg(A, B), C, D, E) :-
'CHRneg_2__118__120__122__124__126'(neg(A, B), C, D, E).
:- set_flag('CHRneg_2__118__120__122__124' / 4, leash, notrace).
'CHRneg_2__118__120__122__124__125'(['CHRand_3'(and(A, B, C), D, E, F)|G], H, neg(A, B), I, J) ?-
'CHRvar'(D),
coca(try_double(J, neg(A, B), F, and(A, B, C), neg(K, L), and(K, L, M), keep_first, true, M = 0, anonymous("34"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("34"))),
'CHRneg_2__118__120__122__124__125'(G, H, neg(A, B), I, J),
C = 0.
'CHRneg_2__118__120__122__124__125'([A|B], C, D, E, F) :-
'CHRneg_2__118__120__122__124__125'(B, C, D, E, F).
'CHRneg_2__118__120__122__124__125'([], A, B, C, D) :-
'CHRneg_2__118__120__122__124__126'(B, A, C, D).
:- set_flag('CHRneg_2__118__120__122__124__125' / 5, leash, notrace).
'CHRneg_2__118__120__122__124__126'(neg(A, B), C, D, E) ?-
'CHRvar'(C),
!,
'CHRget_delayed_goals'(B, F),
'CHRneg_2__118__120__122__124__126__127'(F, C, neg(A, B), D, E).
'CHRneg_2__118__120__122__124__126'(neg(A, B), C, D, E) :-
'CHRneg_2__118__120__122__124__126__128'(neg(A, B), C, D, E).
:- set_flag('CHRneg_2__118__120__122__124__126' / 4, leash, notrace).
'CHRneg_2__118__120__122__124__126__127'(['CHRand_3'(and(A, B, C), D, E, F)|G], H, neg(B, A), I, J) ?-
'CHRvar'(D),
coca(try_double(J, neg(B, A), F, and(A, B, C), neg(K, L), and(L, K, M), keep_first, true, M = 0, anonymous("35"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("35"))),
'CHRneg_2__118__120__122__124__126__127'(G, H, neg(B, A), I, J),
C = 0.
'CHRneg_2__118__120__122__124__126__127'([A|B], C, D, E, F) :-
'CHRneg_2__118__120__122__124__126__127'(B, C, D, E, F).
'CHRneg_2__118__120__122__124__126__127'([], A, B, C, D) :-
'CHRneg_2__118__120__122__124__126__128'(B, A, C, D).
:- set_flag('CHRneg_2__118__120__122__124__126__127' / 5, leash, notrace).
'CHRneg_2__118__120__122__124__126__128'(neg(A, B), C, D, E) ?-
'CHRvar'(C),
!,
'CHRget_delayed_goals'(B, F),
'CHRneg_2__118__120__122__124__126__128__129'(F, C, neg(A, B), D, E).
'CHRneg_2__118__120__122__124__126__128'(neg(A, B), C, D, E) :-
'CHRneg_2__118__120__122__124__126__128__130'(neg(A, B), C, D, E).
:- set_flag('CHRneg_2__118__120__122__124__126__128' / 4, leash, notrace).
'CHRneg_2__118__120__122__124__126__128__129'(['CHRor_3'(or(A, B, C), D, E, F)|G], H, neg(A, B), I, J) ?-
'CHRvar'(D),
coca(try_double(J, neg(A, B), F, or(A, B, C), neg(K, L), or(K, L, M), keep_first, true, M = 1, anonymous("40"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("40"))),
'CHRneg_2__118__120__122__124__126__128__129'(G, H, neg(A, B), I, J),
C = 1.
'CHRneg_2__118__120__122__124__126__128__129'([A|B], C, D, E, F) :-
'CHRneg_2__118__120__122__124__126__128__129'(B, C, D, E, F).
'CHRneg_2__118__120__122__124__126__128__129'([], A, B, C, D) :-
'CHRneg_2__118__120__122__124__126__128__130'(B, A, C, D).
:- set_flag('CHRneg_2__118__120__122__124__126__128__129' / 5, leash, notrace).
'CHRneg_2__118__120__122__124__126__128__130'(neg(A, B), C, D, E) ?-
'CHRvar'(C),
!,
'CHRget_delayed_goals'(B, F),
'CHRneg_2__118__120__122__124__126__128__130__131'(F, C, neg(A, B), D, E).
'CHRneg_2__118__120__122__124__126__128__130'(neg(A, B), C, D, E) :-
'CHRneg_2__118__120__122__124__126__128__130__132'(neg(A, B), C, D, E).
:- set_flag('CHRneg_2__118__120__122__124__126__128__130' / 4, leash, notrace).
'CHRneg_2__118__120__122__124__126__128__130__131'(['CHRor_3'(or(A, B, C), D, E, F)|G], H, neg(B, A), I, J) ?-
'CHRvar'(D),
coca(try_double(J, neg(B, A), F, or(A, B, C), neg(K, L), or(L, K, M), keep_first, true, M = 1, anonymous("41"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("41"))),
'CHRneg_2__118__120__122__124__126__128__130__131'(G, H, neg(B, A), I, J),
C = 1.
'CHRneg_2__118__120__122__124__126__128__130__131'([A|B], C, D, E, F) :-
'CHRneg_2__118__120__122__124__126__128__130__131'(B, C, D, E, F).
'CHRneg_2__118__120__122__124__126__128__130__131'([], A, B, C, D) :-
'CHRneg_2__118__120__122__124__126__128__130__132'(B, A, C, D).
:- set_flag('CHRneg_2__118__120__122__124__126__128__130__131' / 5, leash, notrace).
'CHRneg_2__118__120__122__124__126__128__130__132'(neg(A, B), C, D, E) ?-
'CHRvar'(C),
!,
'CHRget_delayed_goals'(B, F),
'CHRneg_2__118__120__122__124__126__128__130__132__133'(F, C, neg(A, B), D, E).
'CHRneg_2__118__120__122__124__126__128__130__132'(neg(A, B), C, D, E) :-
'CHRneg_2__118__120__122__124__126__128__130__132__134'(neg(A, B), C, D, E).
:- set_flag('CHRneg_2__118__120__122__124__126__128__130__132' / 4, leash, notrace).
'CHRneg_2__118__120__122__124__126__128__130__132__133'(['CHRexor_3'(exor(A, B, C), D, E, F)|G], H, neg(A, B), I, J) ?-
'CHRvar'(D),
coca(try_double(J, neg(A, B), F, exor(A, B, C), neg(K, L), exor(K, L, M), keep_first, true, M = 1, anonymous("46"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("46"))),
'CHRneg_2__118__120__122__124__126__128__130__132__133'(G, H, neg(A, B), I, J),
C = 1.
'CHRneg_2__118__120__122__124__126__128__130__132__133'([A|B], C, D, E, F) :-
'CHRneg_2__118__120__122__124__126__128__130__132__133'(B, C, D, E, F).
'CHRneg_2__118__120__122__124__126__128__130__132__133'([], A, B, C, D) :-
'CHRneg_2__118__120__122__124__126__128__130__132__134'(B, A, C, D).
:- set_flag('CHRneg_2__118__120__122__124__126__128__130__132__133' / 5, leash, notrace).
'CHRneg_2__118__120__122__124__126__128__130__132__134'(neg(A, B), C, D, E) ?-
'CHRvar'(C),
!,
'CHRget_delayed_goals'(B, F),
'CHRneg_2__118__120__122__124__126__128__130__132__134__135'(F, C, neg(A, B), D, E).
'CHRneg_2__118__120__122__124__126__128__130__132__134'(neg(A, B), C, D, E) :-
'CHRneg_2__118__120__122__124__126__128__130__132__134__136'(neg(A, B), C, D, E).
:- set_flag('CHRneg_2__118__120__122__124__126__128__130__132__134' / 4, leash, notrace).
'CHRneg_2__118__120__122__124__126__128__130__132__134__135'(['CHRexor_3'(exor(A, B, C), D, E, F)|G], H, neg(B, A), I, J) ?-
'CHRvar'(D),
coca(try_double(J, neg(B, A), F, exor(A, B, C), neg(K, L), exor(L, K, M), keep_first, true, M = 1, anonymous("47"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("47"))),
'CHRneg_2__118__120__122__124__126__128__130__132__134__135'(G, H, neg(B, A), I, J),
C = 1.
'CHRneg_2__118__120__122__124__126__128__130__132__134__135'([A|B], C, D, E, F) :-
'CHRneg_2__118__120__122__124__126__128__130__132__134__135'(B, C, D, E, F).
'CHRneg_2__118__120__122__124__126__128__130__132__134__135'([], A, B, C, D) :-
'CHRneg_2__118__120__122__124__126__128__130__132__134__136'(B, A, C, D).
:- set_flag('CHRneg_2__118__120__122__124__126__128__130__132__134__135' / 5, leash, notrace).
'CHRneg_2__118__120__122__124__126__128__130__132__134__136'(neg(A, B), C, D, E) ?-
'CHRvar'(C),
!,
'CHRget_delayed_goals'(B, F),
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__137'(F, C, neg(A, B), D, E).
'CHRneg_2__118__120__122__124__126__128__130__132__134__136'(neg(A, B), C, D, E) :-
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138'(neg(A, B), C, D, E).
:- set_flag('CHRneg_2__118__120__122__124__126__128__130__132__134__136' / 4, leash, notrace).
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__137'(['CHRexor_3'(exor(A, B, C), D, E, F)|G], H, neg(A, C), I, J) ?-
'CHRvar'(D),
coca(try_double(J, neg(A, C), F, exor(A, B, C), neg(K, L), exor(K, M, L), keep_first, true, M = 1, anonymous("48"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("48"))),
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__137'(G, H, neg(A, C), I, J),
B = 1.
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__137'([A|B], C, D, E, F) :-
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__137'(B, C, D, E, F).
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__137'([], A, B, C, D) :-
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138'(B, A, C, D).
:- set_flag('CHRneg_2__118__120__122__124__126__128__130__132__134__136__137' / 5, leash, notrace).
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138'(neg(A, B), C, D, E) ?-
'CHRvar'(C),
!,
'CHRget_delayed_goals'(B, F),
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__139'(F, C, neg(A, B), D, E).
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138'(neg(A, B), C, D, E) :-
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140'(neg(A, B), C, D, E).
:- set_flag('CHRneg_2__118__120__122__124__126__128__130__132__134__136__138' / 4, leash, notrace).
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__139'(['CHRexor_3'(exor(A, B, C), D, E, F)|G], H, neg(C, A), I, J) ?-
'CHRvar'(D),
coca(try_double(J, neg(C, A), F, exor(A, B, C), neg(K, L), exor(L, M, K), keep_first, true, M = 1, anonymous("49"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("49"))),
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__139'(G, H, neg(C, A), I, J),
B = 1.
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__139'([A|B], C, D, E, F) :-
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__139'(B, C, D, E, F).
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__139'([], A, B, C, D) :-
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140'(B, A, C, D).
:- set_flag('CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__139' / 5, leash, notrace).
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140'(neg(A, B), C, D, E) ?-
'CHRvar'(C),
!,
'CHRget_delayed_goals'(B, F),
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__141'(F, C, neg(A, B), D, E).
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140'(neg(A, B), C, D, E) :-
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__142'(neg(A, B), C, D, E).
:- set_flag('CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140' / 4, leash, notrace).
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__141'(['CHRexor_3'(exor(A, B, C), D, E, F)|G], H, neg(B, C), I, J) ?-
'CHRvar'(D),
coca(try_double(J, neg(B, C), F, exor(A, B, C), neg(K, L), exor(M, K, L), keep_first, true, M = 1, anonymous("50"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("50"))),
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__141'(G, H, neg(B, C), I, J),
A = 1.
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__141'([A|B], C, D, E, F) :-
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__141'(B, C, D, E, F).
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__141'([], A, B, C, D) :-
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__142'(B, A, C, D).
:- set_flag('CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__141' / 5, leash, notrace).
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__142'(neg(A, B), C, D, E) ?-
'CHRvar'(C),
!,
'CHRget_delayed_goals'(B, F),
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__142__143'(F, C, neg(A, B), D, E).
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__142'(neg(A, B), C, D, E) :-
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__142__144'(neg(A, B), C, D, E).
:- set_flag('CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__142' / 4, leash, notrace).
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__142__143'(['CHRexor_3'(exor(A, B, C), D, E, F)|G], H, neg(C, B), I, J) ?-
'CHRvar'(D),
coca(try_double(J, neg(C, B), F, exor(A, B, C), neg(K, L), exor(M, L, K), keep_first, true, M = 1, anonymous("51"))),
!,
'CHRkill'(D),
coca(fired_rule(anonymous("51"))),
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__142__143'(G, H, neg(C, B), I, J),
A = 1.
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__142__143'([A|B], C, D, E, F) :-
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__142__143'(B, C, D, E, F).
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__142__143'([], A, B, C, D) :-
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__142__144'(B, A, C, D).
:- set_flag('CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__142__143' / 5, leash, notrace).
'CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__142__144'(neg(A, B), C, D, E) :-
(
'CHRvar'(C)
->
'CHRdelay'([C, neg(A, B)], 'CHRneg_2'(neg(A, B), C, D, E))
;
true
).
:- set_flag('CHRneg_2__118__120__122__124__126__128__130__132__134__136__138__140__142__144' / 4, leash, notrace).
%%% Prolog clauses for imp / 2
% Clauses used for labeling imp/2: an implication with a false
% antecedent holds for any consequent, and imp(1, 1) is the only
% solution with a true antecedent.
% The anonymous variable replaces the original singleton 'A', which
% produced a singleton-variable warning; the semantics are identical
% (both are unbound and unconstrained).
clause_imp(0, _).
clause_imp(1, 1).
:- current_macro(clause_imp / 2, _51526, _51527, _51528) -> true ; define_macro(clause_imp / 2, tr_chr / 2, [write]).
% imp(A, B): post the boolean constraint "A implies B".
% Mirrors neg/2 above: generate a constraint number, trace it for the
% CHR debugger, then dispatch to the compiled handler 'CHRimp_2'/4.
imp(A, B) :-
'CHRgen_num'(C),
coca(add_one_constraint(C, imp(A, B))),
'CHRimp_2'(imp(A, B), D, E, C).
%%% Rules handling for imp / 2
'CHRimp_2'(imp(A, B), C, D, E) :-
(
'CHRnonvar'(C)
;
'CHRalready_in'('CHRimp_2'(imp(A, B), C, D, E)),
coca(already_in)
),
!.
'CHRimp_2'(imp(0, A), B, C, D) ?-
coca(try_rule(D, imp(0, A), anonymous("54"), imp(0, E), replacement, true, true)),
!,
'CHRkill'(B),
coca(fired_rule(anonymous("54"))).
'CHRimp_2'(imp(A, 0), B, C, D) ?-
coca(try_rule(D, imp(A, 0), anonymous("55"), imp(E, 0), replacement, true, E = 0)),
!,
'CHRkill'(B),
coca(fired_rule(anonymous("55"))),
A = 0.
'CHRimp_2'(imp(1, A), B, C, D) ?-
coca(try_rule(D, imp(1, A), anonymous("56"), imp(1, E), replacement, true, E = 1)),
!,
'CHRkill'(B),
coca(fired_rule(anonymous("56"))),
A = 1.
'CHRimp_2'(imp(A, 1), B, C, D) ?-
coca(try_rule(D, imp(A, 1), anonymous("57"), imp(E, 1), replacement, true, true)),
!,
'CHRkill'(B),
coca(fired_rule(anonymous("57"))).
'CHRimp_2'(imp(A, A), B, C, D) ?-
coca(try_rule(D, imp(A, A), anonymous("58"), imp(E, E), replacement, true, true)),
!,
'CHRkill'(B),
coca(fired_rule(anonymous("58"))).
'CHRimp_2'(imp(A, B), C, D, E) ?-
'CHRget_delayed_goals'(B, F),
'CHRimp_2__146'(F, [B, A], [], G),
coca(try_double(E, imp(A, B), G, neg(A, B), imp(H, I), neg(H, I), replacement, true, (H = 0, I = 1), anonymous("52"))),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("52"))),
[A, B] = [0, 1].
'CHRimp_2'(imp(A, B), C, D, E) ?-
'CHRget_delayed_goals'(B, F),
'CHRimp_2__147'(F, [B, A], [], G),
coca(try_double(E, imp(A, B), G, neg(B, A), imp(H, I), neg(I, H), replacement, true, (H = 0, I = 1), anonymous("53"))),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("53"))),
[A, B] = [0, 1].
'CHRimp_2'(imp(A, B), C, D, E) ?-
'CHRget_delayed_goals'(B, F),
'CHRimp_2__148'(F, [B, A], [], G),
coca(try_double(E, imp(A, B), G, imp(B, A), imp(H, I), imp(I, H), replacement, true, H = I, anonymous("59"))),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("59"))),
A = B.
'CHRimp_2'(imp(A, B), C, D, E) ?-
'CHRget_delayed_goals'(B, F),
'CHRimp_2__149'(F, [B, A], [], G),
coca(try_double(E, imp(A, B), G, imp(B, A), imp(H, I), imp(I, H), replacement, true, I = H, anonymous("59"))),
!,
'CHRkill'(C),
coca(fired_rule(anonymous("59"))),
B = A.
'CHRimp_2'(imp(A, B), C, D, E) :-
'CHRimp_2__145'(imp(A, B), C, D, E).
'CHRimp_2__146'(['CHRneg_2'(neg(A, B), C, D, E)|F], [B, A], [], G) ?-
'CHRvar'(C),
'CHRkill'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRimp_2__146'([A|B], C, D, E) :-
'CHRimp_2__146'(B, C, D, E).
:- set_flag('CHRimp_2__146' / 4, leash, notrace).
'CHRimp_2__147'(['CHRneg_2'(neg(A, B), C, D, E)|F], [A, B], [], G) ?-
'CHRvar'(C),
'CHRkill'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRimp_2__147'([A|B], C, D, E) :-
'CHRimp_2__147'(B, C, D, E).
:- set_flag('CHRimp_2__147' / 4, leash, notrace).
'CHRimp_2__148'(['CHRimp_2'(imp(A, B), C, D, E)|F], [A, B], [], G) ?-
'CHRvar'(C),
'CHRkill'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRimp_2__148'([A|B], C, D, E) :-
'CHRimp_2__148'(B, C, D, E).
:- set_flag('CHRimp_2__148' / 4, leash, notrace).
'CHRimp_2__149'(['CHRimp_2'(imp(A, B), C, D, E)|F], [A, B], [], G) ?-
'CHRvar'(C),
'CHRkill'(C),
'CHR='([], []),
'CHR='(E, G).
'CHRimp_2__149'([A|B], C, D, E) :-
'CHRimp_2__149'(B, C, D, E).
:- set_flag('CHRimp_2__149' / 4, leash, notrace).
:- set_flag('CHRimp_2' / 4, leash, notrace).
:- current_macro('CHRimp_2' / 4, _54488, _54489, _54490) -> true ; define_macro('CHRimp_2' / 4, tr_chr / 2, [write]).
'CHRimp_2__145'(A, B, C, D) :-
'CHRimp_2__150'(A, B, C, D).
:- set_flag('CHRimp_2__145' / 4, leash, notrace).
'CHRimp_2__150'(imp(A, B), C, D, E) :-
(
'CHRvar'(C)
->
'CHRdelay'([C, imp(A, B)], 'CHRimp_2'(imp(A, B), C, D, E))
;
true
).
:- set_flag('CHRimp_2__150' / 4, leash, notrace).
%%% Prolog clauses for card / 4
% Labeling clauses for the cardinality constraint
% card(Min, Max, Bools, Len): between Min and Max of the Len booleans
% in Bools are 1.
%
% Base case: the empty list satisfies the bounds iff Min =< 0 =< Max.
clause_card(A, B, [], 0) :-
A =< 0,
0 =< B.
% A leading 0 consumes one list element without using up any of the
% quota: only the remaining length is decremented.
% BUG FIX: the generated code called card(A, B, C) — arity 3 — but only
% card/4 exists, and the decremented length E was never passed, so
% labeling raised an existence error. Pass E as the fourth argument.
clause_card(A, B, [0|C], D) :-
E is D - 1,
card(A, B, C, E).
% A leading 1 decrements both bounds and the remaining length.
% BUG FIX: same arity-3 call repaired — the decremented length G is now
% passed to card/4.
clause_card(A, B, [1|C], D) :-
E is A - 1,
F is B - 1,
G is D - 1,
card(E, F, C, G).
:- current_macro(clause_card / 4, _54950, _54951, _54952) -> true ; define_macro(clause_card / 4, tr_chr / 2, [write]).
% card(Min, Max, Bools, Len): post the cardinality constraint that
% between Min and Max of the Len booleans in Bools are 1.
% Same entry-point shape as neg/2 and imp/2: number the constraint,
% trace it, dispatch to the compiled handler 'CHRcard_4'/4.
card(A, B, C, D) :-
'CHRgen_num'(E),
coca(add_one_constraint(E, card(A, B, C, D))),
'CHRcard_4'(card(A, B, C, D), F, G, E).
%%% Rules handling for card / 4
'CHRcard_4'(card(A, B, C, D), E, F, G) :-
(
'CHRnonvar'(E)
;
'CHRalready_in'('CHRcard_4'(card(A, B, C, D), E, F, G)),
coca(already_in)
),
!.
'CHRcard_4'(card(A, B, C, D), E, F, G) ?-
coca(try_rule(G, card(A, B, C, D), triv_sat, card(H, I, J, K), replacement, (H =< 0, K =< I), true)),
no_delayed_goals((A =< 0, D =< B)),
!,
'CHRkill'(E),
coca(fired_rule(triv_sat)).
'CHRcard_4'(card(A, B, C, A), D, E, F) ?-
coca(try_rule(F, card(A, B, C, A), pos_sat, card(G, H, I, G), replacement, true, set_to_ones(I))),
!,
'CHRkill'(D),
coca(fired_rule(pos_sat)),
set_to_ones(C).
'CHRcard_4'(card(A, 0, B, C), D, E, F) ?-
coca(try_rule(F, card(A, 0, B, C), neg_sat, card(G, 0, H, I), replacement, true, set_to_zeros(H))),
!,
'CHRkill'(D),
coca(fired_rule(neg_sat)),
set_to_zeros(B).
'CHRcard_4'(card(A, B, C, D), E, F, G) ?-
coca(try_rule(G, card(A, B, C, D), pos_red, card(H, I, J, K), replacement, (delete(L, J, M), L == 1), (N is H - 1, O is I - 1, P is K - 1, card(N, O, M, P)))),
no_delayed_goals((delete(Q, C, R), Q == 1)),
!,
'CHRkill'(E),
coca(fired_rule(pos_red)),
S is A - 1,
T is B - 1,
U is D - 1,
card(S, T, R, U).
'CHRcard_4'(card(A, B, C, D), E, F, G) ?-
coca(try_rule(G, card(A, B, C, D), neg_red, card(H, I, J, K), replacement, (delete(L, J, M), L == 0), (N is K - 1, card(H, I, M, N)))),
no_delayed_goals((delete(O, C, P), O == 0)),
!,
'CHRkill'(E),
coca(fired_rule(neg_red)),
Q is D - 1,
card(A, B, P, Q).
'CHRcard_4'(card(0, 1, [A, B], 2), C, D, E) ?-
coca(try_rule(E, card(0, 1, [A, B], 2), card2nand, card(0, 1, [F, G], 2), replacement, true, and(F, G, 0))),
!,
'CHRkill'(C),
coca(fired_rule(card2nand)),
and(A, B, 0).
'CHRcard_4'(card(1, 1, [A, B], 2), C, D, E) ?-
coca(try_rule(E, card(1, 1, [A, B], 2), card2neg, card(1, 1, [F, G], 2), replacement, true, neg(F, G))),
!,
'CHRkill'(C),
coca(fired_rule(card2neg)),
neg(A, B).
'CHRcard_4'(card(1, 2, [A, B], 2), C, D, E) ?-
coca(try_rule(E, card(1, 2, [A, B], 2), card2or, card(1, 2, [F, G], 2), replacement, true, or(F, G, 1))),
!,
'CHRkill'(C),
coca(fired_rule(card2or)),
or(A, B, 1).
'CHRcard_4'(card(A, B, C, D), E, F, G) :-
'CHRcard_4__151'(card(A, B, C, D), E, F, G).
:- set_flag('CHRcard_4' / 4, leash, notrace).
:- current_macro('CHRcard_4' / 4, _56673, _56674, _56675) -> true ; define_macro('CHRcard_4' / 4, tr_chr / 2, [write]).
'CHRcard_4__151'(A, B, C, D) :-
'CHRcard_4__152'(A, B, C, D).
:- set_flag('CHRcard_4__151' / 4, leash, notrace).
% Fallback when no card/4 rule fired: if the constraint is still alive
% ('CHRvar' on its suspension slot E), suspend it on its variables so it
% is re-activated when any of them becomes bound; otherwise it has
% already been killed and nothing remains to do.
'CHRcard_4__152'(card(A, B, C, D), E, F, G) :-
(
'CHRvar'(E)
->
'CHRdelay'([E, card(A, B, C, D)], 'CHRcard_4'(card(A, B, C, D), E, F, G))
;
true
).
:- set_flag('CHRcard_4__152' / 4, leash, notrace).
:- getval(variable_names_flag, Val), set_flag(variable_names, Val).
| kishoredbn/barrelfish | usr/eclipseclp/ecrc_solvers/chr/bool.pl | Perl | mit | 61,241 |
#!/usr/bin/perl
# $Id: pxref.perl,v 1.2 2013-10-11 19:24:18-07 - - $
#
# pxref - print, for each input file, every word it contains together
# with the count and list of line numbers on which the word appears.
use strict;
use warnings;
# Strip the directory part so diagnostics show just the program name.
$0 =~ s|^.*/||;
my $exit_status = 0;
# Exit with a nonzero status if any warning was issued during the run.
END {exit $exit_status}
# note(LIST): print a diagnostic to STDERR without touching $exit_status.
sub note(@) {print STDERR "@_"};
# Any warning marks the run as failed; any die warns (setting the failure
# flag via __WARN__) and then exits through the END block above.
$SIG{'__WARN__'} = sub {note @_; $exit_status = 1};
$SIG{'__DIE__'} = sub {warn @_; exit};
my $sep = ":" x 32;
# With no arguments, read standard input ("-" is magic for 2-arg open).
push @ARGV, "-" unless @ARGV;
for my $filename (@ARGV) {
my %xref;
# NOTE(review): 2-arg open is deliberate here — it is what makes the
# "-" pushed above open STDIN; converting to 3-arg open would lose
# that, so it must stay unless the "-" handling is reworked.
open my $file, "<$filename"
or warn "$0: $filename: $!\n" and next;
while (defined (my $line = <$file>)) {
# Repeatedly strip the leading non-word prefix plus the next word
# from $line; each successful match records the current input line
# number ($.) under that word.  Words may contain embedded
# - ' . : / characters (e.g. identifiers, paths, version strings).
push @{$xref{$1}}, $.
while $line =~ s|^.*?(\w+([-'.:/]\w+)*)||;
}
close $file;
# Per-file banner, then one line per word: word, occurrence count,
# and the list of line numbers, sorted lexically by word.
print "$sep\n$filename\n$sep\n";
printf "%s [%d] %s\n", $_, @{$xref{$_}} + 0,
join " ", @{$xref{$_}}
for sort keys %xref;
}
| joshuapena/cmps012b-wm-w15 | mackey/assignments/asg2j-jxref-lists/code/pxref.perl | Perl | mit | 748 |
package Paws::Signin::LoginResponse;
use Moose;
has URL => (is => 'ro', isa => 'Str', required => 1);
1;
### main pod documentation begin ###

=head1 NAME

Paws::Signin::LoginResponse - Response for method Login on Paws::Signin

=head1 DESCRIPTION

=head2 URL => Str

The URL returned by the Login call.

=head1 SEE ALSO

This class forms part of L<Paws>.

=head1 BUGS and CONTRIBUTIONS

The source code is located here: https://github.com/pplu/aws-sdk-perl

Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues

=cut
| ioanrogers/aws-sdk-perl | lib/Paws/Signin/LoginResponse.pm | Perl | apache-2.0 | 613 |
package Paws::Config::DescribeComplianceByResourceResponse;
use Moose;
has ComplianceByResources => (is => 'ro', isa => 'ArrayRef[Paws::Config::ComplianceByResource]');
has NextToken => (is => 'ro', isa => 'Str');
has _request_id => (is => 'ro', isa => 'Str');
### main pod documentation begin ###

=head1 NAME

Paws::Config::DescribeComplianceByResourceResponse

=head1 ATTRIBUTES

=head2 ComplianceByResources => ArrayRef[L<Paws::Config::ComplianceByResource>]

Indicates whether the specified AWS resource complies with all of the
AWS Config rules that evaluate it.

=head2 NextToken => Str

The string that you use in a subsequent request to get the next page of
results in a paginated response.

=head2 _request_id => Str

The request identifier returned by the service for this call.

=cut

1;
package Date::Manip::Lang::swedish;
# Copyright (c) 1996-2014 Sullivan Beck. All rights reserved.
# This program is free software; you can redistribute it and/or modify it
# under the same terms as Perl itself.
########################################################################
########################################################################
require 5.010000;
use strict;
use warnings;
use utf8;
our($VERSION);
$VERSION='6.48';
our($Language,@Encodings,$LangName,$YearAdded);
@Encodings = qw(utf-8 ISO-8859-15 perl);
$LangName = "Swedish";
$YearAdded = 1996;
$Language = {
ampm => [['FM'], ['EM']],
at => ['kl', 'kl.', 'klockan'],
day_abb => [
['Mån', 'Man'],
['Tis'],
['Ons'],
['Tor'],
['Fre'],
['Lör', 'Lor'],
['Sön', 'Son'],
],
day_char => [['M'], ['Ti'], ['O'], ['To'], ['F'], ['L'], ['S']],
day_name => [
['Måndag', 'Mandag'],
['Tisdag'],
['Onsdag'],
['Torsdag'],
['Fredag'],
['Lördag', 'Lordag'],
['Söndag', 'Sondag'],
],
each => ['varje'],
fields => [
['ar', 'år'],
['manader', 'månader', 'man', 'manad', 'mån', 'månad'],
['veckor', 'v', 'vecka'],
['dagar', 'd', 'dag'],
['timmar', 't', 'tim', 'timme'],
['minuter', 'm', 'min', 'minut'],
['sekunder', 's', 'sek', 'sekund'],
],
last => ['forra', 'förra', 'senaste'],
mode => [['exakt', 'ungefar', 'ungefär'], ['arbetsdag', 'arbetsdagar']],
month_abb => [
['Jan'],
['Feb'],
['Mar'],
['Apr'],
['Maj'],
['Jun'],
['Jul'],
['Aug'],
['Sep'],
['Okt'],
['Nov'],
['Dec'],
],
month_name => [
['Januari'],
['Februari'],
['Mars'],
['April'],
['Maj'],
['Juni'],
['Juli'],
['Augusti'],
['September'],
['Oktober'],
['November'],
['December'],
],
nextprev => [['nasta', 'nästa'], ['forra', 'förra']],
nth => [
['1:a', 'en', 'ett', 'forsta', 'första'],
['2:a', 'två', 'tva', 'andra'],
['3:e', 'tre', 'tredje'],
['4:e', 'fyra', 'fjarde', 'fjärde'],
['5:e', 'fem', 'femte'],
['6:e', 'sex', 'sjatte', 'sjätte'],
['7:e', 'sju', 'sjunde'],
['8:e', 'åtta', 'atta', 'attonde', 'åttonde'],
['9:e', 'nio', 'nionde'],
['10:e', 'tio', 'tionde'],
['11:e', 'elva', 'elfte'],
['12:e', 'tolv', 'tolfte'],
['13:e', 'tretton', 'trettonde'],
['14:e', 'fjorton', 'fjortonde'],
['15:e', 'femton', 'femtonde'],
['16:e', 'sexton', 'sextonde'],
['17:e', 'sjutton', 'sjuttonde'],
['18:e', 'arton', 'artonde'],
['19:e', 'nitton', 'nittonde'],
['20:e', 'tjugo', 'tjugonde'],
['21:a', 'tjugoen', 'tjugoett', 'tjugoforsta', 'tjugoförsta'],
['22:a', 'tjugotvå', 'tjugotva', 'tjugoandra'],
['23:e', 'tjugotre', 'tjugotredje'],
['24:e', 'tjugofyra', 'tjugofjarde', 'tjugofjärde'],
['25:e', 'tjugofem', 'tjugofemte'],
['26:e', 'tjugosex', 'tjugosjatte', 'tjugosjätte'],
['27:e', 'tjugosju', 'tjugosjunde'],
['28:e', 'tjugoåtta', 'tjugoatta', 'tjugoattonde', 'tjugoåttonde'],
['29:e', 'tjugonio', 'tjugonionde'],
['30:e', 'trettio', 'trettionde'],
['31:a', 'trettioen', 'trettioett', 'trettioforsta', 'trettioförsta'],
['32:a', 'trettiotvå', 'trettiotva', 'trettioandra'],
['33:e', 'trettiotre', 'trettiotredje'],
['34:e', 'trettiofyra', 'trettiofjarde', 'trettiofjärde'],
['35:e', 'trettiofem', 'trettiofemte'],
['36:e', 'trettiosex', 'trettiosjatte', 'trettiosjätte'],
['37:e', 'trettiosju', 'trettiosjunde'],
['38:e', 'trettioåtta', 'trettioatta', 'trettioattonde', 'trettioåttonde'],
['39:e', 'trettionio', 'trettionionde'],
['40:e', 'fyrtio', 'fyrtionde'],
['41:a', 'fyrtioen', 'fyrtioett', 'fyrtioforsta', 'fyrtioförsta'],
['42:a', 'fyrtiotvå', 'fyrtiotva', 'fyrtioandra'],
['43:e', 'fyrtiotre', 'fyrtiotredje'],
['44:e', 'fyrtiofyra', 'fyrtiofjarde', 'fyrtiofjärde'],
['45:e', 'fyrtiofem', 'fyrtiofemte'],
['46:e', 'fyrtiosex', 'fyrtiosjatte', 'fyrtiosjätte'],
['47:e', 'fyrtiosju', 'fyrtiosjunde'],
['48:e', 'fyrtioåtta', 'fyrtioatta', 'fyrtioattonde', 'fyrtioåttonde'],
['49:e', 'fyrtionio', 'fyrtionionde'],
['50:e', 'femtio', 'femtionde'],
['51:a', 'femtioen', 'femtioett', 'femtioforsta', 'femtioförsta'],
['52:a', 'femtiotvå', 'femtiotva', 'femtioandra'],
['53:e', 'femtiotre', 'femtiotredje'],
],
of => ['om'],
offset_date => {
'i dag' => '0:0:0:0:0:0:0',
'i gar' => '-0:0:0:1:0:0:0',
'i går' => '-0:0:0:1:0:0:0',
'i morgon' => '+0:0:0:1:0:0:0',
},
offset_time => { nu => '0:0:0:0:0:0:0' },
on => ['pa', 'på'],
sephm => ['\.'],
sepms => [':'],
times => {
'midnatt' => '00:00:00',
'mitt pa dagen' => '12:00:00',
'mitt på dagen' => '12:00:00',
},
when => [['sedan'], ['om', 'senare']],
};
1;
| nriley/Pester | Source/Manip/Lang/swedish.pm | Perl | bsd-2-clause | 4,869 |
package Contenticious::Content;
use Mojo::Base -base;
use Contenticious::Content::Node::Directory;
use File::Copy::Recursive 'dircopy';
use Carp;
# pages_dir is effectively required: the default dies if it is read
# before being set
has pages_dir => sub { croak 'no pages_dir given' };
# built lazily from pages_dir on first access (see build_root_node)
has root_node => sub { shift->build_root_node };
# root_node builder: wrap pages_dir in a Directory node marked as root
sub build_root_node {
my $self = shift;
# let there be root!
return Contenticious::Content::Node::Directory->new(
filename => $self->pages_dir,
is_root => 1,
);
}
# find a content node for a given path like foo/bar/baz
# (undef path is treated as the empty path, i.e. the root node)
sub find {
my $self = shift;
my $path = shift // '';
# split path and find content node
my @names = split m|/| => $path;
return $self->root_node->find(@names);
}
# execute a subroutine for all content nodes
# the given subroutine gets the node as a single argument
sub for_all_nodes {
my ($self, $sub) = @_;
_walk_tree($self->root_node, $sub);
}
# not a public method but a recursive utility function:
# depth-first, parent before children
sub _walk_tree {
my ($node, $sub) = @_;
# execute
$sub->($node);
# walk the tree if possible (duck typing)
if ($node->can('children')) {
_walk_tree($_, $sub) foreach @{$node->children};
}
}
# delete cached content
sub empty_cache {
my $self = shift;
# drop the memoized root_node; Mojo::Base re-runs the attribute
# default (build_root_node) on the next access, re-reading pages_dir
delete $self->{root_node};
}
1;
__END__
=head1 NAME
Contenticious::Content - content for Contenticious
=head1 SYNOPSIS
use Contenticious::Content;
my $content = Contenticious::Content->new(pages_dir => 'pages');
my $node = $content->find('foo/bar');
=head1 DESCRIPTION
Access a Contenticious content tree.
=head1 ATTRIBUTES
=head2 C<pages_dir>
The directory to read from
=head2 C<root_node>
The root of the generated content tree. Will be generated from C<pages_dir>.
=head1 METHODS
=head2 C<find>
my $node = $content->find('foo/bar');
Find a content node for a given path
=head2 C<for_all_nodes>
$content->for_all_nodes(sub {
my $node = shift;
do_something_with($node);
});
Execute a subroutine for all content nodes
=head2 C<empty_cache>
Delete cached content
=head1 SEE ALSO
L<Contenticious::Content::Node>, L<Contenticious>
| gitpan/Contenticious | lib/Contenticious/Content.pm | Perl | mit | 2,172 |
package DDG::Spice::AlternativeTo;
use DDG::Spice;
primary_example_queries "alternative to notepad";
secondary_example_queries "alternative to photoshop for mac", "free alternative to spotify for windows";
description "Find software alternatives";
name "AlternativeTo";
icon_url "/i/alternativeto.net.ico";
source "AlternativeTo";
code_url "https://github.com/duckduckgo/zeroclickinfo-spice/blob/master/lib/DDG/Spice/AlternativeTo.pm";
topics "everyday", "programming";
category "computing_tools";
attribution github => ['https://github.com/Getty','Torsten Raudssus'],
twitter => ['https://twitter.com/raudssus','Torsten Raudssus'];
# NOTE(review): the start trigger "opensource" (one word) cannot match
# the handler regex below, which only accepts "open source" with a
# space in the license group -- confirm whether single-word
# "opensource ..." queries are expected to produce results.
triggers start => "free","opensource","commercial";
triggers any => "alternative","alternatives","alternativeto";
# Rewrite the three return values (program/platform/license) into the
# upstream API call.
spice from => '([^/]+)/(.*?)/([^/]*)';
spice to => 'http://api.alternativeto.net/software/$1/?platform=$2&license=$3&count=12&callback={{callback}}&key={{ENV{DDG_SPICE_ALTERNATIVETO_APIKEY}}}';
# Map common query spellings to the slugs AlternativeTo actually uses.
my %alternatives = (
'google' => 'googlecom',
'photoshop' => 'adobe-photoshop',
'yahoo' => 'yahoo-search',
'bing' => 'bingcom',
'mac-os-x' => 'mac-os'
);
# Parse "[license] alternative(s) to|for PROGRAM [for PLATFORM]" out of
# the lower-cased query; returns (program, platform, license) for the
# spice rewrite above, or nothing when the query does not match.
handle query_lc => sub {
if (/^(?:(free|open\ssource|commercial))?\s*(?:alternative(?:s|)?\s*?(?:to|for)\s*?)(\b(?!for\b).*?\b)(?:\s*?for\s(.*))?$/) {
my $license = $1 || "";
my $prog = $2 || "";
my $platform = $3 || "";
# "open source" -> "opensource" for the API's license parameter
$license =~ s/\s+//;
# trim the program name and slugify spaces to hyphens
$prog =~ s/\s+$//g;
$prog =~ s/^\s+//g;
$prog =~ s/\s+/-/g;
$prog = $alternatives{$prog} if exists $alternatives{$prog};
if($platform) {
return $prog, $platform, $license;
}
# no platform given: query across all platforms
return $prog, "all", $license;
}
return;
};
1;
| timeanddate/zeroclickinfo-spice | lib/DDG/Spice/AlternativeTo.pm | Perl | apache-2.0 | 1,713 |
# !!!!!!! DO NOT EDIT THIS FILE !!!!!!!
# This file is machine-generated by lib/unicore/mktables from the Unicode
# database, Version 6.1.0. Any changes made here will be lost!
# !!!!!!! INTERNAL PERL USE ONLY !!!!!!!
# This file is for internal use by core Perl only. The format and even the
# name or existence of this file are subject to change without notice. Don't
# use it directly.
return <<'END';
0800 082D
0830 083E
END
| efortuna/AndroidSDKClone | ndk_experimental/prebuilt/linux-x86_64/lib/perl5/5.16.2/unicore/lib/Scx/Samr.pl | Perl | apache-2.0 | 443 |
package SmartSea::Schema::Result::Plan;
use strict;
use warnings;
use 5.010000;
use base qw/DBIx::Class::Core/;
use Scalar::Util 'blessed';
use SmartSea::Core qw(:all);
use SmartSea::HTML qw(:all);
# Column definitions for the plans table: id (PK), name, owner.
my @columns = (
id => {},
name => {data_type => 'text', html_size => 30, not_null => 1},
owner => {}
);
__PACKAGE__->table('plans');
__PACKAGE__->add_columns(@columns);
__PACKAGE__->set_primary_key('id');
__PACKAGE__->has_many(uses => 'SmartSea::Schema::Result::Use', 'plan');
__PACKAGE__->many_to_many(use_classes => 'uses', 'use_class');
__PACKAGE__->has_many(extras => 'SmartSea::Schema::Result::Plan2DatasetExtra', 'plan');
__PACKAGE__->many_to_many(extra_datasets => 'extras', 'dataset');
__PACKAGE__->has_many(zonings => 'SmartSea::Schema::Result::Zoning', 'plan');
# Describes this class's child relationships (uses, extra datasets,
# zonings).  The class_widget closures build drop-down selectors that
# offer only objects not yet linked to the plan.
sub relationship_hash {
return {
uses => {
source => 'Use',
ref_to_parent => 'plan',
class_column => 'use_class',
class_widget => sub {
my ($self, $children) = @_;
# collect use classes already attached to this plan...
my %has;
for my $obj (@$children) {
$has{$obj->use_class->id} = 1;
}
# ...and offer only the remaining ones
my @objs;
for my $obj ($self->{app}{schema}->resultset('UseClass')->all) {
next if $has{$obj->id};
push @objs, $obj;
}
return drop_down(name => 'use_class', objs => \@objs);
}
},
extra_datasets => {
name => 'Extra dataset',
source => 'Dataset',
link_objects => 'extras',
link_source => 'Plan2DatasetExtra',
ref_to_parent => 'plan',
ref_to_related => 'dataset',
stop_edit => 1,
class_column => 'dataset',
class_widget => sub {
my ($self, $children) = @_;
# exclude datasets already reachable through the plan's rules
# as well as those already linked as extras
my $has = $self->{row}->datasets($self);
for my $obj (@$children) {
$has->{$obj->id} = 1;
}
# only datasets with a path can be offered
my @objs;
for my $obj ($self->{app}{schema}->resultset('Dataset')->search({path => { '!=', undef }})) {
next if $has->{$obj->id};
push @objs, $obj;
}
return drop_down(name => 'dataset', objs => \@objs);
}
},
zonings => {
name => 'Zonings',
source => 'Zoning',
ref_to_parent => 'plan',
}
};
}
# Children of a plan are attached via simple links, so no dedicated
# child form is needed.
sub need_form_for_child {
my ($class, $child_source) = @_;
return 0; # Use and Dataset are simple links
}
# datasets referenced by this plan through rules:
# returns a hashref of dataset id => dataset row, gathered from the
# rules of every layer of every use of this plan
sub datasets {
my ($self, $args) = @_;
my %datasets;
for my $use_class ($self->use_classes) {
my $use = $args->{app}{schema}->
resultset('Use')->
single({plan => $self->id, use_class => $use_class->id});
for my $layer_class ($use->layer_classes) {
my $layer = $args->{app}{schema}->
resultset('Layer')->
single({use => $use->id, layer_class => $layer_class->id});
# rules are read with an empty cookie, i.e. the default rule set
for my $rule ($layer->rules({cookie => ''})) {
$datasets{$rule->dataset->id} = $rule->dataset if $rule->dataset;
}
}
}
return \%datasets;
}
# Serialize the plan into a plain hash: scalar columns, all uses
# (via Use->read), and the ids of extra datasets usable in rules.
sub read {
my $self = shift;
my @uses;
my %data;
for my $use ($self->uses(undef, {order_by => 'id'})) {
push @uses, $use->read;
}
for my $dataset ($self->extra_datasets) {
$data{$dataset->id} = 1 if $dataset->usable_in_rule;
}
return {
owner => $self->owner,
name => $self->name,
id => $self->id,
uses => \@uses,
data => \%data
};
}
1;
| ajolma/SmartSeaMSPTool | SmartSea/Schema/Result/Plan.pm | Perl | mit | 3,768 |
###########################################################################
#
# This file is auto-generated by the Perl DateTime Suite time locale
# generator (0.04). This code generator comes with the
# DateTime::Locale distribution in the tools/ directory, and is called
# generate_from_cldr.
#
# This file as generated from the CLDR XML locale data. See the
# LICENSE.cldr file included in this distribution for license details.
#
# This file was generated from the source file se_FI.xml.
# The source file version number was 1.19, generated on
# 2007/07/19 22:31:40.
#
# Do not edit this file directly.
#
###########################################################################
package DateTime::Locale::se_FI;
use strict;
BEGIN
{
if ( $] >= 5.006 )
{
require utf8; utf8->import;
}
}
use DateTime::Locale::se;
@DateTime::Locale::se_FI::ISA = qw(DateTime::Locale::se);
my @day_names = (
"måanta",
"däjsta",
"gaskevahkoe",
"dåarsta",
"bearjadahke",
"laavadahke",
"aejlege",
);
my @day_abbreviations = (
"2",
"3",
"4",
"5",
"6",
"7",
"1",
);
my @day_narrows = (
"M",
"D",
"G",
"D",
"B",
"L",
"S",
);
my @month_abbreviations = (
"ođđajage",
"guovva",
"njukča",
"cuoŋo",
"miesse",
"geasse",
"suoidne",
"borge",
"čakča",
"golggot",
"skábma",
"juovla",
);
# Accessor overrides exposing this locale's data tables; everything not
# overridden here is inherited from DateTime::Locale::se.
sub day_names { \@day_names }
sub day_abbreviations { \@day_abbreviations }
sub day_narrows { \@day_narrows }
sub month_abbreviations { \@month_abbreviations }
1;
| carlgao/lenga | images/lenny64-peon/usr/share/perl5/DateTime/Locale/se_FI.pm | Perl | mit | 1,531 |
#!/usr/bin/perl -c
use strict;
use warnings;
# JMAP::AOLDB - IMAP-backed JMAP store specialised for AOL accounts.
package JMAP::AOLDB;
use base qw(JMAP::ImapDB);
use DBI;
use Date::Parse;
use Data::UUID::LibUUID;
use OAuth2::Tiny;
use Encode;
use Encode::MIME::Header;
use Digest::SHA qw(sha1_hex);
use AnyEvent;
use AnyEvent::Socket;
use Data::Dumper;
use JMAP::Sync::Gmail;
# Construct via the generic IMAP DB and tag the account as AOL.
sub new {
my $class = shift;
my $Self = $class->SUPER::new(@_);
$Self->{is_aol} = 1;
return $Self;
}
# Exchange the stored refresh token (kept in the password column of the
# iserver row) for a fresh OAuth access token.
# Returns [imap_host, username, access_token].
sub access_token {
my $Self = shift;
$Self->begin();
my $server = $Self->dgetone('iserver', {}, 'imapHost,username,password');
$Self->commit();
# NOTE(review): this calls JMAP::Sync::AOL::O() but the only sync
# module loaded above is JMAP::Sync::Gmail -- confirm JMAP::Sync::AOL
# is loaded elsewhere, or whether the use line is a copy-paste slip.
my $O = JMAP::Sync::AOL::O();
my $data = $O->refresh($server->{password});
return [$server->{imapHost}, $server->{username}, $data->{access_token}];
}
1;
| jmapio/jmap-perl | JMAP/AOLDB.pm | Perl | mit | 737 |
package MIP::Recipes::Build::Bwa_prerequisites;
use 5.026;
use Carp;
use charnames qw{ :full :short };
use English qw{ -no_match_vars };
use File::Spec::Functions qw{ catdir catfile };
use open qw{ :encoding(UTF-8) :std };
use Params::Check qw{ allow check last_error };
use utf8;
use warnings;
use warnings qw{ FATAL utf8 };
## CPANM
use autodie qw{ :all };
use Readonly;
## MIPs lib/
use MIP::Constants qw{ $DOT $NEWLINE $UNDERSCORE };
BEGIN {
require Exporter;
use base qw{ Exporter };
# Functions and variables which can be optionally exported
our @EXPORT_OK = qw{ build_bwa_prerequisites build_bwa_mem2_prerequisites };
}
sub build_bwa_prerequisites {
## Function : Creates the Bwa prerequisites
## Returns :
## Arguments: $active_parameter_href => Active parameters for this analysis hash {REF}
## : $case_id => Family id
## : $file_info_href => File info hash {REF}
## : $human_genome_reference => Human genome reference
## : $job_id_href => Job id hash {REF}
## : $log => Log object
## : $parameter_build_suffixes_ref => The bwa reference associated file endings {REF}
## : $parameter_href => Parameter hash {REF}
## : $profile_base_command => Submission profile base command
## : $recipe_name => Program name
## : $sample_info_href => Info on samples and case hash {REF}
## : $temp_directory => Temporary directory
my ($arg_href) = @_;
## Flatten argument(s)
my $active_parameter_href;
my $file_info_href;
my $job_id_href;
my $log;
my $parameter_build_suffixes_ref;
my $parameter_href;
my $recipe_name;
my $sample_info_href;
## Default(s)
my $case_id;
my $human_genome_reference;
my $profile_base_command;
my $temp_directory;
## Params::Check template: validates and unpacks $arg_href
my $tmpl = {
active_parameter_href => {
default => {},
defined => 1,
required => 1,
store => \$active_parameter_href,
strict_type => 1,
},
case_id => {
default => $arg_href->{active_parameter_href}{case_id},
store => \$case_id,
strict_type => 1,
},
file_info_href => {
default => {},
defined => 1,
required => 1,
store => \$file_info_href,
strict_type => 1,
},
human_genome_reference => {
default => $arg_href->{active_parameter_href}{human_genome_reference},
store => \$human_genome_reference,
strict_type => 1,
},
job_id_href => {
default => {},
defined => 1,
required => 1,
store => \$job_id_href,
strict_type => 1,
},
log => {
defined => 1,
required => 1,
store => \$log,
},
parameter_build_suffixes_ref => {
default => [],
defined => 1,
required => 1,
store => \$parameter_build_suffixes_ref,
strict_type => 1,
},
parameter_href => {
default => {},
defined => 1,
required => 1,
store => \$parameter_href,
strict_type => 1,
},
profile_base_command => {
default => q{sbatch},
store => \$profile_base_command,
strict_type => 1,
},
recipe_name => {
defined => 1,
required => 1,
store => \$recipe_name,
strict_type => 1,
},
sample_info_href => {
default => {},
defined => 1,
required => 1,
store => \$sample_info_href,
strict_type => 1,
},
temp_directory => {
default => $arg_href->{active_parameter_href}{temp_directory},
store => \$temp_directory,
strict_type => 1,
},
};
check( $tmpl, $arg_href, 1 ) or croak q{Could not parse arguments!};
use MIP::Language::Shell qw{ check_exist_and_move_file };
use MIP::Processmanagement::Processes qw{ submit_recipe };
use MIP::Program::Bwa qw{ bwa_index };
use MIP::Recipe qw{ parse_recipe_prerequisites };
use MIP::Recipes::Build::Human_genome_prerequisites qw{ build_human_genome_prerequisites };
use MIP::Script::Setup_script qw{ setup_script };
## Constants
Readonly my $MAX_RANDOM_NUMBER => 100_00;
Readonly my $PROCESSING_TIME => 3;
## Unpack parameters; the prerequisites run under the bwa_mem recipe's
## mode/environment profile
my %recipe = parse_recipe_prerequisites(
{
active_parameter_href => $active_parameter_href,
parameter_href => $parameter_href,
recipe_name => q{bwa_mem},
}
);
## Filehandles
# Create anonymous filehandle
my $filehandle = IO::Handle->new();
## Generate a random integer between 0-10,000.
## The integer suffixes temporary file names so a concurrent build
## cannot clobber the final reference files
my $random_integer = int rand $MAX_RANDOM_NUMBER;
## Creates recipe directories (info & data & script), recipe script filenames and writes sbatch header
my ($recipe_file_path) = setup_script(
{
active_parameter_href => $active_parameter_href,
directory_id => $case_id,
filehandle => $filehandle,
job_id_href => $job_id_href,
process_time => $PROCESSING_TIME,
recipe_directory => $recipe_name,
recipe_name => $recipe_name,
source_environment_commands_ref => $recipe{load_env_ref},
}
);
## Make sure the genome fasta's own prerequisite files exist first
build_human_genome_prerequisites(
{
active_parameter_href => $active_parameter_href,
filehandle => $filehandle,
file_info_href => $file_info_href,
job_id_href => $job_id_href,
log => $log,
parameter_build_suffixes_ref =>
\@{ $file_info_href->{human_genome_reference_file_endings} },
parameter_href => $parameter_href,
recipe_name => $recipe_name,
random_integer => $random_integer,
sample_info_href => $sample_info_href,
}
);
if ( $parameter_href->{bwa_build_reference}{build_file} == 1 ) {
$log->warn( q{Will try to create required }
. $human_genome_reference
. q{ index files before executing }
. $recipe_name );
say {$filehandle} q{## Building BWA index};
## Get parameters
# build under a unique temporary prefix, moved into place below
my $prefix = $human_genome_reference . $UNDERSCORE . $random_integer;
bwa_index(
{
construction_algorithm => q{bwtsw},
filehandle => $filehandle,
prefix => $prefix,
reference_genome => $human_genome_reference,
}
);
say {$filehandle} $NEWLINE;
PREREQ_FILE:
foreach my $file ( @{$parameter_build_suffixes_ref} ) {
my $intended_file_path = $human_genome_reference . $file;
my $temporary_file_path =
$human_genome_reference . $UNDERSCORE . $random_integer . $file;
## Checks if a file exists and moves the file in place if file is lacking or has a size of 0 bytes.
check_exist_and_move_file(
{
filehandle => $filehandle,
intended_file_path => $intended_file_path,
temporary_file_path => $temporary_file_path,
}
);
}
## Ensure that this subroutine is only executed once
$parameter_href->{bwa_build_reference}{build_file} = 0;
}
close $filehandle or $log->logcroak(q{Could not close filehandle});
## Submit the generated script unless running in dry-run mode
if ( $recipe{mode} == 1 ) {
submit_recipe(
{
base_command => $profile_base_command,
dependency_method => q{island_to_samples},
case_id => $case_id,
job_id_href => $job_id_href,
log => $log,
job_id_chain => $recipe{job_id_chain},
recipe_file_path => $recipe_file_path,
sample_ids_ref => \@{ $active_parameter_href->{sample_ids} },
submission_profile => $active_parameter_href->{submission_profile},
}
);
}
return 1;
}
sub build_bwa_mem2_prerequisites {
## Function : Creates the Bwa mem 2 prerequisites
## Returns :
## Arguments: $active_parameter_href => Active parameters for this analysis hash {REF}
## : $case_id => Family id
## : $file_info_href => File info hash {REF}
## : $human_genome_reference => Human genome reference
## : $job_id_href => Job id hash {REF}
## : $log => Log object
## : $parameter_build_suffixes_ref => The bwa reference associated file endings {REF}
## : $parameter_href => Parameter hash {REF}
## : $profile_base_command => Submission profile base command
## : $recipe_name => Program name
## : $sample_info_href => Info on samples and case hash {REF}
## : $temp_directory => Temporary directory
my ($arg_href) = @_;
## Flatten argument(s)
my $active_parameter_href;
my $file_info_href;
my $job_id_href;
my $log;
my $parameter_build_suffixes_ref;
my $parameter_href;
my $recipe_name;
my $sample_info_href;
## Default(s)
my $case_id;
my $human_genome_reference;
my $profile_base_command;
my $temp_directory;
## Params::Check template: validates and unpacks $arg_href
my $tmpl = {
active_parameter_href => {
default => {},
defined => 1,
required => 1,
store => \$active_parameter_href,
strict_type => 1,
},
case_id => {
default => $arg_href->{active_parameter_href}{case_id},
store => \$case_id,
strict_type => 1,
},
file_info_href => {
default => {},
defined => 1,
required => 1,
store => \$file_info_href,
strict_type => 1,
},
human_genome_reference => {
default => $arg_href->{active_parameter_href}{human_genome_reference},
store => \$human_genome_reference,
strict_type => 1,
},
job_id_href => {
default => {},
defined => 1,
required => 1,
store => \$job_id_href,
strict_type => 1,
},
log => {
defined => 1,
required => 1,
store => \$log,
},
parameter_build_suffixes_ref => {
default => [],
defined => 1,
required => 1,
store => \$parameter_build_suffixes_ref,
strict_type => 1,
},
parameter_href => {
default => {},
defined => 1,
required => 1,
store => \$parameter_href,
strict_type => 1,
},
profile_base_command => {
default => q{sbatch},
store => \$profile_base_command,
strict_type => 1,
},
recipe_name => {
defined => 1,
required => 1,
store => \$recipe_name,
strict_type => 1,
},
sample_info_href => {
default => {},
defined => 1,
required => 1,
store => \$sample_info_href,
strict_type => 1,
},
temp_directory => {
default => $arg_href->{active_parameter_href}{temp_directory},
store => \$temp_directory,
strict_type => 1,
},
};
check( $tmpl, $arg_href, 1 ) or croak q{Could not parse arguments!};
use MIP::Language::Shell qw{ check_exist_and_move_file };
use MIP::Processmanagement::Processes qw{ submit_recipe };
use MIP::Program::Bwa qw{ bwa_mem2_index };
use MIP::Recipe qw{ parse_recipe_prerequisites };
use MIP::Recipes::Build::Human_genome_prerequisites qw{ build_human_genome_prerequisites };
use MIP::Script::Setup_script qw{ setup_script };
## Constants
Readonly my $MAX_RANDOM_NUMBER => 100_00;
Readonly my $MEMORY_ALLOCATION => 100;
Readonly my $PROCESSING_TIME => 3;
## Unpack parameters; the prerequisites run under the bwa_mem recipe's
## mode/environment profile
my %recipe = parse_recipe_prerequisites(
{
active_parameter_href => $active_parameter_href,
parameter_href => $parameter_href,
recipe_name => q{bwa_mem},
}
);
## Filehandles
# Create anonymous filehandle
my $filehandle = IO::Handle->new();
## Generate a random integer between 0-10,000.
## The integer suffixes temporary file names so a concurrent build
## cannot clobber the final reference files
my $random_integer = int rand $MAX_RANDOM_NUMBER;
## Creates recipe directories (info & data & script), recipe script filenames and writes sbatch header
my ($recipe_file_path) = setup_script(
{
active_parameter_href => $active_parameter_href,
directory_id => $case_id,
filehandle => $filehandle,
job_id_href => $job_id_href,
memory_allocation => $MEMORY_ALLOCATION,
process_time => $PROCESSING_TIME,
recipe_directory => $recipe_name,
recipe_name => $recipe_name,
source_environment_commands_ref => $recipe{load_env_ref},
}
);
## Make sure the genome fasta's own prerequisite files exist first
build_human_genome_prerequisites(
{
active_parameter_href => $active_parameter_href,
filehandle => $filehandle,
file_info_href => $file_info_href,
job_id_href => $job_id_href,
log => $log,
parameter_build_suffixes_ref =>
\@{ $file_info_href->{human_genome_reference_file_endings} },
parameter_href => $parameter_href,
recipe_name => $recipe_name,
random_integer => $random_integer,
sample_info_href => $sample_info_href,
}
);
if ( $parameter_href->{bwa_mem2_build_reference}{build_file} == 1 ) {
$log->warn( q{Will try to create required }
. $human_genome_reference
. q{ index files before executing }
. $recipe_name );
say {$filehandle} q{## Building BWA index};
## Get parameters
# build under a unique temporary prefix, moved into place below
my $prefix = $human_genome_reference . $UNDERSCORE . $random_integer;
bwa_mem2_index(
{
filehandle => $filehandle,
prefix => $prefix,
reference_genome => $human_genome_reference,
}
);
say {$filehandle} $NEWLINE;
PREREQ_FILE:
foreach my $file ( @{$parameter_build_suffixes_ref} ) {
my $intended_file_path = $human_genome_reference . $file;
my $temporary_file_path =
$human_genome_reference . $UNDERSCORE . $random_integer . $file;
## Checks if a file exists and moves the file in place if file is lacking or has a size of 0 bytes.
check_exist_and_move_file(
{
filehandle => $filehandle,
intended_file_path => $intended_file_path,
temporary_file_path => $temporary_file_path,
}
);
}
## Ensure that this subroutine is only executed once
$parameter_href->{bwa_mem2_build_reference}{build_file} = 0;
}
close $filehandle or $log->logcroak(q{Could not close filehandle});
## Submit the generated script unless running in dry-run mode
if ( $recipe{mode} == 1 ) {
submit_recipe(
{
base_command => $profile_base_command,
dependency_method => q{island_to_samples},
case_id => $case_id,
job_id_href => $job_id_href,
log => $log,
job_id_chain => $recipe{job_id_chain},
recipe_file_path => $recipe_file_path,
sample_ids_ref => \@{ $active_parameter_href->{sample_ids} },
submission_profile => $active_parameter_href->{submission_profile},
}
);
}
return 1;
}
| henrikstranneheim/MIP | lib/MIP/Recipes/Build/Bwa_prerequisites.pm | Perl | mit | 17,297 |
/***********************************************************************
name: calendar.pl
description: Knowledgebase about dates and times
Dates are represented as date(Y,M,D)
Times are represented as time(H,M,S)
author: Rebecca Jonson
***************************************************************************/
:-module( calendar, [consistent/2,inconsistent/2,ampm_disamb/3, day2date/2, dayhalf/2, hour/3, weekday/2, next_weekday/2, previous_weekday/2, today/1, tomorrow/1, now/1, day2nr/2, month2nr/2]).
:- use_module(library(system),[datime/1]).
:-ensure_loaded(semsort_agendatalk).
%returns todays date
day2date(today, Date):-
today(Date).
day2date(tomorrow, Date):-
tomorrow(Date).
day2date(aftertomorrow, Date):-
datime(datime(Y,M,D,_,_,_)),
succ_date(date(Y,M,D),Date1),
succ_date(Date1,Date).
day2date(yesterday, Date):-
yesterday(Date).
day2date(WeekDay, Date):-
sem_sort(WeekDay, weekday),
next_weekday(Date, WeekDay).
%%%next friday
day2date([next,WeekDay], Date):-
sem_sort(WeekDay, weekday),
succ_weekday(Date, WeekDay).
day2date(Day, date(Y,M,DayNr)):-
sem_sort(Day,day),
day2nr([Day], DayNr),
datime(datime(Y,M,_,_,_,_)).
day2date([Day,Month],date(Y,M,D)):-
sem_sort(Day,day),
sem_sort(Month, month),
day2nr([Day], D),
month2nr([Month], M),
datime(datime(Y,_,_,_,_,_)).
day2date([WD,Day,Month],date(Y,M,D)):-
sem_sort(Day,day),
sem_sort(Month,month),
sem_sort(WD,weekday),
day2nr([Day],D),
month2nr([Month],M),
datime(datime(Y,_,_,_,_,_)).
% consistent(+Date, ?Answer)
% Succeeds with Answer = yes when Date = date(Y,M,D) names an existing
% calendar day; fails otherwise.
% NOTE(review): the module header exports inconsistent/2, but the
% predicate defined here has arity 1 -- confirm the export list.
consistent(DATE,ANSWER):-
    (inconsistent(DATE),
    !,fail
    ;
    ANSWER=yes).
% inconsistent(+Date)
% Succeeds when Date names a day that does not exist.
inconsistent(date(_,4,31)).
inconsistent(date(_,6,31)).
inconsistent(date(_,9,31)).
inconsistent(date(_,11,31)).
inconsistent(date(_,2,30)).
inconsistent(date(_,2,31)).
% February 29 exists only in leap years.  The full Gregorian rule is
% used: divisible by 4, except century years not divisible by 400.
% (The original test used only "Y mod 4", wrongly treating e.g. 1900
% and 2100 as leap years.)  The two branches are mutually exclusive.
inconsistent(date(Y,2,29)):-
    (   Y mod 4 =\= 0
    ;   Y mod 100 =:= 0,
        Y mod 400 =\= 0
    ).
today(date(Y,M,D)):-
datime(datime(Y,M,D,_,_,_)).
%returns tomorrows date
tomorrow(Date):-
datime(datime(Y,M,D,_,_,_)),
succ_date(date(Y,M,D),Date).
%returns yesterdays date
yesterday(Date):-
datime(datime(Y,M,D,_,_,_)),
succ_date(Date,date(Y,M,D)).
%weekday(+Date,?Weekday)
%returns day of week for a date
%algorithm due to http://www.terra.es/personal2/grimmer/
%works for 2000-2099
weekday(date(Y,M,D),Weekday):-
ground(date(Y,M,D)),
X1 is Y-2000,
X2 is X1 // 4,
X3 is X1 + X2,
monthcode(X4,M),
X5 is X3 + X4 + D,
X6 is X5 mod 7,
(
0 is Y mod 4,
M < 3,
X7 is X6 -1
;
X7 is X6
),
weekdaycode(X7,Weekday).
%next_weekday(?Date,+Weekday)
%returns Date for next Weekday (or one week after that, or two weeks etc)
next_weekday(Date,Weekday):-
ground(Weekday),
today(Today),
succ_date_iterate(forward,Today,Weekday,Date1),
%return Date1 or iterate another week)
(
Date = Date1
;
succ_date(Date1,Date2),
succ_date_iterate(forward,Date2,Weekday,Date)
).
succ_weekday(Date,Weekday):-
ground(Weekday),
today(Today),
succ_date_iterate(forward,Today,Weekday,Date1),
%return Date1 or iterate another week)
%(
% Date = Date1
%;
succ_date(Date1,Date2),
succ_date_iterate(forward,Date2,Weekday,Date).
previous_weekday(Date,Weekday):-
ground(Weekday),
today(Today),
succ_date_iterate(back,Today,Weekday,Date1),
%return Date1 or iterate another week)
(
Date = Date1
;
succ_date(Date2,Date1),
succ_date_iterate(back,Date2,Weekday,Date)
).
%succ_date_iterate(+Direction,+StartDate,+Weekday,?Date)
%Date is date of next (or previous) Weekday relative to StartDate
succ_date_iterate(_Direction,Date,Weekday,Date):-
weekday(Date,Weekday),!.
succ_date_iterate(forward,StartDate,Weekday,Date):-
succ_date(StartDate,NewStartDate),
succ_date_iterate(forward,NewStartDate,Weekday,Date).
succ_date_iterate(back,StartDate,Weekday,Date):-
succ_date(NewStartDate,StartDate),
succ_date_iterate(back,NewStartDate,Weekday,Date).
%returns the current time
now(time(H,M,S)):-
datime(datime(_,_,_,H,M,S)).
% succ_date(?Date0, ?Date)
% Date is the calendar day after Date0.  At least one argument must be
% (partially) instantiated; with both arguments unbound the call fails.
succ_date(X,Y):-
    var(X),
    var(Y),
    !,fail.
%last day of month
%january
succ_date(date(Y,1,31),date(Y,2,1)):-!.
%february: the 28th is the last day only in non-leap years.
% Full Gregorian leap rule (the original checked only "Y mod 4" and so
% wrongly gave Feb 29 as the successor of Feb 28 in e.g. 1900/2100).
succ_date(date(Y,2,28),date(Y,3,1)):-
    (   Y mod 4 =\= 0
    ;   Y mod 100 =:= 0,
        Y mod 400 =\= 0
    ),
    !.
succ_date(date(Y,2,29),date(Y,3,1)):-
    0 is Y mod 4,!.
%march
succ_date(date(Y,3,31),date(Y,4,1)):-!.
%april
succ_date(date(Y,4,30),date(Y,5,1)):-!.
%may
succ_date(date(Y,5,31),date(Y,6,1)):-!.
%june
succ_date(date(Y,6,30),date(Y,7,1)):-!.
%july
succ_date(date(Y,7,31),date(Y,8,1)):-!.
%august
succ_date(date(Y,8,31),date(Y,9,1)):-!.
%september
succ_date(date(Y,9,30),date(Y,10,1)):-!.
%october
succ_date(date(Y,10,31),date(Y,11,1)):-!.
%november
succ_date(date(Y,11,30),date(Y,12,1)):-!.
% december: roll over the year; works in both directions (Y0 or Y bound)
succ_date(date(Y0,12,31),date(Y,1,1)):-!,
    (
    var(Y0),
    Y0 is Y -1;
    Y is Y0 + 1
    ).
%rest: any mid-month day; same month, next (or previous) day
succ_date(date(Y,M,D0),date(Y,M,D)):-
    (
    var(D0),
    D0 is D-1;
    D is D0 + 1
    ).
%weekdaycode(?Code,?Weekday) - maps the numeric code produced by
%weekday/2 (1..7) to a weekday atom.
weekdaycode(1,monday).
weekdaycode(2,tuesday).
weekdaycode(3,wednesday).
weekdaycode(4,thursday).
weekdaycode(5,friday).
weekdaycode(6,saturday).
weekdaycode(7,sunday).
%Codes below 1 (possible after the leap-year correction in weekday/2)
%are normalised by wrapping around the week.
weekdaycode(N,Weekday):-
N<1,
M is N+7,
weekdaycode(M,Weekday).
%monthcode(Code,Month)
%Per-month offsets used by weekday/2 (Grimmer's algorithm);
%first argument is the offset, second the month number 1..12.
monthcode(6,1).
monthcode(2,2).
monthcode(2,3).
monthcode(5,4).
monthcode(0,5).
monthcode(3,6).
monthcode(5,7).
monthcode(1,8).
monthcode(4,9).
monthcode(6,10).
monthcode(2,11).
monthcode(4,12).
%hours
%hour(?Name, ?TwelveHour, ?TwentyFourHour) - maps a spoken hour name
%to its 12-hour value and the corresponding afternoon/evening
%24-hour value (used by ampm_disamb/3 to shift am times to pm).
hour(one, 1, 13).
hour(two, 2, 14).
hour(three, 3, 15).
hour(four, 4, 16).
hour(five, 5, 17).
hour(six, 6, 18).
hour(seven, 7, 19).
hour(eight, 8, 20).
hour(nine, 9, 21).
hour(ten, 10, 22).
hour(eleven, 11, 23).
hour(twelve, 12, 24).
%day2nr(?Words,?DayNumber) - maps an ordinal word list to a day of
%month (1..31).
%The original table misspelt two ordinals ("eight" for "eighth",
%"twentyeigth" for "twentyeighth"); the correctly spelt variants are
%added below while the original atoms are kept for backward
%compatibility with any tokeniser that emits them.
day2nr([first],01).
day2nr([second],02).
day2nr([third],03).
day2nr([fourth],04).
day2nr([fifth],05).
day2nr([sixth],06).
day2nr([seventh],07).
day2nr([eight],08).
day2nr([eighth],08).
day2nr([ninth], 09).
day2nr([tenth],10).
day2nr([eleventh],11).
day2nr([twelfth],12).
day2nr([thirteenth], 13).
day2nr([fourteenth],14).
day2nr([fifteenth], 15).
day2nr([sixteenth],16).
day2nr([seventeenth], 17).
day2nr([eighteenth],18).
day2nr([nineteenth], 19).
day2nr([twentieth], 20).
day2nr([twentyfirst], 21).
day2nr([twentysecond],22).
day2nr([twentythird], 23).
day2nr([twentyfourth],24).
day2nr([twentyfifth], 25).
day2nr([twentysixth],26).
day2nr([twentyseventh], 27).
day2nr([twentyeigth],28).
day2nr([twentyeighth],28).
day2nr([twentyninth], 29).
day2nr([thirtieth],30).
day2nr([thirtyfirst], 31).
%month2nr(?Words, ?MonthNumber) - maps a month-name word list to its
%month number 1..12. (Facts reformatted with uniform spacing; content
%is unchanged.)
month2nr([january],    1).
month2nr([february],   2).
month2nr([march],      3).
month2nr([april],      4).
month2nr([may],        5).
month2nr([june],       6).
month2nr([july],       7).
month2nr([august],     8).
month2nr([september],  9).
month2nr([october],   10).
month2nr([november],  11).
month2nr([december],  12).
%ampm_disamb(OrigTime, AMPM, CorrectTime)
%Times are atoms of 3 or 4 digit characters, e.g. '930' or '0930'
%(NOTE(review): inferred from the name/2 decompositions below -
%confirm against the tokeniser that produces them).
%If the time already lies in the requested half of the day it is
%returned unchanged; otherwise a pm request shifts the hour via
%hour/3 (e.g. '0930' -> '2130').
ampm_disamb(StartTime, am, StartTime):-
dayhalf(StartTime, am).
ampm_disamb(StartTime,pm, StartTime):-
dayhalf(StartTime, pm).
%4-digit form: split off the two hour digits, map to the pm hour.
ampm_disamb(StartTime, pm, SolvedTime):-
name(StartTime, [H1, H2, M1, M2]),
name(StartHour, [H1,H2]),
hour(_, StartHour, PMHour),
name(PMHour, [PMH1, PMH2]),
name(SolvedTime, [PMH1, PMH2, M1, M2]).
%3-digit form: single hour digit.
ampm_disamb(StartTime, pm, SolvedTime):-
name(StartTime, [H1, M1, M2]),
name(StartHour, [H1]),
hour(_, StartHour, PMHour),
name(PMHour, [PMH1, PMH2]),
name(SolvedTime, [PMH1, PMH2, M1, M2]).
%dayhalf(Time, AMPM) checks what dayhalf a time belongs to
%NOTE(review): hours 00..12 are classified am and only 13..23 pm, so
%12:xx (noon hour) counts as am here - confirm this is intended.
dayhalf(T, pm):-
sem_sort(T, time),
name(T, [T1, T2,T3,T4]),
name( Hour, [T1,T2]),
Hour > 12,
Hour =< 23.
dayhalf(T, am):-
sem_sort(T, time),
name(T, [T1, T2, T3, T4]),
name(Hour, [T1,T2]),
Hour >= 0,
Hour =< 12.
%3-digit times (single hour digit 0..9) are always am.
dayhalf(T,am):-
sem_sort(T,time),
name(T, [T1,T2,T3]),
name(Hour, [T1]),
Hour >=0,
Hour =<12.
%%% 1,2,3,4,5 o'oclock is always considered afternoon
%timeknowledge(+Time, ?AMPM) - domain heuristics for disambiguating
%bare clock times: single-digit hours 1-5 default to pm, two-digit
%hours 10-12 default to am.
timeknowledge(T,pm):-
sem_sort(T,time),
name(T, [T1,T2,T3]),
name(Hour, [T1]),
Hour >=1,
Hour =<5.
timeknowledge(T,am):-
sem_sort(T,time),
name(T, [T1,T2,T3,T4]),
name(Hour, [T1,T2]),
Hour >=10,
Hour =<12.
%split(Atom, CharList)
%Decomposes Atom into a list of its single-character atoms by
%repeatedly peeling one character off the front with atom_concat/3.
split('', []).
split(Atom,[C|Cs]):-
atom_concat(C,Rest,Atom),
atom_length(C,1),
split(Rest,Cs).
| TeamSPoon/logicmoo_workspace | packs_sys/logicmoo_nlu/ext/SIRIDUS/UGOT-D31/godis-apps/domain-agendatalk/Resources/calendar.pl | Perl | mit | 8,119 |
package T2t::EmptyRow;

# T2t::EmptyRow - a T2t::Row subclass representing a row with no
# content; used as a placeholder so column counts stay consistent.

use strict;
use warnings;

use T2t::Cell;
use T2t::UserPreferencesCache;
use T2t::Utilities;

use base qw( T2t::Row );

# new($class [, $header]) - construct an empty row.
# $header is an optional boolean marking this as a header row
# (defaults to 0). Row attributes come from the shared
# UserPreferencesCache singleton.
sub new
{
    my $package = shift;
    my $self    = {};

    $self->{header} = shift || 0;
    $self->{attrs}  = T2t::UserPreferencesCache::getInstance()->getRowAttributes();

    bless( $self, $package );

    # Cell list starts empty; filled in by setNumberOfColumns().
    $self->{_data} = [];
    $self;
}

# setNumberOfColumns($count) - populate the row with $count blank,
# single-span cells so it lines up with the table's other rows.
# Fixed the indirect-object constructor call ("new T2t::Cell(...)")
# to the unambiguous arrow form.
sub setNumberOfColumns
{
    my $self        = shift;
    my $columnCount = shift;

    my @cellData;
    push( @cellData, T2t::Cell->new( '', 1 ) ) foreach ( 1 .. $columnCount );
    $self->{_data} = \@cellData;
}

# An EmptyRow is, by definition, always empty.
sub isEmpty { return 1; }

1;
__END__
=head1 AUTHOR INFORMATION
Copyright 2000-, Steven Scholnick <scholnicks@gmail.com>
t2t is published under MIT. See license.txt for details
| scholnicks/t2t | T2t/EmptyRow.pm | Perl | mit | 774 |
#!/usr/bin/perl
#/usr/pubsw/bin/perl -w
# Author: Brandon Heller
# Test to see if rawIP.pm can handle multiple sequential packet sends.
# On nf-test13, it crashes around 97-98 sequential sends.
# params:
# --len [bytes]   packet length in bytes (default 1496)
# --pkts [num]    number of packets to send (default 10)

use strict;
use NF2::TestLib;
use NF2::PacketLib;
use Getopt::Long;
use File::Copy;
use NF2::Base;
use Time::HiRes qw (sleep gettimeofday tv_interval usleep);

my $len          = 1496;
my $pkts_to_send = 10;

unless ( GetOptions (
		     "len=i"  => \$len,
		     "pkts=i" => \$pkts_to_send,
		    )
       )
{
  print "invalid options...exiting\n";
  exit 1;
}

my $routerMAC0 = "00:ca:fe:00:00:01";

my @interfaces = ("nf2c0", "nf2c1", "nf2c2", "nf2c3", "eth1", "eth2");
nftest_init(\@ARGV,\@interfaces,);
#nftest_start(\@interfaces);

nftest_set_router_MAC ("nf2c0", $routerMAC0);
nftest_phy_loopback("nf2c0");

# NOTE: the original script declared "my $sent_pkt;" here and then
# re-declared it with a second "my" further down ("my" masks earlier
# declaration); the redundant first declaration has been removed.

# set parameters
my $DA  = $routerMAC0;
my $SA  = "aa:bb:cc:dd:ee:ff";
my $TTL = 1;
my $DST_IP = "192.168.1.1";
my $SRC_IP = "192.168.0.1";
my $nextHopMAC = $routerMAC0;

# create MAC header
my $MAC_hdr = NF2::Ethernet_hdr->new(DA => $DA,
				     SA => $SA,
				     Ethertype => 0x800
				    );

# create IP header; dgram_len excludes the Ethernet header
my $IP_hdr = NF2::IP_hdr->new(ttl => $TTL,
			      src_ip => $SRC_IP,
			      dst_ip => $DST_IP,
			      dgram_len => $len - $MAC_hdr->length_in_bytes()
			     );

$IP_hdr->checksum(0);  # make sure its zero before we calculate it
$IP_hdr->checksum($IP_hdr->calc_checksum);

# create packet filling.... (IP PDU)
my $PDU = NF2::PDU->new($len - $MAC_hdr->length_in_bytes() - $IP_hdr->length_in_bytes() );

# get packed packet string
my $sent_pkt = $MAC_hdr->packed . $IP_hdr->packed . $PDU->packed;

print "start time: ", scalar localtime, "\n";
my @start_time = gettimeofday();

for (my $j = 0; $j < $pkts_to_send; $j++) {
  nftest_send("nf2c0", $sent_pkt, 0);
}

my $sending_time = tv_interval(\@start_time);
print "completed in $sending_time seconds\n";

# NOTE(review): exit code 1 conventionally signals failure; the
# original script exited 1 even on success - preserved here, but
# confirm whether the test harness expects 0.
exit 1;
| shkkgs/DE4-multicore-network-processor-with-multiple-hardware-monitors- | DE4_network_processor_4cores_6monitors_release/projects/DE4_Reference_Router_with_DMA/src/sources_ngnp_multicore/to_send/ngnp_added_monitor/ngnp/regress/test_crash/crash.pl | Perl | mit | 1,967 |
package MIP::Recipes::Analysis::Gzip_fastq;
use 5.026;
use Carp;
use charnames qw{ :full :short };
use English qw{ -no_match_vars };
use File::Spec::Functions qw{ catdir catfile };
use open qw{ :encoding(UTF-8) :std };
use Params::Check qw{ allow check last_error };
use utf8;
use warnings;
use warnings qw{ FATAL utf8 };
## CPANM
use autodie qw{ :all };
use Readonly;
## MIPs lib/
use MIP::Constants qw{ $DOT $LOG_NAME $NEWLINE };
BEGIN {
require Exporter;
use base qw{ Exporter };
# Functions and variables which can be optionally exported
our @EXPORT_OK = qw{ analysis_gzip_fastq };
}
sub analysis_gzip_fastq {
## Function : Gzips fastq files
## Returns : 1 on submission (undef/early return when nothing to do)
## Arguments: $active_parameter_href => Active parameters for this analysis hash {REF}
## : $case_id => Family id
## : $file_info_href => File info hash {REF}
## : $job_id_href => Job id hash {REF}
## : $parameter_href => Parameter hash {REF}
## : $profile_base_command => Submission profile base command
## : $recipe_name => Program name
## : $sample_id => Sample id
## : $sample_info_href => Info on samples and case hash {REF}
my ($arg_href) = @_;
## Flatten argument(s)
my $active_parameter_href;
my $file_info_href;
my $job_id_href;
my $parameter_href;
my $recipe_name;
my $sample_id;
my $sample_info_href;
## Default(s)
my $case_id;
my $profile_base_command;
my $tmpl = {
active_parameter_href => {
defined => 1,
default => {},
required => 1,
store => \$active_parameter_href,
strict_type => 1,
},
case_id => {
default => $arg_href->{active_parameter_href}{case_id},
store => \$case_id,
strict_type => 1,
},
file_info_href => {
default => {},
defined => 1,
required => 1,
store => \$file_info_href,
strict_type => 1,
},
job_id_href => {
default => {},
defined => 1,
required => 1,
store => \$job_id_href,
strict_type => 1,
},
parameter_href => {
default => {},
defined => 1,
required => 1,
store => \$parameter_href,
strict_type => 1,
},
profile_base_command => {
default => q{sbatch},
store => \$profile_base_command,
strict_type => 1,
},
recipe_name => {
defined => 1,
required => 1,
store => \$recipe_name,
strict_type => 1,
},
sample_id => {
defined => 1,
required => 1,
store => \$sample_id,
strict_type => 1,
},
sample_info_href => {
default => {},
defined => 1,
required => 1,
store => \$sample_info_href,
strict_type => 1,
},
};
check( $tmpl, $arg_href, 1 ) or croak q{Could not parse arguments!};
use MIP::Cluster qw{ update_core_number_to_seq_mode };
use MIP::Environment::Cluster qw{ check_max_core_number };
use MIP::File_info
qw{ get_io_files get_is_sample_files_compressed get_sample_file_attribute parse_io_outfiles };
use MIP::Processmanagement::Processes qw{ submit_recipe };
use MIP::Program::Gzip qw{ gzip };
use MIP::Recipe qw{ parse_recipe_prerequisites };
use MIP::Script::Setup_script qw{ setup_script };
## Skip the whole recipe when every fastq infile for this sample is gzipped.
my $is_files_compressed = get_is_sample_files_compressed(
{
file_info_href => $file_info_href,
sample_id => $sample_id,
}
);
## No uncompressed fastq infiles for this sample_id
return if ($is_files_compressed);
## Retrieve logger object
my $log = Log::Log4perl->get_logger($LOG_NAME);
## Unpack parameters
## Get the io infiles per chain and id
my %io = get_io_files(
{
id => $sample_id,
file_info_href => $file_info_href,
parameter_href => $parameter_href,
recipe_name => $recipe_name,
stream => q{in},
}
);
my $indir_path_prefix = $io{in}{dir_path_prefix};
my @infile_names = @{ $io{in}{file_names} };
my @infile_name_prefixes = @{ $io{in}{file_name_prefixes} };
my @infile_paths = @{ $io{in}{file_paths} };
my %recipe = parse_recipe_prerequisites(
{
active_parameter_href => $active_parameter_href,
parameter_href => $parameter_href,
recipe_name => $recipe_name,
}
);
my $core_number = $recipe{core_number};
## Outpaths
## NOTE(review): gzip writes in place, so outfiles live next to the
## infiles (same directory prefix); catdir is used to build a *file*
## path here, which works but catfile would express the intent better.
my @outfile_paths =
map { catdir( $indir_path_prefix, $_ . $DOT . q{fastq.gz} ) } @infile_name_prefixes;
## Set and get the io files per chain, id and stream
%io = (
%io,
parse_io_outfiles(
{
chain_id => $recipe{job_id_chain},
id => $sample_id,
file_info_href => $file_info_href,
file_paths_ref => \@outfile_paths,
parameter_href => $parameter_href,
recipe_name => $recipe_name,
}
)
);
## Adjust according to number of infiles to process
# One full lane on Hiseq takes approx. 2 h for gzip to process
my $time = $recipe{time} * scalar @infile_names;
## Filehandles
# Create anonymous filehandle
my $filehandle = IO::Handle->new();
my %file_info_sample = get_sample_file_attribute(
{
file_info_href => $file_info_href,
sample_id => $sample_id,
}
);
## NOTE(review): $core_number is overwritten on every lane iteration,
## so the final value reflects the last lane's sequence_run_type only
## - confirm that is intended when lanes mix single/paired-end.
INFILE_LANE:
foreach my $infile_prefix ( @{ $file_info_sample{no_direction_infile_prefixes} } ) {
my $sequence_run_type = get_sample_file_attribute(
{
attribute => q{sequence_run_type},
file_info_href => $file_info_href,
file_name => $infile_prefix,
sample_id => $sample_id,
}
);
## Update the number of cores to be used in the analysis according to sequencing mode requirements
$core_number = update_core_number_to_seq_mode(
{
core_number => $core_number,
sequence_run_type => $sequence_run_type,
}
);
}
## Limit number of cores requested to the maximum number of cores available per node
$core_number = check_max_core_number(
{
core_number_requested => $core_number,
max_cores_per_node => $active_parameter_href->{max_cores_per_node},
}
);
## Creates recipe directories (info & data & script), recipe script filenames and writes sbatch header
my ($recipe_file_path) = setup_script(
{
active_parameter_href => $active_parameter_href,
core_number => $core_number,
directory_id => $sample_id,
filehandle => $filehandle,
job_id_href => $job_id_href,
memory_allocation => $recipe{memory},
process_time => $time,
recipe_directory => $recipe_name,
recipe_name => $recipe_name,
}
);
my $process_batches_count = 1;
# Used to print wait at the right times since infiles cannot be used (can be a mixture of .gz and .fast files)
my $uncompressed_file_counter = 0;
## Gzip
say {$filehandle} q{## } . $recipe_name;
INFILE:
while ( my ( $infile_index, $infile ) = each @infile_names ) {
my $is_file_compressed = get_sample_file_attribute(
{
attribute => q{is_file_compressed},
file_info_href => $file_info_href,
file_name => $infile,
sample_id => $sample_id,
}
);
## For files ending with ".fastq" required since there can be a mixture (also .fastq.gz) within the sample dir
if ( not $is_file_compressed ) {
## Using only $active_parameter{max_cores_per_node} cores
# Insert a shell "wait" barrier whenever a full batch of
# backgrounded gzip jobs has been launched.
if ( $uncompressed_file_counter ==
$process_batches_count * $active_parameter_href->{max_cores_per_node} )
{
say {$filehandle} q{wait}, $NEWLINE;
$process_batches_count = $process_batches_count + 1;
}
## Perl wrapper for writing gzip recipe to $filehandle
gzip(
{
filehandle => $filehandle,
infile_paths_ref => [ $infile_paths[$infile_index] ],
}
);
# Background the gzip so batch members run in parallel.
say {$filehandle} q{&};
$uncompressed_file_counter++;
}
}
print {$filehandle} $NEWLINE;
say {$filehandle} q{wait}, $NEWLINE;
# Submit the generated script unless the recipe is in dry-run mode.
if ( $recipe{mode} == 1 ) {
submit_recipe(
{
base_command => $profile_base_command,
case_id => $case_id,
dependency_method => q{island_to_sample},
job_id_chain => $recipe{job_id_chain},
job_id_href => $job_id_href,
job_reservation_name => $active_parameter_href->{job_reservation_name},
log => $log,
recipe_file_path => $recipe_file_path,
sample_id => $sample_id,
submission_profile => $active_parameter_href->{submission_profile},
}
);
}
return 1;
}
1;
| henrikstranneheim/MIP | lib/MIP/Recipes/Analysis/Gzip_fastq.pm | Perl | mit | 10,035 |
use utf8;
package Chinook::Schema::Result::Employee;
# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE
=head1 NAME
Chinook::Schema::Result::Employee
=cut
use strict;
use warnings;
use base 'DBIx::Class::Core';
=head1 TABLE: C<Employee>
=cut
__PACKAGE__->table("Employee");
=head1 ACCESSORS
=head2 employeeid
data_type: 'integer'
is_auto_increment: 1
is_nullable: 0
=head2 lastname
data_type: 'nvarchar'
is_nullable: 0
size: 20
=head2 firstname
data_type: 'nvarchar'
is_nullable: 0
size: 20
=head2 title
data_type: 'nvarchar'
is_nullable: 1
size: 30
=head2 reportsto
data_type: 'integer'
is_foreign_key: 1
is_nullable: 1
=head2 birthdate
data_type: 'datetime'
is_nullable: 1
=head2 hiredate
data_type: 'datetime'
is_nullable: 1
=head2 address
data_type: 'nvarchar'
is_nullable: 1
size: 70
=head2 city
data_type: 'nvarchar'
is_nullable: 1
size: 40
=head2 state
data_type: 'nvarchar'
is_nullable: 1
size: 40
=head2 country
data_type: 'nvarchar'
is_nullable: 1
size: 40
=head2 postalcode
data_type: 'nvarchar'
is_nullable: 1
size: 10
=head2 phone
data_type: 'nvarchar'
is_nullable: 1
size: 24
=head2 fax
data_type: 'nvarchar'
is_nullable: 1
size: 24
=head2 email
data_type: 'nvarchar'
is_nullable: 1
size: 60
=cut
# Column definitions generated by DBIx::Class::Schema::Loader from the
# Chinook "Employee" table; regeneration will overwrite this section.
__PACKAGE__->add_columns(
"employeeid",
{ data_type => "integer", is_auto_increment => 1, is_nullable => 0 },
"lastname",
{ data_type => "nvarchar", is_nullable => 0, size => 20 },
"firstname",
{ data_type => "nvarchar", is_nullable => 0, size => 20 },
"title",
{ data_type => "nvarchar", is_nullable => 1, size => 30 },
"reportsto",
{ data_type => "integer", is_foreign_key => 1, is_nullable => 1 },
"birthdate",
{ data_type => "datetime", is_nullable => 1 },
"hiredate",
{ data_type => "datetime", is_nullable => 1 },
"address",
{ data_type => "nvarchar", is_nullable => 1, size => 70 },
"city",
{ data_type => "nvarchar", is_nullable => 1, size => 40 },
"state",
{ data_type => "nvarchar", is_nullable => 1, size => 40 },
"country",
{ data_type => "nvarchar", is_nullable => 1, size => 40 },
"postalcode",
{ data_type => "nvarchar", is_nullable => 1, size => 10 },
"phone",
{ data_type => "nvarchar", is_nullable => 1, size => 24 },
"fax",
{ data_type => "nvarchar", is_nullable => 1, size => 24 },
"email",
{ data_type => "nvarchar", is_nullable => 1, size => 60 },
);
=head1 PRIMARY KEY
=over 4
=item * L</employeeid>
=back
=cut
__PACKAGE__->set_primary_key("employeeid");
=head1 RELATIONS
=head2 customers
Type: has_many
Related object: L<Chinook::Schema::Result::Customer>
=cut
__PACKAGE__->has_many(
"customers",
"Chinook::Schema::Result::Customer",
{ "foreign.supportrepid" => "self.employeeid" },
{ cascade_copy => 0, cascade_delete => 0 },
);
=head2 employees
Type: has_many
Related object: L<Chinook::Schema::Result::Employee>
=cut
# Self-referential: the direct reports of this employee.
__PACKAGE__->has_many(
"employees",
"Chinook::Schema::Result::Employee",
{ "foreign.reportsto" => "self.employeeid" },
{ cascade_copy => 0, cascade_delete => 0 },
);
=head2 reportsto
Type: belongs_to
Related object: L<Chinook::Schema::Result::Employee>
=cut
# NOTE(review): this relationship accessor "reportsto" shares its name
# with the "reportsto" column accessor defined above; the relationship
# accessor (returning a related row object, not the raw id) takes
# precedence - confirm callers expect that.
__PACKAGE__->belongs_to(
"reportsto",
"Chinook::Schema::Result::Employee",
{ employeeid => "reportsto" },
{
is_deferrable => 0,
join_type => "LEFT",
on_delete => "NO ACTION",
on_update => "NO ACTION",
},
);
# Created by DBIx::Class::Schema::Loader v0.07035 @ 2013-04-19 13:38:48
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:0yI+EFM/4/YrBCXV1lHTiw
# You can replace this text with custom code or comments, and it will be preserved on regeneration
1;
| rpillar/Chinook | lib/Chinook/Schema/Result/Employee.pm | Perl | mit | 3,766 |
/* <module>
%
% PFC is a language extension for prolog.
%
% It adds a new type of module inheritance
%
% Dec 13, 2035
% Douglas Miles
*/
% was_module(header_sane,[]).
:- include(test_header).
%:- add_import_module(header_sane,baseKB,end).
:- set_defaultAssertMt(modA).
:- begin_pfc.
:- mpred_trace_exec.
:- set_prolog_flag(retry_undefined, kb_shared).
% Declare two hybrid knowledge-base modules; modA inherits from modB
% via the genlMt/2 fact below.
mtHybrid(modA).
mtHybrid(modB).
modA: (codeA:-
(notrace(printAll((
'$current_source_module'(_),
'$current_typein_module'(_),
context_module(_),
ignore(prolog_load_context(reloading,_)),
current_prolog_flag(retry_undefined,_),
current_prolog_flag(debug,_),
current_prolog_flag(unknown,_)))),
%set_prolog_flag(unknown,error),
trace,
%set_prolog_flag(retry_undefined,kb_shared),
notrace(call(call,codeB)),
format('~n~n~n~n~nSuccess~n~n~n',[]))).
modB: (codeB).
genlMt(modA,modB).
:- set_prolog_flag(retry_undefined,kb_shared).
% run the test
%modA: (:- codeA).
:- set_prolog_flag(unknown,error).
:- catch(modA:codeA,E,wdmsg(E)).
:- break.
% before test, to make sure codeA was not accidentally defined in modB
:- sanity(\+ module_clause(modB:codeA,_)).
:- sanity(\+ module_clause(modA:codeB,_)).
:- sanity( module_clause(modA:codeA,_)).
:- sanity( module_clause(modB:codeB,_)).
% before test, genlMt makes the rule available and should not corrupt the modA module
% make sure genlMt didn't unassert
:- sanity(clause_u(modB:codeB,_)).
:- warn_fail_TODO(clause_u(modA:codeB,_)).
% to make codeB sure is available in modA
:- mpred_must( clause_u(modA:codeB,_)).
% to make sure codeA does not get accidentally defined in modB
:- mpred_must(\+ ((clause_u(modB:codeA,B,Ref),B\=inherit_above(modB, codeA), clause_property(Ref,module(modB))))).
% genlMt makes the rule available and should not corrupt the modA module
:- warn_fail_TODO(clause(modA:codeB,_)).
% genlMt
:- warn_fail_TODO( clause_u(modA:codeB,_)).
| TeamSPoon/logicmoo_workspace | packs_sys/pfc/t/sanity_base/mt_03b.pl | Perl | mit | 1,926 |
# Acme::NoSql
#
# Example subclass for Elevator::Drivers::Mongo
# this represents settings Acme organization uses for MongoDB
use MooseX::Declare;
class Acme::NoSql extends Elevator::Drivers::Mongo {
use Method::Signatures::Simple name => 'action';
use Elevator::Model::BaseObject;
# server() - host address of the MongoDB server. If your deployment
# does not use NoSQL at all, return undef here instead of an address.
action server() {
return '127.0.0.1';
}
}
| mpdehaan/Elevator | examples/Acme/NoSql.pm | Perl | mit | 403 |
#
# Routino CGI paths Perl script
#
# Part of the Routino routing software.
#
# This file Copyright 2008-2010 Andrew M. Bishop
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# NOTE: these are deliberately package globals (no "my"/"our") so that
# the Routino CGI scripts that "require" this file can read them.
# Directory path parameters
# EDIT THIS to set the root directory for the non-web data files.
$root_dir="../..";
# EDIT THIS to change the location of the individual directories.
$bin_dir="$root_dir/bin";
$data_dir="$root_dir/data";
$results_dir="$root_dir/results";
# EDIT THIS to change the names of the executables (enables easy selection of slim mode).
$router_exe="router";
$filedumper_exe="filedumper";
# Return a true value so "require" succeeds.
1;
| MinnPost/minnpost-nice-ride | data-processing/routino-2.2/web/www/routino/paths.pl | Perl | mit | 1,203 |
#!env perl
# add_function.pl - annotate a cuffnorm FPKM table with functional
# descriptions. Reads a two-column (tab-separated) id->description
# file, then echoes the FPKM table with a FUNCTION column inserted
# after the gene id.
use strict;
use warnings;
use Getopt::Long;

my $debug = 0;
GetOptions('v|debug!' => \$debug);

my $fpkm       = shift || "cuffnorm/genes.fpkm_table";
my $functional = shift || '../../../annotation/Hw2.maker.MTedit_longID.v20160306.desc';

# Build gene-id -> description lookup.
# Fixed: three-arg open (the original two-arg form is unsafe) and the
# die message, which was missing a separator ("$functional$!").
open(my $fh, '<', $functional) || die "Cannot open $functional: $!";
my %gene;
while (<$fh>) {
    chomp;
    my ($id, $function) = split(/\t/, $_);
    $gene{$id} = $function;
}
close($fh);

open($fh, '<', $fpkm) || die "Cannot open $fpkm: $!";
my $i = 0;
while (<$fh>) {
    # Split on whitespace: first field is the gene id, rest are samples.
    my ($id, @row) = split;
    if ($i++ == 0) {
        # Header line: inject the new column name.
        print join("\t", $id, qw(FUNCTION), @row), "\n";
    } else {
        print join("\t", $id, $gene{$id} || '', @row), "\n";
    }
}
close($fh);
| stajichlab/Hortaea_werneckii | RNAseq/Expression/v20160306/scripts/add_function.pl | Perl | cc0-1.0 | 629 |
# Auto-generated Paws request class: parameters for the Greengrass
# CreateGroupVersion API call (POST to the URI below). GroupId is
# interpolated into the URI; AmznClientToken travels as a header.
package Paws::Greengrass::CreateGroupVersion;
use Moose;
has AmznClientToken => (is => 'ro', isa => 'Str', traits => ['ParamInHeader'], header_name => 'X-Amzn-Client-Token');
has CoreDefinitionVersionArn => (is => 'ro', isa => 'Str');
has DeviceDefinitionVersionArn => (is => 'ro', isa => 'Str');
has FunctionDefinitionVersionArn => (is => 'ro', isa => 'Str');
has GroupId => (is => 'ro', isa => 'Str', traits => ['ParamInURI'], uri_name => 'GroupId', required => 1);
has LoggerDefinitionVersionArn => (is => 'ro', isa => 'Str');
has SubscriptionDefinitionVersionArn => (is => 'ro', isa => 'Str');
use MooseX::ClassAttribute;
# Call metadata consumed by the Paws caller machinery.
class_has _api_call => (isa => 'Str', is => 'ro', default => 'CreateGroupVersion');
class_has _api_uri => (isa => 'Str', is => 'ro', default => '/greengrass/groups/{GroupId}/versions');
class_has _api_method => (isa => 'Str', is => 'ro', default => 'POST');
class_has _returns => (isa => 'Str', is => 'ro', default => 'Paws::Greengrass::CreateGroupVersionResponse');
class_has _result_key => (isa => 'Str', is => 'ro');
1;
### main pod documentation begin ###
=head1 NAME
Paws::Greengrass::CreateGroupVersion - Arguments for method CreateGroupVersion on Paws::Greengrass
=head1 DESCRIPTION
This class represents the parameters used for calling the method CreateGroupVersion on the
AWS Greengrass service. Use the attributes of this class
as arguments to method CreateGroupVersion.
You shouldn't make instances of this class. Each attribute should be used as a named argument in the call to CreateGroupVersion.
As an example:
$service_obj->CreateGroupVersion(Att1 => $value1, Att2 => $value2, ...);
Values for attributes that are native types (Int, String, Float, etc) can passed as-is (scalar values). Values for complex Types (objects) can be passed as a HashRef. The keys and values of the hashref will be used to instance the underlying object.
=head1 ATTRIBUTES
=head2 AmznClientToken => Str
The client token used to request idempotent operations.
=head2 CoreDefinitionVersionArn => Str
Core definition version arn for this group.
=head2 DeviceDefinitionVersionArn => Str
Device definition version arn for this group.
=head2 FunctionDefinitionVersionArn => Str
Function definition version arn for this group.
=head2 B<REQUIRED> GroupId => Str
The unique Id of the AWS Greengrass Group
=head2 LoggerDefinitionVersionArn => Str
Logger definition version arn for this group.
=head2 SubscriptionDefinitionVersionArn => Str
Subscription definition version arn for this group.
=head1 SEE ALSO
This class forms part of L<Paws>, documenting arguments for method CreateGroupVersion in L<Paws::Greengrass>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/Greengrass/CreateGroupVersion.pm | Perl | apache-2.0 | 2,881 |
=for advent_year 2011
=for advent_day 22
=for advent_title DBI Profile
=for advent_author Fayland Lam
=encoding utf8
DBI 优化的工具很多。最正统的便是 M<DBI::Profile>,通过设置 DBI_PROFILE 环境变量,或者直接设置 $dbh->{Profile}。环境变量无需更改脚本本身,而通过设 ->{Profile} 可以控制 profile 的范围。如果你选择了输出 M<DBI::ProfileDumper> 到文件,则回头可以用 dbiprof 来查看更加清晰的报告。如果需要直接输出漂亮格式的话,也可以求助 M<DBIx::ProfileManager>。另外 Plack 下也有专门的 M<Plack::Middleware::Debug::DBIProfile> 可以在 web 页面查看。
如果更进一步,可以选择试试 M<DBIx::QueryLog>,该模块可以设置最小记录时间 threshold,可以设置 explain 来直接输出 explain 语句等等。
使用 M<DBIx::Class> 的人可以用 M<DBIx::Class::QueryLog>, 该模块在 Catalyst 或者 Plack 下都有自己的插件。如 M<Plack::Middleware::Debug::DBIC::QueryLog>
如果是需要查询 DBI 的 connect 情况的话,可以试试 DBI_TRACE 或者 Plack 的 M<Plack::Middleware::Debug::DBITrace>。注意别设太大的值,否则你会看不过来的。
另外可以看看日本今年的 dbix advent A<http://perl-users.jp/articles/advent-calendar/2011/dbix/> 不需要懂日语,copy 到 Google Translate 就可以。比如今日介绍的 M<DBIx::Connector> 便是一个很赞的模块。
谢谢。
| PerlChina/mojo-advent | articles/2011/22/DBIProfile.pod | Perl | apache-2.0 | 1,460 |
# Auto-generated Paws value object describing one recurring charge on
# a reserved DB instance (amount plus billing frequency); used by the
# DescribeReservedDBInstances* result structures.
package Paws::RDS::RecurringCharge;
use Moose;
has RecurringChargeAmount => (is => 'ro', isa => 'Num');
has RecurringChargeFrequency => (is => 'ro', isa => 'Str');
1;
### main pod documentation begin ###
=head1 NAME
Paws::RDS::RecurringCharge
=head1 USAGE
This class represents one of two things:
=head3 Arguments in a call to a service
Use the attributes of this class as arguments to methods. You shouldn't make instances of this class.
Each attribute should be used as a named argument in the calls that expect this type of object.
As an example, if Att1 is expected to be a Paws::RDS::RecurringCharge object:
$service_obj->Method(Att1 => { RecurringChargeAmount => $value, ..., RecurringChargeFrequency => $value });
=head3 Results returned from an API call
Use accessors for each attribute. If Att1 is expected to be an Paws::RDS::RecurringCharge object:
$result = $service_obj->Method(...);
$result->Att1->RecurringChargeAmount
=head1 DESCRIPTION
This data type is used as a response element in the
DescribeReservedDBInstances and DescribeReservedDBInstancesOfferings
actions.
=head1 ATTRIBUTES
=head2 RecurringChargeAmount => Num
The amount of the recurring charge.
=head2 RecurringChargeFrequency => Str
The frequency of the recurring charge.
=head1 SEE ALSO
This class forms part of L<Paws>, describing an object used in L<Paws::RDS>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/RDS/RecurringCharge.pm | Perl | apache-2.0 | 1,563 |
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::juniper::common::screenos::snmp::mode::cpu;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
# Constructor: delegate to the plugin-mode base class, then register
# the two comma-separated threshold options this mode understands.
sub new {
    my ($class, %options) = @_;

    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $options{options}->add_options(
        arguments => {
            'warning:s'  => { name => 'warning',  default => '' },
            'critical:s' => { name => 'critical', default => '' },
        }
    );

    return $self;
}
# check_options - validate the comma-separated warning/critical
# thresholds ("1min,5min,15min") and register them with the perfdata
# helper. Exits with an option error on the first invalid threshold.
# Refactored: the six copy-pasted validation stanzas are replaced by a
# data-driven loop; labels, ordering and error messages are unchanged.
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::init(%options);

    ($self->{warn1m}, $self->{warn5m}, $self->{warn15m}) = split /,/, $self->{option_results}->{warning};
    ($self->{crit1m}, $self->{crit5m}, $self->{crit15m}) = split /,/, $self->{option_results}->{critical};

    # [ perfdata label, text used in the error message, value ]
    my @thresholds = (
        [ 'warn1min',  'warning (1min)',   $self->{warn1m}  ],
        [ 'warn5min',  'warning (5min)',   $self->{warn5m}  ],
        [ 'warn15min', 'warning (15min)',  $self->{warn15m} ],
        [ 'crit1min',  'critical (1min)',  $self->{crit1m}  ],
        [ 'crit5min',  'critical (5min)',  $self->{crit5m}  ],
        [ 'crit15min', 'critical (15min)', $self->{crit15m} ],
    );
    foreach my $threshold (@thresholds) {
        my ($label, $description, $value) = @$threshold;
        if (($self->{perfdata}->threshold_validate(label => $label, value => $value)) == 0) {
            $self->{output}->add_option_msg(short_msg => "Wrong " . $description . " threshold '" . $value . "'.");
            $self->{output}->option_exit();
        }
    }
}
# run - fetch the 1/5/15-minute CPU averages from the NetScreen
# resource MIB, evaluate them against the six thresholds registered in
# check_options(), and emit plugin output plus three perfdata series.
sub run {
my ($self, %options) = @_;
$self->{snmp} = $options{snmp};
# NETSCREEN-RESOURCE-MIB: nsResCpuLast1Min/5Min/15Min (percentages).
my $oid_nsResCpuLast1Min = '.1.3.6.1.4.1.3224.16.1.2.0';
my $oid_nsResCpuLast5Min = '.1.3.6.1.4.1.3224.16.1.3.0';
my $oid_nsResCpuLast15Min = '.1.3.6.1.4.1.3224.16.1.4.0';
my $result = $self->{snmp}->get_leef(oids => [$oid_nsResCpuLast1Min, $oid_nsResCpuLast5Min,
$oid_nsResCpuLast15Min], nothing_quit => 1);
my $cpu1min = $result->{$oid_nsResCpuLast1Min};
my $cpu5min = $result->{$oid_nsResCpuLast5Min};
my $cpu15min = $result->{$oid_nsResCpuLast15Min};
# Each average gets its own warn/crit pair; critical wins over warning.
my $exit1 = $self->{perfdata}->threshold_check(value => $cpu1min,
threshold => [ { label => 'crit1min', exit_litteral => 'critical' }, { label => 'warn1min', exit_litteral => 'warning' } ]);
my $exit2 = $self->{perfdata}->threshold_check(value => $cpu5min,
threshold => [ { label => 'crit5min', exit_litteral => 'critical' }, { label => 'warn5min', exit_litteral => 'warning' } ]);
my $exit3 = $self->{perfdata}->threshold_check(value => $cpu15min,
threshold => [ { label => 'crit15min', exit_litteral => 'critical' }, { label => 'warn15min', exit_litteral => 'warning' } ]);
# Overall status is the worst of the three individual statuses.
my $exit = $self->{output}->get_most_critical(status => [ $exit1, $exit2, $exit3 ]);
$self->{output}->output_add(severity => $exit,
short_msg => sprintf("CPU Usage: %.2f%% (1min), %.2f%% (5min), %.2f%% (15min)",
$cpu1min, $cpu5min, $cpu15min));
$self->{output}->perfdata_add(label => "cpu_1min", unit => '%',
value => $cpu1min,
warning => $self->{perfdata}->get_perfdata_for_output(label => 'warn1min'),
critical => $self->{perfdata}->get_perfdata_for_output(label => 'crit1min'),
min => 0, max => 100);
$self->{output}->perfdata_add(label => "cpu_5min", unit => '%',
value => $cpu5min,
warning => $self->{perfdata}->get_perfdata_for_output(label => 'warn5min'),
critical => $self->{perfdata}->get_perfdata_for_output(label => 'crit5min'),
min => 0, max => 100);
$self->{output}->perfdata_add(label => "cpu_15min", unit => '%',
value => $cpu15min,
warning => $self->{perfdata}->get_perfdata_for_output(label => 'warn15min'),
critical => $self->{perfdata}->get_perfdata_for_output(label => 'crit15min'),
min => 0, max => 100);
$self->{output}->display();
$self->{output}->exit();
}
1;
__END__
=head1 MODE
Check Juniper cpu usage (NETSCREEN-RESOURCE-MIB).
=over 8
=item B<--warning>
Threshold warning in percent (1min,5min,15min).
=item B<--critical>
Threshold critical in percent (1min,5min,15min).
=back
=cut
| centreon/centreon-plugins | network/juniper/common/screenos/snmp/mode/cpu.pm | Perl | apache-2.0 | 6,405 |
#
# Bio::Das::ProServer::SourceAdaptor::conservation_score
#
# Copyright EnsEMBL Team
#
# You may distribute this module under the same terms as perl itself
#
# pod documentation - main docs before the code
=head1 NAME
Bio::Das::ProServer::SourceAdaptor::conservation_score - Extension of the ProServer for e! conservation scores
=head1 INHERITANCE
This module inherits attributes and methods from Bio::Das::ProServer::SourceAdaptor
=head1 DAS CONFIGURATION FILE
There are some specific parameters for this module you can use in the DAS server configuration file
=head2 registry
Your registry configuration file to connect to the compara database
=head2 database
The species name in your Registry configuration file.
=head2 this_species
The main species. Features will be shown for this species.
=head2 other_species
The other species. This DAS track will show alignments between this_species and other_species.
You can add more than one other species separated by commas.
=head2 analysis
The method_link_type. This defines the type of score. E.g. GERP_CONSERVATION_SCORE
See perldoc Bio::EnsEMBL::Compara::MethodLinkSpeciesSet for more details about the
method_link_type
=head2 Example
=head3 registry configuration file
use strict;
use Bio::EnsEMBL::Utils::ConfigRegistry;
use Bio::EnsEMBL::Compara::DBSQL::DBAdaptor;
new Bio::EnsEMBL::Compara::DBSQL::DBAdaptor(
-host => 'ensembldb.ensembl.org',
-user => 'anonymous',
-port => 3306,
-species => 'ensembl-compara-41',
-dbname => 'ensembl_compara_41');
=head3 DAS server configuration file
[general]
hostname = ecs4b.internal.sanger.ac.uk
prefork = 6
maxclients = 100
port = 9013
[conservation_score]
registry = /home/foo/ProServer/eg/reg.pl
state = on
adaptor = compara
database = ensembl-compara-41
this_species = Homo sapiens
other_species = Mus musculus, Rattus norvegicus, Canis familiaris, Gallus gallus, Bos taurus, Monodelphis domestica
analysis = GERP_CONSTRAINED_ELEMENT
description = 7 way mlagan alignment
group_type = default
=cut
package Bio::Das::ProServer::SourceAdaptor::conservation_score;
use strict;
use Bio::EnsEMBL::Registry;
use Bio::EnsEMBL::Utils::Exception;
use base qw( Bio::Das::ProServer::SourceAdaptor );
sub init {
    my ($self) = @_;

    # Advertise the DAS capabilities implemented by this adaptor.
    $self->{'capabilities'} = {
        'features'   => '1.0',
        'stylesheet' => '1.0',
    };

    # A registry configuration file is mandatory: it tells us how to reach
    # the compara database.
    my $registry_file = $self->config()->{'registry'};
    throw("registry not defined\n") unless defined $registry_file;

    # Load the registry only once per process; 'seen' is set after the first
    # successful load_all().
    Bio::EnsEMBL::Registry->load_all($registry_file)
        unless $Bio::EnsEMBL::Registry::registry_register->{'seen'};
}
# Fetch the conservation scores overlapping the requested DAS segment and
# return them as a list of DAS feature hashes (type 'histogram').
# Args: $opts - DAS request options; 'segment', 'start' and 'end' are required
#       (an empty list is returned when any of them is missing/false).
# Returns: list of feature hashrefs (id, label, method, start, end, ori,
#          score, note, typecategory, type).
sub build_features
{
    my ($self, $opts) = @_;

    # Registry is used as a class; skip the code/database version check.
    my $db = "Bio::EnsEMBL::Registry";
    $db->no_version_check(1);
    my $dbname = $self->config()->{'database'};

    #need to put adaptors here and not in init
    my $meta_con =
        $db->get_adaptor($dbname, 'compara', 'MetaContainer') or
        die "no metadbadaptor:$dbname, 'compara','MetaContainer' \n";
    my $mlss_adaptor =
        $db->get_adaptor($dbname, 'compara', 'MethodLinkSpeciesSet') or
        die "can't get $dbname, 'compara', 'MethodLinkSpeciesSet'\n";
    my $cs_adaptor =
        $db->get_adaptor($dbname, 'compara', 'ConservationScore') or
        die "can't get $dbname, 'compara', 'ConservationScore'\n";
    my $species = $self->config()->{'this_species'};
    my $slice_adaptor =
        $db->get_adaptor($species, 'core', 'Slice') or
        die "can't get $species, 'core', 'Slice'\n";
    my $genome_db_adaptor =
        $db->get_adaptor($dbname, 'compara', 'GenomeDB') or
        die "can't get $dbname, 'compara', 'GenomeDB'\n";
    my $genomedbs = $genome_db_adaptor->fetch_all();

    # DAS request coordinates; bail out quietly when the request is incomplete.
    my $daschr   = $opts->{'segment'} || return ( );
    my $dasstart = $opts->{'start'}   || return ( );
    my $dasend   = $opts->{'end'}     || return ( );

    my $species1 = $self->config()->{'this_species'};
    # Comma-separated list in the config, whitespace around commas tolerated.
    my @other_species = split(/\s*\,\s*/, $self->config()->{'other_species'});
    my $chr1   = $daschr;
    my $start1 = $dasstart;
    my $end1   = $dasend;
    my $method_link = $self->config()->{'analysis'};

    # NOTE(review): $stored_max_alignment_length is fetched but never used in
    # this sub — looks like dead code; confirm before removing.
    my $stored_max_alignment_length;
    my $values = $meta_con->list_value_by_key("max_alignment_length");
    if(@$values) {
        $stored_max_alignment_length = $values->[0];
    }

    my $species1_genome_db;
    my @other_species_genome_dbs;

    ## Get the Bio::EnsEMBL::Compara::GenomeDB object for the primary species
    foreach my $this_genome_db (@$genomedbs){
        if ($this_genome_db->name eq $species1) {
            $species1_genome_db = $this_genome_db;
        }
    }
    if (!defined($species1_genome_db)) {
        die "No species called $species1 in the database -- check spelling\n";
    }

    ## Get the Bio::EnsEMBL::Compara::GenomeDB objects for the remaining species
    foreach my $this_other_species (@other_species) {
        my $this_other_genome_db;
        foreach my $this_genome_db (@$genomedbs){
            if ($this_genome_db->name eq $this_other_species) {
                $this_other_genome_db = $this_genome_db;
                last;
            }
        }
        if (!defined($this_other_genome_db)) {
            die "No species called $this_other_species in the database -- check spelling\n";
        }
        push(@other_species_genome_dbs, $this_other_genome_db);
    }

    ## Fetch the Bio::EnsEMBL::Compara::MethodLinkSpeciesSet object
    my $method_link_species_set;
    $method_link_species_set =
        $mlss_adaptor->fetch_by_method_link_type_GenomeDBs(
            $method_link, [$species1_genome_db, @other_species_genome_dbs]);

    ##Fetch the Bio::EnsEMBL::Slice object
    # Coordinate system is left undef so any matching region name is accepted.
    my $slice = $slice_adaptor->fetch_by_region(undef, $chr1, $start1, $end1);

    #Fetch conservation scores
    my $conservation_scores = $cs_adaptor->fetch_all_by_MethodLinkSpeciesSet_Slice($method_link_species_set, $slice);

    ## Build the results array
    my @results = ();
    foreach my $score (@$conservation_scores) {
        # Scores without a diff value cannot be plotted; skip them.
        unless (defined $score->diff_score) {
            next;
        }
        #my $id = $score->genomic_align_block_id;
        # Make the feature id unique per score position within a block.
        my $id = $score->genomic_align_block_id ."_" . $score->position;
        my $label = "Conservation scores";
        # note will contain expected and observed scores and window size
        my $note = sprintf("Expected %.3f Observed %.3f Diff %.3f Window Size %d Max", $score->expected_score, $score->observed_score, $score->diff_score, $score->window_size);
        #my $start_pos = $start1 + $score->position;
        #my $end_pos = $start1 + $score->position + $score->window_size;
        # Score positions are 1-based relative to the slice; convert to
        # absolute chromosome coordinates (inclusive end).
        my $start_pos = $start1 + $score->position - 1;
        my $end_pos = $start_pos + $score->window_size - 1;
        # Sign is flipped for display — presumably so conserved regions plot
        # in the intended direction on the histogram; confirm against the
        # stylesheet orientation.
        my $new_score = $score->diff_score()*-1;
        push @results, {
            'id'    => $id,
            'label' => $label,
            'method'=> $method_link,
            'start' => $start_pos,
            'end'   => $end_pos,
            'ori'   => '+',
            'score' => $new_score,
            'note'  => $note,
            'typecategory' => 'Conservation scores',
            'type'  => 'histogram'
        };
    }
    return @results;
}
sub das_stylesheet {
    my ($self) = @_;

    # Static DAS stylesheet: conservation scores rendered as a red/yellow/blue
    # histogram over the fixed score range [-3, 3]. No interpolation needed,
    # hence the single-quoted heredoc.
    my $stylesheet = <<'EOT';
<!DOCTYPE DASSTYLE SYSTEM "http://www.biodas.org/dtd/dasstyle.dtd">
<DASSTYLE>
<STYLESHEET version="1.0">
<CATEGORY id="Conservation scores">
<TYPE id="histogram"><GLYPH><HISTOGRAM>
<MIN>-3</MIN>
<MAX>3</MAX>
<HEIGHT>100</HEIGHT>
<STEPS>50</STEPS>
<COLOR1>red</COLOR1>
<COLOR2>yellow</COLOR2>
<COLOR3>blue</COLOR3>
</HISTOGRAM></GLYPH></TYPE>
</CATEGORY>
</STYLESHEET>
</DASSTYLE>
EOT

    return $stylesheet;
}
1;
| adamsardar/perl-libs-custom | EnsemblAPI/ensembl-compara/modules/Bio/Das/ProServer/SourceAdaptor/conservation_score.pm | Perl | apache-2.0 | 7,768 |
#!/usr/bin/perl

=head1 COPYRIGHT

Copyright (c) 2016, Juniper Networks Inc.
All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

=head1 AUTHOR

Justin Bellomi - justinb@juniper.net

=head1 NAME

unmake-admin.pl - Quick script to make a user not an admin.

=cut

use strict;
use warnings;

# Other Libraries
use DBWrap;

my $databases     = ['branchlocker'];
my $database_name = 'branchlocker';
my $schema_name   = 'blr';

my $username = $ARGV[0];

die "unmake-admin.pl <username>\n" if (! defined $username);
# \A and \z anchor the whole string. The previous ^...$ anchors would have
# accepted a value with a trailing newline, so tighten the validation.
die "Invalid username: $username" if ($username !~ /\A[a-z]+\z/);

# Point both read-write and read-only handles at the local blr database.
$DBServers::readwrite_user    = 'blr_w';
$DBServers::readwrite_pass    = '';
$DBServers::readwrite_host    = 'localhost';
$DBServers::readonly_user     = 'blr_w';
$DBServers::readonly_pass     = '';
$DBServers::database_servers  = { 'only' => ['localhost'] };

DBWrap::init({
    'writable'  => 1,
    'databases' => $databases,
});

# Look the user up by name; we need the numeric id for the link table.
my $user = DBWrap::get_row_from_columns({
    'database' => $database_name,
    'schema'   => $schema_name,
    'table'    => 'bl_user',
    'columns'  => {
        'name' => $username,
    },
});

die "Could not find user: $username\n" if (! defined $user);

# Group id 1 is the admin group; removing the link revokes admin rights.
my $link = DBWrap::delete_rows_from_columns({
    'database' => $database_name,
    'schema'   => $schema_name,
    'table'    => 'bl_link_user_to_group',
    'columns'  => {
        'user_id'  => $user->{'id'},
        'group_id' => 1,
    },
});

die "Failed to unmake user an admin.\n" if (! defined $link);

print "User: $username is no longer an admin.\n";

exit 0;
| Juniper/branch-locker | src/unmake-admin.pl | Perl | apache-2.0 | 2,024 |
use utf8;
package Schema::Result::Deliveryservice;
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE
=head1 NAME
Schema::Result::Deliveryservice
=cut
use strict;
use warnings;
use base 'DBIx::Class::Core';
=head1 TABLE: C<deliveryservice>
=cut
__PACKAGE__->table("deliveryservice");
=head1 ACCESSORS
=head2 id
data_type: 'integer'
is_auto_increment: 1
is_nullable: 0
=head2 xml_id
data_type: 'varchar'
is_nullable: 0
size: 48
=head2 active
data_type: 'tinyint'
is_nullable: 0
=head2 dscp
data_type: 'integer'
is_nullable: 0
=head2 signed
data_type: 'tinyint'
is_nullable: 1
=head2 qstring_ignore
data_type: 'tinyint'
is_nullable: 1
=head2 geo_limit
data_type: 'tinyint'
default_value: 0
is_nullable: 1
=head2 http_bypass_fqdn
data_type: 'varchar'
is_nullable: 1
size: 255
=head2 dns_bypass_ip
data_type: 'varchar'
is_nullable: 1
size: 45
=head2 dns_bypass_ip6
data_type: 'varchar'
is_nullable: 1
size: 45
=head2 dns_bypass_ttl
data_type: 'integer'
is_nullable: 1
=head2 org_server_fqdn
data_type: 'varchar'
is_nullable: 1
size: 255
=head2 type
data_type: 'integer'
is_foreign_key: 1
is_nullable: 0
=head2 profile
data_type: 'integer'
is_foreign_key: 1
is_nullable: 0
=head2 cdn_id
data_type: 'integer'
is_foreign_key: 1
is_nullable: 0
=head2 ccr_dns_ttl
data_type: 'integer'
is_nullable: 1
=head2 global_max_mbps
data_type: 'integer'
is_nullable: 1
=head2 global_max_tps
data_type: 'integer'
is_nullable: 1
=head2 long_desc
data_type: 'varchar'
is_nullable: 1
size: 1024
=head2 long_desc_1
data_type: 'varchar'
is_nullable: 1
size: 1024
=head2 long_desc_2
data_type: 'varchar'
is_nullable: 1
size: 1024
=head2 max_dns_answers
data_type: 'integer'
default_value: 0
is_nullable: 1
=head2 info_url
data_type: 'varchar'
is_nullable: 1
size: 255
=head2 miss_lat
data_type: 'double precision'
is_nullable: 1
=head2 miss_long
data_type: 'double precision'
is_nullable: 1
=head2 check_path
data_type: 'varchar'
is_nullable: 1
size: 255
=head2 last_updated
data_type: 'timestamp'
datetime_undef_if_invalid: 1
default_value: current_timestamp
is_nullable: 1
=head2 protocol
data_type: 'tinyint'
default_value: 0
is_nullable: 1
=head2 ssl_key_version
data_type: 'integer'
default_value: 0
is_nullable: 1
=head2 ipv6_routing_enabled
data_type: 'tinyint'
is_nullable: 1
=head2 range_request_handling
data_type: 'tinyint'
default_value: 0
is_nullable: 1
=head2 edge_header_rewrite
data_type: 'varchar'
is_nullable: 1
size: 2048
=head2 origin_shield
data_type: 'varchar'
is_nullable: 1
size: 1024
=head2 mid_header_rewrite
data_type: 'varchar'
is_nullable: 1
size: 2048
=head2 regex_remap
data_type: 'varchar'
is_nullable: 1
size: 1024
=head2 cacheurl
data_type: 'varchar'
is_nullable: 1
size: 1024
=head2 remap_text
data_type: 'varchar'
is_nullable: 1
size: 2048
=head2 multi_site_origin
data_type: 'tinyint'
is_nullable: 1
=head2 display_name
data_type: 'varchar'
is_nullable: 0
size: 48
=head2 tr_response_headers
data_type: 'varchar'
is_nullable: 1
size: 1024
=head2 initial_dispersion
data_type: 'integer'
default_value: 1
is_nullable: 1
=head2 dns_bypass_cname
data_type: 'varchar'
is_nullable: 1
size: 255
=head2 tr_request_headers
data_type: 'varchar'
is_nullable: 1
size: 1024
=head2 regional_geo_blocking
data_type: 'tinyint'
is_nullable: 0
=head2 geo_provider
data_type: 'tinyint'
default_value: 0
is_nullable: 1
=head2 multi_site_origin_algorithm
data_type: 'tinyint'
is_nullable: 1
=head2 geo_limit_countries
data_type: 'varchar'
is_nullable: 1
size: 750
=head2 logs_enabled
data_type: 'tinyint'
is_nullable: 0
=head2 geolimit_redirect_url
data_type: 'varchar'
is_nullable: 1
size: 255
=cut
__PACKAGE__->add_columns(
"id",
{ data_type => "integer", is_auto_increment => 1, is_nullable => 0 },
"xml_id",
{ data_type => "varchar", is_nullable => 0, size => 48 },
"active",
{ data_type => "tinyint", is_nullable => 0 },
"dscp",
{ data_type => "integer", is_nullable => 0 },
"signed",
{ data_type => "tinyint", is_nullable => 1 },
"qstring_ignore",
{ data_type => "tinyint", is_nullable => 1 },
"geo_limit",
{ data_type => "tinyint", default_value => 0, is_nullable => 1 },
"http_bypass_fqdn",
{ data_type => "varchar", is_nullable => 1, size => 255 },
"dns_bypass_ip",
{ data_type => "varchar", is_nullable => 1, size => 45 },
"dns_bypass_ip6",
{ data_type => "varchar", is_nullable => 1, size => 45 },
"dns_bypass_ttl",
{ data_type => "integer", is_nullable => 1 },
"org_server_fqdn",
{ data_type => "varchar", is_nullable => 1, size => 255 },
"type",
{ data_type => "integer", is_foreign_key => 1, is_nullable => 0 },
"profile",
{ data_type => "integer", is_foreign_key => 1, is_nullable => 0 },
"cdn_id",
{ data_type => "integer", is_foreign_key => 1, is_nullable => 0 },
"ccr_dns_ttl",
{ data_type => "integer", is_nullable => 1 },
"global_max_mbps",
{ data_type => "integer", is_nullable => 1 },
"global_max_tps",
{ data_type => "integer", is_nullable => 1 },
"long_desc",
{ data_type => "varchar", is_nullable => 1, size => 1024 },
"long_desc_1",
{ data_type => "varchar", is_nullable => 1, size => 1024 },
"long_desc_2",
{ data_type => "varchar", is_nullable => 1, size => 1024 },
"max_dns_answers",
{ data_type => "integer", default_value => 0, is_nullable => 1 },
"info_url",
{ data_type => "varchar", is_nullable => 1, size => 255 },
"miss_lat",
{ data_type => "double precision", is_nullable => 1 },
"miss_long",
{ data_type => "double precision", is_nullable => 1 },
"check_path",
{ data_type => "varchar", is_nullable => 1, size => 255 },
"last_updated",
{
data_type => "timestamp",
datetime_undef_if_invalid => 1,
default_value => \"current_timestamp",
is_nullable => 1,
},
"protocol",
{ data_type => "tinyint", default_value => 0, is_nullable => 1 },
"ssl_key_version",
{ data_type => "integer", default_value => 0, is_nullable => 1 },
"ipv6_routing_enabled",
{ data_type => "tinyint", is_nullable => 1 },
"range_request_handling",
{ data_type => "tinyint", default_value => 0, is_nullable => 1 },
"edge_header_rewrite",
{ data_type => "varchar", is_nullable => 1, size => 2048 },
"origin_shield",
{ data_type => "varchar", is_nullable => 1, size => 1024 },
"mid_header_rewrite",
{ data_type => "varchar", is_nullable => 1, size => 2048 },
"regex_remap",
{ data_type => "varchar", is_nullable => 1, size => 1024 },
"cacheurl",
{ data_type => "varchar", is_nullable => 1, size => 1024 },
"remap_text",
{ data_type => "varchar", is_nullable => 1, size => 2048 },
"multi_site_origin",
{ data_type => "tinyint", is_nullable => 1 },
"display_name",
{ data_type => "varchar", is_nullable => 0, size => 48 },
"tr_response_headers",
{ data_type => "varchar", is_nullable => 1, size => 1024 },
"initial_dispersion",
{ data_type => "integer", default_value => 1, is_nullable => 1 },
"dns_bypass_cname",
{ data_type => "varchar", is_nullable => 1, size => 255 },
"tr_request_headers",
{ data_type => "varchar", is_nullable => 1, size => 1024 },
"regional_geo_blocking",
{ data_type => "tinyint", is_nullable => 0 },
"geo_provider",
{ data_type => "tinyint", default_value => 0, is_nullable => 1 },
"multi_site_origin_algorithm",
{ data_type => "tinyint", is_nullable => 1 },
"geo_limit_countries",
{ data_type => "varchar", is_nullable => 1, size => 750 },
"logs_enabled",
{ data_type => "tinyint", is_nullable => 0 },
"geolimit_redirect_url",
{ data_type => "varchar", is_nullable => 1, size => 255 },
);
=head1 PRIMARY KEY
=over 4
=item * L</id>
=item * L</type>
=back
=cut
__PACKAGE__->set_primary_key("id", "type");
=head1 UNIQUE CONSTRAINTS
=head2 C<ds_id_UNIQUE>
=over 4
=item * L</id>
=back
=cut
__PACKAGE__->add_unique_constraint("ds_id_UNIQUE", ["id"]);
=head2 C<ds_name_UNIQUE>
=over 4
=item * L</xml_id>
=back
=cut
__PACKAGE__->add_unique_constraint("ds_name_UNIQUE", ["xml_id"]);
=head1 RELATIONS
=head2 cdn
Type: belongs_to
Related object: L<Schema::Result::Cdn>
=cut
__PACKAGE__->belongs_to(
"cdn",
"Schema::Result::Cdn",
{ id => "cdn_id" },
{ is_deferrable => 1, on_delete => "RESTRICT", on_update => "RESTRICT" },
);
=head2 deliveryservice_regexes
Type: has_many
Related object: L<Schema::Result::DeliveryserviceRegex>
=cut
__PACKAGE__->has_many(
"deliveryservice_regexes",
"Schema::Result::DeliveryserviceRegex",
{ "foreign.deliveryservice" => "self.id" },
{ cascade_copy => 0, cascade_delete => 0 },
);
=head2 deliveryservice_servers
Type: has_many
Related object: L<Schema::Result::DeliveryserviceServer>
=cut
__PACKAGE__->has_many(
"deliveryservice_servers",
"Schema::Result::DeliveryserviceServer",
{ "foreign.deliveryservice" => "self.id" },
{ cascade_copy => 0, cascade_delete => 0 },
);
=head2 deliveryservice_tmusers
Type: has_many
Related object: L<Schema::Result::DeliveryserviceTmuser>
=cut
__PACKAGE__->has_many(
"deliveryservice_tmusers",
"Schema::Result::DeliveryserviceTmuser",
{ "foreign.deliveryservice" => "self.id" },
{ cascade_copy => 0, cascade_delete => 0 },
);
=head2 federation_deliveryservices
Type: has_many
Related object: L<Schema::Result::FederationDeliveryservice>
=cut
__PACKAGE__->has_many(
"federation_deliveryservices",
"Schema::Result::FederationDeliveryservice",
{ "foreign.deliveryservice" => "self.id" },
{ cascade_copy => 0, cascade_delete => 0 },
);
=head2 jobs
Type: has_many
Related object: L<Schema::Result::Job>
=cut
__PACKAGE__->has_many(
"jobs",
"Schema::Result::Job",
{ "foreign.job_deliveryservice" => "self.id" },
{ cascade_copy => 0, cascade_delete => 0 },
);
=head2 profile
Type: belongs_to
Related object: L<Schema::Result::Profile>
=cut
__PACKAGE__->belongs_to(
"profile",
"Schema::Result::Profile",
{ id => "profile" },
{ is_deferrable => 1, on_delete => "NO ACTION", on_update => "NO ACTION" },
);
=head2 staticdnsentries
Type: has_many
Related object: L<Schema::Result::Staticdnsentry>
=cut
__PACKAGE__->has_many(
"staticdnsentries",
"Schema::Result::Staticdnsentry",
{ "foreign.deliveryservice" => "self.id" },
{ cascade_copy => 0, cascade_delete => 0 },
);
=head2 steering_target_deliveryservices
Type: has_many
Related object: L<Schema::Result::SteeringTarget>
=cut
__PACKAGE__->has_many(
"steering_target_deliveryservices",
"Schema::Result::SteeringTarget",
{ "foreign.deliveryservice" => "self.id" },
{ cascade_copy => 0, cascade_delete => 0 },
);
=head2 steering_target_deliveryservices_2s
Type: has_many
Related object: L<Schema::Result::SteeringTarget>
=cut
__PACKAGE__->has_many(
"steering_target_deliveryservices_2s",
"Schema::Result::SteeringTarget",
{ "foreign.deliveryservice" => "self.id" },
{ cascade_copy => 0, cascade_delete => 0 },
);
=head2 type
Type: belongs_to
Related object: L<Schema::Result::Type>
=cut
__PACKAGE__->belongs_to(
"type",
"Schema::Result::Type",
{ id => "type" },
{ is_deferrable => 1, on_delete => "NO ACTION", on_update => "NO ACTION" },
);
# Created by DBIx::Class::Schema::Loader v0.07045 @ 2016-08-01 08:58:13
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:k1aJ71tsV0AWeFF/OpHFUA

# You can replace this text with custom code or comments, and it will be preserved on regeneration
# NOTE(review): everything above the md5sum marker is machine-generated from
# the database schema by DBIx::Class::Schema::Loader; hand edits up there are
# lost (and invalidate the checksum) when the schema is regenerated. Custom
# relationships, resultset helpers and method overrides belong here, below
# the marker.
1;
| knutsel/traffic_control-1 | traffic_ops/app/lib/Schema/Result/Deliveryservice.pm | Perl | apache-2.0 | 12,478 |
#
# Copyright 2015 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::aruba::standard::snmp::plugin;
use strict;
use warnings;
use base qw(centreon::plugins::script_snmp);
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    # Plugin version and the mode dispatch table
    # (mode name on the CLI -> implementing class).
    $self->{version} = '1.0';
    $self->{modes} = {
        'ap-connections'  => 'centreon::common::aruba::snmp::mode::apconnections',
        'ap-users'        => 'centreon::common::aruba::snmp::mode::apusers',
        'cpu'             => 'centreon::common::aruba::snmp::mode::cpu',
        'hardware'        => 'centreon::common::aruba::snmp::mode::hardware',
        'interfaces'      => 'snmp_standard::mode::interfaces',
        'list-interfaces' => 'snmp_standard::mode::listinterfaces',
        'memory'          => 'centreon::common::aruba::snmp::mode::memory',
        'storage'         => 'centreon::common::aruba::snmp::mode::storage',
    };

    return $self;
}
1;
__END__
=head1 PLUGIN DESCRIPTION
Check Aruba equipments in SNMP.
=cut
| s-duret/centreon-plugins | network/aruba/standard/snmp/plugin.pm | Perl | apache-2.0 | 1,985 |
#
# Copyright 2017 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package snmp_standard::mode::uptime;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
use POSIX;
use centreon::plugins::misc;
use centreon::plugins::statefile;
use Time::HiRes qw(time);
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '1.0';

    # CLI options: warning/critical thresholds (seconds), an optional OID
    # override, and counter-wrap detection.
    $options{options}->add_options(arguments => {
        "warning:s"      => { name => 'warning' },
        "critical:s"     => { name => 'critical' },
        "force-oid:s"    => { name => 'force_oid' },
        "check-overload" => { name => 'check_overload' },
    });

    # State file used by check_overload() to remember the previous uptime.
    $self->{statefile_cache} = centreon::plugins::statefile->new(%options);

    return $self;
}
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::init(%options);

    # Validate both threshold options; bail out on the first malformed one.
    for my $label (qw(warning critical)) {
        next if $self->{perfdata}->threshold_validate(label => $label, value => $self->{option_results}->{$label});
        $self->{output}->add_option_msg(short_msg => "Wrong $label threshold '" . $self->{option_results}->{$label} . "'.");
        $self->{output}->option_exit();
    }

    $self->{statefile_cache}->check_options(%options);
}
# Compensate for 32-bit timeticks counter wrap-around.
# SNMP timeticks are 32-bit, so the raw counter wraps every 4294967296 ticks
# (~497 days). Using a state file that remembers the last raw uptime, the
# wall-clock time of that reading, and how many wraps have been seen so far,
# this detects a new wrap and adds 2^32 per wrap to the returned value.
# Args (hash): timeticks - raw counter value from the device.
# Returns: corrected timeticks (unchanged unless --check-overload is set).
sub check_overload {
    my ($self, %options) = @_;

    # Opt-in feature: without --check-overload, pass the raw value through.
    return $options{timeticks} if (!defined($self->{option_results}->{check_overload}));

    # Wall-clock "now" expressed in the same unit as timeticks (1/100 s).
    my $current_time = floor(time() * 100);
    # Data to persist for the next run; overload count is patched in below.
    $self->{new_datas} = { last_time => $current_time, uptime => $options{timeticks}, overload => 0 };
    $self->{statefile_cache}->read(statefile => "cache_" . $self->{snmp}->get_hostname() . '_' . $self->{snmp}->get_port() . '_' . $self->{mode});
    my $old_uptime = $self->{statefile_cache}->get(name => 'uptime');
    my $last_time = $self->{statefile_cache}->get(name => 'last_time');
    my $overload = $self->{statefile_cache}->get(name => 'overload');

    if (defined($old_uptime) && $old_uptime < $current_time) {
        # Project the previous raw uptime forward by the elapsed wall time;
        # if that projection crossed the 32-bit boundary AND lands close
        # (+/- 5000 ticks, i.e. 50 s) to the value just read, assume exactly
        # one new wrap occurred since the last run.
        my $diff_time = $current_time - $last_time;
        my $overflow = ($old_uptime + $diff_time) % 4294967296;
        my $division = ($old_uptime + $diff_time) / 4294967296;
        if ($division >= 1 &&
            $overflow >= ($options{timeticks} - 5000) &&
            $overflow <= ($options{timeticks} + 5000)) {
            $overload++;
        }
        # Re-add all wraps observed so far.
        $options{timeticks} += ($overload * 4294967296);
    }
    # Persist the (possibly incremented) wrap count; stays 0 on first run.
    $self->{new_datas}->{overload} = $overload if (defined($overload));

    $self->{statefile_cache}->write(data => $self->{new_datas});
    return $options{timeticks};
}
sub run {
    my ($self, %options) = @_;
    $self->{snmp} = $options{snmp};

    # hrSystemUptime (HOST-RESOURCES-MIB) is preferred for operating systems;
    # sysUpTime (SNMPv2-MIB) is the fallback for network equipment.
    my $oid_hrSystemUptime = '.1.3.6.1.2.1.25.1.1.0';
    my $oid_sysUpTime = '.1.3.6.1.2.1.1.3.0';

    my $timeticks;
    if (defined($self->{option_results}->{force_oid})) {
        # The user pinned a specific OID: query only that one.
        my $oid = $self->{option_results}->{force_oid};
        my $snmp_result = $self->{snmp}->get_leef(oids => [ $oid ], nothing_quit => 1);
        $timeticks = $snmp_result->{$oid};
    } else {
        my $snmp_result = $self->{snmp}->get_leef(oids => [ $oid_hrSystemUptime, $oid_sysUpTime ], nothing_quit => 1);
        $timeticks = defined($snmp_result->{$oid_hrSystemUptime})
            ? $snmp_result->{$oid_hrSystemUptime}
            : $snmp_result->{$oid_sysUpTime};
    }

    # Compensate for 32-bit counter wrap, then convert timeticks (1/100 s)
    # to whole seconds.
    $timeticks = $self->check_overload(timeticks => $timeticks);
    my $uptime = floor($timeticks / 100);

    my $exit_code = $self->{perfdata}->threshold_check(
        value => $uptime,
        threshold => [ { label => 'critical', exit_litteral => 'critical' }, { label => 'warning', exit_litteral => 'warning' } ]
    );
    $self->{output}->perfdata_add(
        label => 'uptime', unit => 's',
        value => $uptime,
        warning => $self->{perfdata}->get_perfdata_for_output(label => 'warning'),
        critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical'),
        min => 0
    );
    $self->{output}->output_add(
        severity => $exit_code,
        short_msg => sprintf("System uptime is: %s",
                             centreon::plugins::misc::change_seconds(value => $uptime, start => 'd'))
    );

    $self->{output}->display();
    $self->{output}->exit();
}
1;
__END__
=head1 MODE
Check system uptime.
=over 8
=item B<--warning>
Threshold warning in seconds.
=item B<--critical>
Threshold critical in seconds.
=item B<--force-oid>
Can choose your oid (numeric format only).
=item B<--check-overload>
Uptime counter limit is 4294967296 and overflow.
With that option, we manage the counter going back. But there is a few chance we can miss a reboot.
=back
=cut
| maksimatveev/centreon-plugins | snmp_standard/mode/uptime.pm | Perl | apache-2.0 | 6,035 |
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::cisco::aci::apic::restapi::mode::node;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
use centreon::plugins::templates::catalog_functions qw(catalog_status_threshold);
# Declare the counter layout for this mode: one keyed group ('nodes', one
# entry per fabric node, prefixed by prefix_nodes_output) carrying three
# percentage gauges taken from the APIC 5-minute health rollup.
sub set_counters {
    my ($self, %options) = @_;

    # type => 1: instanced collection, keyed by the values filled in by
    # manage_selection().
    $self->{maps_counters_type} = [
        { name => 'nodes', type => 1, cb_prefix_output => 'prefix_nodes_output', message_multiple => 'All fabric nodes are ok' }
    ];

    # Each counter reads one key from the per-node hash built in
    # manage_selection(); 'dn' is used to tag the per-instance perfdata.
    $self->{maps_counters}->{nodes} = [
        # Most recent health sample in the 5-minute window.
        { label => 'health-current', nlabel => 'node.health.current.percentage', set => {
                key_values => [ { name => 'current' }, { name => 'dn' } ],
                output_template => 'current: %s %%', output_error_template => "current: %s %%",
                perfdatas => [
                    { template => '%d', unit => '%', min => 0, max => 100, label_extra_instance => 1, instance_use => 'dn' }
                ]
            }
        },
        # Minimum health seen during the 5-minute window.
        { label => 'health-minimum', nlabel => 'node.health.minimum.percentage', set => {
                key_values => [ { name => 'min' }, { name => 'dn' } ],
                output_template => 'min: %s %%', output_error_template => "min: %s %%",
                perfdatas => [
                    { template => '%d', unit => '%', min => 0, max => 100, label_extra_instance => 1, instance_use => 'dn' }
                ]
            }
        },
        # Average health over the 5-minute window.
        { label => 'health-average', nlabel => 'node.health.average.percentage', set => {
                key_values => [ { name => 'avg' }, { name => 'dn' } ],
                output_template => 'average: %s %%', output_error_template => "average %s %%",
                perfdatas => [
                    { template => '%d', unit => '%', min => 0, max => 100, label_extra_instance => 1, instance_use => 'dn' }
                ]
            }
        }
    ];
}
sub prefix_nodes_output {
    my ($self, %options) = @_;

    # Prefix printed before each node's counter values.
    return sprintf("Node '%s' health ", $options{instance_value}->{dn});
}
sub new {
    my ($class, %options) = @_;

    my $self = $class->SUPER::new(package => __PACKAGE__, %options, force_new_perfdata => 1);
    bless $self, $class;

    # Optional regexp restricting which pod/node dn's get monitored.
    my %arguments = (
        'filter-node:s' => { name => 'filter_node' }
    );
    $options{options}->add_options(arguments => \%arguments);

    return $self;
}
# Query the APIC for the 5-minute fabric node health rollup and populate
# $self->{nodes} (one entry per node: min/current/avg health plus the dn).
# Exits via option_exit() when nothing matched.
sub manage_selection {
    my ($self, %options) = @_;

    my $result_nodes = $options{custom}->get_node_health_5m();

    $self->{nodes} = {};
    foreach my $node (@{$result_nodes->{imdata}}) {
        my $attributes = $node->{fabricNodeHealth5min}->{attributes};
        # Guard the match: in the original code an unchecked match left $1
        # holding the PREVIOUS iteration's capture (or undef on the first
        # one) whenever a dn had an unexpected format.
        if ($attributes->{dn} !~ /^topology\/(.*)\/sys\/CDfabricNodeHealth5min$/) {
            $self->{output}->output_add(long_msg => "skipping '" . $attributes->{dn} . "': unexpected dn format", debug => 1);
            next;
        }
        my $node_dn = $1;

        # Honour --filter-node when provided.
        if (defined($self->{option_results}->{filter_node}) && $self->{option_results}->{filter_node} ne '' &&
            $node_dn !~ /$self->{option_results}->{filter_node}/) {
            $self->{output}->output_add(long_msg => "skipping '" . $node_dn . "': no matching filter", debug => 1);
            next;
        }

        $self->{nodes}->{$node_dn} = {
            min     => $attributes->{healthMin},
            current => $attributes->{healthLast},
            avg     => $attributes->{healthAvg},
            dn      => $node_dn
        };
    }

    if (scalar(keys %{$self->{nodes}}) <= 0) {
        $self->{output}->add_option_msg(short_msg => "No nodes found (try --debug)");
        $self->{output}->option_exit();
    }
}
1;
__END__
=head1 MODE
Check fabric nodes.
=over 8
=item B<--filter-node>
Regexp filter on the pod / node name
=item B<--warning-*>
Set warning for each health percentage value. Can be :
--warning-health-average=90:
--warning-health-current
--warning-health-minimum
=item B<--critical-*>
Set critical for each health percentage value. Can be :
--critical-health-average=90:
--critical-health-current=95:
--critical-health-minimum
=back
=cut
| Tpo76/centreon-plugins | network/cisco/aci/apic/restapi/mode/node.pm | Perl | apache-2.0 | 4,662 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=head1 NAME
Bio::EnsEMBL::Compara::RunnableDB::PairAligner::UcscToEnsemblMapping
=head1 DESCRIPTION
Convert UCSC names to ensembl names (reference only chromosomes and supercontigs, ie no haplotypes)
First check the names using chromInfo.txt and then go to mapping file if necessary eg ctgPos.txt for human
Download from:
http://hgdownload.cse.ucsc.edu/downloads.html
Choose species
Choose Annotation database
wget http://hgdownload.cse.ucsc.edu/goldenPath/ponAbe2/database/chromInfo.txt.gz
=cut
package Bio::EnsEMBL::Compara::RunnableDB::PairAligner::UcscToEnsemblMapping;
use strict;
use warnings;
use Bio::EnsEMBL::Compara::MethodLinkSpeciesSet;
use base ('Bio::EnsEMBL::Compara::RunnableDB::BaseRunnable');
############################################################
=head2 fetch_input
Title : fetch_input
Usage : $self->fetch_input
Returns : nothing
Args : none
=cut
sub fetch_input {
    my( $self) = @_;

    #Must define chromInfo file
    return if (!defined $self->param('chromInfo_file') || $self->param('chromInfo_file') eq "");

    my $gdba = $self->compara_dba->get_GenomeDBAdaptor;
    my $genome_db = $gdba->fetch_by_name_assembly($self->param('species'));
    $self->param('genome_db', $genome_db);

    #Get slice adaptor
    my $slice_adaptor = $genome_db->db_adaptor->get_SliceAdaptor;

    # $ensembl_names maps seq_region name -> 1 ("seen in Ensembl") and is
    # promoted to 2 once the region has also been matched on the UCSC side.
    my $ensembl_names;
    my $ucsc_to_ensembl_mapping;

    #Get all toplevel slices
    my $ref_slices = $slice_adaptor->fetch_all("toplevel");
    foreach my $this_slice ( @$ref_slices ) {
        $ensembl_names->{$this_slice->seq_region_name} = 1;
    }

    # Open UCSC chromInfo file: three-arg open on a lexical handle (the
    # original two-arg bareword open is unsafe if the path starts with
    # a mode character).
    open my $ucsc_fh, '<', $self->param('chromInfo_file')
        or die ("Unable to open " . $self->param('chromInfo_file'));
    my $ucsc_map_loaded = 0;
    while (my $line = <$ucsc_fh>) {
        my ($ucsc_chr, $size, $file) = split " ", $line;
        my $chr = $ucsc_chr;
        $chr =~ s/chr//;    # UCSC names carry a "chr" prefix
        if ($ensembl_names->{$chr}) {
            $ensembl_names->{$chr} = 2;
            $ucsc_to_ensembl_mapping->{$ucsc_chr} = $chr;
        } elsif ($chr eq "M") {
            #Special case: UCSC chrM is Ensembl MT
            $ensembl_names->{"MT"} = 2;
            $ucsc_to_ensembl_mapping->{$ucsc_chr} = "MT";
        } else {
            # Fall back to the auxiliary UCSC mapping file (eg ctgPos.txt).
            # Reading it once suffices: each read fills in every mapping it
            # contains, so the original per-unmatched-line re-read only
            # repeated identical work.
            if (defined $self->param('ucsc_map')) {
                unless ($ucsc_map_loaded) {
                    read_ucsc_map($self->param('ucsc_map'), $ensembl_names, $ucsc_to_ensembl_mapping);
                    $ucsc_map_loaded = 1;
                }
            } else {
                die ("You must provide a UCSC mapping file");
            }
        }
    }
    close $ucsc_fh;

    # Every Ensembl toplevel region must have been found on the UCSC side.
    foreach my $chr (keys %$ensembl_names) {
        if ($ensembl_names->{$chr} != 2) {
            die ("Failed to find $chr in UCSC");
        }
    }
    $self->param('ucsc_to_ensembl_mapping', $ucsc_to_ensembl_mapping);
}
# read_ucsc_map($ucsc_map, \%ensembl_names, \%ucsc_to_ensembl_mapping)
# Reads a UCSC mapping file (eg ctgPos.txt) whose columns are
# contig, size, chrom, chromStart, chromEnd.  For each contig already
# known to Ensembl (present in %$ensembl_names) the entry is flagged as
# matched (value 2) and the UCSC chrom name is mapped to the contig.
# Dies if the file cannot be opened.
sub read_ucsc_map {
    my ($ucsc_map, $ensembl_names, $ucsc_to_ensembl_mapping) = @_;

    # Three-arg open on a lexical handle (the original used an unsafe
    # two-arg open on a bareword filehandle).
    open my $map_fh, '<', $ucsc_map or die ("Unable to open " . $ucsc_map);
    while (my $line = <$map_fh>) {
        my ($contig, $size, $chrom, $chromStart, $chromEnd) = split " ", $line;
        if ($ensembl_names->{$contig}) {
            $ensembl_names->{$contig} = 2;
            $ucsc_to_ensembl_mapping->{$chrom} = $contig;
        }
    }
    close $map_fh;
}
# Nothing happens at the run stage: parsing is done in fetch_input() and
# storage in write_output().
sub run {
    my $self = shift;
}
# Inserts one row per UCSC chromosome into the compara
# ucsc_to_ensembl_mapping table for this genome_db.  No-op when no
# chromInfo file was supplied (mirrors the guard in fetch_input).
sub write_output {
    my ($self) = shift;

    return if (!defined $self->param('chromInfo_file') || $self->param('chromInfo_file') eq "");

    my $genome_db_id = $self->param('genome_db')->dbID;

    #Insert into ucsc_to_ensembl_mapping table
    my $sql = "INSERT INTO ucsc_to_ensembl_mapping (genome_db_id, ucsc, ensembl) VALUES (?,?,?)";
    my $sth = $self->compara_dba->dbc->prepare($sql);
    my $ucsc_to_ensembl_mapping = $self->param('ucsc_to_ensembl_mapping');
    foreach my $ucsc_chr (keys %$ucsc_to_ensembl_mapping) {
        #print "$ucsc_chr " . $ucsc_to_ensembl_mapping->{$ucsc_chr} . "\n";
        $sth->execute($genome_db_id, $ucsc_chr, $ucsc_to_ensembl_mapping->{$ucsc_chr});
    }
    $sth->finish();
}
1;
| danstaines/ensembl-compara | modules/Bio/EnsEMBL/Compara/RunnableDB/PairAligner/UcscToEnsemblMapping.pm | Perl | apache-2.0 | 4,714 |
:- module(classad_reltime_parser,
[parse_reltime/2, % parse_reltime(ReltimeExpression, Seconds)
unparse_reltime/1 % unparse_reltime(Seconds)
]).
:- expects_dialect(swi).
% unparse a number of seconds into a reltime string format
% note, may be called from with_output_to/2
% unparse_reltime(+Seconds)
% Writes Seconds to the current output stream in the classad reltime
% format, omitting leading fields that are zero (days "D+", then
% zero-padded hours, minutes and fractional seconds).  A negative value
% gets a leading "-".  May be called from within with_output_to/2.
unparse_reltime(Seconds) :-
    number(Seconds),
    % Split off the sign, then the whole (WS) and fractional (FS) parts.
    ((Seconds < 0) -> (SG = "-", SS is -Seconds) ; (SG = "", SS is Seconds)),
    WS is floor(SS), FS is SS-WS,
    % Decompose whole seconds into days / hours / minutes / seconds.
    D is WS // 86400, DR is WS mod 86400,
    H is DR // 3600, HR is DR mod 3600,
    M is HR // 60, MR is HR mod 60,
    S is MR + FS,
    % Emit starting from the most significant non-zero field.
    ((D > 0) -> (
        format("~s~d+", [SG, D]),
        format("~0t~d~2+:", [H]),
        format("~0t~d~2+:", [M]),
        format("~0t~3f~6+", [S])
    ) ; ( (H > 0) -> (
        format("~s~d:", [SG, H]),
        format("~0t~d~2+:", [M]),
        format("~0t~3f~6+", [S])
    ) ; ( (M > 0) -> (
        format("~s~d:", [SG,M]),
        format("~0t~3f~6+", [S])
    ) ; (
        format("~s~3f", [SG,S])
    )))).
% parse_reltime(+RTE, -Seconds)
% Parses a reltime expression given as a code list or an atom and
% returns the corresponding number of seconds.
parse_reltime(RTE, S) :- is_list(RTE), !, reltime(S, RTE, []), !.
parse_reltime(RTE, S) :- atom(RTE), !, atom_codes(RTE, CL), reltime(S, CL, []), !.
% consume a reltime expression, and return the corresponding number of seconds
% a reltime expression consists of a leading sign, day, hour, min, sec fields. All are optional, however
% at least *one* of day/hour/min/sec must be present to be valid.
% The second argument of each field rule is a presence flag (1/0) so the
% top rule can reject an expression with no fields at all (DP+HP > 0).
reltime(S) --> sign(SF), days(D, DP), hms(HMS, HP), whitespace, { (DP+HP) > 0, S is SF * (86400*D + HMS) }.
% Optional sign; SF is the multiplier applied to the total.
sign(1) --> whitespace, "+".
sign(-1) --> whitespace, "-".
sign(1) --> "".
% Day count terminated by "+", "d" or "D".
days(D, 1) --> whitespace, num(D), tdays.
days(0, 0) --> "".
tdays --> whitespace, "+".
tdays --> whitespace, "d".
tdays --> whitespace, "D".
% Hours/minutes/seconds: either fixed "HH:MM:SS[.fff]" form or the
% tagged form with h/m/s unit suffixes.
hms(HMS, P) --> hmsfix(HMS, P).
hms(HMS, P) --> hmstag(HMS, P).
hmsfix(HMS, 3) --> whitespace, ge2num(H), ":", ge2num(M), ":", ge2num(S), fnum(F), { HMS is 3600*H + 60*M + S + F}.
hmstag(HMS, P) --> hours(H, HP), minutes(M, MP), seconds(S, SP), { P is HP+MP+SP, HMS is 3600*H + 60*M + S }.
hours(H, 1) --> whitespace, num(H), thours.
hours(0, 0) --> "".
thours --> whitespace, "h".
thours --> whitespace, "H".
minutes(M, 1) --> whitespace, num(M), tminutes.
minutes(0, 0) --> "".
tminutes --> whitespace, "m".
tminutes --> whitespace, "M".
seconds(S, 1) --> whitespace, num(TS), fnum(F), tseconds, { S is TS + F}.
seconds(0, 0) --> "".
tseconds --> whitespace, "s".
tseconds --> whitespace, "S".
tseconds --> "".
% consume a number, return with its value:
% ge2num requires at least two digits (used by the fixed HH:MM:SS form).
ge2num(N) --> dhead(0, T1), dhead(T1, T2), drest(T2, N).
num(N) --> dhead(0, T), drest(T, N).
% the decimal fraction of a number
fnum(F) --> ".", frest(0, 0.1, F).
fnum(0) --> "".
fhead(C, DF, F) --> [D], { char_type(D, digit), F is C + DF*(D-"0") }.
frest(C, DF, F) --> fhead(C, DF, T), { NDF is 0.1 * DF }, frest(T, NDF, F).
frest(C, _, C) --> "".
% Accumulate decimal digits into an integer.
dhead(C, N) --> [D], { char_type(D, digit), N is (D-"0") + 10*C }.
drest(C, N) --> dhead(C, T), drest(T, N).
drest(N, N) --> "".
% consume whitespace (zero or more whitespace characters)
whitespace --> wschar, whitespace.
whitespace --> "".
wschar --> [C], { char_type(C, white) }.
| erikerlandson/classad_pl | lib/classad_reltime_parser.pl | Perl | apache-2.0 | 3,214 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Thin Perl-side stub for the Clownfish CFC Class model; the actual
# implementation is provided by the compiled Clownfish::CFC module.
package Clownfish::CFC::Model::Class;
use Clownfish::CFC;
# eval-ing the quoted version numifies "0.005000" (standard CPAN idiom).
our $VERSION = '0.005000';
$VERSION = eval $VERSION;
1;
| rectang/lucy-clownfish | compiler/perl/lib/Clownfish/CFC/Model/Class.pm | Perl | apache-2.0 | 898 |
use v5.10;
# Sample input for the match below.
$_ = <<'HERE';
Amelia said "I am a camel"
HERE
# Demonstrates regex recursion: group 1 matches the opening quote
# (single or double), (?1) re-invokes group 1 so the closing quote must
# be the same kind, and the quoted text lands in the named capture
# $+{said}.
say "Matched [$+{said}]!" if m/
    ( ['"] )
    (?<said>.*?)
    (?1)
    /x;
| mishin/presentation | regex_recursion/005.pl | Perl | apache-2.0 | 214 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
package EnsEMBL::Web::Query::GlyphSet::Variation;
use strict;
use warnings;
use parent qw(EnsEMBL::Web::Query::Generic::GlyphSet);
our $VERSION = 12;
# Serialisation fixups applied when features move in and out of the
# precache: config, hrefs, slice-relative coordinates, and the slice
# itself (re-attached by species in 20kb chunks).
sub fixup {
  my ($self) = @_;

  $self->fixup_config('config');
  $self->fixup_href('href',1);
  $self->fixup_location('start','slice',0);
  $self->fixup_location('end','slice',1);
  $self->fixup_slice('slice','species',20000);

  # Fix class key (depends on depth): at depth <= 1 the track is drawn
  # collapsed, so flag each feature with the 'group' class after fetch.
  if($self->phase eq 'post_process') {
    my $depth = $self->context->depth;
    my $data = $self->data;
    foreach my $f (@$data) {
      $f->{'class'} = 'group' if defined $depth && $depth <= 1;
    }
  }
  $self->SUPER::fixup();
}
# Declarative list of precache jobs: each key names a job, 'loop' lists
# the axes to iterate over, and 'args' is the argument set used to warm
# the cache for that track.
sub precache {
  my %jobs = (
    '1kgindels' => {
      loop => ['genome'],
      args => {
        species     => 'Homo_sapiens',
        id          => 'variation_set_1kg_3',
        config      => {
          sets     => ['1kg_3'],
          set_name => '1000 Genomes - All - short variants (SNPs and indels)',
        },
        var_db      => 'variation',
        config_type => 'contigviewbottom',
        type        => 'variation_set_1kg_3',
      },
    },
    'variation-mouse' => {
      loop => ['genome'],
      args => {
        species     => 'Mus_musculus',
        id          => 'variation_feature_variation',
        config      => {},
        var_db      => 'variation',
        config_type => 'contigviewbottom',
        type        => 'variation_feature_variation',
      },
    },
    'ph-short' => {
      loop => ['species','genome'],
      args => {
        id          => 'variation_set_ph_variants',
        config      => {
          sets     => ['ph_variants'],
          set_name => 'All phenotype-associated - short variants (SNPs and indels)',
        },
        var_db      => 'variation',
        config_type => 'contigviewbottom',
        type        => 'variation_set_ph_variants',
      },
    },
  );
  return \%jobs;
}
# Colour features by their most severe consequence term, lower-cased.
sub colour_key {
  my ( undef, $feature ) = @_;
  return lc $feature->display_consequence;
}
# Text drawn on top of the feature: the ambiguity code, except that a
# bare "-" placeholder is suppressed (returns undef/empty list).
sub text_overlay {
  my ( undef, $feature ) = @_;
  my $code = $feature->ambig_code;
  return if $code && $code eq '-';
  return $code;
}
# Builds the ZMenu href parameters for one variation feature.
sub href {
  my ($self,$f,$args) = @_;

  # Fix URL encoding issue with the "<>" characters
  my $var = $f->variation_name;
  $var =~ s/(<|>)/_/g if $var;

  return {
    species  => $args->{'species'},
    type     => 'Variation',
    v        => $var,
    vf       => $f->dbID,
    vdb      => $args->{'var_db'} || 'variation',
    snp_fake => 1,
    config   => $args->{'config_type'},
    track    => $args->{'type'},
  };
}
# Returns the feature's variation class when it is an insertion or a
# deletion (used to pick a glyph shape), undef otherwise.
sub type {
  my ( undef, $feature ) = @_;
  my $class = $feature->var_class;
  return ( $class eq 'insertion' || $class eq 'deletion' ) ? $class : undef;
}
# Tooltip text for one feature: name, seq-region location (converted
# from slice-local coordinates by slice2sr), consequence and ambiguity
# code.  Missing values fall back to the empty string.
sub title {
  my ($self,$f,$args) = @_;
  my $vid = $f->variation_name ||'';
  my $type = $f->display_consequence || '';
  my $dbid = $f->dbID || '';
  my ($s, $e) = $self->slice2sr($args->{'slice'},$f->start, $f->end);
  # Insertions can have end < start, hence the "Between" wording.
  my $loc = $s == $e ? $s : $s < $e ? "$s-$e" : "Between $s and $e";

  return "Variation: $vid; Location: $loc; Consequence: $type; Ambiguity code: ". ($f->ambig_code||'');
}
# Converts an API VariationFeature object into a plain, cacheable hash
# containing everything the glyphset needs to draw and link the feature.
sub _plainify {
  my ($self,$f,$args) = @_;

  return {
    strand       => $f->strand,
    start        => $f->start,
    end          => $f->end,
    colour_key   => $self->colour_key($f),
    type         => $self->type($f,$args),
    label        => $f->variation_name,
    text_overlay => $self->text_overlay($f),
    href         => $self->href($f,$args),
    title        => $self->title($f,$args),
    dbID         => $f->dbID, # used in ZMenu, yuk!
  };
}
# True if the feature belongs to any VariationSet whose short_name is a
# key of %$sets.
sub check_set {
  my ($self, $f, $sets) = @_;

  foreach (@{$f->get_all_VariationSets}) {
    return 1 if $sets->{$_->short_name};
  }
  return 0;
}
# True (1) if any of the feature's source names is a key of %$wanted,
# 0 otherwise.
sub check_source {
  my ( undef, $feature, $wanted ) = @_;

  for my $source ( @{ $feature->get_all_sources } ) {
    return 1 if $wanted->{$source};
  }
  return 0;
}
# Fetches the VariationFeatures to draw for one slice.  Dispatches on
# the track id: somatic tracks, variation-set tracks, VCF-backed tracks,
# or plain per-slice retrieval with optional source/set filtering.
# Failed variations are excluded except for "failed" set tracks; the
# adaptor's include_failed_variations flag is restored before returning.
sub fetch_features {
  my ($self,$args) = @_;

  # Unpack the request arguments (see precache() for their shapes).
  my $adaptors = $self->source('Adaptors');
  my $species = $args->{'species'};
  my $id = $args->{'id'};
  my $filter = $args->{'config'}{'filter'};
  my $source = $args->{'config'}{'source'};
  my $sources = $args->{'config'}{'sources'};
  my $sets = $args->{'config'}{'sets'};
  my $set_name = $args->{'config'}{'set_name'};
  my $var_db = $args->{'var_db'} || 'variation';
  my $slice = $args->{'slice'};
  my $slice_length = $args->{'slice_length'} || 0;
  my $vdb = $adaptors->variation_db_adaptor($var_db,$species);
  return [] unless $vdb;
  # Remember the failed-variation flag so it can be restored on exit.
  my $orig_failed_flag = $vdb->include_failed_variations;
  $vdb->include_failed_variations(0);
  # dont calculate consequences over a certain slice length
  my $no_cons = $slice_length > 1e5 ? 1 : 0;
  my $snps;
  # different retrieval method for somatic mutations
  if ($id =~ /somatic/) {
    my @somatic_mutations;
    if ($filter) {
      @somatic_mutations = @{$slice->get_all_somatic_VariationFeatures_with_phenotype(undef, undef, $filter, $var_db) || []};
    } elsif ($source) {
      @somatic_mutations = @{$slice->get_all_somatic_VariationFeatures_by_source($source, undef, $var_db) || []};
    } else {
      @somatic_mutations = @{$slice->get_all_somatic_VariationFeatures(undef, undef, undef, $var_db) || []};
    }
    $snps = \@somatic_mutations;
  } else { # get standard variations
    # Turn the source/set lists into lookup hashes for the filter below.
    $sources = { map { $_ => 1 } @$sources } if $sources;
    $sets = { map { $_ => 1 } @$sets } if $sets;
    # Consequence SO term -> severity rank, used to order the features.
    my %ct = map { $_->SO_term => $_->rank } values %Bio::EnsEMBL::Variation::Utils::Constants::OVERLAP_CONSEQUENCES;
    my @vari_features;
    if ($id =~ /set/) {
      my $short_name = ($args->{'config'}{'sets'})->[0];
      my $track_set = $set_name;
      my $set_object = $vdb->get_VariationSetAdaptor->fetch_by_short_name($short_name);
      return [] unless $set_object;
      # Enable the display of failed variations in order to display the failed variation track
      $vdb->include_failed_variations(1) if $track_set =~ /failed/i;
      @vari_features = @{$vdb->get_VariationFeatureAdaptor->fetch_all_by_Slice_VariationSet($slice, $set_object) || []};
      # Reset the flag for displaying of failed variations to its original state
      $vdb->include_failed_variations($orig_failed_flag);
    } elsif ($id =~ /^variation_vcf/) {
      # VCF-backed track: the collection id is embedded in the track id.
      my $vca = $vdb->get_VCFCollectionAdaptor;
      my $vcf_id = $id;
      $vcf_id =~ s/^variation_vcf_//;
      if(my $vc = $vca->fetch_by_id($vcf_id)) {
        @vari_features = @{$vc->get_all_VariationFeatures_by_Slice($slice, $no_cons)};
      }
    } else {
      my @temp_variations = @{$vdb->get_VariationFeatureAdaptor->fetch_all_by_Slice_constraint($slice, undef, $no_cons) || []};
      ## Add a filtering step here
      # Make "most functional" snps appear first; filter by source/set
      @vari_features =
        map { $_->[1] }
        sort { $a->[0] <=> $b->[0] }
        map { [ $ct{$_->display_consequence} * 1e9 + $_->start, $_ ] }
        grep { $sources ? $self->check_source($_, $sources) : 1 }
        grep { $sets ? $self->check_set($_, $sets) : 1 }
        @temp_variations;
    }
    $vdb->include_failed_variations($orig_failed_flag);
    $snps = \@vari_features;
  }
  #warn ">>> FOUND ".scalar @$snps." SNPs";
  return $snps||[];
}
# Entry point for the query: fetches the features for the requested
# slice and flattens each one into a plain cacheable hash.
sub get {
  my ($self,$args) = @_;

  # NOTE(review): the original also computed $args->{'slice'}->length
  # here but never used or forwarded it (fetch_features() reads the
  # separate 'slice_length' argument instead), so that dead code has
  # been removed.
  my $features_list = $self->fetch_features($args);
  return [map { $self->_plainify($_,$args) } @$features_list];
}
1;
| Ensembl/ensembl-webcode | modules/EnsEMBL/Web/Query/GlyphSet/Variation.pm | Perl | apache-2.0 | 8,015 |
# Wrapper type for an array of HostPciPassthruConfig values in the VMOMI
# object model.  The member tuple layout ([name, type, flag, flag]) is
# interpreted by VMOMI::ComplexType — presumably [name, type, is_array,
# is_optional]; verify against the base class.
package VMOMI::ArrayOfHostPciPassthruConfig;
use parent 'VMOMI::ComplexType';
use strict;
use warnings;
our @class_ancestors = ( );
our @class_members = (
    ['HostPciPassthruConfig', 'HostPciPassthruConfig', 1, 1],
);
# No ancestors beyond the base class for this array type.
sub get_class_ancestors {
    return @class_ancestors;
}
# Members are the parent's members followed by this class's own.
sub get_class_members {
    my $class = shift;
    my @super_members = $class->SUPER::get_class_members();
    return (@super_members, @class_members);
}
1;
| stumpr/p5-vmomi | lib/VMOMI/ArrayOfHostPciPassthruConfig.pm | Perl | apache-2.0 | 441 |
#!/usr/bin/perl -w
# Copyright © 2006-2013 Jamie Zawinski <jwz@jwz.org>
#
# Permission to use, copy, modify, distribute, and sell this software and its
# documentation for any purpose is hereby granted without fee, provided that
# the above copyright notice appear in all copies and that both that
# copyright notice and this permission notice appear in supporting
# documentation. No representations are made about the suitability of this
# software for any purpose. It is provided "as is" without express or
# implied warranty.
#
# Converts and installs a thumbnail image inside a .saver bundle.
#
# Created: 26-Jul-2012.
require 5;
#use diagnostics; # Fails on some MacOS 10.5 systems
use strict;
# Script name (basename of $0) and version extracted from the RCS tag.
my $progname = $0; $progname =~ s@.*/@@g;
my $version = q{ $Revision: 1.3 $ }; $version =~ s/^[^0-9]+([0-9.]+).*$/$1/;
my $verbose = 1;
$ENV{PATH} = "/opt/local/bin:$ENV{PATH}"; # MacPorts, for ImageMagick
# Runs a command via system() and aborts (through error()) if the child
# dumped core, died on a signal, or exited non-zero.
sub safe_system(@) {
  my @cmd = @_;
  system (@cmd);
  my $exit_value = $? >> 8;
  my $signal_num = $? & 127;
  my $dumped_core = $? & 128;
  error ("$cmd[0]: core dumped!") if ($dumped_core);
  error ("$cmd[0]: signal $signal_num!") if ($signal_num);
  error ("$cmd[0]: exited with $exit_value!") if ($exit_value);
}
# Returns true if the two files differ (by running "cmp").
# cmp exits 0 when identical, 1 when different, 2 on trouble; any
# non-zero status is treated as "differs" here.
sub cmp_files($$) {
  my ($file1, $file2) = @_;
  my @cmd = ("cmp", "-s", "$file1", "$file2");
  print STDERR "$progname: executing \"" . join(" ", @cmd) . "\"\n"
    if ($verbose > 3);
  system (@cmd);
  my $exit_value = $? >> 8;
  my $signal_num = $? & 127;
  my $dumped_core = $? & 128;
  error ("$cmd[0]: core dumped!") if ($dumped_core);
  error ("$cmd[0]: signal $signal_num!") if ($signal_num);
  return $exit_value;
}
# update($src_dir, $app_dir)
# Generates Contents/Resources/thumbnail.png inside a .saver/.app bundle
# from the matching JPEG in $src_dir (or $src_dir/retired/), using
# ImageMagick "convert".  The file is only replaced when its content
# actually changed.
sub update($$) {
  my ($src_dir, $app_dir) = @_;

  # Apparently Apple wants Resources/{thumbnail.png to be 90x58,
  # and Resources/thumbnail@2x.png to be 180x116. Let's just
  # make the former, but make it be the latter's size.
  #
  my $size = '180x116';

  error ("$app_dir does not exist") unless (-d $app_dir);
  error ("$app_dir: no name")
    unless ($app_dir =~ m@/([^/.]+).(saver|app)/?$@x);
  my $app_name = $1;

  $app_dir =~ s@/+$@@s;
  $app_dir .= "/Contents/Resources";
  error ("$app_dir does not exist") unless (-d $app_dir);

  my $target = "$app_dir/thumbnail.png";

  # Look for the source JPEG in the image dir, falling back to retired/.
  $src_dir .= "/" unless ($src_dir =~ m@/$@s);
  my $src_dir2 = "${src_dir}retired/";
  $app_name =~ s/rdbomb/rd-bomb/si; # sigh
  my $img = $src_dir . lc($app_name) . ".jpg";
  my $img2 = $src_dir2 . lc($app_name) . ".jpg";
  $img = $img2 if (! -f $img && -f $img2);
  error ("$img does not exist") unless (-f $img);

  # Render to a temporary file first so the comparison/rename is atomic.
  my $tmp = sprintf ("%s/thumb-%08x.png",
                     ($ENV{TMPDIR} ? $ENV{TMPDIR} : "/tmp"),
                     rand(0xFFFFFFFF));
  # Crop-to-fill at $size, then quantize to shrink the PNG.
  my @cmd = ("convert",
             $img,
             "-resize", $size . "^",
             "-gravity", "center",
             "-extent", $size,
             "-quality", "95", # saves 8%
             "+dither", "-colors", "128", # Saves an additional 61%
             $tmp);
  print STDERR "$progname: exec: " . join(' ', @cmd) . "\n"
    if ($verbose > 2);
  safe_system (@cmd);
  if (! -s $tmp) {
    unlink $tmp;
    error ("failed: " . join(" ", @cmd));
  }
  # This only saves 0.4% on top of the above.
  # @cmd = ("optipng", "-quiet", "-o7", $tmp);
  # print STDERR "$progname: exec: " . join(' ', @cmd) . "\n"
  # if ($verbose > 2);
  # safe_system (@cmd);
  if (! -s $tmp) {
    unlink $tmp;
    error ("failed: " . join(" ", @cmd));
  }
  # Install only if the content changed; otherwise leave the bundle alone.
  if (! cmp_files ($tmp, $target)) {
    unlink $tmp;
    print STDERR "$progname: $target: unchanged\n" if ($verbose > 1);
  } elsif (! rename ($tmp, $target)) {
    unlink $tmp;
    error ("mv $tmp $target: $!");
  } else {
    print STDERR "$progname: wrote $target\n" if ($verbose);
  }
}
# Prints an error to STDERR and exits non-zero.
sub error($) {
  my ($err) = @_;
  print STDERR "$progname: $err\n";
  exit 1;
}

# Prints usage and exits non-zero.
sub usage() {
  print STDERR "usage: $progname [--verbose] image-dir program.app ...\n";
  exit 1;
}

# Parses command-line flags, then updates each bundle listed on the
# command line using thumbnails found in the image directory.
sub main() {
  my $src_dir;
  my @files = ();
  while ($_ = $ARGV[0]) {
    shift @ARGV;
    if (m/^--?verbose$/s) { $verbose++; }
    elsif (m/^-v+$/) { $verbose += length($_)-1; }
    elsif (m/^--?q(uiet)?$/s) { $verbose = 0; }
    elsif (m/^-/s) { usage(); }
    elsif (! $src_dir) { $src_dir = $_; }
    else { push @files, $_; }
  }
  usage() unless ($src_dir && $#files >= 0);
  foreach (@files) {
    update ($src_dir, $_);
  }
}

main();
exit 0;
| AstralDynamics/photo-tagger | xscreensaver-5.29/OSX/update-thumbnail.pl | Perl | bsd-3-clause | 4,541 |
#-----------------------------------------------------------------
# SADI::Service::Instance
# Author: Edward Kawas <edward.kawas@gmail.com>,
#
# For copyright and disclaimer see below.
#
# $Id: Instance.pm,v 1.6 2010-03-23 16:30:16 ubuntu Exp $
#-----------------------------------------------------------------
package SADI::Service::Instance;
use SADI::Base;
use base ("SADI::Base");
use strict;
# add versioning to this module
use vars qw /$VERSION/;
$VERSION = sprintf "%d.%02d", q$Revision: 1.7 $ =~ /: (\d+)\.(\d+)/;
=head1 NAME
SADI::Service::Instance - A module that describes a SADI web service.
=head1 SYNOPSIS
use SADI::Service::Instance;
# create a new blank SADI service instance object
my $data = SADI::Service::Instance->new ();
# create a new primed SADI service instance object
$data = SADI::Service::Instance->new (
ServiceName => "helloworld",
ServiceType => "http://someontology.org/services/sometype",
InputClass => "http://someontology.org/datatypes#Input1",
OutputClass => "http://someontology.org/datatypes#Output1",
Description => "the usual hello world service",
UniqueIdentifier => "urn:lsid:myservices:helloworld",
Authority => "helloworld.com",
Authoritative => 1,
Provider => 'myaddress@organization.org',
ServiceURI => "http://helloworld.com/cgi-bin/helloworld.pl",
URL => "http://helloworld.com/cgi-bin/helloworld.pl",
SignatureURL =>"http://foo.bar/myServiceDescription",
);
# get the service name
my $name = $data->ServiceName;
# set the service name
$data->ServiceName($name);
# get the service type
my $type = $data->ServiceType;
# set the service type
$data->ServiceType($type);
# get the input class URI
my $input_class = $data->InputClass;
# set the input class URI
$data->InputClass($input_class);
# get the output class URI
my $output_class = $data->OutputClass;
# set the output class URI
$data->OutputClass($input_class);
# get the description
my $desc = $data->Description;
# set the description
$data->Description($desc);
# get the unique id
my $id = $data->UniqueIdentifier;
# set the unique id
$data->UniqueIdentifier($id);
# get the authority
my $auth = $data->Authority;
# set the authority
$data->Authority($auth);
# get the service provider URI
my $uri = $data->Provider;
# set the service provider URI
$data->Provider($uri);
# get the service URI
my $uri = $data->ServiceURI;
# set the service URI
$data->ServiceURI($uri);
# get the service URL
my $url = $data->URL;
# set the service URL
$data->URL($url);
# get the signature url
my $sig = $data->SignatureURL;
# set the signature url
$data->SignatureURL($sig);
=head1 DESCRIPTION
An object representing a SADI service signature.
=head1 AUTHORS
Edward Kawas (edward.kawas [at] gmail [dot] com)
=cut
#-----------------------------------------------------------------
# A list of allowed attribute names. See SADI::Base for details.
#-----------------------------------------------------------------
=head1 ACCESSIBLE ATTRIBUTES
Details are in L<SADI::Base>. Here just a list of them (additionally
to the attributes from the parent classes)
=over
=item B<ServiceName>
A name for the service.
=item B<ServiceType>
Our SADI service type.
=item B<InputClass>
The URI to the input class for our SADI service.
=item B<OutputClass>
The URI to the output class for our SADI service.
=item B<Description>
A description for our SADI service.
=item B<UniqueIdentifier>
A unique identifier (like an LSID, etc) for our SADI service.
=item B<Authority>
The service provider URI for our SADI service.
=item B<ServiceURI>
The service URI for our SADI service.
=item B<URL>
The URL to our SADI service.
=item B<Provider>
The email address of the service provider.
B<Note: This method throws an exception if the address is syntactically invalid!>.
=item B<Authoritative>
Whether or not the provider of the SADI service is an authority over the data.
This value must be a boolean value. True values match =~ /true|\+|1|yes|ano/.
All other values are false.
Defaults to 1;
=item B<Format>
The format of the service. More than likely, it will be 'sadi' if it is a SADI web service.
=item B<SignatureURL>
A url to the SADI service signature.
=back
=cut
# Class-private attribute table plus the two hooks SADI::Base uses to
# discover it.  Each entry gives the attribute's type and an optional
# 'post' validation closure run after assignment.
{
    my %_allowed = (
        ServiceName => { type => SADI::Base->STRING },
        ServiceType => { type => SADI::Base->STRING },
        InputClass => { type => SADI::Base->STRING },
        OutputClass => { type => SADI::Base->STRING },
        Description => { type => SADI::Base->STRING },
        UniqueIdentifier => { type => SADI::Base->STRING },
        Authority => {
            type => SADI::Base->STRING,
            post => sub {
                # Reject authorities with URI-special characters and
                # require a NNN.NNN.NNN-style domain; on failure the
                # attribute is blanked before throwing.
                # NOTE(review): the /g flags on these boolean matches
                # are unnecessary and leave pos() state — confirm safe
                # to drop before changing.
                my $i = shift;
                my $domain = $i->Authority;
                if ($domain =~ /[\@\&\%\#\(\)\=]/gi) {
                    $i->{Authority} = "";
                    $i->throw(
                        "Invalid authority specified! '$domain' contains invalid characters ."
                    );
                }
                unless ($domain =~ /.+\.+.+/gi) {
                    $i->{Authority} = "";
                    $i->throw(
                        "Invalid authority specified! '$domain' must take the form NNN.NNN.NNN." );
                }
                # NOTE(review): indirect-object "new URI(...)" syntax;
                # URI->new($domain) is the safer spelling — confirm URI
                # is loaded by SADI::Base before changing.
                my $uri = new URI($domain);
                $i->{Authority} = $uri->authority if defined $uri->authority;
            }
        },
        Provider => {
            type => SADI::Base->STRING,
            post => sub {
                # Syntactic email validation: username@domain with no
                # URI-special characters and a dotted domain.
                my $i = shift;
                my ( $name, $domain ) = $i->Provider =~ /^(.*)@(.*)$/;
                $i->throw( "Invalid email address specified! '"
                           . $i->Provider
                           . "' is not a valid address" )
                  unless $name and $domain;
                $i->throw(
                    "Invalid email address specified! Invalid characters found in username."
                ) if $name =~ /[\@\&\%\#\(\)\=]/gi;
                $i->throw(
                    "Invalid email address specified! Invalid characters found in domain."
                ) if $domain =~ /[\@\&\%\#\(\)\=]/gi;
                $i->throw(
                    "Invalid email address specified! Please check the domain of the address."
                ) unless $domain =~ /.+\.+.+/gi;
            }
        },
        ServiceURI => { type => SADI::Base->STRING },
        URL => {
            type => SADI::Base->STRING,
            post => sub {
                my $i = shift;
                # set the signature url to be the URL address unless defined
                $i->SignatureURL( $i->URL ) unless $i->SignatureURL;
                # set the service uri to be the URL address unless defined
                $i->ServiceURI( $i->URL ) unless $i->ServiceURI;
                # set the unique id to be the URL address unless defined
                $i->UniqueIdentifier( $i->URL ) unless $i->UniqueIdentifier;
            }
        },
        Authoritative => { type => SADI::Base->BOOLEAN },
        Format => { type => SADI::Base->STRING },
        SignatureURL => { type => SADI::Base->STRING },
        UnitTest => { type => 'SADI::Service::UnitTest', is_array => 1 },
    );

    # True if $attr is declared here or by a parent class.
    sub _accessible {
        my ( $self, $attr ) = @_;
        exists $_allowed{$attr} or $self->SUPER::_accessible($attr);
    }

    # Looks up a property (eg 'type', 'post') of a declared attribute,
    # delegating to the parent class for inherited attributes.
    sub _attr_prop {
        my ( $self, $attr_name, $prop_name ) = @_;
        my $attr = $_allowed{$attr_name};
        return ref($attr) ? $attr->{$prop_name} : $attr if $attr;
        return $self->SUPER::_attr_prop( $attr_name, $prop_name );
    }
}
#-----------------------------------------------------------------
# init
#-----------------------------------------------------------------
# Initialise a new instance: run the parent initialiser, then apply the
# SADI defaults (format 'sadi', authoritative by default).
sub init {
    my ($self) = shift;
    $self->SUPER::init();

    # set the default format for this signature
    $self->Format('sadi');
    $self->Authoritative(1);
}
1;
__END__
| gitpan/SADI | lib/SADI/Service/Instance.pm | Perl | bsd-3-clause | 7,705 |
/*************************************************************************
name: update.pl
version:
description: uses ADL = algorithm definition language
author: Peter Bohlin, Staffan Larsson
*************************************************************************/
/*========================================================================
Module Declaration
========================================================================*/
:- module(select,[select/0]).
:- use_module(trindikit(tkit_tis_access)).
:- use_module(trindikit(tkit_operators)).
/*========================================================================
Load rules
========================================================================*/
:- ensure_loaded(library(selection_rules)).
/*========================================================================
Load the ADL interpreter
========================================================================*/
:- ensure_loaded(trindikit(tkit_dme_adl)).
/*========================================================================
The selection algorithm
========================================================================*/
% The selection algorithm expressed in ADL: when the private agenda is
% empty, first try to select an action; then (in either case) try to
% select a move.
selection_algorithm( [ if empty($/private/agenda) then (try select_action),
		       try select_move ] ).

/*========================================================================
   Main predicate
========================================================================*/

% Entry point: fetch the ADL program and run it with the ADL interpreter.
select :-
	selection_algorithm( Algorithm ),
	adl_exec( Algorithm ).
| TeamSPoon/logicmoo_workspace | packs_sys/logicmoo_nlu/ext/SIRIDUS/UGOT-D31/trindikit4-0.1alpha/examples/godis-basic/modules/select.pl | Perl | mit | 1,553 |
#!/usr/bin/perl
# CGI front-end for the AceRules web service: forwards the POSTed SOAP
# request to a local AceRules server over TCP and relays the reply.
use strict;
use warnings;
use CGI qw(:cgi);
use Socket;
use URI::Escape;
my $port = 2763;
my $logdir = "/var/log/attempto/ws/acerules";
# Read the raw request body from STDIN and collapse whitespace that
# follows each newline.
# NOTE(review): CONTENT_LENGTH may be undefined for non-POST requests —
# verify before relying on it.
my $input = "";
read(STDIN, $input, $ENV{'CONTENT_LENGTH'});
$input =~ s/\n\s*/\n/g;
# 'input' must be defined
print_error_and_exit("undefined input") if ($input eq "");
my $host = 'localhost';
my $proto = getprotobyname('tcp');
my $iaddr = inet_aton($host);
my $paddr = sockaddr_in($port, $iaddr);
# Create the socket, connect to the port
socket(SOCKET, PF_INET, SOCK_STREAM, $proto) || print_error_and_exit("fail socket init");
# Attempt to connect to the socket
my $result_connect = connect(SOCKET, $paddr);
if(!$result_connect) {
print_error_and_exit("fail socket connect");
}
# Set $| to non-zero to make selection autoflushed
my $oldfh = select(SOCKET);
$| = 1;
# The protocol terminates a request with a blank line and a reply with a
# lone "." line.
print SOCKET "$input\n\n";
my $results = "";
# BUG: Read the whole socket at once, not line by line.
while(<SOCKET>) {
last if /^\.$/;
$results = $results . "$_";
}
print SOCKET "quit.\n\n";
# NOTE(review): "close SOCKET || ..." — low-precedence "or" would be the
# safer spelling here; confirm the intended parse before changing.
close SOCKET || print_error_and_exit("fail socket close");
select($oldfh);
# NOTE(review): a blank line must separate the CGI header from the body —
# verify the heredoc below retains it.
print STDOUT <<EOF;
Content-type: text/xml
$results
EOF
# content-type should be "application/soap+xml" but IE6 does not parse it
add_log_entry($input, $results);
exit;
# Emits a SOAP Fault envelope (with the given message) as the CGI
# response and terminates the script.
sub print_error_and_exit
{
	my $error_message = shift;
	print <<EOF;
Content-type: text/xml
<?xml version="1.0" encoding="UTF-8"?>
<env:Envelope xmlns:env="http://schemas.xmlsoap.org/soap/envelope/">
<env:Body>
<env:Fault>
<env:faultcode>env:Sender</env:faultcode>
<env:faultstring>$error_message</env:faultstring>
</env:Fault>
</env:Body>
</env:Envelope>
EOF
# content-type should be "application/soap+xml" but IE6 does not parse it
	exit;
}
# add_log_entry($input, $output)
# Appends a timestamped request/response pair, plus the remote host and
# user agent, to the web-service log file.  Logging is best-effort: a
# failure to open the log is reported on STDERR (the CGI response has
# already been sent) instead of being silently ignored as before.
sub add_log_entry
{
	my $input = shift;
	my $output = shift;

	# Collapse repeated newlines and strip the trailing one so each
	# entry stays compact.
	$input =~ s/\n+/\n/g;
	$output =~ s/\n+/\n/g;
	$input =~ s/\n$//;
	$output =~ s/\n$//;

	my $timestamp = localtime;

	# Try to get at least one of those, REMOTE_HOST would be better...
	my $remote_host = "NO_HOST";
	$remote_host = $ENV{'REMOTE_ADDR'} if defined $ENV{'REMOTE_ADDR'};
	$remote_host = $ENV{'REMOTE_HOST'} if defined $ENV{'REMOTE_HOST'};

	my $http_user_agent = "NO_AGENT";
	$http_user_agent = $ENV{'HTTP_USER_AGENT'} if defined $ENV{'HTTP_USER_AGENT'};

	my $log_entry = "[ " .
		$timestamp . " | " .
		$remote_host . " | " .
		$http_user_agent . " ]\n\n" .
		$input . "\n\n" .
		$output . "\n\n";

	# Three-arg open on a lexical handle with the result checked (the
	# original used an unchecked two-arg open on a bareword handle).
	if (open my $log_fh, '>>', "$logdir/acerulesws.log") {
		print {$log_fh} $log_entry;
		close $log_fh;
	}
	else {
		warn "add_log_entry: cannot open $logdir/acerulesws.log: $!";
	}
}
| TeamSPoon/logicmoo_workspace | packs_sys/logicmoo_nlu/ext/AceRules/engine/webservice/acerulesws.perl | Perl | mit | 2,504 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
Bio::EnsEMBL::DensityFeature - A feature representing a density, or
precentage coverage etc. in a given region.
=head1 SYNOPSIS
use Bio::EnsEMBL::DensityFeature;
$feature = Bio::EnsEMBL::DensityFeature->new(
-seq_region => $region,
-start => 1,
-end => 1e6,
-density_type => $dt,
-density_value => 98.5
);
=head1 DESCRIPTION
A density feature represents a count, density, or percentage coverage,
etc. for a given region.
This module is part of the Ensembl project http://www.ensembl.org
=head1 METHODS
=cut
use strict;
use warnings;
package Bio::EnsEMBL::DensityFeature;
use Bio::EnsEMBL::Feature;
use Bio::EnsEMBL::Utils::Argument qw(rearrange);
use Bio::EnsEMBL::Utils::Exception qw(throw warning);
use Bio::EnsEMBL::DensityType;
use vars qw(@ISA);
@ISA = qw(Bio::EnsEMBL::Feature);
=head2 new
Arg [SEQ_REGION] : the sequence over which the density was calculated.
Arg [START] : start point on the seq at which density was calculated.
Arg [END] : end point on the seq at which density was calculated.
Arg [DENSITY_TYPE] : the type of density calculated.
Arg [DENSITY_VALUE] : the density.
Arg [...] : Named arguments passed to superclass
Example : $feature = Bio::EnsEMBL::DensityFeature->new
(-seq_region => $region,
-start => 1,
-end => 1e6,
-density_type => $dt,
-density_value => 98.5)
Description: Creates a new density feature.
Returntype : Bio::EnsEMBL::DensityFeature
Exceptions : throw if invalid density value type is provided
Caller : general
Status : Stable
=cut
sub new {
  my $caller = shift;

  # Allow the constructor to be called as either a class or an object
  # method.
  my $class = ref($caller) || $caller;
  my $self = $class->SUPER::new(@_);

  my ($seq_region, $start, $end, $dt, $dv) =
    rearrange(['SEQ_REGION', 'START', 'END', 'DENSITY_TYPE', 'DENSITY_VALUE'],
              @_);

  # Guard against an undefined density value before the numeric
  # comparison, so a missing -DENSITY_VALUE no longer emits an
  # "uninitialized value" warning (truth value is unchanged: undef was
  # treated as 0 and did not throw).
  throw("Density value must be >= 0.") if (defined($dv) && $dv < 0);

  if (!defined($dt)) {
    throw("Density Type is NOT optional.");
  }

  $self->{'density_type'}  = $dt;
  $self->{'density_value'} = $dv;
  $self->{'slice'}         = $seq_region;
  $self->{'start'}         = $start;
  $self->{'end'}           = $end;

  return $self;
}
=head2 strand
Arg [1] : none
Example : $strand = $df->strand();
Description: Getter for the strand attribute. Density features always have
strand 0 and this attribute is not settable.
Returntype : int (always 0)
Exceptions : warning if an attempt is made to set the strand
Caller : general
Status : Stable
=cut
sub strand {
  my ($self, @args) = @_;
  # Density features are strandless: the strand is fixed at 0 and any
  # attempt to set it is rejected with a warning.
  warning("DensityFeature strand is not settable") if @args;
  return 0;
}
=head2 density_value
Arg [1] : (optional) float $density_value
Example : $dv = $density_feature->density_value();
Description: Getter/Setter for the density value of this DensityFeature.
The density value may be a count, a percentage, or a coverage
of a feature type in the area defined by this feature.
Returntype : float
Exceptions : throw if a negative density value is provided
Caller : general
Status : Stable
=cut
sub density_value {
  my ($self, @args) = @_;
  # Setter path: validate then store the new value.
  if (@args) {
    my $value = $args[0];
    if ($value < 0) {
      throw("Density value must be >= 0.");
    }
    $self->{'density_value'} = $value;
  }
  return $self->{'density_value'};
}
=head2 analysis
Arg [1] : (optional) Bio::EnsEMBL::Analysis $analysis
New value for the analysis of the attached DensityType
Example : print $df->analysis()->logic_name();
Description: Overridden superclass analysis method, to chain to analysis
method on attached DensityType.
Returntype : Bio::EnsEMBL::Analysis
Exceptions : none
Caller : general
Status : Stable
=cut
sub analysis {
  my $self = shift;
  # Delegate to the attached DensityType's analysis; extra arguments
  # (a new analysis) are passed straight through.
  my $density_type = $self->density_type();
  return undef unless $density_type;
  return $density_type->analysis(@_);
}
=head2 density_type
Arg [1] : string $newval (optional)
The new value to set the density_value_type attribute to
Example : $density_value_type = $obj->density_value_type()
Description: Getter/Setter for the density_value_type attribute
Returntype : Bio::EnsEMBL::DensityType
Exceptions : if object passed is not of type DensityType
Caller : general
Status : Stable
=cut
sub density_type {
  my ($self, @args) = @_;
  if (@args) {
    my $new_type = $args[0];
    # Only accept a genuine DensityType object as the new value.
    if (ref $new_type && $new_type->isa("Bio::EnsEMBL::DensityType")) {
      $self->{'density_type'} = $new_type;
    }
    else {
      throw(  "object passed must be an ensembl DensityType "
            . "not a ["
            . ref($new_type)
            . "]");
    }
  }
  return $self->{'density_type'};
}
###BG########
=head2 scaledvalue
Title : scaledvalue
Usage : $obj->scaledvalue($newval)
Function:
Returns : scalar - object's scaled value
Args : newvalue (optional)
Status : Stable
=cut
sub scaledvalue {
  my ($obj, @args) = @_;
  # Plain get/set accessor for the object's scaled value.
  $obj->{'scaledvalue'} = $args[0] if @args;
  return $obj->{'scaledvalue'};
}
=head2 url
Title : url
Usage : $obj->url($newval)
Function:
Returns : String containing this object's url
Args : newvalue (optional)
Status : Stable
=cut
sub url {
  my ($obj, @args) = @_;
  # Plain get/set accessor for the object's URL string.
  $obj->{'url'} = $args[0] if @args;
  return $obj->{'url'};
}
1;
| mjg17/ensembl | modules/Bio/EnsEMBL/DensityFeature.pm | Perl | apache-2.0 | 6,408 |
#!/opt/lampp/bin/perl
# Copyright (C) 2002/2003 Kai Seidler, oswald@apachefriends.org
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
# Simple flat-file guestbook (Dutch localization).  Each entry is stored
# in guestbook.dat as: timestamp line, name line, e-mail line, message
# lines, then a line containing only the sentinel "·" that terminates
# the message.

use strict;
use warnings;

use CGI;

my $form = CGI->new;    # direct method call instead of indirect "new CGI"

# HTML-escape all user input before it is stored or echoed back.
my $f_name  = CGI::escapeHTML($form->param("f_name"));
my $f_email = CGI::escapeHTML($form->param("f_email"));
my $f_text  = CGI::escapeHTML($form->param("f_text"));

# Absent parameters come back undef; normalize to '' so the string
# interpolation below cannot warn.
for ($f_name, $f_email, $f_text) { $_ = '' unless defined $_; }

print "Content-Type: text/html\n\n";

# Append the submitted entry, if any (a non-empty name marks a submission).
if ($f_name)
{
    open my $out, '>>', 'guestbook.dat'
        or die("kon bestand gastenboek niet openen");
    print {$out} localtime() . "\n";
    print {$out} "$f_name\n";
    print {$out} "$f_email\n";
    print {$out} "$f_text\n";
    print {$out} "·\n";
    close $out
        or die("kon bestand gastenboek niet openen");
}

print '<html>';
print '<head>';
print '<meta name="auteur" content="Kai Oswald Seidler">';
print '<link href="xampp.css" rel="stylesheet" type="text/css">';
print '</head>';
print '<body>';
print ' <p>';
print "<h1>Gastenboek (Voorbeeld voor Perl)</h1>";
print "Een klassiek en simpel gastenboek!";

# Render all stored entries.
open my $in, '<', 'guestbook.dat'
    or die("kon bestand gastenboek niet openen");
while (!eof($in)) {
    chomp(my $date  = <$in>);
    chomp(my $name  = <$in>);
    chomp(my $email = <$in>);
    print "<p class=small>$date";
    print "<table border=0 cellpadding=4 cellspacing=1>";
    print "<tr><td class=h>";
    print "<img src=img/blank.gif width=250 height=1><br>";
    print "Naam: $name";
    print "</td><td class=h>";
    print "<img src=img/blank.gif width=250 height=1><br>";
    print "E-Mail: $email";
    print "</td></tr>";
    print "<tr><td class=d colspan=2>";
    # Message lines run until the "·" sentinel; also stop on EOF so a
    # truncated file cannot cause an endless loop (the original spun on
    # undef reads here).
    while (defined(my $line = <$in>)) {
        chomp $line;
        last if $line eq '·';
        print "$line<br>";
    }
    print "</td></tr>";
    print "</table>";
}
close $in;

print "<p>Voeg entry toe:";
# NOTE(review): the form submits to guestbook-en.pl; verify that this
# matches the deployed filename of this (Dutch) script.
print "<form action=guestbook-en.pl method=get>";
print "<table border=0 cellpadding=0 cellspacing=0>";
print "<tr><td>Naam:</td><td><input type=text size=30 name=f_name></td></tr>";
print "<tr><td>E-Mail:</td><td> <input type=text size=30 name=f_email></td></tr>";
print "<tr><td>Tekst:</td><td> <textarea type=text rows=3 cols=30 name=f_text></textarea></td></tr>";
print "<tr><td></td><td><input type=submit border=0 value=\"SCHRIJF\"></td></tr>";
print "</table>";
print "</form>";
print "</body>";
print "</html>";
| dumaworks/dumarepo | xampp/guestbook-nl.pl | Perl | apache-2.0 | 2,796 |
# Auto-generated by SOAP::WSDL from the AdWords v201406 WSDL; the
# mappings below mirror the AdGroupFeedPage complexType and should not
# be hand-edited.
package Google::Ads::AdWords::v201406::AdGroupFeedPage;

use strict;
use warnings;

# Child elements of this complexType are XML-Schema "qualified".
__PACKAGE__->_set_element_form_qualified(1);

# XML namespace of the defining service schema.
sub get_xmlns { 'https://adwords.google.com/api/adwords/cm/v201406' };

# This type declares no XML attributes.
our $XML_ATTRIBUTE_CLASS;
undef $XML_ATTRIBUTE_CLASS;

sub __get_attr_class {
    return $XML_ATTRIBUTE_CLASS;
}

use base qw(Google::Ads::AdWords::v201406::NullStatsPage);
# Variety: sequence
use Class::Std::Fast::Storable constructor => 'none';
use base qw(Google::Ads::SOAP::Typelib::ComplexType);

{ # BLOCK to scope variables
    # Inside-out attribute storage (Class::Std::Fast): one lexical hash
    # per field, keyed by object id, with generated get_* accessors.
    my %totalNumEntries_of :ATTR(:get<totalNumEntries>);
    my %Page__Type_of :ATTR(:get<Page__Type>);
    my %entries_of :ATTR(:get<entries>);

    # _factory(field order, storage map, field type map, XML name map).
    # Note 'Page__Type' serializes as the wire name 'Page.Type'.
    __PACKAGE__->_factory(
        [ qw(        totalNumEntries
        Page__Type
        entries
        ) ],
        {
            'totalNumEntries' => \%totalNumEntries_of,
            'Page__Type' => \%Page__Type_of,
            'entries' => \%entries_of,
        },
        {
            'totalNumEntries' => 'SOAP::WSDL::XSD::Typelib::Builtin::int',
            'Page__Type' => 'SOAP::WSDL::XSD::Typelib::Builtin::string',
            'entries' => 'Google::Ads::AdWords::v201406::AdGroupFeed',
        },
        {
            'totalNumEntries' => 'totalNumEntries',
            'Page__Type' => 'Page.Type',
            'entries' => 'entries',
        }
    );
} # end BLOCK

1;
=pod
=head1 NAME
Google::Ads::AdWords::v201406::AdGroupFeedPage
=head1 DESCRIPTION
Perl data type class for the XML Schema defined complexType
AdGroupFeedPage from the namespace https://adwords.google.com/api/adwords/cm/v201406.
The result of a call to AdGroupFeedService#get. Contains a list of associations between ad groups and feeds.
=head2 PROPERTIES
The following properties may be accessed using get_PROPERTY / set_PROPERTY
methods:
=over
=item * entries
=back
=head1 METHODS
=head2 new
Constructor. The following data structure may be passed to new():
=head1 AUTHOR
Generated by SOAP::WSDL
=cut
| gitpan/GOOGLE-ADWORDS-PERL-CLIENT | lib/Google/Ads/AdWords/v201406/AdGroupFeedPage.pm | Perl | apache-2.0 | 1,923 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Lucy::Build::Binding::Plan;
use strict;
use warnings;

# Distribution version; the eval normalizes development-release strings
# (e.g. "0.005_001") into a plain number.
our $VERSION = '0.005000';
$VERSION = eval $VERSION;
sub bind_all {
    my $class = shift;
    # Bind every Lucy::Plan class.  The final call's result is returned,
    # matching the original implicit return of bind_stringtype.
    my @binders = qw(
        bind_architecture
        bind_blobtype
        bind_fieldtype
        bind_float32type
        bind_float64type
        bind_fulltexttype
        bind_int32type
        bind_int64type
        bind_schema
    );
    $class->$_ for @binders;
    return $class->bind_stringtype;
}
# Build the Perl binding and POD for Lucy::Plan::Architecture.  The POD
# synopsis demonstrates subclassing Architecture to register custom doc
# writer/reader components with the segment writer/reader.
sub bind_architecture {
    my $pod_spec = Clownfish::CFC::Binding::Perl::Pod->new;
    my $synopsis = <<'END_SYNOPSIS';
    package MyArchitecture;
    use base qw( Lucy::Plan::Architecture );
    use LucyX::Index::ZlibDocWriter;
    use LucyX::Index::ZlibDocReader;

    sub register_doc_writer {
        my ( $self, $seg_writer ) = @_;
        my $doc_writer = LucyX::Index::ZlibDocWriter->new(
            snapshot   => $seg_writer->get_snapshot,
            segment    => $seg_writer->get_segment,
            polyreader => $seg_writer->get_polyreader,
        );
        $seg_writer->register(
            api       => "Lucy::Index::DocReader",
            component => $doc_writer,
        );
        $seg_writer->add_writer($doc_writer);
    }

    sub register_doc_reader {
        my ( $self, $seg_reader ) = @_;
        my $doc_reader = LucyX::Index::ZlibDocReader->new(
            schema   => $seg_reader->get_schema,
            folder   => $seg_reader->get_folder,
            segments => $seg_reader->get_segments,
            seg_tick => $seg_reader->get_seg_tick,
            snapshot => $seg_reader->get_snapshot,
        );
        $seg_reader->register(
            api       => 'Lucy::Index::DocReader',
            component => $doc_reader,
        );
    }

    package MySchema;
    use base qw( Lucy::Plan::Schema );

    sub architecture {
        shift;
        return MyArchitecture->new(@_);
    }
END_SYNOPSIS
    my $constructor = <<'END_CONSTRUCTOR';
    my $arch = Lucy::Plan::Architecture->new;
END_CONSTRUCTOR
    $pod_spec->set_synopsis($synopsis);
    $pod_spec->add_constructor( alias => 'new', sample => $constructor, );

    my $binding = Clownfish::CFC::Binding::Perl::Class->new(
        parcel     => "Lucy",
        class_name => "Lucy::Plan::Architecture",
    );
    $binding->set_pod_spec($pod_spec);

    Clownfish::CFC::Binding::Perl::Class->register($binding);
}
# Build the Perl binding and POD for Lucy::Plan::BlobType.
sub bind_blobtype {
    my $pod_spec = Clownfish::CFC::Binding::Perl::Pod->new;
    my $synopsis = <<'END_SYNOPSIS';
    my $string_type = Lucy::Plan::StringType->new;
    my $blob_type   = Lucy::Plan::BlobType->new( stored => 1 );
    my $schema      = Lucy::Plan::Schema->new;
    $schema->spec_field( name => 'id',   type => $string_type );
    $schema->spec_field( name => 'jpeg', type => $blob_type );
END_SYNOPSIS
    my $constructor = <<'END_CONSTRUCTOR';
    my $blob_type = Lucy::Plan::BlobType->new(
        stored => 1,    # default: false
    );
END_CONSTRUCTOR
    $pod_spec->set_synopsis($synopsis);
    $pod_spec->add_constructor( alias => 'new', sample => $constructor, );

    my $binding = Clownfish::CFC::Binding::Perl::Class->new(
        parcel     => "Lucy",
        class_name => "Lucy::Plan::BlobType",
    );
    $binding->set_pod_spec($pod_spec);
    # Backwards-compatible aliases for the KinoSearch-era class names.
    $binding->add_class_alias('KinoSearch::Plan::BlobType');
    $binding->add_class_alias('KinoSearch::FieldType::BlobType');

    Clownfish::CFC::Binding::Perl::Class->register($binding);
}
# Build the Perl binding and POD for the abstract base class
# Lucy::Plan::FieldType.
sub bind_fieldtype {
    my $pod_spec = Clownfish::CFC::Binding::Perl::Pod->new;
    my $synopsis = <<'END_SYNOPSIS';
    my @sortable;
    for my $field ( @{ $schema->all_fields } ) {
        my $type = $schema->fetch_type($field);
        next unless $type->sortable;
        push @sortable, $field;
    }
END_SYNOPSIS
    $pod_spec->set_synopsis($synopsis);

    my $binding = Clownfish::CFC::Binding::Perl::Class->new(
        parcel     => "Lucy",
        class_name => "Lucy::Plan::FieldType",
    );
    # NOTE(review): 'init2' is presumably the C-level initializer name
    # distinct from 'init'; confirm against the Clownfish CFC docs.
    $binding->bind_constructor( alias => 'new', initializer => 'init2' );
    $binding->set_pod_spec($pod_spec);
    # Backwards-compatible aliases for the KinoSearch-era class names.
    $binding->add_class_alias("KinoSearch::Plan::FieldType");
    $binding->add_class_alias("KinoSearch::FieldType");

    Clownfish::CFC::Binding::Perl::Class->register($binding);
}
sub bind_float32type {
    # No POD spec is attached; Float32Type is documented via FieldType.
    my $class_binding = Clownfish::CFC::Binding::Perl::Class->new(
        parcel     => 'Lucy',
        class_name => 'Lucy::Plan::Float32Type',
    );
    $class_binding->bind_constructor( alias => 'new', initializer => 'init2' );
    Clownfish::CFC::Binding::Perl::Class->register($class_binding);
}
sub bind_float64type {
    # No POD spec is attached; Float64Type is documented via FieldType.
    my $class_binding = Clownfish::CFC::Binding::Perl::Class->new(
        parcel     => 'Lucy',
        class_name => 'Lucy::Plan::Float64Type',
    );
    $class_binding->bind_constructor( alias => 'new', initializer => 'init2' );
    Clownfish::CFC::Binding::Perl::Class->register($class_binding);
}
# Build the Perl binding and POD for Lucy::Plan::FullTextType.
sub bind_fulltexttype {
    my $pod_spec = Clownfish::CFC::Binding::Perl::Pod->new;
    my $synopsis = <<'END_SYNOPSIS';
    my $easyanalyzer = Lucy::Analysis::EasyAnalyzer->new(
        language => 'en',
    );
    my $type = Lucy::Plan::FullTextType->new(
        analyzer => $easyanalyzer,
    );
    my $schema = Lucy::Plan::Schema->new;
    $schema->spec_field( name => 'title',   type => $type );
    $schema->spec_field( name => 'content', type => $type );
END_SYNOPSIS
    my $constructor = <<'END_CONSTRUCTOR';
    my $type = Lucy::Plan::FullTextType->new(
        analyzer      => $analyzer,    # required
        boost         => 2.0,          # default: 1.0
        indexed       => 1,            # default: true
        stored        => 1,            # default: true
        sortable      => 1,            # default: false
        highlightable => 1,            # default: false
    );
END_CONSTRUCTOR
    $pod_spec->set_synopsis($synopsis);
    $pod_spec->add_constructor(
        alias    => 'new',
        pod_func => 'init2',
        sample   => $constructor,
    );

    my $binding = Clownfish::CFC::Binding::Perl::Class->new(
        parcel     => "Lucy",
        class_name => "Lucy::Plan::FullTextType",
    );
    $binding->bind_constructor( alias => 'new', initializer => 'init2' );
    $binding->set_pod_spec($pod_spec);
    # Backwards-compatible aliases for the KinoSearch-era class names.
    $binding->add_class_alias('KinoSearch::Plan::FullTextType');
    $binding->add_class_alias('KinoSearch::FieldType::FullTextType');

    Clownfish::CFC::Binding::Perl::Class->register($binding);
}
sub bind_int32type {
    # No POD spec is attached; Int32Type is documented via FieldType.
    my $class_binding = Clownfish::CFC::Binding::Perl::Class->new(
        parcel     => 'Lucy',
        class_name => 'Lucy::Plan::Int32Type',
    );
    $class_binding->bind_constructor( alias => 'new', initializer => 'init2' );
    Clownfish::CFC::Binding::Perl::Class->register($class_binding);
}
sub bind_int64type {
    # No POD spec is attached; Int64Type is documented via FieldType.
    my $class_binding = Clownfish::CFC::Binding::Perl::Class->new(
        parcel     => 'Lucy',
        class_name => 'Lucy::Plan::Int64Type',
    );
    $class_binding->bind_constructor( alias => 'new', initializer => 'init2' );
    Clownfish::CFC::Binding::Perl::Class->register($class_binding);
}
# Build the Perl binding and POD for Lucy::Plan::Schema.
sub bind_schema {
    my $pod_spec = Clownfish::CFC::Binding::Perl::Pod->new;
    my $synopsis = <<'END_SYNOPSIS';
    use Lucy::Plan::Schema;
    use Lucy::Plan::FullTextType;
    use Lucy::Analysis::EasyAnalyzer;

    my $schema = Lucy::Plan::Schema->new;
    my $easyanalyzer = Lucy::Analysis::EasyAnalyzer->new(
        language => 'en',
    );
    my $type = Lucy::Plan::FullTextType->new(
        analyzer => $easyanalyzer,
    );
    $schema->spec_field( name => 'title',   type => $type );
    $schema->spec_field( name => 'content', type => $type );
END_SYNOPSIS
    my $constructor = <<'END_CONSTRUCTOR';
    my $schema = Lucy::Plan::Schema->new;
END_CONSTRUCTOR
    $pod_spec->set_synopsis($synopsis);
    $pod_spec->add_constructor( alias => 'new', sample => $constructor, );

    my $binding = Clownfish::CFC::Binding::Perl::Class->new(
        parcel     => "Lucy",
        class_name => "Lucy::Plan::Schema",
    );
    $binding->set_pod_spec($pod_spec);
    # Backwards-compatible aliases for the KinoSearch-era class names.
    $binding->add_class_alias("KinoSearch::Plan::Schema");
    $binding->add_class_alias("KinoSearch::Schema");

    Clownfish::CFC::Binding::Perl::Class->register($binding);
}
# Build the Perl binding and POD for Lucy::Plan::StringType.
sub bind_stringtype {
    my $pod_spec = Clownfish::CFC::Binding::Perl::Pod->new;
    my $synopsis = <<'END_SYNOPSIS';
    my $type   = Lucy::Plan::StringType->new;
    my $schema = Lucy::Plan::Schema->new;
    $schema->spec_field( name => 'category', type => $type );
END_SYNOPSIS
    my $constructor = <<'END_CONSTRUCTOR';
    my $type = Lucy::Plan::StringType->new(
        boost    => 0.1,    # default: 1.0
        indexed  => 1,      # default: true
        stored   => 1,      # default: true
        sortable => 1,      # default: false
    );
END_CONSTRUCTOR
    $pod_spec->set_synopsis($synopsis);
    $pod_spec->add_constructor(
        alias    => 'new',
        pod_func => 'init2',
        sample   => $constructor,
    );

    my $binding = Clownfish::CFC::Binding::Perl::Class->new(
        parcel     => "Lucy",
        class_name => "Lucy::Plan::StringType",
    );
    $binding->bind_constructor( alias => 'new', initializer => 'init2' );
    $binding->set_pod_spec($pod_spec);
    # Backwards-compatible aliases for the KinoSearch-era class names.
    $binding->add_class_alias('KinoSearch::Plan::StringType');
    $binding->add_class_alias('KinoSearch::FieldType::StringType');

    Clownfish::CFC::Binding::Perl::Class->register($binding);
}
1;
| rectang/lucy | perl/buildlib/Lucy/Build/Binding/Plan.pm | Perl | apache-2.0 | 10,152 |
# Response object for AWS CodeStar DeleteProject; attributes map to the
# wire field names via the NameInRequest trait.
package Paws::CodeStar::DeleteProjectResult;
  use Moose;
  has ProjectArn => (is => 'ro', isa => 'Str', traits => ['NameInRequest'], request_name => 'projectArn' );
  has StackId => (is => 'ro', isa => 'Str', traits => ['NameInRequest'], request_name => 'stackId' );

  # Request id of the API call that produced this result (internal).
  has _request_id => (is => 'ro', isa => 'Str');
### main pod documentation begin ###
=head1 NAME
Paws::CodeStar::DeleteProjectResult
=head1 ATTRIBUTES
=head2 ProjectArn => Str
The Amazon Resource Name (ARN) of the deleted project.
=head2 StackId => Str
The ID of the primary stack in AWS CloudFormation that will be deleted
as part of deleting the project and its resources.
=head2 _request_id => Str
=cut
1; | ioanrogers/aws-sdk-perl | auto-lib/Paws/CodeStar/DeleteProjectResult.pm | Perl | apache-2.0 | 694 |
%
% local propagation evaluator
% for nonlinear expressions in clp(r,q)
%
:- use_module(engine(internals), [term_to_meta/2]).
% arith_ground(X): X is a constant expression, i.e. its normal form has
% an empty homogeneous (variable) part.  Substitute for number(X).
arith_ground(X) :-
        type(X,Xt), normalize(Xt, X, _, _, H),
        H = [].

% arith_ground(X, Val): as arith_ground/1, additionally unifying Val
% with the numeric value K*I of the normalized constant.
arith_ground(X, Val) :-
        type(X,Xt), normalize(Xt, X, K, I, H),
        H = [],
        arith_eval(K*I, Val).

% delay_all(Retry, Vars, Goal): suspend Goal on every variable of Vars,
% but only on first activation (Retry = no).  On re-activation
% (Retry = yes) nothing is done — presumably the suspensions installed
% by the first activation on the still-unbound variables remain in
% place (TODO: confirm against the freeze implementation in clprt.pl).
:- meta_predicate delay_all(_,_,goal).
delay_all( yes, _, _).
delay_all( no, Vs, G) :- delay_all( Vs, G).

% delay_all(Vars, Goal): freeze one copy of Goal on each variable.
:- meta_predicate delay_all(_,goal).
delay_all( [], _).
delay_all( [V|Vs], G) :-
        system_freeze( V, G),
        delay_all( Vs, G).
% a = abs(b)
%
% solve_abs(A, B): enforce A = abs(B) by local propagation.  The work is
% done by solve_abs/4, which threads a mutex variable Mux: Mux stays
% unbound until the constraint is fully solved and is then bound to
% 'solved', so copies of this goal woken from other suspensions fall
% through to the final unit clause.  Comments like "+a -b" mark which
% arguments are ground (+) or unknown (-) in each branch.
solve_abs( A, B) :-
        solve_abs( _Mutex, A, B, no).
%
solve_abs( Mux, A, B, Retry) :- var(Mux), !,
        ( arith_ground( A, Av) ->
            % arith_eval( A >= 0),
            ( arith_zero( Av) ->
                Mux = solved,
                solve_one( B, 0)
            ; arith_ground( B, Bv) ->                   % +a ?b
                Mux = solved,
                arith_zero( abs(Bv) - Av)
            ;                                           % +a -b
                delay_all( Retry, [B], solve_abs(Mux,A,B,yes))
            )
        ; arith_ground( B, Bv) ->                       % -a +b
            Mux = solved,
            arith_eval( abs(Bv), Val), solve_one( A, Val)
        ;                                               % -a -b
            delay_all( Retry, [A,B], solve_abs(Mux,A,B,yes))
        ).
solve_abs( _, _, _, _).

% normalize_abs(B, K, Inhom, Hom): normal form (coefficient K,
% inhomogeneous part, homogeneous part) of abs(B).  If B is constant the
% result is the constant abs(Kb*Ib); otherwise a fresh variable VA
% stands for the result and a suspended solve_abs/4 ties VA to B's
% definition variable VB.
normalize_abs( B, 1, Inhom, Hom) :-
        type(B,Bt), normalize( Bt, B, Kb, Ib, Hb),
        ( Hb = [] ->                                    % -a +b
            arith_eval( abs(Kb*Ib), Inhom), Hom = []
        ;                                               % -a -b
            Inhom = 0, Hom = [VA*1],
            eqn_var_new( v, VA),
            var_with_def( VB, v, Kb, Ib, Hb),
            delay_all( [VA,VB], solve_abs(_Mux,VA,VB,yes))
        ).
% a = {min,max}(b,c)
%
% solve_mix(MIX, A, B, C): enforce A = min(B,C) or A = max(B,C),
% selected by MIX.  Delays (via the mutexed solve_mix/6) until enough
% arguments are ground to decide or check the relation.
solve_mix( MIX, A, B, C) :-
        solve_mix( _Mutex, MIX, A, B, C, no).
%
solve_mix( Mux, MIX, A, B, C, Retry) :- var( Mux), !,
        ( arith_ground( A, Av) ->
            ( arith_ground( B, Bv) ->
                ( arith_ground( C, Cv) ->               % +a +b +c
                    Mux = solved,
                    ( MIX = min, arith_zero( Av-min(Bv,Cv))
                    ; MIX = max, arith_zero( Av-max(Bv,Cv))
                    )
                ;                                       % +a +b -c
                    delay_all( Retry, [C], solve_mix(Mux,MIX,A,B,C,yes))
                )
            ; arith_ground( C, Cv) ->                   % +a -b +c
                delay_all( Retry, [B], solve_mix(Mux,MIX,A,B,C,yes))
            ;                                           % +a -b -c
                delay_all( Retry, [B,C], solve_mix(Mux,MIX,A,B,C,yes))
            )
        ; arith_ground( B, Bv) ->
            ( arith_ground( C, Cv) ->                   % -a +b +c
                Mux = solved,
                ( MIX = min, arith_eval( min(Bv,Cv), Av)
                ; MIX = max, arith_eval( max(Bv,Cv), Av)
                ),
                solve_one( A, Av)
            ;                                           % -a +b -c
                delay_all( Retry, [A,C], solve_mix(Mux,MIX,A,B,C,yes))
            )
        ; arith_ground( C, Cv) ->                       % -a -b +c
            delay_all( Retry, [A,B], solve_mix(Mux,MIX,A,B,C,yes))
        ;                                               % -a -b -c
            delay_all( Retry, [A,B,C], solve_mix(Mux,MIX,A,B,C,yes))
        ).
solve_mix( _, _, _, _, _, _).

% normalize_mix(MIX, B, C, K, Inhom, Hom): normal form of min/max(B,C).
% Constant when both arguments normalize to constants; otherwise a fresh
% result variable VA is introduced and solve_mix/6 is suspended on the
% unknowns.
% NOTE(review): 'Mux' in the first two delay_all goals below is a fresh
% singleton, playing the same role as '_Mux' in the last one.
normalize_mix( MIX, B, C, 1, Inhom, Hom) :-
        type(B,Bt), normalize( Bt, B, Kb, Ib, Hb),
        type(C,Ct), normalize( Ct, C, Kc, Ic, Hc),
        ( Hb = [] ->
            ( Hc = [] ->                                % -a +b +c
                ( MIX = min, arith_eval( min(Kb*Ib,Kc*Ic), Inhom)
                ; MIX = max, arith_eval( max(Kb*Ib,Kc*Ic), Inhom)
                ),
                Hom = []
            ;                                           % -a +b -c
                Inhom = 0, Hom = [VA*1],
                eqn_var_new( v, VA),
                arith_eval( Kb*Ib, Vb),
                var_with_def( VC, v, Kc, Ic, Hc),
                delay_all( [VA,VC], solve_mix(Mux,MIX,VA,Vb,VC,yes))
            )
        ; Hc = [] ->                                    % -a -b +c
            Inhom = 0, Hom = [VA*1],
            eqn_var_new( v, VA),
            var_with_def( VB, v, Kb, Ib, Hb),
            arith_eval( Kc*Ic, Vc),
            delay_all( [VA,VB], solve_mix(Mux,MIX,VA,VB,Vc,yes))
        ;                                               % -a -b -c
            Inhom = 0, Hom = [VA*1],
            eqn_var_new( v, VA),
            var_with_def( VB, v, Kb, Ib, Hb),
            var_with_def( VC, v, Kc, Ic, Hc),
            delay_all( [VA,VB,VC], solve_mix(_Mux,MIX,VA,VB,VC,yes))
        ).
% a = b * c
%
% solve_mult(A, B, C): enforce A = B*C by local propagation; division is
% used to solve for the remaining unknown once two of the three values
% are ground, with explicit zero cases to avoid dividing by zero.
solve_mult( A, B, C) :-
        solve_mult( _Mutex, A, B, C, no).
%
solve_mult( Mux, A, B, C, Retry) :-
        var( Mux),
        !,
        ( arith_ground( A, Av) ->
            ( arith_ground( B, Bv) ->                   % +a +b ?c
                Mux = solved,
                ( arith_zero( Bv) -> arith_zero( Av)    % otherwise: zero division
                ; arith_eval( Av/Bv, Vc), solve_one( C, Vc)
                )
            ; arith_ground( C, Cv) ->                   % +a -b +c
                Mux = solved,
                ( arith_zero( Cv) -> arith_zero( Av)    % otherwise: zero division
                ; arith_eval( Av/Cv, Vb), solve_one( B, Vb)
                )
            ;                                           % +a -b -c
                delay_all( Retry, [B,C], solve_mult(Mux,A,B,C,yes))
            )
        ; arith_ground( B, Bv) ->
            Mux = solved,
            ( arith_ground( C, Cv) ->                   % -a +b +c
                arith_eval( Bv*Cv, Va), solve_one( A, Va)
            ; arith_zero( Bv) ->                        % -a +b -c
                solve_one( A, 0)
            ;   % linear case: A = Bv * C
                solve_two( A, C, Bv)
            )
        ; arith_ground( C, Cv) ->                       % -a -b +c
            Mux = solved,
            ( arith_zero( Cv) ->
                solve_one( A, 0)
            ;   % linear case: A = Cv * B
                solve_two( A, B, Cv)
            )
        ;                                               % -a -b -c
            delay_all( Retry, [A,B,C], solve_mult(Mux,A,B,C,yes))
        ).
solve_mult( _, _, _, _, _).

% normalize_mult(B, C, K, Inhom, Hom): normal form of B*C.  If at least
% one factor is constant the product stays linear; only when both sides
% have variable parts is a nonlinear residue created and suspended.
normalize_mult( B,C, K, Inhom, Hom) :-
        type(B,Bt), normalize( Bt, B, Kb, Ib, Hb),
        type(C,Ct), normalize( Ct, C, Kc, Ic, Hc),
        ( Hb = [] ->
            ( Hc = [] ->                                % -a +b +c
                arith_eval( Kb*Kc, K), arith_eval( Ib*Ic, Inhom), Hom = []
            ; arith_zero( Ib) ->
                K = 1, Inhom = 0, Hom = []
            ;                                           % -a +b -c
                arith_eval( Kb*Ib*Kc, K), Inhom = Ic, Hom = Hc
            )
        ; Hc = [] ->                                    % -a -b +c
            ( arith_zero( Ic) ->
                K = 1, Inhom = 0, Hom = []
            ;
                arith_eval( Kc*Ic*Kb, K), Inhom = Ib, Hom = Hb
            )
        ;                                               % -a -b -c
            arith_eval( Kb*Kc, K), Inhom = 0, Hom = [VA*1],
            eqn_var_new( v, VA),
            var_with_def( VB, v, 1, Ib, Hb),
            var_with_def( VC, v, 1, Ic, Hc),
            delay_all( [VA,VB,VC], solve_mult(_Mux,VA,VB,VC,yes))
        ).

% normalize_div(B, C, K, Inhom, Hom): normal form of B/C; fails on a
% ground zero divisor, and marks a variable divisor as nonzero via the
% 'nz' variable class.
normalize_div( B,C, K, Inhom, Hom) :-                   % a = b/c
        type(B,Bt), normalize( Bt, B, Kb, Ib, Hb),
        type(C,Ct), normalize( Ct, C, Kc, Ic, Hc),
        ( Hb = [] ->
            ( Hc = [] ->                                % -a +b +c
                ( arith_zero( Ic) ->
                    fail                                % zero division
                ;
                    arith_eval( Kb/Kc, K), arith_eval( Ib/Ic, Inhom), Hom = []
                )
            ; arith_zero( Ib) ->
                var_with_def( VC, nz, 1, Ic, Hc),       % nonzero( VC)
                K = 1, Inhom = 0, Hom = []
            ;                                           % -a +b -c
                K = Kb, Inhom = 0, Hom = [VA*1],
                eqn_var_new( v, VA),
                var_with_def( VC, v, Kc, Ic, Hc),
                % Ib = VA * VC encodes the division as a multiplication.
                delay_all( [VA,VC], solve_mult(_Mux,Ib,VA,VC,yes))
            )
        ; Hc = [] ->                                    % -a -b +c
            ( arith_zero( Ic) ->
                fail                                    % zero division
            ;
                arith_eval( Kb/(Kc*Ic), K), Inhom = Ib, Hom = Hb
            )
        ;                                               % -a -b -c
            arith_eval( Kb/Kc, K), Inhom = 0, Hom = [VA*1],
            eqn_var_new( v, VA),
            var_with_def( VB, v, 1, Ib, Hb),
            var_with_def( VC, nz, 1, Ic, Hc),           % nonzero( VC)
            % VB = VA * VC encodes the division as a multiplication.
            delay_all( [VA,VB,VC], solve_mult(_Mux,VB,VA,VC,yes))
        ).
% a = b^c
%
% solve_pow(A, B, C): enforce A = B^C by local propagation, using log
% and exp to solve for the remaining unknown, with special cases for
% exponent/base values 0 and 1.
solve_pow( A, B, C) :-
        solve_pow( _Mutex, A, B, C, no).
%
solve_pow( Mux, A, B, C, Retry) :- var(Mux), !,
        ( arith_ground( A, Av) ->                       % +a ?b ?c
            ( arith_zero( Av) ->
                Mux = solved, solve_one( B, 0)
            ; arith_ground( B, Bv) ->                   % +a +b ?c
                Mux = solved,
                ( arith_zero( Bv) -> fail               % A=0, \+ zero(Va)
                ; arith_zero( 1-Bv) -> arith_zero( 1-Av)
                ; arith_eval( log(Av)/log(Bv), Vc), solve_one( C, Vc)
                )
            ; arith_ground( C, Cv) ->                   % +a -b +c
                Mux = solved,
                ( arith_zero( Cv) -> arith_zero( 1-Av)
                ; arith_zero( 1-Cv) -> solve_one( B, Av)
                ; arith_eval( exp(Av,1/Cv), Vb), solve_one( B, Vb)
                )
            ;                                           % +a -b -c
                delay_all( Retry, [B,C], solve_pow(Mux,A,B,C,yes))
            )
        ; arith_ground( B, Bv) ->
            ( arith_ground( C, Cv) ->                   % -a +b +c
                Mux = solved, arith_eval( exp(Bv,Cv), Va), solve_one( A, Va)
            ;                                           % -a +b -c
                delay_all( Retry, [A,C], solve_pow(Mux,A,B,C,yes))
            )
        ; arith_ground( C, Cv) ->                       % -a -b +c
            ( arith_zero( Cv) ->
                Mux = solved,
                % nonzero( B),
                solve_one( A, 1)
            ; arith_zero( 1-Cv) ->
                Mux = solved, solve_two( A, B, 1)       % C=1, so A = B
            ;
                delay_all( Retry, [A,B], solve_pow(Mux,A,B,C,yes))
            )
        ;                                               % -a -b -c
            delay_all( Retry, [A,B,C], solve_pow(Mux,A,B,C,yes))
        ).
solve_pow( _, _, _, _, _).

% normalize_pow(B, C, K, Inhom, Hom): normal form of B^C; constant when
% both normalize to constants, otherwise suspends solve_pow/5 on a
% fresh result variable.
% NOTE(review): the 'Mux = solved' bindings below bind a fresh local
% variable that is never passed to any goal; they appear to be dead code
% carried over from solve_pow/5 — confirm before removing.
normalize_pow( B, C, K, Inhom, Hom) :-
        type(B,Bt), normalize( Bt, B, Kb, Ib, Hb),
        type(C,Ct), normalize( Ct, C, Kc, Ic, Hc),
        ( Hb = [] ->
            ( Hc = [] ->                                % -a +b +c
                Mux = solved, K = 1, arith_eval( exp(Kb*Ib,Kc*Ic), Inhom), Hom = []
            ;                                           % -a +b -c
                K = 1, Inhom = 0, Hom = [VA*1],
                eqn_var_new( v, VA),
                arith_eval( Kb*Ib, Vb),
                var_with_def( VC, v, Kc, Ic, Hc),
                delay_all( [VA,VC], solve_pow(_Mux,VA,Vb,VC,yes))
            )
        ; Hc = [] ->                                    % -a -b +c
            ( arith_zero( Ic) ->
                Mux = solved,
                var_with_def( VB, nz, 1, Ib, Hb),       % nonzero( VB)
                K = 1, Inhom = 1, Hom = []
            ; arith_zero( 1-Kc*Ic) ->
                Mux = solved, K = Kb, Inhom = Ib, Hom = Hb
            ;
                K = 1, Inhom = 0, Hom = [VA*1],
                eqn_var_new( v, VA),
                var_with_def( VB, v, Kb, Ib, Hb),
                arith_eval( Kc*Ic, Vc),
                delay_all( [VA,VB], solve_pow(_Mux,VA,VB,Vc,yes))
            )
        ;                                               % -a -b -c
            K = 1, Inhom = 0, Hom = [VA*1],
            eqn_var_new( v, VA),
            var_with_def( VB, v, Kb, Ib, Hb),
            var_with_def( VC, v, Kc, Ic, Hc),
            delay_all( [VA,VB,VC], solve_pow(_Mux,VA,VB,VC,yes))
        ).
% a = TRIG(b)
%
% solve_trig(Trig, A, B): enforce A = Trig(B) for Trig in {sin,cos,tan};
% solvable as soon as either side becomes ground, using the inverse
% function when only A is known.
solve_trig( Trig, A, B) :-
        solve_trig( _Mutex, Trig, A, B, no).
%
solve_trig( Mux, Trig, A, B, Retry) :- var( Mux), !,
        ( arith_ground( A, Av) ->                       % +a ?b
            Mux = solved,
            trig( Trig, y, Av, Val), solve_one( B, Val)
        ; arith_ground( B, Bv) ->                       % -a +b
            Mux = solved,
            trig( Trig, x, Bv, Val), solve_one( A, Val)
        ;                                               % -a -b
            delay_all( Retry, [A,B], solve_trig( Mux, Trig, A, B, yes))
        ).
solve_trig( _, _, _, _, _).

% trig(Function, Direction, X, Y): Direction 'x' applies the function,
% 'y' applies its inverse.
trig( sin, x, X, Y) :- arith_eval( sin(X), Y).
trig( sin, y, X, Y) :- arith_eval( asin(X), Y).
trig( cos, x, X, Y) :- arith_eval( cos(X), Y).
trig( cos, y, X, Y) :- arith_eval( acos(X), Y).
trig( tan, x, X, Y) :- arith_eval( tan(X), Y).
trig( tan, y, X, Y) :- arith_eval( atan(X), Y).

% normalize_trig(Trig, B, K, Inhom, Hom): normal form of Trig(B);
% constant when B is, otherwise a fresh result variable with a
% suspended solve_trig/5.
normalize_trig( Trig, B, 1, Inhom, Hom) :-
        type(B,Bt), normalize( Bt, B, K, Ib, Hb),
        ( Hb = [] ->                                    % -a +b
            arith_eval( K*Ib, Ibk),
            trig( Trig, x, Ibk, Inhom), Hom = []
        ;                                               % -a -b
            Inhom = 0, Hom = [VA*1],
            eqn_var_new( v, VA),
            var_with_def( VB, v, K, Ib, Hb),
            delay_all( [VA,VB], solve_trig(_Mux,Trig,VA,VB,yes))
        ).
% See clpr_freeze/2 in clprt.pl
% system_freeze(Var, Goal): suspend Goal on Var by binding Var to a
% fresh variable carrying a clpr_frozen attribute; the attribute handler
% runs G when the variable is eventually bound.
system_freeze(Var, Goal) :-
        % nonvar(Goal), % assert
        term_to_meta(G,Goal),
        attach_attribute(V, clpr_frozen(V,G,true)),
        Var = V.
| leuschel/ecce | www/CiaoDE/ciao/library/nl_eval.pl | Perl | apache-2.0 | 11,761 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
require 5.6.0;
use strict;
use warnings;

use Thrift;
use Thrift::BufferedTransport;
use Thrift::BinaryProtocol;

#
# Server base class module
#
package Thrift::Server;

# 3 possible constructors:
#   1. (processor, serverTransport)
#      Defaults to BufferedTransportFactory + BinaryProtocolFactory.
#   2. (processor, serverTransport, transportFactory, protocolFactory)
#      The same factory is used for input and output.
#   3. (processor, serverTransport,
#       inputTransportFactory, outputTransportFactory,
#       inputProtocolFactory, outputProtocolFactory)
# Construct a server; dispatches on argument count per the comment above.
sub new
{
    my $classname = shift;
    my @args      = @_;
    my $argc      = scalar @args;

    my $self;
    if ($argc == 2) {
        # Default factories for transport and protocol, shared for
        # input and output.
        $self = _init($args[0], $args[1],
                      Thrift::BufferedTransportFactory->new(),
                      Thrift::BufferedTransportFactory->new(),
                      Thrift::BinaryProtocolFactory->new(),
                      Thrift::BinaryProtocolFactory->new());
    }
    elsif ($argc == 4) {
        # One transport factory and one protocol factory, each reused
        # for both directions.
        $self = _init($args[0], $args[1],
                      $args[2], $args[2],
                      $args[3], $args[3]);
    }
    elsif ($argc == 6) {
        $self = _init(@args);
    }
    else {
        die "Thrift::Server expects exactly 2, 4, or 6 args";
    }

    return bless($self, $classname);
}
# Build the server state hash from the six positional arguments.
# Returns the (unblessed) hashref; new() blesses it.
sub _init
{
    my ($processor, $serverTransport,
        $inputTransportFactory, $outputTransportFactory,
        $inputProtocolFactory,  $outputProtocolFactory) = @_;

    my $self = {
        processor              => $processor,
        serverTransport        => $serverTransport,
        inputTransportFactory  => $inputTransportFactory,
        outputTransportFactory => $outputTransportFactory,
        inputProtocolFactory   => $inputProtocolFactory,
        outputProtocolFactory  => $outputProtocolFactory,
    };

    # Explicit return: the original relied on the value of the last
    # statement (the 'my' assignment), which happened to be the hashref.
    return $self;
}
# Abstract method: concrete subclasses (SimpleServer, ForkingServer)
# implement the accept loop.
sub serve
{
    my $self = shift;
    die "abstract";
}
# Notify the optional server event handler that a client session is
# starting; a no-op when no handler has been installed.
sub _clientBegin
{
    my ($self, $iprot, $oprot) = @_;

    my $handler = $self->{serverEventHandler};
    if (defined $handler) {
        $handler->clientBegin($iprot, $oprot);
    }
}
# Report an exception caught in the serve loop.  Thrift exceptions are
# blessed hashrefs whose class name stringifies to something matching
# /TException/ and which carry {message}/{code}; transport errors are
# re-thrown, socket noise is suppressed, everything else is warned.
sub _handleException
{
    my ($self, $e) = @_;

    # Guard with ref(): previously a plain-string error that happened to
    # contain "TException" reached 'exists $e->{message}', which dies
    # with "Can't use string ... as a HASH ref" under strict refs.
    if (ref($e) && $e =~ m/TException/ && exists $e->{message}) {
        my $message = $e->{message};
        my $code    = $e->{code};
        my $out     = $code . ':' . $message;
        # Transport-level failures (lost connections) end this client.
        $message =~ m/TTransportException/ and die $out;
        if ($message =~ m/Socket/) {
            # suppress Socket messages
        }
        else {
            warn $out;
        }
    }
    else {
        warn $e;
    }
}
#
# SimpleServer from the Server base class that handles one connection at a time
#
package Thrift::SimpleServer;
use base qw( Thrift::Server );

# Construction is fully inherited; no subclass-specific state is added.
sub new
{
    my ($classname, @args) = @_;
    my $self = $classname->SUPER::new(@args);
    return bless($self, $classname);
}
# Accept loop: serves exactly one client connection at a time.  For each
# accepted client the configured factories wrap it in transports and
# protocols, and requests are processed until process() dies (typically
# when the client disconnects), which is caught below.  Never returns.
sub serve
{
    my $self = shift;

    $self->{serverTransport}->listen();
    while (1)
    {
        my $client = $self->{serverTransport}->accept();
        my $itrans = $self->{inputTransportFactory}->getTransport($client);
        my $otrans = $self->{outputTransportFactory}->getTransport($client);
        my $iprot = $self->{inputProtocolFactory}->getProtocol($itrans);
        my $oprot = $self->{outputProtocolFactory}->getProtocol($otrans);
        eval {
            $self->_clientBegin($iprot, $oprot);
            # Inner loop is broken only by an exception from process().
            while (1)
            {
                $self->{processor}->process($iprot, $oprot);
            }
        }; if($@) {
            $self->_handleException($@);
        }
        $itrans->close();
        $otrans->close();
    }
}
#
# ForkingServer that forks a new process for each request
#
package Thrift::ForkingServer;
use base qw( Thrift::Server );
use POSIX ":sys_wait_h";
# Construct a ForkingServer: delegate field setup to the Thrift::Server
# base constructor, then bless into the requested class.
sub new
{
    my ($classname, @ctor_args) = @_;
    my $self = $classname->SUPER::new(@ctor_args);
    return bless $self, $classname;
}
# Accept loop that hands each accepted connection to _client(), which
# forks a child process to service it.  Never returns.
sub serve
{
    my $self = shift;
    # THRIFT-3848: without ignoring SIGCHLD, perl ForkingServer goes into a tight loop
    $SIG{CHLD} = 'IGNORE';
    $self->{serverTransport}->listen();
    while (1)
    {
        my $client = $self->{serverTransport}->accept();
        $self->_client($client);
    }
}
# Handle one accepted connection: wrap it in the configured transports and
# protocols, then fork.  The parent registers/reaps children and closes its
# copies of the transports; the child runs the processing loop and exits.
# Any exception raised here is routed to _handleException().
sub _client
{
    my $self = shift;
    my $client = shift;
    eval {
        my $itrans = $self->{inputTransportFactory}->getTransport($client);
        my $otrans = $self->{outputTransportFactory}->getTransport($client);
        my $iprot = $self->{inputProtocolFactory}->getProtocol($itrans);
        my $oprot = $self->{outputProtocolFactory}->getProtocol($otrans);
        $self->_clientBegin($iprot, $oprot);
        my $pid = fork();
        # BUGFIX: fork() returns undef on failure.  Previously the undef
        # fell through to the child branch, so the *parent* ran _child()
        # and exit()ed, taking down the whole server.
        if (!defined $pid)
        {
            die "fork() failed: $!";
        }
        elsif ($pid) # parent
        {
            $self->_parent($pid, $itrans, $otrans);
        }
        else # child
        {
            $self->_child($itrans, $otrans, $iprot, $oprot);
        }
    }; if($@) {
        $self->_handleException($@);
    }
}
# Post-fork bookkeeping in the parent process: remember the child's pid,
# reap any already-finished children, and close the parent's copies of the
# connection transports.
sub _parent
{
    my ($self, $child_pid, $itrans, $otrans) = @_;
    # Register the child before reaping, otherwise we race with waitpid.
    $self->{children}{$child_pid} = 1;
    $self->_collectChildren();
    # The parent must close its transports or the connection may linger
    # after the child finishes.
    $self->tryClose($itrans);
    $self->tryClose($otrans);
}
# Post-fork work in the child process: service requests on this connection
# until process() dies, then close the transports and exit (non-zero when
# an exception terminated the loop).  Never returns.
sub _child
{
    my ($self, $itrans, $otrans, $iprot, $oprot) = @_;
    my $exit_code = 0;
    eval {
        # Serve this connection until process() raises.
        while (1)
        {
            $self->{processor}->process($iprot, $oprot);
        }
    };
    if ($@) {
        $exit_code = 1;
        $self->_handleException($@);
    }
    $self->tryClose($itrans);
    $self->tryClose($otrans);
    exit($exit_code);
}
# Best-effort close of a transport/file handle.  undef handles are a
# no-op; any exception from close() is warned (Thrift exceptions as
# "code:message") but never propagated.
sub tryClose
{
    my ($self, $handle) = @_;
    eval {
        $handle->close() if defined $handle;
    };
    if ($@) {
        if ($@ =~ m/TException/ and exists $@->{message}) {
            warn $@->{code} . ':' . $@->{message};
        }
        else {
            warn $@;
        }
    }
}
# Reap finished child processes without blocking.  Removes each reaped pid
# from $self->{children}; stops at the first still-running child (WNOHANG
# keeps waitpid non-blocking).
sub _collectChildren
{
    my $self = shift;
    while (scalar keys %{ $self->{children} })
    {
        my $reaped = waitpid(-1, WNOHANG);
        last if $reaped <= 0;
        delete $self->{children}{$reaped};
    }
}
1;
| jcgruenhage/dendrite | vendor/src/github.com/apache/thrift/lib/perl/lib/Thrift/Server.pm | Perl | apache-2.0 | 7,258 |
# EXTRACT VARIOUSLY DELIMITED TEXT SEQUENCES FROM STRINGS.
# FOR FULL DOCUMENTATION SEE Balanced.pod
use 5.005;
use strict;
package Text::Balanced;
use Exporter;
use SelfLoader;
use vars qw { $VERSION @ISA %EXPORT_TAGS };
use version; $VERSION = qv('2.0.0');
@ISA = qw ( Exporter );
%EXPORT_TAGS = ( ALL => [ qw(
&extract_delimited
&extract_bracketed
&extract_quotelike
&extract_codeblock
&extract_variable
&extract_tagged
&extract_multiple
&gen_delimited_pat
&gen_extract_tagged
&delimited_pat
) ] );
Exporter::export_ok_tags('ALL');
# PROTOTYPES
sub _match_bracketed($$$$$$);
sub _match_variable($$);
sub _match_codeblock($$$$$$$);
sub _match_quotelike($$$$);
# HANDLE RETURN VALUES IN VARIOUS CONTEXTS
# Record a failure in $@ as a Text::Balanced::ErrorMsg object (which
# stringifies to "<error>, detected at offset <pos>" via overload).
sub _failmsg {
    my ($error, $position) = @_;
    my $info = { error => $error, pos => $position };
    $@ = bless $info, 'Text::Balanced::ErrorMsg';
}
# Produce a failure return value in the caller's requested context:
# (undef, whole text, undef) for list context, undef for scalar/void.
# Records $message (with offset $pos) in $@ when one is supplied.
sub _fail
{
    my ($wantarray, $textref, $message, $pos) = @_;
    _failmsg($message, $pos) if $message;
    return (undef, $$textref, undef) if $wantarray;
    return undef;
}
# Record a successful extraction and produce the return value.  Clears $@.
# After ($wantarray, $textref), the remaining arguments are (offset,length)
# pairs describing slices of $$textref; an optional trailing pair describes
# a here-doc body ("fillet") that must be spliced back after the operator
# line.  In list context, returns the extracted substrings; in scalar/void
# context, returns just the match and (when the text is writable) excises
# prefix+match from the input.  NOTE(review): the pair positions used below
# ([5,6], [2], [0,1], [4,5]) are a fixed positional contract shared with
# every caller -- do not reorder arguments.
sub _succeed
{
    $@ = undef;
    my ($wantarray,$textref) = splice @_, 0, 2;
    # More than 18 remaining args means a here-doc fillet pair was appended.
    my ($extrapos, $extralen) = @_>18 ? splice(@_, -2, 2) : (0,0);
    my ($startlen, $oppos) = @_[5,6];
    my $remainderpos = $_[2];
    if ($wantarray)
    {
        my @res;
        while (my ($from, $len) = splice @_, 0, 2)
        {
            push @res, substr($$textref,$from,$len);
        }
        if ($extralen) { # CORRECT FILLET
            my $extra = substr($res[0], $extrapos-$oppos, $extralen, "\n");
            $res[1] = "$extra$res[1]";
            # eval: the substr assignments die harmlessly on read-only input.
            eval { substr($$textref,$remainderpos,0) = $extra;
                   substr($$textref,$extrapos,$extralen,"\n")} ;
            #REARRANGE HERE DOC AND FILLET IF POSSIBLE
            pos($$textref) = $remainderpos-$extralen+1; # RESET \G
        }
        else {
            pos($$textref) = $remainderpos; # RESET \G
        }
        return @res;
    }
    else
    {
        my $match = substr($$textref,$_[0],$_[1]);
        substr($match,$extrapos-$_[0]-$startlen,$extralen,"") if $extralen;
        my $extra = $extralen
                ? substr($$textref, $extrapos, $extralen)."\n" : "";
        eval {substr($$textref,$_[4],$_[1]+$_[5])=$extra} ; #CHOP OUT PREFIX & MATCH, IF POSSIBLE
        pos($$textref) = $_[4]; # RESET \G
        return $match;
    }
}
# BUILD A PATTERN MATCHING A SIMPLE DELIMITED STRING
# Build a non-capturing regex string matching a substring delimited by any
# one of the characters in $dels, honouring the per-delimiter escape
# characters in $escs (default: backslash for every delimiter).  Returns
# the empty string when $dels holds no non-whitespace character.
sub gen_delimited_pat($;$) # ($delimiters;$escapes)
{
    my ($dels, $escs) = @_;
    return "" unless $dels =~ /\S/;
    $escs = '\\' unless $escs;
    # Pad the escape list by repeating its last char so every delimiter
    # has a corresponding escape.
    $escs .= substr($escs, -1) x (length($dels) - length($escs));
    my @alternatives;
    for my $i (0 .. length($dels) - 1)
    {
        my $del = quotemeta substr($dels, $i, 1);
        my $esc = quotemeta substr($escs, $i, 1);
        # Self-escaped delimiters (e.g. '' inside '...') need the doubled
        # form; otherwise skip escape-plus-anything sequences.
        push @alternatives, $del eq $esc
            ? "$del(?:[^$del]*(?:(?:$del$del)[^$del]*)*)$del"
            : "$del(?:[^$esc$del]*(?:$esc.[^$esc$del]*)*)$del";
    }
    return '(?:' . join('|', @alternatives) . ')';
}
*delimited_pat = \&gen_delimited_pat;
# THE EXTRACTION FUNCTIONS
# Extract an initial substring delimited by one of the characters in the
# delimiter set (default: ' " `), optionally after a prefix pattern
# (default: optional whitespace), honouring escape characters (default:
# backslash).  Args: ($text, $delimiters, $prefix, $escapes) -- all
# optional; $_ is used when no text is given.  Returns via _succeed/_fail
# (list: match/remainder/prefix; scalar: match, excised from the input).
sub extract_delimited (;$$$$)
{
    my $textref = defined $_[0] ? \$_[0] : \$_;
    my $wantarray = wantarray;
    my $del  = defined $_[1] ? $_[1] : qq{\'\"\`};
    my $pre  = defined $_[2] ? $_[2] : '\s*';
    my $esc  = defined $_[3] ? $_[3] : qq{\\};
    my $pat = gen_delimited_pat($del, $esc);
    my $startpos = pos $$textref || 0;
    return _fail($wantarray, $textref, "Not a delimited pattern", 0)
        unless $$textref =~ m/\G($pre)($pat)/gc;
    my $prelen = length($1);
    my $matchpos = $startpos+$prelen;
    my $endpos = pos $$textref;
    return _succeed $wantarray, $textref,
                    $matchpos, $endpos-$matchpos,           # MATCH
                    $endpos,   length($$textref)-$endpos,   # REMAINDER
                    $startpos, $prelen;                     # PREFIX
}
# Extract a balanced-bracket-delimited substring.  Args (all optional):
# ($text, $delimiters, $prefix).  Quote characters (' " `) in $delimiters
# make embedded quoted sections protect unbalanced brackets; a 'q' in the
# set enables full Perl quotelike handling.  Delimiters default to all
# four bracket types.  See the POD for the full contract.
sub extract_bracketed (;$$$)
{
    my $textref = defined $_[0] ? \$_[0] : \$_;
    my $ldel = defined $_[1] ? $_[1] : '{([<';
    my $pre  = defined $_[2] ? $_[2] : '\s*';
    my $wantarray = wantarray;
    my $qdel = "";
    my $quotelike;
    # Peel quote chars and 'q' out of the delimiter set into their own flags.
    $ldel =~ s/'//g and $qdel .= q{'};
    $ldel =~ s/"//g and $qdel .= q{"};
    $ldel =~ s/`//g and $qdel .= q{`};
    $ldel =~ s/q//g and $quotelike = 1;
    # Normalize to left brackets only, dropping duplicates and non-brackets.
    $ldel =~ tr/[](){}<>\0-\377/[[(({{<</ds;
    my $rdel = $ldel;
    unless ($rdel =~ tr/[({</])}>/)
    {
        return _fail $wantarray, $textref,
                     "Did not find a suitable bracket in delimiter: \"$_[1]\"",
                     0;
    }
    # The map/join below would clobber pos() of $_; save and restore it.
    my $posbug = pos;
    $ldel = join('|', map { quotemeta $_ } split('', $ldel));
    $rdel = join('|', map { quotemeta $_ } split('', $rdel));
    pos = $posbug;
    my $startpos = pos $$textref || 0;
    my @match = _match_bracketed($textref,$pre, $ldel, $qdel, $quotelike, $rdel);
    return _fail ($wantarray, $textref) unless @match;
    return _succeed ( $wantarray, $textref,
                      $match[2], $match[5]+2,           # MATCH
                      @match[8,9],                      # REMAINDER
                      @match[0,1],                      # PREFIX
                    );
}
# Core matcher for extract_bracketed().  Starting at pos($$textref),
# matches: the prefix, an opening bracket, correctly nested brackets
# (skipping backslash escapes, embedded quoted sections, and -- when
# $quotelike is true -- Perl quotelike operators), and the matching
# closing bracket.  Returns (offset,length) pairs for PREFIX, OPENING
# BRACKET, CONTENTS, CLOSING BRACKET, REMAINDER; or the empty list on
# failure with $@ set via _failmsg and pos() restored.
sub _match_bracketed($$$$$$) # $textref, $pre, $ldel, $qdel, $quotelike, $rdel
{
    my ($textref, $pre, $ldel, $qdel, $quotelike, $rdel) = @_;
    my ($startpos, $ldelpos, $endpos) = (pos $$textref = pos $$textref||0);
    unless ($$textref =~ m/\G$pre/gc)
    {
        _failmsg "Did not find prefix: /$pre/", $startpos;
        return;
    }
    $ldelpos = pos $$textref;
    unless ($$textref =~ m/\G($ldel)/gc)
    {
        _failmsg "Did not find opening bracket after prefix: \"$pre\"",
                 pos $$textref;
        pos $$textref = $startpos;
        return;
    }
    # Stack of currently open brackets, outermost first.
    my @nesting = ( $1 );
    my $textlen = length $$textref;
    while (pos $$textref < $textlen)
    {
        # Backslash-escaped characters never open/close anything.
        next if $$textref =~ m/\G\\./gcs;
        if ($$textref =~ m/\G($ldel)/gc)
        {
            push @nesting, $1;
        }
        elsif ($$textref =~ m/\G($rdel)/gc)
        {
            my ($found, $brackettype) = ($1, $1);
            if ($#nesting < 0)
            {
                _failmsg "Unmatched closing bracket: \"$found\"",
                         pos $$textref;
                pos $$textref = $startpos;
                return;
            }
            my $expected = pop(@nesting);
            $expected =~ tr/({[</)}]>/;
            if ($expected ne $brackettype)
            {
                _failmsg qq{Mismatched closing bracket: expected "$expected" but found "$found"},
                         pos $$textref;
                pos $$textref = $startpos;
                return;
            }
            # Stack empty again: the outermost bracket just closed.
            last if $#nesting < 0;
        }
        elsif ($qdel && $$textref =~ m/\G([$qdel])/gc)
        {
            # Skip a whole quoted section so its brackets don't count.
            $$textref =~ m/\G[^\\$1]*(?:\\.[^\\$1]*)*(\Q$1\E)/gsc and next;
            _failmsg "Unmatched embedded quote ($1)",
                     pos $$textref;
            pos $$textref = $startpos;
            return;
        }
        elsif ($quotelike && _match_quotelike($textref,"",1,0))
        {
            next;
        }
        # Otherwise consume a word or single char and keep scanning.
        else { $$textref =~ m/\G(?:[a-zA-Z0-9]+|.)/gcs }
    }
    if ($#nesting>=0)
    {
        _failmsg "Unmatched opening bracket(s): "
                 . join("..",@nesting)."..",
                 pos $$textref;
        pos $$textref = $startpos;
        return;
    }
    $endpos = pos $$textref;
    return (
        $startpos,  $ldelpos-$startpos,         # PREFIX
        $ldelpos,   1,                          # OPENING BRACKET
        $ldelpos+1, $endpos-$ldelpos-2,         # CONTENTS
        $endpos-1,  1,                          # CLOSING BRACKET
        $endpos,    length($$textref)-$endpos,  # REMAINDER
    );
}
# Mirror a bracket sequence: reverse the characters and flip each opening
# bracket to its closing counterpart (e.g. '([<' becomes '>])').
sub _revbracket($)
{
    my $mirrored = reverse $_[0];
    $mirrored =~ tr/[({</])}>/;
    return $mirrored;
}
# Pattern for an XML name: leading letter/underscore/colon, then name chars.
my $XMLNAME = q{[a-zA-Z_:][a-zA-Z0-9_:.-]*};
# Extract text bounded by an opening tag and its matching closing tag.
# Args: ($text, $opentag, $closetag, $prefix, \%options); the opening tag
# defaults to an XML-style tag and the closing tag is derived from the
# actual opening tag matched.  Options: reject (tags that must not nest),
# ignore (tags to skip), fail ('MAX' or 'PARA' for partial matches).
sub extract_tagged (;$$$$$) # ($text, $opentag, $closetag, $pre, \%options)
{
    my $textref = defined $_[0] ? \$_[0] : \$_;
    my $ldel    = $_[1];
    my $rdel    = $_[2];
    my $pre     = defined $_[3] ? $_[3] : '\s*';
    my %options = defined $_[4] ? %{$_[4]} : ();
    my $omode   = defined $options{fail} ? $options{fail} : '';
    # reject/ignore accept either a single pattern or a list of patterns.
    my $bad     = ref($options{reject}) eq 'ARRAY' ? join('|', @{$options{reject}})
                : defined($options{reject})        ? $options{reject}
                :                                    ''
                ;
    my $ignore  = ref($options{ignore}) eq 'ARRAY' ? join('|', @{$options{ignore}})
                : defined($options{ignore})        ? $options{ignore}
                :                                    ''
                ;
    # Default opening tag: any XML-ish tag (quoted attribute values allowed).
    if (!defined $ldel) { $ldel = '<\w+(?:' . gen_delimited_pat(q{'"}) . '|[^>])*>'; }
    $@ = undef;
    my @match = _match_tagged($textref, $pre, $ldel, $rdel, $omode, $bad, $ignore);
    return _fail(wantarray, $textref) unless @match;
    return _succeed wantarray, $textref,
                    $match[2], $match[3]+$match[5]+$match[7], # MATCH
                    @match[8..9,0..1,2..7];                   # REM, PRE, BITS
}
# Core matcher for extract_tagged()/gen_extract_tagged().  Matches prefix,
# opening tag, tagged text (recursing on nested tags, skipping 'ignore'
# patterns, aborting or truncating on 'reject' patterns per $omode), and
# the closing tag (derived from the opening tag when $rdel is undef, or
# interpolated from $rdel otherwise).  Returns (offset,length) pairs for
# PREFIX, OPENING TAG, TEXT, CLOSING TAG, REMAINDER; empty list on failure.
# Uses goto between the short/matched/failed labels below.
sub _match_tagged # ($$$$$$$)
{
    my ($textref, $pre, $ldel, $rdel, $omode, $bad, $ignore) = @_;
    my $rdelspec;
    my ($startpos, $opentagpos, $textpos, $parapos, $closetagpos, $endpos) = ( pos($$textref) = pos($$textref)||0 );
    unless ($$textref =~ m/\G($pre)/gc)
    {
        _failmsg "Did not find prefix: /$pre/", pos $$textref;
        goto failed;
    }
    $opentagpos = pos($$textref);
    unless ($$textref =~ m/\G$ldel/gc)
    {
        _failmsg "Did not find opening tag: /$ldel/", pos $$textref;
        goto failed;
    }
    $textpos = pos($$textref);
    if (!defined $rdel)
    {
        # Derive the closing tag from the text actually matched by $ldel,
        # e.g. "<foo ...>" => "</foo>" (via _revbracket on the brackets).
        $rdelspec = substr($$textref, $-[0], $+[0] - $-[0]);
        unless ($rdelspec =~ s/\A([[(<{]+)($XMLNAME).*/ quotemeta "$1\/$2". _revbracket($1) /oes)
        {
            _failmsg "Unable to construct closing tag to match: $rdel",
                     pos $$textref;
            goto failed;
        }
    }
    else
    {
        # Interpolate the user-supplied closing tag (it may reference $1
        # etc. from the opening-tag match); pick a safe qq delimiter.
        $rdelspec = eval "qq{$rdel}" || do {
            my $del;
            for (qw,~ ! ^ & * ) _ + - = } ] : " ; ' > . ? / | ',)
            { next if $rdel =~ /\Q$_/; $del = $_; last }
            unless ($del) {
                use Carp;
                croak "Can't interpolate right delimiter $rdel"
            }
            eval "qq$del$rdel$del";
        };
    }
    while (pos($$textref) < length($$textref))
    {
        next if $$textref =~ m/\G\\./gc;
        if ($$textref =~ m/\G(\n[ \t]*\n)/gc )
        {
            # Remember the first paragraph break, for fail => 'PARA'.
            $parapos = pos($$textref) - length($1)
                unless defined $parapos;
        }
        elsif ($$textref =~ m/\G($rdelspec)/gc )
        {
            $closetagpos = pos($$textref)-length($1);
            goto matched;
        }
        elsif ($ignore && $$textref =~ m/\G(?:$ignore)/gc)
        {
            next;
        }
        elsif ($bad && $$textref =~ m/\G($bad)/gcs)
        {
            pos($$textref) -= length($1);       # CUT OFF WHATEVER CAUSED THE SHORTNESS
            goto short if ($omode eq 'PARA' || $omode eq 'MAX');
            _failmsg "Found invalid nested tag: $1", pos $$textref;
            goto failed;
        }
        elsif ($$textref =~ m/\G($ldel)/gc)
        {
            my $tag = $1;
            pos($$textref) -= length($tag);     # REWIND TO NESTED TAG
            unless (_match_tagged(@_))          # MATCH NESTED TAG
            {
                goto short if $omode eq 'PARA' || $omode eq 'MAX';
                _failmsg "Found unbalanced nested tag: $tag",
                         pos $$textref;
                goto failed;
            }
        }
        else { $$textref =~ m/./gcs }
    }
short:
    # Ran out of text (or hit a reject tag) before the closing tag.
    $closetagpos = pos($$textref);
    goto matched if $omode eq 'MAX';
    goto failed unless $omode eq 'PARA';
    if (defined $parapos) { pos($$textref) = $parapos }
    else                  { $parapos = pos($$textref) }
    return (
        $startpos,    $opentagpos-$startpos,        # PREFIX
        $opentagpos,  $textpos-$opentagpos,         # OPENING TAG
        $textpos,     $parapos-$textpos,            # TEXT
        $parapos,     0,                            # NO CLOSING TAG
        $parapos,     length($$textref)-$parapos,   # REMAINDER
    );
matched:
    $endpos = pos($$textref);
    return (
        $startpos,    $opentagpos-$startpos,        # PREFIX
        $opentagpos,  $textpos-$opentagpos,         # OPENING TAG
        $textpos,     $closetagpos-$textpos,        # TEXT
        $closetagpos, $endpos-$closetagpos,         # CLOSING TAG
        $endpos,      length($$textref)-$endpos,    # REMAINDER
    );
failed:
    _failmsg "Did not find closing tag", pos $$textref unless $@;
    pos($$textref) = $startpos;
    return;
}
# Extract an initial Perl variable access (scalar, array, hash, sub ref,
# including dereference chains like $x->{a}[0]).  Args: ($text, $prefix),
# both optional.  Returns via _succeed/_fail.
sub extract_variable (;$$)
{
    my $textref = defined $_[0] ? \$_[0] : \$_;
    # Undefined input text yields an all-empty result rather than failing.
    return ("","","") unless defined $$textref;
    my $pre = defined $_[1] ? $_[1] : '\s*';
    my @match = _match_variable($textref,$pre);
    return _fail wantarray, $textref unless @match;
    return _succeed wantarray, $textref,
                    @match[2..3,4..5,0..1];    # MATCH, REMAINDER, PREFIX
}
# Core matcher for extract_variable().  Matches the prefix, then either a
# punctuation/special variable ($1, $@, $^W, ...) or a sigil chain plus an
# identifier or {BLOCK}, then greedily consumes trailing subscripts,
# method-call arrow chains and slices.  Returns (offset,length) pairs for
# PREFIX, VARIABLE, REMAINDER; empty list on failure.
sub _match_variable($$)
{
#  $#
#  $^
#  $$
    my ($textref, $pre) = @_;
    my $startpos = pos($$textref) = pos($$textref)||0;
    unless ($$textref =~ m/\G($pre)/gc)
    {
        _failmsg "Did not find prefix: /$pre/", pos $$textref;
        return;
    }
    my $varpos = pos($$textref);
    # First try punctuation/special scalars ($1, $&, $^W, ...).
    unless ($$textref =~ m{\G\$\s*(?!::)(\d+|[][&`'+*./|,";%=~:?!\@<>()-]|\^[a-z]?)}gci)
    {
        # Otherwise: one or more sigils ($ @ % * & $#) ...
        unless ($$textref =~ m/\G((\$#?|[*\@\%]|\\&)+)/gc)
        {
            _failmsg "Did not find leading dereferencer", pos $$textref;
            pos $$textref = $startpos;
            return;
        }
        my $deref = $1;
        # ... followed by a (possibly package-qualified) identifier, a
        # {BLOCK} dereference, or nothing at all for bare $# / $$.
        unless ($$textref =~ m/\G\s*(?:::|')?(?:[_a-z]\w*(?:::|'))*[_a-z]\w*/gci
                or _match_codeblock($textref, "", '\{', '\}', '\{', '\}', 0)
                or $deref eq '$#' or $deref eq '$$' )
        {
            _failmsg "Bad identifier after dereferencer", pos $$textref;
            pos $$textref = $startpos;
            return;
        }
    }
    # Consume any trailing subscripts/arrow chains: ->{key}, [...], (...),
    # nested method calls, and bare ->method (without args).
    while (1)
    {
        next if $$textref =~ m/\G\s*(?:->)?\s*[{]\w+[}]/gc;
        next if _match_codeblock($textref,
                                 qr/\s*->\s*(?:[_a-zA-Z]\w+\s*)?/,
                                 qr/[({[]/, qr/[)}\]]/,
                                 qr/[({[]/, qr/[)}\]]/, 0);
        next if _match_codeblock($textref,
                                 qr/\s*/, qr/[{[]/, qr/[}\]]/,
                                 qr/[{[]/, qr/[}\]]/, 0);
        next if _match_variable($textref,'\s*->\s*');
        next if $$textref =~ m/\G\s*->\s*\w+(?![{([])/gc;
        last;
    }
    my $endpos = pos($$textref);
    return ($startpos, $varpos-$startpos,
            $varpos,   $endpos-$varpos,
            $endpos,   length($$textref)-$endpos
           );
}
# Extract a balanced-bracket-delimited block of Perl code.  Args (all
# optional): ($text, $inner_delims, $prefix, $outer_delims, $treat_as_pattern).
# Outer delimiters bound the block itself; inner delimiters are the
# brackets balanced within it.  Returns via _succeed/_fail.
sub extract_codeblock (;$$$$$)
{
    my $textref = defined $_[0] ? \$_[0] : \$_;
    my $wantarray = wantarray;
    my $ldel_inner = defined $_[1] ? $_[1] : '{';
    my $pre        = defined $_[2] ? $_[2] : '\s*';
    my $ldel_outer = defined $_[3] ? $_[3] : $ldel_inner;
    my $rd         = $_[4];
    my $rdel_inner = $ldel_inner;
    my $rdel_outer = $ldel_outer;
    # The tr/map/split below would clobber pos() of $_; save and restore.
    my $posbug = pos;
    # Normalize each delimiter set to left/right brackets, then turn each
    # into an alternation of quoted single brackets.
    for ($ldel_inner, $ldel_outer) { tr/[]()<>{}\0-\377/[[((<<{{/ds }
    for ($rdel_inner, $rdel_outer) { tr/[]()<>{}\0-\377/]]))>>}}/ds }
    for ($ldel_inner, $ldel_outer, $rdel_inner, $rdel_outer)
    {
        $_ = '('.join('|',map { quotemeta $_ } split('',$_)).')'
    }
    pos = $posbug;
    my @match = _match_codeblock($textref, $pre,
                                 $ldel_outer, $rdel_outer,
                                 $ldel_inner, $rdel_inner,
                                 $rd);
    return _fail($wantarray, $textref) unless @match;
    return _succeed($wantarray, $textref,
                    @match[2..3,4..5,0..1]    # MATCH, REMAINDER, PREFIX
                   );
}
# Core matcher for extract_codeblock().  Matches prefix, an outer opening
# bracket, then scans Perl code -- recognising variables, quotelikes,
# comments, operators and nested inner code blocks -- until the matching
# outer closing bracket.  $patvalid tracks whether a / or ? at the current
# position could start a match (pattern) rather than be division/ternary.
# Returns (offset,length) pairs for PREFIX, CODE BLOCK, REMAINDER; empty
# list on failure with pos() restored.
sub _match_codeblock($$$$$$$)
{
    my ($textref, $pre, $ldel_outer, $rdel_outer, $ldel_inner, $rdel_inner, $rd) = @_;
    my $startpos = pos($$textref) = pos($$textref) || 0;
    unless ($$textref =~ m/\G($pre)/gc)
    {
        _failmsg qq{Did not match prefix /$pre/ at"} .
                     substr($$textref,pos($$textref),20) .
                     q{..."},
                 pos $$textref;
        return;
    }
    my $codepos = pos($$textref);
    unless ($$textref =~ m/\G($ldel_outer)/gc)    # OUTERMOST DELIMITER
    {
        _failmsg qq{Did not find expected opening bracket at "} .
                     substr($$textref,pos($$textref),20) .
                     q{..."},
                 pos $$textref;
        pos $$textref = $startpos;
        return;
    }
    my $closing = $1;
    $closing =~ tr/([<{/)]>}/;
    my $matched;
    my $patvalid = 1;
    while (pos($$textref) < length($$textref))
    {
        $matched = '';
        # Regex-flag groups like (?) (s) (s?) when treating text as a pattern.
        if ($rd && $$textref =~ m#\G(\Q(?)\E|\Q(s?)\E|\Q(s)\E)#gc)
        {
            $patvalid = 0;
            next;
        }
        # Skip comments to end of line.
        if ($$textref =~ m/\G\s*#.*/gc)
        {
            next;
        }
        if ($$textref =~ m/\G\s*($rdel_outer)/gc)
        {
            unless ($matched = ($closing && $1 eq $closing) )
            {
                next if $1 eq '>';    # MIGHT BE A "LESS THAN"
                _failmsg q{Mismatched closing bracket at "} .
                             substr($$textref,pos($$textref),20) .
                             qq{...". Expected '$closing'},
                         pos $$textref;
            }
            last;
        }
        if (_match_variable($textref,'\s*') ||
            _match_quotelike($textref,'\s*',$patvalid,$patvalid) )
        {
            # After a term, a following / or ? is an operator, not a pattern.
            $patvalid = 0;
            next;
        }
        # NEED TO COVER MANY MORE CASES HERE!!!
        # After an operator/keyword, a / or ? CAN start a pattern.
        if ($$textref =~ m#\G\s*(?!$ldel_inner)
                           ( [-+*x/%^&|.]=?
                           | [!=]~
                           | =(?!>)
                           | (\*\*|&&|\|\||<<|>>)=?
                           | split|grep|map|return
                           | [([]
                           )#gcx)
        {
            $patvalid = 1;
            next;
        }
        if ( _match_codeblock($textref, '\s*', $ldel_inner, $rdel_inner, $ldel_inner, $rdel_inner, $rd) )
        {
            $patvalid = 1;
            next;
        }
        if ($$textref =~ m/\G\s*$ldel_outer/gc)
        {
            _failmsg q{Improperly nested codeblock at "} .
                         substr($$textref,pos($$textref),20) .
                         q{..."},
                     pos $$textref;
            last;
        }
        # Fallback: consume a word, fat/thin arrow, or single character.
        $patvalid = 0;
        $$textref =~ m/\G\s*(\w+|[-=>]>|.|\Z)/gc;
    }
    continue { $@ = undef }
    unless ($matched)
    {
        _failmsg 'No match found for opening bracket', pos $$textref
            unless $@;
        return;
    }
    my $endpos = pos($$textref);
    return ( $startpos, $codepos-$startpos,
             $codepos,  $endpos-$codepos,
             $endpos,   length($$textref)-$endpos,
           );
}
# Valid trailing-modifier pattern for each quotelike operator, keyed by
# operator name; 'none' covers bare /.../ (and ?...?) matches.
my %mods = (
    'none' => '[cgimsox]*',
    'm'    => '[cgimsox]*',
    's'    => '[cegimsox]*',
    'tr'   => '[cds]*',
    'y'    => '[cds]*',
    'qq'   => '',
    'qx'   => '',
    'qw'   => '',
    'qr'   => '[imsx]*',
    'q'    => '',
);
# Extract an initial Perl quote or quotelike operation (q/qq/qw/qx/qr, m,
# s, tr, y, bare quotes, here docs).  Args: ($text, $prefix), both
# optional.  Returns via _succeed/_fail with the full set of positional
# "bits" documented in the POD.
sub extract_quotelike (;$$)
{
    # BUGFIX/consistency: every other extract_* here tests defined($_[0]);
    # the previous truthiness test ("$_[0] ?") silently fell back to $_ for
    # the defined-but-false inputs "" and "0".
    my $textref = defined $_[0] ? \$_[0] : \$_;
    my $wantarray = wantarray;
    my $pre = defined $_[1] ? $_[1] : '\s*';
    my @match = _match_quotelike($textref,$pre,1,0);
    return _fail($wantarray, $textref) unless @match;
    return _succeed($wantarray, $textref,
                    $match[2], $match[18]-$match[2],    # MATCH
                    @match[18,19],                      # REMAINDER
                    @match[0,1],                        # PREFIX
                    @match[2..17],                      # THE BITS
                    @match[20,21],                      # ANY FILLET?
                   );
};
# Core matcher for extract_quotelike().  $rawmatch allows a bare /.../
# match; $qmark allows the archaic ?...? match.  Handles four shapes:
# (1) simple quotes and raw patterns, (2) here docs (<<LABEL, with the
# body "fillet" reported separately), (3) bracket-delimited operators
# (possibly with a second, differently-delimited block for s/tr/y), and
# (4) same-delimiter operators.  Returns a fixed sequence of
# (offset,length) pairs: PREFIX, OPERATOR, LDEL, STR/PAT, RDEL, 2nd LDEL,
# 2nd STR, 2nd RDEL, MODIFIERS, REMAINDER [, FILLET]; empty list on
# failure with pos() restored.
sub _match_quotelike($$$$) # ($textref, $prepat, $allow_raw_match)
{
    my ($textref, $pre, $rawmatch, $qmark) = @_;
    my ($textlen,$startpos,
        $oppos,
        $preld1pos,$ld1pos,$str1pos,$rd1pos,
        $preld2pos,$ld2pos,$str2pos,$rd2pos,
        $modpos) = ( length($$textref), pos($$textref) = pos($$textref) || 0 );
    unless ($$textref =~ m/\G($pre)/gc)
    {
        _failmsg qq{Did not find prefix /$pre/ at "} .
                     substr($$textref, pos($$textref), 20) .
                     q{..."},
                 pos $$textref;
        return;
    }
    $oppos = pos($$textref);
    my $initial = substr($$textref,$oppos,1);
    # Case 1: a bare quote (' " `), or / and ? when permitted by flags.
    if ($initial && $initial =~ m|^[\"\'\`]|
        || $rawmatch && $initial =~ m|^/|
        || $qmark && $initial =~ m|^\?|)
    {
        unless ($$textref =~ m/ \Q$initial\E [^\\$initial]* (\\.[^\\$initial]*)* \Q$initial\E /gcsx)
        {
            _failmsg qq{Did not find closing delimiter to match '$initial' at "} .
                         substr($$textref, $oppos, 20) .
                         q{..."},
                     pos $$textref;
            pos $$textref = $startpos;
            return;
        }
        $modpos= pos($$textref);
        $rd1pos = $modpos-1;
        # Only raw patterns take trailing modifiers.
        if ($initial eq '/' || $initial eq '?')
        {
            $$textref =~ m/\G$mods{none}/gc
        }
        my $endpos = pos($$textref);
        return (
            $startpos, $oppos-$startpos,    # PREFIX
            $oppos,    0,                   # NO OPERATOR
            $oppos,    1,                   # LEFT DEL
            $oppos+1,  $rd1pos-$oppos-1,    # STR/PAT
            $rd1pos,   1,                   # RIGHT DEL
            $modpos,   0,                   # NO 2ND LDEL
            $modpos,   0,                   # NO 2ND STR
            $modpos,   0,                   # NO 2ND RDEL
            $modpos,   $endpos-$modpos,     # MODIFIERS
            $endpos,   $textlen-$endpos,    # REMAINDER
        );
    }
    # Otherwise there must be a quotelike operator or a here-doc marker.
    unless ($$textref =~ m{\G(\b(?:m|s|qq|qx|qw|q|qr|tr|y)\b(?=\s*\S)|<<)}gc)
    {
        _failmsg q{No quotelike operator found after prefix at "} .
                     substr($$textref, pos($$textref), 20) .
                     q{..."},
                 pos $$textref;
        pos $$textref = $startpos;
        return;
    }
    my $op = $1;
    $preld1pos = pos($$textref);
    # Case 2: here doc.  The body between the operator line and the
    # terminator is the "fillet", reported in the trailing extra pair.
    if ($op eq '<<') {
        $ld1pos = pos($$textref);
        my $label;
        if ($$textref =~ m{\G([A-Za-z_]\w*)}gc) {
            $label = $1;
        }
        elsif ($$textref =~ m{ \G ' ([^'\\]* (?:\\.[^'\\]*)*) '
                             | \G " ([^"\\]* (?:\\.[^"\\]*)*) "
                             | \G ` ([^`\\]* (?:\\.[^`\\]*)*) `
                             }gcsx) {
            $label = $+;
        }
        else {
            $label = "";
        }
        my $extrapos = pos($$textref);
        $$textref =~ m{.*\n}gc;
        $str1pos = pos($$textref)--;
        unless ($$textref =~ m{.*?\n(?=\Q$label\E\n)}gc) {
            _failmsg qq{Missing here doc terminator ('$label') after "} .
                         substr($$textref, $startpos, 20) .
                         q{..."},
                     pos $$textref;
            pos $$textref = $startpos;
            return;
        }
        $rd1pos = pos($$textref);
        $$textref =~ m{\Q$label\E\n}gc;
        $ld2pos = pos($$textref);
        return (
            $startpos, $oppos-$startpos,    # PREFIX
            $oppos,    length($op),         # OPERATOR
            $ld1pos,   $extrapos-$ld1pos,   # LEFT DEL
            $str1pos,  $rd1pos-$str1pos,    # STR/PAT
            $rd1pos,   $ld2pos-$rd1pos,     # RIGHT DEL
            $ld2pos,   0,                   # NO 2ND LDEL
            $ld2pos,   0,                   # NO 2ND STR
            $ld2pos,   0,                   # NO 2ND RDEL
            $ld2pos,   0,                   # NO MODIFIERS
            $ld2pos,   $textlen-$ld2pos,    # REMAINDER
            $extrapos, $str1pos-$extrapos,  # FILLETED BIT
        );
    }
    # Cases 3 and 4: operator followed by a delimiter of the user's choice.
    $$textref =~ m/\G\s*/gc;
    $ld1pos = pos($$textref);
    $str1pos = $ld1pos+1;
    unless ($$textref =~ m/\G(\S)/gc)    # SHOULD USE LOOKAHEAD
    {
        _failmsg "No block delimiter found after quotelike $op",
                 pos $$textref;
        pos $$textref = $startpos;
        return;
    }
    pos($$textref) = $ld1pos;    # HAVE TO DO THIS BECAUSE LOOKAHEAD BROKEN
    my ($ldel1, $rdel1) = ("\Q$1","\Q$1");
    if ($ldel1 =~ /[[(<{]/)
    {
        # Bracket delimiter: the first block nests, so use the bracket matcher.
        $rdel1 =~ tr/[({</])}>/;
        defined(_match_bracketed($textref,"",$ldel1,"","",$rdel1))
            || do { pos $$textref = $startpos; return };
        $ld2pos = pos($$textref);
        $rd1pos = $ld2pos-1;
    }
    else
    {
        # Non-bracket delimiter: scan to the unescaped closing occurrence.
        $$textref =~ /\G$ldel1[^\\$ldel1]*(\\.[^\\$ldel1]*)*$ldel1/gcs
            || do { pos $$textref = $startpos; return };
        $ld2pos = $rd1pos = pos($$textref)-1;
    }
    # s///, tr///, y/// take a second block.
    my $second_arg = $op =~ /s|tr|y/ ? 1 : 0;
    if ($second_arg)
    {
        my ($ldel2, $rdel2);
        if ($ldel1 =~ /[[(<{]/)
        {
            # Bracketed first block: the second block chooses its own delimiter.
            unless ($$textref =~ /\G\s*(\S)/gc)    # SHOULD USE LOOKAHEAD
            {
                _failmsg "Missing second block for quotelike $op",
                         pos $$textref;
                pos $$textref = $startpos;
                return;
            }
            $ldel2 = $rdel2 = "\Q$1";
            $rdel2 =~ tr/[({</])}>/;
        }
        else
        {
            # Same-delimiter form: second block reuses the first delimiter.
            $ldel2 = $rdel2 = $ldel1;
        }
        $str2pos = $ld2pos+1;
        if ($ldel2 =~ /[[(<{]/)
        {
            pos($$textref)--;    # OVERCOME BROKEN LOOKAHEAD
            defined(_match_bracketed($textref,"",$ldel2,"","",$rdel2))
                || do { pos $$textref = $startpos; return };
        }
        else
        {
            $$textref =~ /[^\\$ldel2]*(\\.[^\\$ldel2]*)*$ldel2/gcs
                || do { pos $$textref = $startpos; return };
        }
        $rd2pos = pos($$textref)-1;
    }
    else
    {
        $ld2pos = $str2pos = $rd2pos = $rd1pos;
    }
    $modpos = pos $$textref;
    $$textref =~ m/\G($mods{$op})/gc;
    my $endpos = pos $$textref;
    return (
        $startpos, $oppos-$startpos,    # PREFIX
        $oppos,    length($op),         # OPERATOR
        $ld1pos,   1,                   # LEFT DEL
        $str1pos,  $rd1pos-$str1pos,    # STR/PAT
        $rd1pos,   1,                   # RIGHT DEL
        $ld2pos,   $second_arg,         # 2ND LDEL (MAYBE)
        $str2pos,  $rd2pos-$str2pos,    # 2ND STR (MAYBE)
        $rd2pos,   $second_arg,         # 2ND RDEL (MAYBE)
        $modpos,   $endpos-$modpos,     # MODIFIERS
        $endpos,   $textlen-$endpos,    # REMAINDER
    );
}
# Default extractors for extract_multiple(): a variable, a quotelike, or a
# {...} code block, each tried with an empty (no-skip) prefix.
my $def_func =
[
    sub { extract_variable($_[0], '') },
    sub { extract_quotelike($_[0],'') },
    sub { extract_codeblock($_[0],'{}','') },
];
# Repeatedly apply a sequence of extractors to the text, collecting the
# fields each one matches.  Extractors may be code refs (extract_*-style),
# pre-compiled Text::Balanced::Extractor objects, or plain regexes; a
# hashref { Class => extractor } blesses each matched field into Class.
# Unmatched text between fields is kept (as plain strings) unless
# $ignoreunknown is true.  In scalar context at most one field is
# extracted and it is removed from the input.
sub extract_multiple (;$$$$)    # ($text, $functions_ref, $max_fields, $ignoreunknown)
{
    my $textref = defined($_[0]) ? \$_[0] : \$_;
    my $posbug = pos;
    my ($lastpos, $firstpos);
    my @fields = ();
    #for ($$textref)
    {
        my @func = defined $_[1] ? @{$_[1]} : @{$def_func};
        my $max  = defined $_[2] && $_[2]>0 ? $_[2] : 1_000_000_000;
        my $igunk = $_[3];
        pos $$textref ||= 0;
        unless (wantarray)
        {
            use Carp;
            carp "extract_multiple reset maximal count to 1 in scalar context"
                if $^W && defined($_[2]) && $max > 1;
            $max = 1
        }
        my $unkpos;
        my $func;
        my $class;
        my @class;
        # Split any { Class => extractor } entries into parallel arrays.
        foreach $func ( @func )
        {
            if (ref($func) eq 'HASH')
            {
                push @class, (keys %$func)[0];
                $func = (values %$func)[0];
            }
            else
            {
                push @class, undef;
            }
        }
        FIELD: while (pos($$textref) < length($$textref))
        {
            my ($field, $rem);
            my @bits;
            # Try each extractor in turn at the current position.
            foreach my $i ( 0..$#func )
            {
                my $pref;
                $func = $func[$i];
                $class = $class[$i];
                $lastpos = pos $$textref;
                if (ref($func) eq 'CODE')
                    { ($field,$rem,$pref) = @bits = $func->($$textref) }
                elsif (ref($func) eq 'Text::Balanced::Extractor')
                    { @bits = $field = $func->extract($$textref) }
                elsif( $$textref =~ m/\G$func/gc )
                    { @bits = $field = defined($1)
                        ? $1
                        : substr($$textref, $-[0], $+[0] - $-[0])
                    }
                $pref ||= "";
                if (defined($field) && length($field))
                {
                    # Flush any accumulated unknown text before the field.
                    if (!$igunk) {
                        $unkpos = $lastpos
                            if length($pref) && !defined($unkpos);
                        if (defined $unkpos)
                        {
                            push @fields, substr($$textref, $unkpos, $lastpos-$unkpos).$pref;
                            $firstpos = $unkpos unless defined $firstpos;
                            undef $unkpos;
                            last FIELD if @fields == $max;
                        }
                    }
                    push @fields, $class
                        ? bless (\$field, $class)
                        : $field;
                    $firstpos = $lastpos unless defined $firstpos;
                    $lastpos = pos $$textref;
                    last FIELD if @fields == $max;
                    next FIELD;
                }
            }
            # No extractor matched: consume one char of "unknown" text.
            if ($$textref =~ /\G(.)/gcs)
            {
                $unkpos = pos($$textref)-1
                    unless $igunk || defined $unkpos;
            }
        }
        # Trailing unknown text becomes the final field.
        if (defined $unkpos)
        {
            push @fields, substr($$textref, $unkpos);
            $firstpos = $unkpos unless defined $firstpos;
            $lastpos = length $$textref;
        }
        last;
    }
    pos $$textref = $lastpos;
    return @fields if wantarray;
    $firstpos ||= 0;
    # Scalar context: excise the extracted span (eval guards read-only text).
    eval { substr($$textref,$firstpos,$lastpos-$firstpos)="";
           pos $$textref = $firstpos };
    return $fields[0];
}
# Build and return a pre-compiled extract_tagged()-like closure (blessed
# into Text::Balanced::Extractor) for a fixed tag pair and option set.
# The patterns are compiled once with qr//, making repeated extraction
# faster than calling extract_tagged() each time.
sub gen_extract_tagged # ($opentag, $closetag, $pre, \%options)
{
    my $ldel    = $_[0];
    my $rdel    = $_[1];
    my $pre     = defined $_[2] ? $_[2] : '\s*';
    my %options = defined $_[3] ? %{$_[3]} : ();
    my $omode   = defined $options{fail} ? $options{fail} : '';
    # reject/ignore accept either a single pattern or a list of patterns.
    my $bad     = ref($options{reject}) eq 'ARRAY' ? join('|', @{$options{reject}})
                : defined($options{reject})        ? $options{reject}
                :                                    ''
                ;
    my $ignore  = ref($options{ignore}) eq 'ARRAY' ? join('|', @{$options{ignore}})
                : defined($options{ignore})        ? $options{ignore}
                :                                    ''
                ;
    # Default opening tag: any XML-ish tag (quoted attribute values allowed).
    if (!defined $ldel) { $ldel = '<\w+(?:' . gen_delimited_pat(q{'"}) . '|[^>])*>'; }
    # qr// compilation would clobber pos() of $_; save and restore it.
    my $posbug = pos;
    for ($ldel, $pre, $bad, $ignore) { $_ = qr/$_/ if $_ }
    pos = $posbug;
    my $closure = sub
    {
        my $textref = defined $_[0] ? \$_[0] : \$_;
        my @match = Text::Balanced::_match_tagged($textref, $pre, $ldel, $rdel, $omode, $bad, $ignore);
        return _fail(wantarray, $textref) unless @match;
        return _succeed wantarray, $textref,
                        $match[2], $match[3]+$match[5]+$match[7], # MATCH
                        @match[8..9,0..1,2..7];                   # REM, PRE, BITS
    };
    bless $closure, 'Text::Balanced::Extractor';
}
package Text::Balanced::Extractor;
# Wrapper class for the closures built by gen_extract_tagged().
sub extract($$) # ($self, $text)
{
    # Invoke via &{...}($_[1]) so $_[1] keeps its aliasing to the caller's
    # text variable -- the closure's scalar/void-context behaviour mutates
    # the caller's string through that alias.
    &{$_[0]}($_[1]);
}
package Text::Balanced::ErrorMsg;
# Error object stored in $@ by _failmsg(); stringifies to a readable
# message including the offset at which the failure was detected.
use overload '""' => sub { "$_[0]->{error}, detected at offset $_[0]->{pos}" };
1;
__END__
=head1 NAME
Text::Balanced - Extract delimited text sequences from strings.
=head1 SYNOPSIS
use Text::Balanced qw (
extract_delimited
extract_bracketed
extract_quotelike
extract_codeblock
extract_variable
extract_tagged
extract_multiple
gen_delimited_pat
gen_extract_tagged
);
# Extract the initial substring of $text that is delimited by
# two (unescaped) instances of the first character in $delim.
($extracted, $remainder) = extract_delimited($text,$delim);
# Extract the initial substring of $text that is bracketed
# with a delimiter(s) specified by $delim (where the string
# in $delim contains one or more of '(){}[]<>').
($extracted, $remainder) = extract_bracketed($text,$delim);
# Extract the initial substring of $text that is bounded by
# an XML tag.
($extracted, $remainder) = extract_tagged($text);
# Extract the initial substring of $text that is bounded by
# a C<BEGIN>...C<END> pair. Don't allow nested C<BEGIN> tags
($extracted, $remainder) =
 extract_tagged($text,"BEGIN","END",undef,{reject=>["BEGIN"]});
# Extract the initial substring of $text that represents a
# Perl "quote or quote-like operation"
($extracted, $remainder) = extract_quotelike($text);
# Extract the initial substring of $text that represents a block
# of Perl code, bracketed by any of character(s) specified by $delim
# (where the string $delim contains one or more of '(){}[]<>').
($extracted, $remainder) = extract_codeblock($text,$delim);
# Extract the initial substrings of $text that would be extracted by
# one or more sequential applications of the specified functions
# or regular expressions
@extracted = extract_multiple($text,
[ \&extract_bracketed,
\&extract_quotelike,
\&some_other_extractor_sub,
qr/[xyz]*/,
'literal',
]);
# Create a string representing an optimized pattern (a la Friedl)
# that matches a substring delimited by any of the specified characters
# (in this case: any type of quote or a slash)
$patstring = gen_delimited_pat(q{'"`/});
# Generate a reference to an anonymous sub that is just like extract_tagged
# but pre-compiled and optimized for a specific pair of tags, and consequently
# much faster (i.e. 3 times faster). It uses qr// for better performance on
# repeated calls, so it only works under Perl 5.005 or later.
$extract_head = gen_extract_tagged('<HEAD>','</HEAD>');
($extracted, $remainder) = $extract_head->($text);
=head1 DESCRIPTION
The various C<extract_...> subroutines may be used to
extract a delimited substring, possibly after skipping a
specified prefix string. By default, that prefix is
optional whitespace (C</\s*/>), but you can change it to whatever
you wish (see below).
The substring to be extracted must appear at the
current C<pos> location of the string's variable
(or at index zero, if no C<pos> position is defined).
In other words, the C<extract_...> subroutines I<don't>
extract the first occurrence of a substring anywhere
in a string (like an unanchored regex would). Rather,
they extract an occurrence of the substring appearing
immediately at the current matching position in the
string (like a C<\G>-anchored regex would).
=head2 General behaviour in list contexts
In a list context, all the subroutines return a list, the first three
elements of which are always:
=over 4
=item [0]
The extracted string, including the specified delimiters.
If the extraction fails C<undef> is returned.
=item [1]
The remainder of the input string (i.e. the characters after the
extracted string). On failure, the entire string is returned.
=item [2]
The skipped prefix (i.e. the characters before the extracted string).
On failure, C<undef> is returned.
=back
Note that in a list context, the contents of the original input text (the first
argument) are not modified in any way.
However, if the input text was passed in a variable, that variable's
C<pos> value is updated to point at the first character after the
extracted text. That means that in a list context the various
subroutines can be used much like regular expressions. For example:
while ( $next = (extract_quotelike($text))[0] )
{
# process next quote-like (in $next)
}
=head2 General behaviour in scalar and void contexts
In a scalar context, the extracted string is returned, having first been
removed from the input text. Thus, the following code also processes
each quote-like operation, but actually removes them from $text:
while ( $next = extract_quotelike($text) )
{
# process next quote-like (in $next)
}
Note that if the input text is a read-only string (i.e. a literal),
no attempt is made to remove the extracted text.
In a void context the behaviour of the extraction subroutines is
exactly the same as in a scalar context, except (of course) that the
extracted substring is not returned.
=head2 A note about prefixes
Prefix patterns are matched without any trailing modifiers (C</gimsox> etc.)
This can bite you if you're expecting a prefix specification like
'.*?(?=<H1>)' to skip everything up to the first <H1> tag. Such a prefix
pattern will only succeed if the <H1> tag is on the current line, since
. normally doesn't match newlines.
To overcome this limitation, you need to turn on /s matching within
the prefix pattern, using the C<(?s)> directive: '(?s).*?(?=<H1>)'
=head2 C<extract_delimited>
The C<extract_delimited> function formalizes the common idiom
of extracting a single-character-delimited substring from the start of
a string. For example, to extract a single-quote delimited string, the
following code is typically used:
($remainder = $text) =~ s/\A('(\\.|[^'])*')//s;
$extracted = $1;
but with C<extract_delimited> it can be simplified to:
($extracted,$remainder) = extract_delimited($text, "'");
C<extract_delimited> takes up to four scalars (the input text, the
delimiters, a prefix pattern to be skipped, and any escape characters)
and extracts the initial substring of the text that
is appropriately delimited. If the delimiter string has multiple
characters, the first one encountered in the text is taken to delimit
the substring.
The third argument specifies a prefix pattern that is to be skipped
(but must be present!) before the substring is extracted.
The final argument specifies the escape character to be used for each
delimiter.
All arguments are optional. If the escape characters are not specified,
every delimiter is escaped with a backslash (C<\>).
If the prefix is not specified, the
pattern C<'\s*'> - optional whitespace - is used. If the delimiter set
is also not specified, the set C</["'`]/> is used. If the text to be processed
is not specified either, C<$_> is used.
In list context, C<extract_delimited> returns an array of three
elements, the extracted substring (I<including the surrounding
delimiters>), the remainder of the text, and the skipped prefix (if
any). If a suitable delimited substring is not found, the first
element of the array is the empty string, the second is the complete
original text, and the prefix returned in the third element is an
empty string.
In a scalar context, just the extracted substring is returned. In
a void context, the extracted substring (and any prefix) are simply
removed from the beginning of the first argument.
Examples:
# Remove a single-quoted substring from the very beginning of $text:
$substring = extract_delimited($text, "'", '');
# Remove a single-quoted Pascalish substring (i.e. one in which
# doubling the quote character escapes it) from the very
# beginning of $text:
$substring = extract_delimited($text, "'", '', "'");
# Extract a single- or double- quoted substring from the
# beginning of $text, optionally after some whitespace
# (note the list context to protect $text from modification):
($substring) = extract_delimited $text, q{"'};
# Delete the substring delimited by the first '/' in $text:
 $text = join '', (extract_delimited($text,'/','[^/]*'))[2,1];
Note that this last example is I<not> the same as deleting the first
quote-like pattern. For instance, if C<$text> contained the string:
"if ('./cmd' =~ m/$UNIXCMD/s) { $cmd = $1; }"
then after the deletion it would contain:
"if ('.$UNIXCMD/s) { $cmd = $1; }"
not:
"if ('./cmd' =~ ms) { $cmd = $1; }"
See L<"extract_quotelike"> for a (partial) solution to this problem.
=head2 C<extract_bracketed>
Like C<"extract_delimited">, the C<extract_bracketed> function takes
up to three optional scalar arguments: a string to extract from, a delimiter
specifier, and a prefix pattern. As before, a missing prefix defaults to
optional whitespace and a missing text defaults to C<$_>. However, a missing
delimiter specifier defaults to C<'{}()[]E<lt>E<gt>'> (see below).
C<extract_bracketed> extracts a balanced-bracket-delimited
substring (using any one (or more) of the user-specified delimiter
brackets: '(..)', '{..}', '[..]', or '<..>'). Optionally it will also
respect quoted unbalanced brackets (see below).
A "delimiter bracket" is a bracket in list of delimiters passed as
C<extract_bracketed>'s second argument. Delimiter brackets are
specified by giving either the left or right (or both!) versions
of the required bracket(s). Note that the order in which
two or more delimiter brackets are specified is not significant.
A "balanced-bracket-delimited substring" is a substring bounded by
matched brackets, such that any other (left or right) delimiter
bracket I<within> the substring is also matched by an opposite
(right or left) delimiter bracket I<at the same level of nesting>. Any
type of bracket not in the delimiter list is treated as an ordinary
character.
In other words, each type of bracket specified as a delimiter must be
balanced and correctly nested within the substring, and any other kind of
("non-delimiter") bracket in the substring is ignored.
For example, given the string:
$text = "{ an '[irregularly :-(] {} parenthesized >:-)' string }";
then a call to C<extract_bracketed> in a list context:
@result = extract_bracketed( $text, '{}' );
would return:
( "{ an '[irregularly :-(] {} parenthesized >:-)' string }" , "" , "" )
since both sets of C<'{..}'> brackets are properly nested and evenly balanced.
(In a scalar context just the first element of the array would be returned. In
a void context, C<$text> would be replaced by an empty string.)
Likewise the call in:
@result = extract_bracketed( $text, '{[' );
would return the same result, since all sets of both types of specified
delimiter brackets are correctly nested and balanced.
However, the call in:
@result = extract_bracketed( $text, '{([<' );
would fail, returning:
( undef , "{ an '[irregularly :-(] {} parenthesized >:-)' string }" );
because the embedded pairs of C<'(..)'>s and C<'[..]'>s are "cross-nested" and
the embedded C<'E<gt>'> is unbalanced. (In a scalar context, this call would
return an empty string. In a void context, C<$text> would be unchanged.)
Note that the embedded single-quotes in the string don't help in this
case, since they have not been specified as acceptable delimiters and are
therefore treated as non-delimiter characters (and ignored).
However, if a particular species of quote character is included in the
delimiter specification, then that type of quote will be correctly handled.
For example, if C<$text> is:
$text = '<A HREF=">>>>">link</A>';
then
@result = extract_bracketed( $text, '<">' );
returns:
( '<A HREF=">>>>">', 'link</A>', "" )
as expected. Without the specification of C<"> as an embedded quoter:
@result = extract_bracketed( $text, '<>' );
the result would be:
( '<A HREF=">', '>>>">link</A>', "" )
In addition to the quote delimiters C<'>, C<">, and C<`>, full Perl quote-like
quoting (i.e. q{string}, qq{string}, etc) can be specified by including the
letter 'q' as a delimiter. Hence:
@result = extract_bracketed( $text, '<q>' );
would correctly match something like this:
$text = '<leftop: conj /and/ conj>';
See also: C<"extract_quotelike"> and C<"extract_codeblock">.
=head2 C<extract_variable>
C<extract_variable> extracts any valid Perl variable or
variable-involved expression, including scalars, arrays, hashes, array
accesses, hash look-ups, method calls through objects, subroutine calls
through subroutine references, etc.
The subroutine takes up to two optional arguments:
=over 4
=item 1.
A string to be processed (C<$_> if the string is omitted or C<undef>)
=item 2.
A string specifying a pattern to be matched as a prefix (which is to be
skipped). If omitted, optional whitespace is skipped.
=back
On success in a list context, an array of 3 elements is returned. The
elements are:
=over 4
=item [0]
the extracted variable, or variablish expression
=item [1]
the remainder of the input text,
=item [2]
the prefix substring (if any),
=back
On failure, all of these values (except the remaining text) are C<undef>.
In a scalar context, C<extract_variable> returns just the complete
substring that matched a variablish expression. C<undef> is returned on
failure. In addition, the original input text has the returned substring
(and any prefix) removed from it.
In a void context, the input text just has the matched substring (and
any specified prefix) removed.
=head2 C<extract_tagged>
C<extract_tagged> extracts and segments text between (balanced)
specified tags.
The subroutine takes up to five optional arguments:
=over 4
=item 1.
A string to be processed (C<$_> if the string is omitted or C<undef>)
=item 2.
A string specifying a pattern to be matched as the opening tag.
If the pattern string is omitted (or C<undef>) then a pattern
that matches any standard XML tag is used.
=item 3.
A string specifying a pattern to be matched at the closing tag.
If the pattern string is omitted (or C<undef>) then the closing
tag is constructed by inserting a C</> after any leading bracket
characters in the actual opening tag that was matched (I<not> the pattern
that matched the tag). For example, if the opening tag pattern
is specified as C<'{{\w+}}'> and actually matched the opening tag
C<"{{DATA}}">, then the constructed closing tag would be C<"{{/DATA}}">.
=item 4.
A string specifying a pattern to be matched as a prefix (which is to be
skipped). If omitted, optional whitespace is skipped.
=item 5.
A hash reference containing various parsing options (see below)
=back
The various options that can be specified are:
=over 4
=item C<reject =E<gt> $listref>
The list reference contains one or more strings specifying patterns
that must I<not> appear within the tagged text.
For example, to extract
an HTML link (which should not contain nested links) use:
extract_tagged($text, '<A>', '</A>', undef, {reject => ['<A>']} );
=item C<ignore =E<gt> $listref>
The list reference contains one or more strings specifying patterns
that are I<not> to be treated as nested tags within the tagged text
(even if they would match the start tag pattern).
For example, to extract an arbitrary XML tag, but ignore "empty" elements:
extract_tagged($text, undef, undef, undef, {ignore => ['<[^>]*/>']} );
(also see L<"gen_delimited_pat"> below).
=item C<fail =E<gt> $str>
The C<fail> option indicates the action to be taken if a matching end
tag is not encountered (i.e. before the end of the string or some
C<reject> pattern matches). By default, a failure to match a closing
tag causes C<extract_tagged> to immediately fail.
However, if the string value associated with C<fail> is "MAX", then
C<extract_tagged> returns the complete text up to the point of failure.
If the string is "PARA", C<extract_tagged> returns only the first paragraph
after the tag (up to the first line that is either empty or contains
only whitespace characters).
If the string is "", the default behaviour (i.e. failure) is reinstated.
For example, suppose the start tag "/para" introduces a paragraph, which then
continues until the next "/endpara" tag or until another "/para" tag is
encountered:
$text = "/para line 1\n\nline 3\n/para line 4";
extract_tagged($text, '/para', '/endpara', undef,
{reject => '/para', fail => MAX} );
# EXTRACTED: "/para line 1\n\nline 3\n"
Suppose instead, that if no matching "/endpara" tag is found, the "/para"
tag refers only to the immediately following paragraph:
$text = "/para line 1\n\nline 3\n/para line 4";
extract_tagged($text, '/para', '/endpara', undef,
{reject => '/para', fail => PARA} );
# EXTRACTED: "/para line 1\n"
Note that the specified C<fail> behaviour applies to nested tags as well.
=back
On success in a list context, an array of 6 elements is returned. The elements are:
=over 4
=item [0]
the extracted tagged substring (including the outermost tags),
=item [1]
the remainder of the input text,
=item [2]
the prefix substring (if any),
=item [3]
the opening tag
=item [4]
the text between the opening and closing tags
=item [5]
the closing tag (or "" if no closing tag was found)
=back
On failure, all of these values (except the remaining text) are C<undef>.
In a scalar context, C<extract_tagged> returns just the complete
substring that matched a tagged text (including the start and end
tags). C<undef> is returned on failure. In addition, the original input
text has the returned substring (and any prefix) removed from it.
In a void context, the input text just has the matched substring (and
any specified prefix) removed.
=head2 C<gen_extract_tagged>
(Note: This subroutine is only available under Perl5.005)
C<gen_extract_tagged> generates a new anonymous subroutine which
extracts text between (balanced) specified tags. In other words,
it generates a function identical in function to C<extract_tagged>.
The difference between C<extract_tagged> and the anonymous
subroutines generated by
C<gen_extract_tagged>, is that those generated subroutines:
=over 4
=item *
do not have to reparse tag specification or parsing options every time
they are called (whereas C<extract_tagged> has to effectively rebuild
its tag parser on every call);
=item *
make use of the new qr// construct to pre-compile the regexes they use
(whereas C<extract_tagged> uses standard string variable interpolation
to create tag-matching patterns).
=back
The subroutine takes up to four optional arguments (the same set as
C<extract_tagged> except for the string to be processed). It returns
a reference to a subroutine which in turn takes a single argument (the text to
be extracted from).
In other words, the implementation of C<extract_tagged> is exactly
equivalent to:
sub extract_tagged
{
my $text = shift;
$extractor = gen_extract_tagged(@_);
return $extractor->($text);
}
(although C<extract_tagged> is not currently implemented that way, in order
to preserve pre-5.005 compatibility).
Using C<gen_extract_tagged> to create extraction functions for specific tags
is a good idea if those functions are going to be called more than once, since
their performance is typically twice as good as the more general-purpose
C<extract_tagged>.
=head2 C<extract_quotelike>
C<extract_quotelike> attempts to recognize, extract, and segment any
one of the various Perl quotes and quotelike operators (see
L<perlop(3)>). Nested backslashed delimiters, embedded balanced bracket
delimiters (for the quotelike operators), and trailing modifiers are
all caught. For example, in:
extract_quotelike 'q # an octothorpe: \# (not the end of the q!) #'
extract_quotelike ' "You said, \"Use sed\"." '
extract_quotelike ' s{([A-Z]{1,8}\.[A-Z]{3})} /\L$1\E/; '
extract_quotelike ' tr/\\\/\\\\/\\\//ds; '
the full Perl quotelike operations are all extracted correctly.
Note too that, when using the /x modifier on a regex, any comment
containing the current pattern delimiter will cause the regex to be
immediately terminated. In other words:
'm /
(?i) # CASE INSENSITIVE
[a-z_] # LEADING ALPHABETIC/UNDERSCORE
[a-z0-9]* # FOLLOWED BY ANY NUMBER OF ALPHANUMERICS
/x'
will be extracted as if it were:
'm /
(?i) # CASE INSENSITIVE
[a-z_] # LEADING ALPHABETIC/'
This behaviour is identical to that of the actual compiler.
C<extract_quotelike> takes two arguments: the text to be processed and
a prefix to be matched at the very beginning of the text. If no prefix
is specified, optional whitespace is the default. If no text is given,
C<$_> is used.
In a list context, an array of 11 elements is returned. The elements are:
=over 4
=item [0]
the extracted quotelike substring (including trailing modifiers),
=item [1]
the remainder of the input text,
=item [2]
the prefix substring (if any),
=item [3]
the name of the quotelike operator (if any),
=item [4]
the left delimiter of the first block of the operation,
=item [5]
the text of the first block of the operation
(that is, the contents of
a quote, the regex of a match or substitution or the target list of a
translation),
=item [6]
the right delimiter of the first block of the operation,
=item [7]
the left delimiter of the second block of the operation
(that is, if it is a C<s>, C<tr>, or C<y>),
=item [8]
the text of the second block of the operation
(that is, the replacement of a substitution or the translation list
of a translation),
=item [9]
the right delimiter of the second block of the operation (if any),
=item [10]
the trailing modifiers on the operation (if any).
=back
For each of the fields marked "(if any)" the default value on success is
an empty string.
On failure, all of these values (except the remaining text) are C<undef>.
In a scalar context, C<extract_quotelike> returns just the complete substring
that matched a quotelike operation (or C<undef> on failure). In a scalar or
void context, the input text has the same substring (and any specified
prefix) removed.
Examples:
# Remove the first quotelike literal that appears in text
$quotelike = extract_quotelike($text,'.*?');
# Replace one or more leading whitespace-separated quotelike
# literals in $_ with "<QLL>"
do { $_ = join '<QLL>', (extract_quotelike)[2,1] } until $@;
# Isolate the search pattern in a quotelike operation from $text
($op,$pat) = (extract_quotelike $text)[3,5];
if ($op =~ /[ms]/)
{
print "search pattern: $pat\n";
}
else
{
print "$op is not a pattern matching operation\n";
}
=head2 C<extract_quotelike> and "here documents"
C<extract_quotelike> can successfully extract "here documents" from an input
string, but with an important caveat in list contexts.
Unlike other types of quote-like literals, a here document is rarely
a contiguous substring. For example, a typical piece of code using
here document might look like this:
<<'EOMSG' || die;
This is the message.
EOMSG
exit;
Given this as an input string in a scalar context, C<extract_quotelike>
would correctly return the string "<<'EOMSG'\nThis is the message.\nEOMSG",
leaving the string " || die;\nexit;" in the original variable. In other words,
the two separate pieces of the here document are successfully extracted and
concatenated.
In a list context, C<extract_quotelike> would return the list
=over 4
=item [0]
"<<'EOMSG'\nThis is the message.\nEOMSG\n" (i.e. the full extracted here document,
including fore and aft delimiters),
=item [1]
" || die;\nexit;" (i.e. the remainder of the input text, concatenated),
=item [2]
"" (i.e. the prefix substring -- trivial in this case),
=item [3]
"<<" (i.e. the "name" of the quotelike operator)
=item [4]
"'EOMSG'" (i.e. the left delimiter of the here document, including any quotes),
=item [5]
"This is the message.\n" (i.e. the text of the here document),
=item [6]
"EOMSG" (i.e. the right delimiter of the here document),
=item [7..10]
"" (a here document has no second left delimiter, second text, second right
delimiter, or trailing modifiers).
=back
However, the matching position of the input variable would be set to
"exit;" (i.e. I<after> the closing delimiter of the here document),
which would cause the earlier " || die;\nexit;" to be skipped in any
sequence of code fragment extractions.
To avoid this problem, when it encounters a here document whilst
extracting from a modifiable string, C<extract_quotelike> silently
rearranges the string to an equivalent piece of Perl:
<<'EOMSG'
This is the message.
EOMSG
|| die;
exit;
in which the here document I<is> contiguous. It still leaves the
matching position after the here document, but now the rest of the line
on which the here document starts is not skipped.
To prevent C<extract_quotelike> from mucking about with the input in this way
(this is the only case where a list-context C<extract_quotelike> does so),
you can pass the input variable as an interpolated literal:
$quotelike = extract_quotelike("$var");
=head2 C<extract_codeblock>
C<extract_codeblock> attempts to recognize and extract a balanced
bracket delimited substring that may contain unbalanced brackets
inside Perl quotes or quotelike operations. That is, C<extract_codeblock>
is like a combination of C<"extract_bracketed"> and
C<"extract_quotelike">.
C<extract_codeblock> takes the same initial three parameters as C<extract_bracketed>:
a text to process, a set of delimiter brackets to look for, and a prefix to
match first. It also takes an optional fourth parameter, which allows the
outermost delimiter brackets to be specified separately (see below).
Omitting the first argument (input text) means process C<$_> instead.
Omitting the second argument (delimiter brackets) indicates that only C<'{'> is to be used.
Omitting the third argument (prefix argument) implies optional whitespace at the start.
Omitting the fourth argument (outermost delimiter brackets) indicates that the
value of the second argument is to be used for the outermost delimiters.
Once the prefix and the outermost opening delimiter bracket have been
recognized, code blocks are extracted by stepping through the input text and
trying the following alternatives in sequence:
=over 4
=item 1.
Try and match a closing delimiter bracket. If the bracket was the same
species as the last opening bracket, return the substring to that
point. If the bracket was mismatched, return an error.
=item 2.
Try to match a quote or quotelike operator. If found, call
C<extract_quotelike> to eat it. If C<extract_quotelike> fails, return
the error it returned. Otherwise go back to step 1.
=item 3.
Try to match an opening delimiter bracket. If found, call
C<extract_codeblock> recursively to eat the embedded block. If the
recursive call fails, return an error. Otherwise, go back to step 1.
=item 4.
Unconditionally match a bareword or any other single character, and
then go back to step 1.
=back
Examples:
# Find a while loop in the text
if ($text =~ s/.*?while\s*\{/{/)
{
$loop = "while " . extract_codeblock($text);
}
# Remove the first round-bracketed list (which may include
# round- or curly-bracketed code blocks or quotelike operators)
extract_codeblock $text, "(){}", '[^(]*';
The ability to specify a different outermost delimiter bracket is useful
in some circumstances. For example, in the Parse::RecDescent module,
parser actions which are to be performed only on a successful parse
are specified using a C<E<lt>defer:...E<gt>> directive. For example:
sentence: subject verb object
<defer: {$::theVerb = $item{verb}} >
Parse::RecDescent uses C<extract_codeblock($text, '{}E<lt>E<gt>')> to extract the code
within the C<E<lt>defer:...E<gt>> directive, but there's a problem.
A deferred action like this:
<defer: {if ($count>10) {$count--}} >
will be incorrectly parsed as:
<defer: {if ($count>
because the "less than" operator is interpreted as a closing delimiter.
But, by extracting the directive using
S<C<extract_codeblock($text, '{}', undef, 'E<lt>E<gt>')>>
the '>' character is only treated as a delimiter at the outermost
level of the code block, so the directive is parsed correctly.
=head2 C<extract_multiple>
The C<extract_multiple> subroutine takes a string to be processed and a
list of extractors (subroutines or regular expressions) to apply to that string.
In an array context C<extract_multiple> returns an array of substrings
of the original string, as extracted by the specified extractors.
In a scalar context, C<extract_multiple> returns the first
substring successfully extracted from the original string. In both
scalar and void contexts the original string has the first successfully
extracted substring removed from it. In all contexts
C<extract_multiple> starts at the current C<pos> of the string, and
sets that C<pos> appropriately after it matches.
Hence, the aim of a call to C<extract_multiple> in a list context
is to split the processed string into as many non-overlapping fields as
possible, by repeatedly applying each of the specified extractors
to the remainder of the string. Thus C<extract_multiple> is
a generalized form of Perl's C<split> subroutine.
The subroutine takes up to four optional arguments:
=over 4
=item 1.
A string to be processed (C<$_> if the string is omitted or C<undef>)
=item 2.
A reference to a list of subroutine references and/or qr// objects and/or
literal strings and/or hash references, specifying the extractors
to be used to split the string. If this argument is omitted (or
C<undef>) the list:
[
sub { extract_variable($_[0], '') },
sub { extract_quotelike($_[0],'') },
sub { extract_codeblock($_[0],'{}','') },
]
is used.
=item 3.
A number specifying the maximum number of fields to return. If this
argument is omitted (or C<undef>), split continues as long as possible.
If the third argument is I<N>, then extraction continues until I<N> fields
have been successfully extracted, or until the string has been completely
processed.
Note that in scalar and void contexts the value of this argument is
automatically reset to 1 (under C<-w>, a warning is issued if the argument
has to be reset).
=item 4.
A value indicating whether unmatched substrings (see below) within the
text should be skipped or returned as fields. If the value is true,
such substrings are skipped. Otherwise, they are returned.
=back
The extraction process works by applying each extractor in
sequence to the text string.
If the extractor is a subroutine it is called in a list context and is
expected to return a list of a single element, namely the extracted
text. It may optionally also return two further arguments: a string
representing the text left after extraction (like $' for a pattern
match), and a string representing any prefix skipped before the
extraction (like $` in a pattern match). Note that this is designed
to facilitate the use of other Text::Balanced subroutines with
C<extract_multiple>. Note too that the value returned by an extractor
subroutine need not bear any relationship to the corresponding substring
of the original text (see examples below).
If the extractor is a precompiled regular expression or a string,
it is matched against the text in a scalar context with a leading
'\G' and the gc modifiers enabled. The extracted value is either
$1 if that variable is defined after the match, or else the
complete match (i.e. $&).
If the extractor is a hash reference, it must contain exactly one element.
The value of that element is one of the
above extractor types (subroutine reference, regular expression, or string).
The key of that element is the name of a class into which the successful
return value of the extractor will be blessed.
If an extractor returns a defined value, that value is immediately
treated as the next extracted field and pushed onto the list of fields.
If the extractor was specified in a hash reference, the field is also
blessed into the appropriate class,
If the extractor fails to match (in the case of a regex extractor), or returns an empty list or an undefined value (in the case of a subroutine extractor), it is
assumed to have failed to extract.
If none of the extractor subroutines succeeds, then one
character is extracted from the start of the text and the extraction
subroutines reapplied. Characters which are thus removed are accumulated and
eventually become the next field (unless the fourth argument is true, in which
case they are discarded).
For example, the following extracts substrings that are valid Perl variables:
@fields = extract_multiple($text,
[ sub { extract_variable($_[0]) } ],
undef, 1);
This example separates a text into fields which are quote delimited,
curly bracketed, and anything else. The delimited and bracketed
parts are also blessed to identify them (the "anything else" is unblessed):
@fields = extract_multiple($text,
[
{ Delim => sub { extract_delimited($_[0],q{'"}) } },
{ Brack => sub { extract_bracketed($_[0],'{}') } },
]);
This call extracts the next single substring that is a valid Perl quotelike
operator (and removes it from $text):
$quotelike = extract_multiple($text,
[
sub { extract_quotelike($_[0]) },
], undef, 1);
Finally, here is yet another way to do comma-separated value parsing:
@fields = extract_multiple($csv_text,
[
sub { extract_delimited($_[0],q{'"}) },
qr/([^,]+)(.*)/,
],
undef,1);
The list in the second argument means:
I<"Try and extract a ' or " delimited string, otherwise extract anything up to a comma...">.
The undef third argument means:
I<"...as many times as possible...">,
and the true value in the fourth argument means
I<"...discarding anything else that appears (i.e. the commas)">.
If you wanted the commas preserved as separate fields (i.e. like split
does if your split pattern has capturing parentheses), you would
just make the last parameter undefined (or remove it).
=head2 C<gen_delimited_pat>
The C<gen_delimited_pat> subroutine takes a single (string) argument and
builds a Friedl-style optimized regex that matches a string delimited
by any one of the characters in the single argument. For example:
gen_delimited_pat(q{'"})
returns the regex:
(?:\"(?:\\\"|(?!\").)*\"|\'(?:\\\'|(?!\').)*\')
Note that the specified delimiters are automatically quotemeta'd.
A typical use of C<gen_delimited_pat> would be to build special purpose tags
for C<extract_tagged>. For example, to properly ignore "empty" XML elements
(which might contain quoted strings):
my $empty_tag = '<(' . gen_delimited_pat(q{'"}) . '|.)+/>';
extract_tagged($text, undef, undef, undef, {ignore => [$empty_tag]} );
C<gen_delimited_pat> may also be called with an optional second argument,
which specifies the "escape" character(s) to be used for each delimiter.
For example to match a Pascal-style string (where ' is the delimiter
and '' is a literal ' within the string):
gen_delimited_pat(q{'},q{'});
Different escape characters can be specified for different delimiters.
For example, to specify that '/' is the escape for single quotes
and '%' is the escape for double quotes:
gen_delimited_pat(q{'"},q{/%});
If more delimiters than escape chars are specified, the last escape char
is used for the remaining delimiters.
If no escape char is specified for a given specified delimiter, '\' is used.
=head2 C<delimited_pat>
Note that C<gen_delimited_pat> was previously called C<delimited_pat>.
That name may still be used, but is now deprecated.
=head1 DIAGNOSTICS
In a list context, all the functions return C<(undef,$original_text)>
on failure. In a scalar context, failure is indicated by returning C<undef>
(in this case the input text is not modified in any way).
In addition, on failure in I<any> context, the C<$@> variable is set.
Accessing C<$@-E<gt>{error}> returns one of the error diagnostics listed
below.
Accessing C<$@-E<gt>{pos}> returns the offset into the original string at
which the error was detected (although not necessarily where it occurred!)
Printing C<$@> directly produces the error message, with the offset appended.
On success, the C<$@> variable is guaranteed to be C<undef>.
The available diagnostics are:
=over 4
=item C<Did not find a suitable bracket: "%s">
The delimiter provided to C<extract_bracketed> was not one of
C<'()[]E<lt>E<gt>{}'>.
=item C<Did not find prefix: /%s/>
A non-optional prefix was specified but wasn't found at the start of the text.
=item C<Did not find opening bracket after prefix: "%s">
C<extract_bracketed> or C<extract_codeblock> was expecting a
particular kind of bracket at the start of the text, and didn't find it.
=item C<No quotelike operator found after prefix: "%s">
C<extract_quotelike> didn't find one of the quotelike operators C<q>,
C<qq>, C<qw>, C<qx>, C<s>, C<tr> or C<y> at the start of the substring
it was extracting.
=item C<Unmatched closing bracket: "%c">
C<extract_bracketed>, C<extract_quotelike> or C<extract_codeblock> encountered
a closing bracket where none was expected.
=item C<Unmatched opening bracket(s): "%s">
C<extract_bracketed>, C<extract_quotelike> or C<extract_codeblock> ran
out of characters in the text before closing one or more levels of nested
brackets.
=item C<Unmatched embedded quote (%s)>
C<extract_bracketed> attempted to match an embedded quoted substring, but
failed to find a closing quote to match it.
=item C<Did not find closing delimiter to match '%s'>
C<extract_quotelike> was unable to find a closing delimiter to match the
one that opened the quote-like operation.
=item C<Mismatched closing bracket: expected "%c" but found "%s">
C<extract_bracketed>, C<extract_quotelike> or C<extract_codeblock> found
a valid bracket delimiter, but it was the wrong species. This usually
indicates a nesting error, but may indicate incorrect quoting or escaping.
=item C<No block delimiter found after quotelike "%s">
C<extract_quotelike> or C<extract_codeblock> found one of the
quotelike operators C<q>, C<qq>, C<qw>, C<qx>, C<s>, C<tr> or C<y>
without a suitable block after it.
=item C<Did not find leading dereferencer>
C<extract_variable> was expecting one of '$', '@', or '%' at the start of
a variable, but didn't find any of them.
=item C<Bad identifier after dereferencer>
C<extract_variable> found a '$', '@', or '%' indicating a variable, but that
character was not followed by a legal Perl identifier.
=item C<Did not find expected opening bracket at %s>
C<extract_codeblock> failed to find any of the outermost opening brackets
that were specified.
=item C<Improperly nested codeblock at %s>
A nested code block was found that started with a delimiter that was specified
as being only to be used as an outermost bracket.
=item C<Missing second block for quotelike "%s">
C<extract_codeblock> or C<extract_quotelike> found one of the
quotelike operators C<s>, C<tr> or C<y> followed by only one block.
=item C<No match found for opening bracket>
C<extract_codeblock> failed to find a closing bracket to match the outermost
opening bracket.
=item C<Did not find opening tag: /%s/>
C<extract_tagged> did not find a suitable opening tag (after any specified
prefix was removed).
=item C<Unable to construct closing tag to match: /%s/>
C<extract_tagged> matched the specified opening tag and tried to
modify the matched text to produce a matching closing tag (because
none was specified). It failed to generate the closing tag, almost
certainly because the opening tag did not start with a
bracket of some kind.
=item C<Found invalid nested tag: %s>
C<extract_tagged> found a nested tag that appeared in the "reject" list
(and the failure mode was not "MAX" or "PARA").
=item C<Found unbalanced nested tag: %s>
C<extract_tagged> found a nested opening tag that was not matched by a
corresponding nested closing tag (and the failure mode was not "MAX" or "PARA").
=item C<Did not find closing tag>
C<extract_tagged> reached the end of the text without finding a closing tag
to match the original opening tag (and the failure mode was not
"MAX" or "PARA").
=back
=head1 AUTHOR
Damian Conway (damian@conway.org)
=head1 BUGS AND IRRITATIONS
There are undoubtedly serious bugs lurking somewhere in this code, if
only because parts of it give the impression of understanding a great deal
more about Perl than they really do.
Bug reports and other feedback are most welcome.
=head1 COPYRIGHT
Copyright (c) 1997-2001, Damian Conway. All Rights Reserved.
This module is free software. It may be used, redistributed
and/or modified under the same terms as Perl itself.
| leighpauls/k2cro4 | third_party/cygwin/lib/perl5/5.10/Text/Balanced.pm | Perl | bsd-3-clause | 67,673 |
use strict;
use warnings;
use Carp;

package MatrixResult;

# MatrixResult - a simple value object describing the outcome of a
# matrix-sum computation.
#
# Constructor. Takes a single hashref and copies the four recognized
# fields into the new object:
#   matrix  - the matrix data (structure defined by the caller)
#   sum     - the computed/target sum
#   side    - the matrix side length
#   success - truthy flag indicating whether the computation succeeded
# Returns a blessed MatrixResult instance.
sub new {
    my ($class, $args) = @_;
    my %fields;
    $fields{$_} = $args->{$_} for qw(matrix sum side success);
    return bless \%fields, $class;
}

1;
package Genesis::Legacy;
use strict;
use warnings;
use Genesis;
use Genesis::UI;
use JSON::PP qw/encode_json decode_json/;
# True (1) when two array refs hold identical element lists, false (0)
# otherwise.  Dies when either argument is not an unblessed array ref.
# Per-element lengths are compared before the "\0"-joined contents so
# that strings cannot collide across element boundaries.
# (Locals renamed away from $a/$b, which shadow the sort globals.)
sub same {
    my ($left, $right) = @_;
    die "Arguments are not arrays"
        unless ref($left) eq 'ARRAY' && ref($right) eq 'ARRAY';

    # Different element counts can never be the same list.
    return 0 if @$left != @$right;

    # Compare the length signature, then the actual contents.
    my ($sig_l, $sig_r) = map { join(',', map { length } @$_) } ($left, $right);
    return 0 if $sig_l ne $sig_r;

    my ($str_l, $str_r) = map { join("\0", @$_) } ($left, $right);
    return 0 if $str_l ne $str_r;

    return 1;
}
# Validate a requested feature/subkit list against this kit's metadata.
# Dies with a user-facing message when a requested subkit does not exist
# on disk, or when a "choices" group's pick constraints (min/max number
# of selections) are violated.  Returns nothing on success.
sub validate_features {
my ($self, @features) = @_;
my $id = $self->id;
# NOTE(review): $k and $v are defaulted below but never used afterwards
# in this sub -- looks like leftover code; confirm before removing.
my ($k, $v) = ($self->{name}, $self->{version});
$k ||= "dev";
$v = "latest" unless defined $v;
#### ----
# Older kits call these "subkits"; newer ones call them "features".
my $label = defined($self->metadata->{subkits}) ? "subkit" : "feature";
for my $sk (@features) {
die "You specified a feature without a name\n" unless $sk;
die "No subkit '$sk' found in kit $id.\n"
if $label eq "subkit" && ! -d $self->path("subkits/$sk");
}
my %requested_features = map { $_ => 1 } @features;
for my $sk (@{$self->metadata->{"${label}s"}}) {
if ($sk->{choices}) {
my $matches = 0;
# Default constraint: exactly one selection from each choice group.
my ($min_matches,$max_matches) = (1,1);
if (defined $sk->{pick}) {
# "pick" may be "N", "N-M", "N-" or "-M".
if ($sk->{pick} =~ /^(?:(\d+)|(\d+)?-(\d+)?)$/) {
($min_matches,$max_matches) = (defined $1) ? ($1,$1) : ($2, $3);
} else {
$self->kit_bug("$sk->{type} pick is invalid");
}
}
my @choices;
for my $choice (@{$sk->{choices}}) {
push @choices, $choice->{$label} if defined $choice && defined $choice->{$label};
# A nameless choice means "none" is acceptable, so drop the minimum.
if (! defined $choice->{$label}){
$min_matches = 0;
} elsif ($requested_features{$choice->{$label}}) {
$matches++;
}
}
my $choices = join(", ", map { "'$_'" } @choices);
if ($max_matches && $matches > $max_matches) {
die "You selected too many ${label}s for your $sk->{type}. Should be only one of $choices\n";
}
if ($min_matches && $matches < $min_matches) {
die "You must select a $label to provide your $sk->{type}. Should be one of $choices\n";
}
}
}
}
# Interactively collect parameter values for a legacy kit.  $self is a
# Genesis::Kit object; %opts must include env (a Genesis::Env) and
# features (array ref of selected feature names).  Prompted secrets are
# written straight into Vault; plain parameters are accumulated and
# returned as an array ref of { comment, example, values, default }
# hashes for new_environment() to serialize into the env YAML.
sub process_params {
# $self is a Genesis::Kit object
my ($self, %opts) = @_;
my $env = $opts{env} or die "process_params() was not given an 'env' option.\n";
# for legacy!
$opts{kit} = $self->name;
$opts{version} = $self->version;
$opts{params} = $self->metadata->{params} || {};
$opts{secrets_base} = $env->secrets_base;
my @answers;
# Values usable in ${...} references inside param metadata; answers are
# added to this table as they are collected, so later questions can
# reference earlier answers.
my $resolveable_params = {
"params.vault_prefix" => $env->secrets_slug, # for backwards compatibility
"params.vault" => $env->secrets_slug,
"params.env" => $env->name,
};
for my $feature ("base", @{$opts{features}}) {
next unless defined $opts{params}{$feature} && @{$opts{params}{$feature}};
my $defaults = load_yaml_file($self->path($feature eq "base" ? "base/params.yml"
: "subkits/$feature/params.yml"));
for my $q (@{$opts{params}{$feature}}) {
my $answer;
my $vault_path;
# Expand any values from default and examples for vault prefix
foreach (qw(description ask default example validate err_msg)) {
$q->{$_} =~ s/\$\{([^}]*)\}/resolve_params_ref($1,$resolveable_params)/ge if defined($q->{$_});
}
# Collapse "a/b/../c" path segments in vault-path defaults/examples.
if (defined($q->{validate}) && $q->{validate} eq 'vault_path') {
if (defined($q->{default})) {
while ($q->{default} =~ s#/[^/]+/\.\./#/#) {};
}
if (defined($q->{example})) {
while ($q->{example} =~ s#/[^/]+/\.\./#/#) {};
}
}
if ($q->{ask}) {
$q->{type} ||= "string";
print "\n";
# A question either targets a plain param (stored in the YAML) or a
# vault-backed secret (stored directly in Vault, never returned).
if ($q->{param}) {
print csprintf("#y{Required parameter:} #W{$q->{param}}\n\n");
} else {
$vault_path = "$opts{secrets_base}$q->{vault}";
print csprintf("#y{Secret data required} -- will be stored in Vault under #W{$vault_path}\n\n");
}
chomp $q->{description};
print "$q->{description}\n";
print "(e.g. $q->{example})\n" if defined $q->{example};
if ($q->{param}) {
# Dispatch on the declared question type to the matching prompt.
my $type = $q->{type};
if (defined($q->{validate}) && $q->{validate} =~ /^vault_path(_and_key)?$/ && ! vaulted()) {
print csprintf("#y{Warning:} Cannot validate vault paths when --no-secrets option specified");
}
if ($type eq 'boolean') {
$answer = prompt_for_boolean($q->{ask},$q->{default});
} elsif ($type eq 'string') {
$answer = prompt_for_line($q->{ask},$q->{label},$q->{default},$q->{validate},$q->{err_msg});
} elsif ($type =~ m/^(block|multi-?line)$/) {
$answer = prompt_for_block($q->{ask},$q->{label},$q->{default});
} elsif ($type eq 'list') {
$answer = prompt_for_list('line',$q->{ask},$q->{label},$q->{min_count},$q->{max_count},$q->{validate},$q->{err_msg});
} elsif ($type =~ m/^(block|multi-?line)-list$/) {
$answer = prompt_for_list('block',$q->{ask},$q->{label},$q->{min_count},$q->{max_count});
} elsif ($type =~ m/^(multi-)?choice$/) {
# validate may be a list of [value,label] pairs or bare values.
my ($choices,$labels)=([],[]);
if (ref($q->{validate}) eq 'ARRAY') {
foreach (@{$q->{validate}}) {
if (ref($_) eq 'ARRAY') {
push @$choices, $_->[0];
push @$labels, $_->[1];
} else {
push @$choices, $_;
push @$labels, undef;
}
}
}
if ($type eq 'choice') {
$answer = prompt_for_choice($q->{ask},$choices,$q->{default},$labels,$q->{err_msg});
} else {
$answer = prompt_for_choices($q->{ask},$choices,$q->{min_count},$q->{max_count},$labels,$q->{err_msg});
}
} else {
$self->kit_bug("Unsupported type '$type' for parameter '$q->{param}'!!");
}
print "\n";
} else {
# Secret: prompt and push the value straight into Vault, then skip
# the answer-accumulation below (nothing goes into the env YAML).
my ($path, $key) = split /:/, $vault_path;
if ($q->{type} =~ /^(boolean|string)$/) {
$env->vault->query(
{ interactive => 1, onfailure => "Failed to save data to $vault_path in Vault" },
'prompt', $q->{ask}, '--', ($q->{echo} ? "ask" : "set"), $path, $key
);
} elsif ($q->{type} eq "multi-line") {
$answer = prompt_for_block($q->{ask});
my $tmpdir = workdir;
open my $fh, ">", "$tmpdir/param" or die "Could not write to $tmpdir/param: $!\n";
print $fh $answer;
close $fh;
$env->vault->query(
{ onfailure => "Failed to save data to $vault_path in Vault" },
'set', $path, "${key}\@${tmpdir}/param"
);
} else {
$self->kit_bug("Unsupported parameter type '$q->{type}' for $q->{vault}!!");
}
print "\n";
next;
}
}
my @values;
my $is_default = 0;
if (! $q->{ask}) {
# Non-asked params take their values from the kit's params.yml defaults.
$is_default = 1;
if (defined $q->{param}) {
$q->{params} = [$q->{param}];
}
for my $p (@{$q->{params}}) {
# Should we throw an error here if the default value is
# a spruce operator like (( param ))?
push @values, { $p => $defaults->{params}{$p} };
$resolveable_params->{"params.$p"} = $defaults->{params}{$p};
}
} else {
push @values, { $q->{param} => $answer };
$resolveable_params->{"params.$q->{param}"} = $answer;
}
push @answers, {
comment => $q->{description},
example => $q->{example},
values => \@values,
default => $is_default,
};
}
}
return \@answers;
}
# Resolve one ${...} reference key against the environment.  A key
# prefixed with "maybe:" is optional and falls back to the empty
# string; any other key must resolve to a defined value, otherwise we
# die with guidance to define it in the environment YAML.
sub dereference_param {
    my ($env, $key) = @_;

    my $fallback;
    $fallback = "" if $key =~ s/^maybe://;

    my $value = $env->lookup($key, $fallback);
    return $value if defined $value;

    die "Unable to resolve '$key' for ".$env->name.". This must be defined in the environment YAML.\n";
}
# Expand every ${...} placeholder in $cmd via dereference_param() and
# return the expanded string; the caller's original is not modified.
sub dereference_params {
    my ($cmd, $env) = @_;
    (my $expanded = $cmd) =~ s/\$\{(.*?)\}/dereference_param($env, $1)/ge;
    return $expanded;
}
# Run the kit's optional "params" hook.  The collected params are fed to
# the hook as JSON via $dir/in; the hook writes the (possibly modified)
# params as JSON to $dir/out, which is decoded and returned.  Returns
# $params unchanged when the kit ships no hook.  Dies when the user
# cancels the hook (rc 130), the hook fails, or its output is missing.
sub run_param_hook {
	my ($env,$params,@features) = @_;

	my $hook = $env->kit->path("hooks/params");
	return $params unless -f $hook;

	# Hooks extracted without the executable bit still need to run.
	chmod(0755,$hook) unless -x $hook;

	my $dir = workdir;
	my $infile = "$dir/in";
	open my $ifh, ">", $infile or die "Unable to write to $infile: $!\n";
	print $ifh encode_json($params);
	# Check close too -- buffered write errors only surface here.
	close $ifh or die "Unable to write to $infile: $!\n";

	my $rc = run(
		{interactive => 1, env => {
			GENESIS => $ENV{GENESIS_CALLBACK_BIN},
			GENESIS_ENVIRONMENT_NAME => $env->name,
			GENESIS_VAULT_PREFIX => $env->secrets_slug }},
		$hook, "$dir/in", "$dir/out", @features
	);
	die "\nNew environment creation cancelled.\n" if $rc == 130;
	die "\nError running params hook for ".$env->kit->id.". Contact your kit author for a bugfix.\n" if $rc;

	# Previously this open was unchecked, so a hook that exited 0 without
	# writing $dir/out produced a confusing downstream JSON error.
	open my $ofh, "<", "$dir/out"
		or die "Unable to read params hook output $dir/out: $!\n";
	my @json = <$ofh>;
	close $ofh;

	# FIXME: get a better error message when json fails to load
	return decode_json(join("\n",@json));
}
# Create a brand-new environment YAML file for this Genesis::Env.
# Prompts for features and params, runs the kit's params hook, then
# writes "<envname>.yml" containing the kit block, genesis block, and
# params block.  For hyphenated (multi-level) environment names, an
# existing top-level YAML is consulted so redundant kit info is omitted.
sub new_environment {
my ($self) = @_;
# Keep env-var changes scoped to this sub.
local %ENV = %ENV;
my %env = $self->get_environment_variables('new');
$ENV{$_} = $env{$_} for (keys %env);
my ($k, $kit, $version) = ($self->{kit}, $self->{kit}->name, $self->{kit}->version);
my $meta = $k->metadata;
$k->run_hook('prereqs', env => $self) if $k->has_hook('prereqs');
my @features = prompt_for_env_features($self);
my $params = process_params($k,
env => $self,
features => \@features,
);
$params = run_param_hook($self, $params, @features);
## create the environment file
my $file = "$self->{name}.yml";
my ($parent, %existing_info);
if ($self->{name} =~ m/-/) { # multi-level environment; make/use a top-level
($parent = $file) =~ s/-.*\.yml/.yml/;
if (-e $parent) {
explain("Using existing #C{$parent} file as base config.");
%existing_info = %{load_yaml_file($parent)};
}
}
open my $fh, ">", $file or die "Couldn't write to $file: $!";
print $fh "---";
# Only emit a kit: block when something differs from the parent file.
print $fh "\nkit:\n" if (
!%existing_info ||
$existing_info{kit}{name} ne $kit ||
$existing_info{kit}{version} ne $version ||
!same($existing_info{kit}{features}||[],\@features)
);
if (!%existing_info || $existing_info{kit}{name} ne $kit) {
print $fh " name: $kit\n";
error "#y{WARNING:} $parent specifies a different kit name ($existing_info{kit}{name})"
if %existing_info;
}
if (!%existing_info || $existing_info{kit}{version} ne $version) {
print $fh " version: $version\n";
error "#y{WARNING:} $parent specifies a different kit version ($existing_info{kit}{version})"
if %existing_info;
}
print $fh " features:\n";
print $fh " - (( replace ))\n";
print $fh " - $_\n" foreach (@features);
# genesis block
my $genesis_out = '';
$genesis_out .= sprintf " env: %s\n",$self->name;
$genesis_out .= sprintf " bosh_env: %s\n", $ENV{BOSH_ALIAS}
if $ENV{BOSH_ALIAS} && ($ENV{BOSH_ALIAS} ne $ENV{GENESIS_ENVIRONMENT});
$genesis_out .= sprintf " min_version: %s\n",$ENV{GENESIS_MIN_VERSION}
if $ENV{GENESIS_MIN_VERSION};
$genesis_out .= sprintf " secrets_path: %s\n",$ENV{GENESIS_SECRETS_SLUG}
if $ENV{GENESIS_SECRETS_SLUG_OVERRIDE};
$genesis_out .= sprintf " root_ca_path: %s\n",$ENV{GENESIS_ENV_ROOT_CA_PATH}
if $ENV{GENESIS_ENV_ROOT_CA_PATH};
$genesis_out .= sprintf " secrets_mount: %s\n",$ENV{GENESIS_SECRETS_MOUNT}
if $ENV{GENESIS_SECRETS_MOUNT_OVERRIDE};
$genesis_out .= sprintf " exodus_mount: %s\n",$ENV{GENESIS_EXODUS_MOUNT}
if $ENV{GENESIS_EXODUS_MOUNT_OVERRIDE};
$genesis_out .= sprintf " ci_mount: %s\n",$ENV{GENESIS_CI_MOUNT}
if $ENV{GENESIS_CI_MOUNT_OVERRIDE};
$genesis_out .= sprintf " credhub_exodus_env: %s\n",$ENV{GENESIS_CREDHUB_EXODUS_SOURCE_OVERRIDE}
if $ENV{GENESIS_CREDHUB_EXODUS_SOURCE_OVERRIDE};
# NOTE(review): this picks the shortest ":<spaces>" run and rewrites
# every occurrence of it to ": " -- presumably normalizing key/value
# padding across the block; confirm intent before touching.
my $overpad = [sort {length($a) <=> length($b)} ($genesis_out =~ /:\s+/g)]->[0];
$genesis_out =~ s/$overpad/: /g;
print $fh "\ngenesis:\n$genesis_out";
my $params_out = '';
for my $param (@$params) {
$params_out .= "\n";
my $indent = " # ";
if (defined $param->{comment}) {
for my $line (split /\n/, $param->{comment}) {
$params_out .= "${indent}$line\n";
}
}
if (defined $param->{example}) {
$params_out .= "${indent}(e.g. $param->{example})\n";
}
# Default-valued params are emitted commented-out.
$indent = $param->{default} ? " #" : " ";
for my $val (@{$param->{values}}) {
my $k = (keys(%$val))[0];
# if the value is a spruce operator, we know it's a string, and don't need fancy encoding of the value
# this helps us not run into issues resolving the operator
my $v = $val->{$k};
if (defined $v && ! ref($v) && $v =~ m/^\(\(.*\)\)$/) {
$params_out .= "${indent}$k: $v\n";
next;
}
# Round-trip the value through `spruce merge` to get YAML formatting.
my $tmpdir = workdir;
open my $tmpfile, ">", "$tmpdir/value_formatting";
print $tmpfile encode_json($val);
close $tmpfile;
open my $spruce, "-|", "spruce merge $tmpdir/value_formatting";
for my $line (<$spruce>) {
chomp $line;
next unless $line;
next if $line eq "---";
$params_out .= "${indent}$line\n";
}
close $spruce;
die "Unable to convert JSON to spruce-compatible YAML. This is a bug\n"
if $? >> 8;
}
}
$params_out ||= " {}\n";
print $fh "\nparams:$params_out";
close $fh;
explain("Created #C{$file} environment file\n");
}
# Interactively build the feature/subkit list for a new environment by
# walking the kit metadata's features/subkits entries.  Returns the
# validated list of selected feature names.
sub prompt_for_env_features {
my ($self) = @_;
my ($kit, $version) = ($self->{kit}{name}, $self->{kit}{version});
my $meta = $self->{kit}->metadata;
my @features;
my $features_meta = $meta->{features} || $meta->{subkits} || [];
# NOTE(review): scalar assigned into an array, and @meta_key is never
# used below -- appears to be dead code; confirm before removing.
my @meta_key = (defined $meta->{features}) ? 'feature' : 'subkit';
foreach my $feature (@$features_meta) {
my $prompt = $feature->{prompt}."\n";
if (exists $feature->{choices}) {
my (@choices,@labels,$default);
foreach (@{$feature->{choices}}) {
push @choices, $_->{feature} || $_->{subkit};
push @labels, $_->{label};
$default = ($_->{feature} || $_->{subkit}) if $_->{default} && $_->{default} =~ /^(y(es)?|t(rue)?|1)$/i;
}
if (exists $feature->{pick}) {
$feature->{pick} =~ /^\d+(-\d+)?$/
or $self->kit_bug("$feature->{type} pick invalid!!");
my ($min, $max) = ($feature->{pick} =~ /-/)
? split('-',$feature->{pick})
: ($feature->{pick},$feature->{pick});
# NOTE(review): grep in scalar context yields a COUNT, so
# $selections holds a number and @$selections on the next line
# would be a symbolic deref (fatal under strict refs).  Compare
# with the list-context grep used in the else branch below --
# this looks like a bug; confirm prompt_for_choices' return
# convention before fixing.
my $selections = grep {$_} prompt_for_choices($prompt,\@choices,$min,$max,\@labels);
push @features, @$selections;
} else {
push @features, grep {$_} (prompt_for_choice($prompt,\@choices,$default,\@labels));
}
} else {
push(@features, ($feature->{feature} || $feature->{subkit})) if prompt_for_boolean($prompt,$feature->{default});
}
}
# NOTE(review): checks for a hook named 'subkits' but runs 'subkit' --
# the name mismatch is suspicious; verify against Genesis::Kit hooks.
if ($self->{kit}->has_hook('subkits')) {
@features = $self->{kit}->run_hook('subkit', features => \@features);
}
Genesis::Legacy::validate_features($self->{kit}, @features);
return @features;
}
# Look $key up in the %$references table, dying with kit-author guidance
# when the key was never defined.  An undef value is returned as-is --
# only a missing key is fatal.
sub resolve_params_ref {
    my ($key, $references) = @_;
    return $references->{$key} if exists $references->{$key};
    die("\$\{$key\} referenced but not found -- perhaps it hasn't been defined yet. Contact your Kit author for a bugfix.\n");
}
1;
| starkandwayne/genesis | lib/Genesis/Legacy.pm | Perl | mit | 14,458 |
package OpenCloset::Donation::Controller::Clothes;
use Mojo::Base 'Mojolicious::Controller';
use Data::Pageset;
use List::Util qw/uniq/;
use Path::Tiny;
use Try::Tiny;
use OpenCloset::Constants::Category;
use OpenCloset::Constants::Status qw/$REPAIR $RECYCLE_1 $RECYCLE_2 $RECYCLE_3 $UNRECYCLE/;
has schema => sub { shift->app->schema };
=head1 METHODS
=head2 add
# clothes.add
GET /users/:id/donations/:donation_id/clothes/new
=cut
# GET /users/:id/donations/:donation_id/clothes/new (clothes.add)
# Render the "register clothes for this donation" page: the donation
# form, the donation's clothes grouped by recycle status, an SMS body
# preview summarizing those groups, and all known tags.
sub add {
my $self = shift;
my $user = $self->stash('user');
my $donation = $self->stash('donation');
my $form = $donation->donation_forms->next;
# Clothes not yet classified into a final recycle/unrecycle status.
my $available = $donation->clothes( { status_id => { 'NOT IN' => [ $RECYCLE_1, $RECYCLE_2, $RECYCLE_3, $UNRECYCLE ] } } );
my $recycle1 = $donation->clothes( { status_id => $RECYCLE_1 } );
my $recycle2 = $donation->clothes( { status_id => $RECYCLE_2 } );
my $recycle3 = $donation->clothes( { status_id => $RECYCLE_3 } );
my $unrecycle = $donation->clothes( { status_id => $UNRECYCLE } );
my $all_clothes = $donation->clothes;
# Pre-render the SMS text so the page can offer it for sending.
my $msg = $self->render_to_string(
'sms/clothes_info',
format => 'txt',
all => $all_clothes,
available => $available,
recycle1 => $recycle1,
recycle2 => $recycle2,
recycle3 => $recycle3,
unrecycle => $unrecycle
);
chomp $msg;
my @tags = $self->schema->resultset('Tag')->search->all;
$self->render(
form => $form,
available => $available,
recycle1 => $recycle1,
recycle2 => $recycle2,
recycle3 => $recycle3,
unrecycle => $unrecycle,
sms_body => $msg,
tags => \@tags,
);
}
=head2 create
# clothes.create
POST /users/:id/donations/:donation_id/clothes
=cut
# POST /users/:id/donations/:donation_id/clothes (clothes.create)
# Validate the submitted clothes data and create one or more clothes
# rows for the donation inside a transaction.  "discard" mode generates
# codes automatically and may create `quantity` identical rows; normal
# mode creates a single row and queues an optional photo upload.
# Responds 400 on validation failure, 500 on transaction failure,
# otherwise redirects back to clothes.add.
sub create {
my $self = shift;
my $user = $self->stash('user');
my $donation = $self->stash('donation');
my @categories = @OpenCloset::Constants::Category::ALL;
my $v = $self->validation;
$v->required('discard');
my $discard = $v->param('discard');
# Discarded clothes get auto-generated codes, so code/gender are optional.
if ($discard) {
$v->optional('code');
$v->optional('gender');
}
else {
$v->required('code')->like(qr/^[a-zA-Z0-9]{4,5}$/);
$v->required('gender')->in(qw/male female unisex/);
}
$v->required('status-id');
$v->required('category')->in(@categories);
$v->optional('color');
$v->optional('photo')->upload;
## TODO: per-category size validation
$v->optional('neck')->size( 2, 3 );
$v->optional('bust')->size( 2, 3 );
$v->optional('waist')->size( 2, 3 );
$v->optional('hip')->size( 2, 3 );
$v->optional('topbelly')->size( 2, 3 );
$v->optional('belly')->size( 2, 3 );
$v->optional('arm')->size( 1, 3 );
$v->optional('thigh')->size( 2, 3 );
$v->optional('length')->size( 2, 3 );
$v->optional('foot')->size( 3, 3 );
$v->optional('cuff')->like(qr/^\d{1,3}(\.)?(\d{1,2})?$/);
$v->optional('comment');
$v->optional('quantity')->like(qr/^\d+$/);
$v->optional('tags');
if ( $v->has_error ) {
my $failed = $v->failed;
return $self->error( 400, 'Parameter Validation Failed: ' . join( ', ', @$failed ) );
}
my $status_id = $v->param('status-id');
my $category = $v->param('category');
my $code = $v->param('code') || '';
my $gender = $v->param('gender');
my $color = $v->param('color');
my $neck = $v->param('neck');
my $bust = $v->param('bust');
my $waist = $v->param('waist');
my $hip = $v->param('hip');
my $topbelly = $v->param('topbelly');
my $belly = $v->param('belly');
my $arm = $v->param('arm');
my $thigh = $v->param('thigh');
my $cuff = $v->param('cuff');
# Shoes use "foot" instead of "length"; whichever is present wins.
my $length = $v->param('length') || $v->param('foot');
my $comment = $v->param('comment');
my $quantity = $v->param('quantity');
my $tags = $v->every_param('tags');
# Codes are stored zero-padded to 5 characters.
if ( $self->schema->resultset('Clothes')->find( { code => sprintf( '%05s', $code ) } ) ) {
return $self->error( 400, "Duplicate clothes code: $code" );
}
my $input = {
donation_id => $donation->id,
status_id => $status_id,
code => $code,
neck => $neck,
bust => $bust,
waist => $waist,
hip => $hip,
topbelly => $topbelly,
belly => $belly,
arm => $arm,
thigh => $thigh,
length => $length,
cuff => $cuff,
color => $color,
gender => $gender,
category => $category,
price => $OpenCloset::Constants::Category::PRICE{$category},
comment => $comment
};
if ( $discard && $quantity ) {
## transaction
# Create $quantity identical rows; roll everything back on any failure.
my $guard = $self->schema->txn_scope_guard;
my ( $success, $error ) = try {
for ( 1 .. $quantity ) {
my $success = $self->_create_clothes( $donation, $discard, $input, $tags );
die "Failed to create clothes($quantity)" unless $success;
}
$guard->commit;
return 1;
}
catch {
my $err = $_;
$self->log->error("Transaction error: clothes.create");
return ( undef, $err );
};
return $self->error( 500, $error ) unless $success;
}
else {
my $guard = $self->schema->txn_scope_guard;
my ( $success, $error ) = try {
my $success = $self->_create_clothes( $donation, $discard, $input, $tags );
die "Failed to create a clothes" unless $success;
$guard->commit;
return 1;
}
catch {
my $err = $_;
$self->log->error("Transaction error: clothes.create");
return ( undef, $err );
};
return $self->error( 500, $error ) unless $success;
## upload photo
# NOTE(review): photo upload happens only on this (non-quantity)
# branch, and the job is enqueued with the submitted $code -- for
# discard mode the actual code is generated in _create_clothes, so
# the photo may be keyed to an empty code; confirm intended.
my $photo = $v->param('photo');
if ( $photo->size ) {
my $temp = Path::Tiny->tempfile( UNLINK => 0, DIR => './db' );
$photo->move_to("$temp");
$self->minion->enqueue( upload_clothes_photo => [ $code, $temp ] );
}
}
$self->redirect_to('clothes.add');
}
=head2 repair_list
# repair_clothes
GET /clothes/repair
=cut
# GET /clothes/repair (repair_clothes)
# Paginated list of clothes in/for repair.  Three query modes:
#   ?q=CODE          - look up one code; codes matching /^0[JPK]/ are
#                      remembered in the session and searched as a set
#   ?alteration_at=D - clothes whose repair is scheduled for date D
#   (none)           - pants/skirts currently in REPAIR or with a
#                      repair_clothes row
# Also renders a per-date summary of scheduled alterations.
sub repair_list {
my $self = shift;
my $page = $self->param('p') || 1;
my $q = $self->param('q');
my $alteration_at = $self->param('alteration_at');
my $session = $self->session;
my $cond;
my $attr = {
rows => 15,
page => $page,
order_by => [qw/repair_clothes.done repair_clothes.alteration_at/],
join => 'repair_clothes'
};
## TODO: cookies are shared, so prefixing session keys with a per-service namespace would be better
if ($q) {
$q = sprintf( '%05s', uc $q );
unless ( $q =~ /^0[JPK]/ ) {
$cond = { code => $q };
}
else {
# Accumulate 0J/0P/0K codes across requests in the session.
my @repair_list = uniq( @{ $session->{donation}{repair_list} ||= [] }, $q );
$session->{donation}{repair_list} = [@repair_list];
$cond = { code => { -in => [@repair_list] } };
}
}
elsif ($alteration_at) {
$cond = { 'repair_clothes.alteration_at' => $alteration_at };
}
else {
delete $session->{donation}{repair_list};
$cond = {
-and => [
category => { -in => [ $PANTS, $SKIRT ] },
-or => [
status_id => $REPAIR,
'repair_clothes.id' => { '!=' => undef },
]
]
};
}
my $rs = $self->schema->resultset('Clothes')->search( $cond, $attr );
my $pageset = Data::Pageset->new(
{
total_entries => $rs->pager->total_entries,
entries_per_page => $rs->pager->entries_per_page,
pages_per_set => 5,
current_page => $page,
}
);
# Count of scheduled alterations per date, for the summary widget.
my $summary = $self->schema->resultset('RepairClothes')->search(
{
alteration_at => { '!=' => undef },
-or => [
{ done => undef },
{ done => 1 },
]
},
{
select => [
'alteration_at',
{ count => '*', -as => 'sum' }
],
group_by => 'alteration_at'
}
);
$self->render( clothes => $rs, pageset => $pageset, summary => $summary );
}
=sub _create_clothes
$self->_create_clothes($donation, $discard, $input, $tags?)
=cut
# Create one clothes row for $donation from %$input, attaching @$tags.
# In discard mode the code is generated per category; all clothes of a
# donation share a group_id (created on first use).  For recycle or
# unrecycle statuses the per-category last-used code is recorded in
# ClothesCode.  Dies on any failure (callers run this inside a
# transaction guard); returns 1 on success.
sub _create_clothes {
my ( $self, $donation, $discard, $input, $tags ) = @_;
my $code = $input->{code};
my $category = $input->{category};
$code = $self->generate_discard_code($category) if $discard;
$code = sprintf( '%05s', uc $code );
# NOTE(review): sprintf('%05s', '') yields five spaces, so this check
# cannot catch a failed generate_discard_code -- confirm its failure
# convention before relying on this die.
die "Failed to generate discard clothes code($category)\n" unless $code;
my $clothes = $self->schema->resultset('Clothes')->find( { code => $code } );
die "Duplicate clothes code: $code\n" if $clothes;
$input->{code} = $code;
# Reuse the donation's existing group, or start a new one.
my $group_id;
if ( my $clothes = $donation->clothes->next ) {
$group_id = $clothes->group_id;
}
unless ($group_id) {
my $group = $self->schema->resultset('Group')->create( {} );
$group_id = $group->id;
}
$input->{group_id} = $group_id;
$clothes = $self->schema->resultset('Clothes')->create($input);
die "Failed to create a new clothes\n" unless $clothes;
my $status_id = $input->{status_id};
# Record the last-assigned code for this category when the status is
# one of the recycle/unrecycle terminal states.
if ( "$RECYCLE_1 $RECYCLE_2 $RECYCLE_3 $UNRECYCLE" =~ m/\b$status_id\b/ ) {
my $clothes_code = $self->schema->resultset('ClothesCode')->find( { category => $category } );
die "Not found clothes category in ClothesCode: $category\n" unless $clothes_code;
$clothes_code->update( { code => sprintf( '%05s', $code ) } );
}
for my $name (@$tags) {
my $tag = $self->schema->resultset('Tag')->find_or_create( { name => $name } );
$clothes->create_related( 'clothes_tags', { tag_id => $tag->id } );
}
return 1;
}
=head2 tags
# clothes.tags
GET /tags
=cut
# GET /tags (clothes.tags) -- render every tag, ordered by name.
sub tags {
    my $self = shift;

    my $rs = $self->schema->resultset('Tag')
        ->search( undef, { order_by => 'name' } );

    $self->render( tags => $rs );
}
1;
| opencloset/donation | lib/OpenCloset/Donation/Controller/Clothes.pm | Perl | mit | 10,505 |
#!/usr/bin/perl
# Splitting an empty string yields an empty list, so the last-index
# value ($#array) is -1; this script prints "-1".
my @array = split /\s*,\s*/, q{};
print $#array, "\n";
| holtzermann17/Noosphere | bin/test/testarray.pl | Perl | mit | 68 |
package MARC::Field;
use strict;
use integer;
use Carp;
use constant SUBFIELD_INDICATOR => "\x1F";
use constant END_OF_FIELD => "\x1E";
use vars qw( $ERROR );
=head1 NAME
MARC::Field - Perl extension for handling MARC fields
=head1 SYNOPSIS
use MARC::Field;
my $field = MARC::Field->new( 245, '1', '0',
'a' => 'Raccoons and ripe corn / ',
'c' => 'Jim Arnosky.'
);
$field->add_subfields( "a", "1st ed." );
=head1 DESCRIPTION
Defines MARC fields for use in the MARC::Record module. I suppose
you could use them on their own, but that wouldn't be very interesting.
=head1 EXPORT
None by default. Any errors are stored in C<$MARC::Field::ERROR>, which
C<$MARC::Record> usually bubbles up to C<$MARC::Record::ERROR>.
=head1 METHODS
=head2 new()
The constructor, which will return a MARC::Field object. Typically you will
pass in the tag number, indicator 1, indicator 2, and then a list of any
subfield/data pairs. For example:
my $field = MARC::Field->new(
245, '1', '0',
'a' => 'Raccoons and ripe corn / ',
'c' => 'Jim Arnosky.'
);
Or if you want to add a field < 010 that does not have indicators.
my $field = MARC::Field->new( '001', ' 14919759' );
=cut
# Constructor.  For control tags (numeric, < 010) the third argument is
# the raw field data; otherwise expects indicator 1, indicator 2, then
# at least one subfield code/value pair.  Invalid indicators are forced
# to a blank with a warning; an invalid tag or missing subfields croaks.
sub new {
my $class = shift;
# NOTE(review): self-assignment is a no-op; likely vestigial.
$class = $class;
## MARC spec indicates that tags can have alphabetical
## characters in them! If they do appear we assume that
## they have indicators like tags > 010
my $tagno = shift;
($tagno =~ /^[0-9A-Za-z]{3}$/)
or croak( "Tag \"$tagno\" is not a valid tag." );
my $is_control = (($tagno =~ /^\d+$/) && ($tagno < 10));
my $self = bless {
_tag => $tagno,
_warnings => [],
_is_control_field => $is_control,
}, $class;
if ( $is_control ) {
$self->{_data} = shift;
} else {
for my $indcode ( qw( _ind1 _ind2 ) ) {
my $indicator = shift;
# Only a single alphanumeric or space is a legal indicator.
if ( $indicator !~ /^[0-9A-Za-z ]$/ ) {
$self->_warn( "Invalid indicator \"$indicator\" forced to blank" ) unless ($indicator eq "");
$indicator = " ";
}
$self->{$indcode} = $indicator;
} # for
(@_ >= 2)
or croak( "Field $tagno must have at least one subfield" );
# Normally, we go thru add_subfields(), but internally we can cheat
$self->{_subfields} = [@_];
}
return $self;
} # new()
=head2 tag()
Returns the three digit tag for the field.
=cut
# Accessor: the field's three-character tag (e.g. "245" or "001").
sub tag {
    my ($self) = @_;
    return $self->{_tag};
}
=head2 indicator(indno)
Returns the specified indicator. Returns C<undef> and sets
C<$MARC::Field::ERROR> if the I<indno> is not 1 or 2, or if
the tag doesn't have indicators.
=cut
# Return indicator 1 or 2, as selected by $indno.  Warns (but still
# proceeds) when called on a control field, which has no indicators;
# croaks when $indno is anything other than 1 or 2.
sub indicator($) {
my $self = shift;
my $indno = shift;
$self->_warn( "Fields below 010 do not have indicators" )
if $self->is_control_field;
if ( $indno == 1 ) {
return $self->{_ind1};
} elsif ( $indno == 2 ) {
return $self->{_ind2};
} else {
croak( "Indicator number must be 1 or 2" );
}
}
=head2 is_control_field()
Tells whether this field is one of the control tags from 001-009.
=cut
# True for control fields (numeric tags below 010), which carry raw
# data instead of indicators and subfields.
sub is_control_field {
    my ($self) = @_;
    return $self->{_is_control_field};
}
=head2 subfield(code)
When called in a scalar context returns the text from the first subfield
matching the subfield code.
my $subfield = $field->subfield( 'a' );
Or if you think there might be more than one you can get all of them by
calling in a list context:
my @subfields = $field->subfield( 'a' );
If no matching subfields are found, C<undef> is returned in a scalar context
and an empty list in a list context.
If the tag is less than an 010, C<undef> is returned and
C<$MARC::Field::ERROR> is set.
=cut
# Return the value(s) of subfields matching $wanted: all of them in
# list context, the first (or undef) in scalar context.  Croaks for
# control fields, which have no subfields.
sub subfield {
    my ($self, $wanted) = @_;

    croak( "Fields below 010 do not have subfields, use data()" )
        if $self->is_control_field;

    my @queue = @{ $self->{_subfields} };
    my @matches;
    while ( defined( my $code = shift @queue ) ) {
        my $value = shift @queue;
        push @matches, $value if $code eq $wanted;
    }

    return @matches if wantarray();
    return $matches[0];
}
=head2 subfields()
Returns all the subfields in the field. What's returned is a list of
list refs, where the inner list is a subfield code and the subfield data.
For example, this might be the subfields from a 245 field:
(
[ 'a', 'Perl in a nutshell :' ],
[ 'b', 'A desktop quick reference.' ],
)
=cut
# Return all subfields as a list of [code, value] pairs, in the order
# they occur in the field.  Control fields trigger a warning.
sub subfields {
    my ($self) = @_;

    $self->_warn( "Fields below 010 do not have subfields" )
        if $self->is_control_field;

    my @pairs;
    my @queue = @{ $self->{_subfields} };
    while ( defined( my $code = shift @queue ) ) {
        push @pairs, [ $code, shift @queue ];
    }
    return @pairs;
}
=head2 data()
Returns the data part of the field, if the tag number is less than 10.
=cut
# Get (or, with an argument, set) the raw data of a control field.
# Croaks for non-control fields, which use subfields instead.
sub data {
    my $self = shift;

    croak( "data() is only for tags less than 010, use subfield()" )
        unless $self->is_control_field;

    $self->{_data} = shift if @_;
    return $self->{_data};
}
=head2 add_subfields(code,text[,code,text ...])
Adds subfields to the end of the subfield list.
$field->add_subfields( 'c' => '1985' );
Returns the number of subfields added, or C<undef> if there was an error.
=cut
# Append code/value pairs to the end of the subfield list.  Croaks for
# control fields.  Returns the number of pairs added (the file-scoped
# `use integer` pragma makes @_/2 truncating integer division).
sub add_subfields {
my $self = shift;
croak( "Subfields are only for tags >= 10" )
if $self->is_control_field;
push( @{$self->{_subfields}}, @_ );
return @_/2;
}
=head2 delete_subfield()
delete_subfield() allows you to remove subfields from a field:
# delete any subfield a in the field
$field->delete_subfield(code => 'a');
# delete any subfield a or u in the field
$field->delete_subfield(code => ['a', 'u']);
If you want to only delete subfields at a particular position you can
use the pos parameter:
# delete subfield u at the first position
$field->delete_subfield(code => 'u', pos => 0);
# delete subfield u at first or second position
$field->delete_subfield(code => 'u', pos => [0,1]);
You can specify a regex to for only deleting subfields that match:
# delete any subfield u that matches zombo.com
$field->delete_subfield(code => 'u', match => qr/zombo.com/);
=cut
# Remove subfields matching the given criteria and return how many
# were removed.  Options:
#   code  - subfield code or array ref of codes (empty/absent = any)
#   pos   - 0-based position or array ref of positions (empty = any)
#   match - compiled regex the subfield VALUE must match (croaks on a
#           non-Regexp value)
# A subfield is removed only when every supplied criterion matches.
sub delete_subfield {
    my ($self, %options) = @_;

    my $codes     = _normalize_arrayref($options{code});
    my $positions = _normalize_arrayref($options{'pos'});
    my $match     = $options{match};

    croak 'match must be a compiled regex'
        if $match and ref($match) ne 'Regexp';

    my @current_subfields = @{$self->{_subfields}};
    my @new_subfields = ();
    my $removed = 0;

    # Positions are plain 0-based indexes.  (The original consulted the
    # long-deprecated $[ array-base variable, which has been fixed at 0
    # since Perl 5.16 and is read-only as of 5.30.)
    my $subfield_num = -1;

    while (@current_subfields > 0) {
        $subfield_num += 1;
        my $subfield_code  = shift @current_subfields;
        my $subfield_value = shift @current_subfields;
        # Keep the pair unless all supplied criteria agree it goes.
        if ((@$codes==0 or grep {$_ eq $subfield_code} @$codes)
            and (!$match or $subfield_value =~ $match)
            and (@$positions==0 or grep {$_ == $subfield_num} @$positions)) {
            $removed += 1;
            next;
        }
        push( @new_subfields, $subfield_code, $subfield_value);
    }
    $self->{_subfields} = \@new_subfields;
    return $removed;
}
=head2 delete_subfields()
Delete all subfields with a given subfield code. This is here for backwards
compatability, you should use the more flexible delete_subfield().
=cut
# Backwards-compatible wrapper: remove every subfield with the given
# code.  Prefer the more flexible delete_subfield() in new code.
sub delete_subfields {
my ($self, $code) = @_;
return $self->delete_subfield(code => $code);
}
=head2 update()
Allows you to change the values of the field. You can update indicators
and subfields like this:
$field->update( ind2 => '4', a => 'The ballad of Abe Lincoln');
If you attempt to update a subfield which does not currently exist in the field,
then a new subfield will be appended to the field. If you don't like this
auto-vivification you must check for the existence of the subfield prior to
update.
if ( $field->subfield( 'a' ) ) {
$field->update( 'a' => 'Cryptonomicon' );
}
If you want to update a field that has no indicators or subfields (000-009)
just call update() with one argument, the string that you would like to
set the field to.
$field = $record->field( '003' );
$field->update('IMchF');
Note: when doing subfield updates be aware that C<update()> will only
update the first occurrence. If you need to do anything more complicated
you will probably need to create a new field and use C<replace_with()>.
Returns the number of items modified.
=cut
# Update indicators ('ind1'/'ind2' keys) and subfields (any other key)
# in place.  For a control field the single argument replaces the raw
# data.  Only the FIRST occurrence of a subfield code is updated; a
# code not yet present is appended.  Returns the number of changes.
sub update {
my $self = shift;
## tags 000 - 009 don't have indicators or subfields
if ( $self->is_control_field ) {
$self->{_data} = shift;
return(1);
}
## otherwise we need to update subfields and indicators
my @data = @{$self->{_subfields}};
my $changes = 0;
while ( @_ ) {
my $arg = shift;
my $val = shift;
## indicator update
if ($arg =~ /^ind[12]$/) {
$self->{"_$arg"} = $val;
$changes++;
}
## subfield update
else {
my $found = 0;
## update existing subfield
for ( my $i=0; $i<@data; $i+=2 ) {
if ($data[$i] eq $arg) {
$data[$i+1] = $val;
$found = 1;
$changes++;
last;
}
} # for
## append new subfield
if ( !$found ) {
push( @data, $arg, $val );
$changes++;
}
}
} # while
## synchronize our subfields
$self->{_subfields} = \@data;
return($changes);
}
=head2 replace_with()
Allows you to replace an existing field with a new one. You need to pass
C<replace()> a MARC::Field object to replace the existing field with. For
example:
$field = $record->field('245');
my $new_field = new MARC::Field('245','0','4','The ballad of Abe Lincoln.');
$field->replace_with($new_field);
Doesn't return a meaningful or reliable value.
=cut
# Replace this field's contents with those of $new, in place, so every
# existing reference to $self sees the new field's data.  Croaks unless
# $new is a MARC::Field (the old exact-classname regex check rejected
# subclasses; isa() accepts them, which is backward-compatible).
sub replace_with {
    my ($self, $new) = @_;

    ( ref($new) && eval { $new->isa('MARC::Field') } )
        or croak("Must pass a MARC::Field object");

    %$self = %$new;
}
=head2 as_string( [$subfields] )
Returns a string of all subfields run together. A space is added to
the result between each subfield. The tag number and subfield
character are not included.
Subfields appear in the output string in the order in which they
occur in the field.
If C<$subfields> is specified, then only those subfields will be included.
my $field = MARC::Field->new(
245, '1', '0',
'a' => 'Abraham Lincoln',
'h' => '[videorecording] :',
'b' => 'preserving the union /',
'c' => 'A&E Home Video.'
);
print $field->as_string( 'abh' ); # Only those three subfields
# prints 'Abraham Lincoln [videorecording] : preserving the union /'.
Note that subfield h comes before subfield b in the output.
=cut
# Join subfield values with single spaces (no tag, indicators, or
# subfield codes).  With $subfields -- a string of codes -- only those
# subfields are included, in field order.  Control fields return their
# raw data.
sub as_string() {
    my ($self, $subfields) = @_;

    return $self->{_data} if $self->is_control_field;

    my @parts;
    my @pairs = @{ $self->{_subfields} };
    while (@pairs) {
        my ($code, $text) = splice(@pairs, 0, 2);
        push @parts, $text
            if !$subfields || $code =~ /^[$subfields]$/;
    }
    return join( " ", @parts );
}
=head2 as_formatted()
Returns a pretty string for printing in a MARC dump.
=cut
# Pretty-print the field for a MARC dump: "TAG DATA" for control
# fields, otherwise "TAG I1I2 _CODEvalue" with one line per subfield
# and the tag/indicator hanger shown only on the first line.
sub as_formatted() {
my $self = shift;
my @lines;
if ( $self->is_control_field ) {
push( @lines, sprintf( "%03s %s", $self->{_tag}, $self->{_data} ) );
} else {
my $hanger = sprintf( "%03s %1.1s%1.1s", $self->{_tag}, $self->{_ind1}, $self->{_ind2} );
my $subs = $self->{_subfields};
my $nfields = @$subs / 2;
my $offset = 0;
for my $i ( 1..$nfields ) {
# List arguments evaluate left-to-right, so the first $offset++
# yields the code and the second yields its value.
push( @lines, sprintf( "%-6.6s _%1.1s%s", $hanger, $subs->[$offset++], $subs->[$offset++] ) );
$hanger = "";
} # for
}
return join( "\n", @lines );
}
=head2 as_usmarc()
Returns a string for putting into a USMARC file. It's really only
useful by C<MARC::Record::as_usmarc()>.
=cut
# Serialize the field for a USMARC record: raw data plus the field
# terminator for control fields; otherwise both indicators, each
# subfield prefixed with the subfield indicator, then the terminator.
# Really only useful to MARC::Record::as_usmarc().
sub as_usmarc() {
my $self = shift;
# Tags < 010 are pretty easy
if ( $self->is_control_field ) {
return $self->data . END_OF_FIELD;
} else {
my @subs;
my @subdata = @{$self->{_subfields}};
while ( @subdata ) {
push( @subs, join( "", SUBFIELD_INDICATOR, shift @subdata, shift @subdata ) );
} # while
return
join( "",
$self->indicator(1),
$self->indicator(2),
@subs,
END_OF_FIELD, );
}
}
=head2 clone()
Makes a copy of the field. Note that this is not just the same as saying
my $newfield = $field;
since that just makes a copy of the reference. To get a new object, you must
my $newfield = $field->clone;
Returns a MARC::Field record.
=cut
sub clone {
    # Build an independent copy of this field. A plain assignment
    # (`my $new = $field`) only copies the reference; clone() duplicates
    # the underlying data, including a fresh copy of the subfield list.
    # Warnings are NOT carried over — the copy starts with an empty list.
    my $self = shift;

    my $tagno = $self->{_tag};
    my $is_control = ( $tagno =~ /^\d+$/ ) && ( $tagno < 10 );

    my %copy = (
        _tag              => $tagno,
        _warnings         => [],
        _is_control_field => $is_control,
    );

    if ( $is_control ) {
        # Control fields carry their payload directly in _data.
        $copy{_data} = $self->{_data};
    }
    else {
        # Data fields: copy indicators and a shallow duplicate of the
        # flat (code, text) subfield pair list.
        $copy{_ind1}      = $self->{_ind1};
        $copy{_ind2}      = $self->{_ind2};
        $copy{_subfields} = [ @{ $self->{_subfields} } ];
    }

    return bless \%copy, ref $self;
}
=head2 warnings()
Returns the warnings that were created when the record was read.
These are things like "Invalid indicators converted to blanks".
The warnings are items that you might be interested in, or might
not. It depends on how stringently you're checking data. If
you're doing some grunt data analysis, you probably don't care.
=cut
sub warnings {
    # Return the list of warnings accumulated while this field was read,
    # e.g. "Invalid indicators converted to blanks".
    # (Empty prototype removed: wrong for a method taking $self, and
    # ignored on method calls anyway.)
    my $self = shift;

    return @{$self->{_warnings}};
}
# NOTE: _warn is an object method
sub _warn {
    # Append one warning (all arguments concatenated into a single string)
    # to this field's warning list, later retrievable via warnings().
    # (The `($)` prototype was removed: it was wrong for a variadic method
    # and ignored on method calls anyway.)
    my $self = shift;

    push( @{$self->{_warnings}}, join( "", @_ ) );
}
sub _gripe {
    # Record an error: concatenate all arguments into the package-level
    # $ERROR, emit it via warn, and return empty (undef in scalar context)
    # so callers can `return _gripe(...)` to signal failure.
    # (The `(@)` prototype was removed — pointless and misleading; `our`
    # makes the package variable explicit and strict-safe.)
    our $ERROR = join( "", @_ );
    warn $ERROR;

    return;
}
sub _normalize_arrayref {
    # Coerce a value into an array reference: an arrayref is returned
    # unchanged, a defined scalar is wrapped in a one-element arrayref,
    # and undef (or no argument) yields a new empty arrayref.
    my ($value) = @_;

    return $value if ref($value) eq 'ARRAY';
    return defined($value) ? [$value] : [];
}
1;
__END__
=head1 SEE ALSO
See the "SEE ALSO" section for L<MARC::Record>.
=head1 TODO
See the "TODO" section for L<MARC::Record>.
=cut
=head1 LICENSE
This code may be distributed under the same terms as Perl itself.
Please note that these modules are not products of or supported by the
employers of the various contributors to the code.
=head1 AUTHOR
Andy Lester, C<< <andy@petdance.com> >>
=cut
| carlgao/lenga | images/lenny64-peon/usr/share/perl5/MARC/Field.pm | Perl | mit | 15,124 |
#!/usr/bin/perl
use warnings;
use strict;

# Rosalind FIBD — Mortal Fibonacci Rabbits.
# Given: positive integers n <= 100 and m <= 20 on the first line of the
#        input file named by $ARGV[0].
# Return: the total number of rabbit pairs remaining after the n-th month
#         if all rabbits live for exactly m months.
#
# Fix: the original used an unsafe two-arg open with a bareword filehandle
# and a die message without $!; now a checked three-arg lexical open.
open( my $in, '<', $ARGV[0] ) or die "Can't open input file '$ARGV[0]': $!\n";

# NOTE(review): fib() is never called below; kept for compatibility but is
# a candidate for removal.
sub fib { # (n, n-1)
    $_[0] + $_[1]; # Finds how many in next generation
}

our @ar = split( " ", <$in> ); # (month, life expectancy)
close( $in ) or die "Can't close input file: $!\n";

our $month   = 1;            # start month
our @rabbits = (0, 0, 1, 0); # (babyPast, AdultPast, babyCurr, AdultCurr)
our @hist;                   # newborn history, one slot per month of lifespan
my $n = 1;                   # used to create rabbit history
while ( $n++ < $ar[1] ) {    # creates list length = num of months rabbits live
    push( @hist, 0 );        # creating history of rabbits
}
# print "@hist\n"; # test the creation of history variable

while ( $month++ < $ar[0] ) {
    $rabbits[0] = $rabbits[2];               # changing past baby
    $rabbits[1] = $rabbits[3];               # changing past adult
    $rabbits[2] = $rabbits[1];               # changing current baby
    $rabbits[3] = $rabbits[0] + $rabbits[1]; # changing current adults
    my $add = shift( @hist );                # the rabbit adjustment
    push( @hist, $rabbits[0] );              # changing rabbit baby history
    $rabbits[3] -= $add;                     # adjust for rabbits dying
    # print "@rabbits\n"; # debugging
    # print "@hist\n\n";  # debugging
}

my $total = $rabbits[2] + $rabbits[3];
print "$total\n";
| erictleung/rosalind-programming | bioinformatics-stronghold/FIBD_Mortal_Fibonacci_Rabbits/mortalFibRabbits.pl | Perl | mit | 1,364 |
# Exercises:
# 1. Build the array ('Alice', 'Bob') and print it.
# 2. Use a function to extract 'Alice' and print it.
# 3. Use a function to extract 'Bob' and print it.
# 4. Starting from the array in 1., use functions to build and print
#    the array ('Zappa', 'Alice', 'Bob', 'Chris').
use strict;
use warnings;
use utf8;

my $rule = '-' x 20;    # separator line printed between exercises

# 1. Print the initial pair of names.
my @names = ('Alice', 'Bob');
print "@names\n";
print "$rule\n";

# 2. shift() removes and returns the FIRST element.
my @from_front = ('Alice', 'Bob');
my $first = shift @from_front;
print "@from_front\n";
print "$first\n";
print "$rule\n";

# 3. pop() removes and returns the LAST element.
my @from_back = ('Alice', 'Bob');
my $last = pop @from_back;
print "@from_back\n";
print "$last\n";
print "$rule\n";

# 4. unshift() prepends to the front, push() appends to the back.
my @extended = ('Alice', 'Bob');
unshift @extended, 'Zappa';
push @extended, 'Chris';
print "@extended\n";
| perl-entrance-org/workshop-2014-03 | code/tokyo2/array.pl | Perl | mit | 842 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.