code stringlengths 2 1.05M | repo_name stringlengths 5 101 | path stringlengths 4 991 | language stringclasses 3 values | license stringclasses 5 values | size int64 2 1.05M |
|---|---|---|---|---|---|
# Demonstrates the CPAN 'aliased' module, which shortens long class names.
# With a single argument the alias defaults to the last component of the
# package name ('Contact'); a second argument gives an explicit alias.
use aliased 'My::App::Contact';
use aliased 'My::App::Type::Contact' => 'ContactType';

# The aliases act like bareword class names, so they can be used anywhere
# a class name is expected.
my $contact_type = ContactType->new();
my $contact = Contact->new({ type => $contact_type });
| jmcveigh/komodo-tools | scripts/perl/shorten_long_class_names/alias_to_another_name.pl | Perl | bsd-2-clause | 187 |
package Zonemaster::Test::Syntax v1.0.1;
use strict;
use warnings;
use 5.014002;
use Zonemaster;
use Zonemaster::Util;
use Zonemaster::Recursor;
use Zonemaster::DNSName;
use Zonemaster::TestMethods;
use Zonemaster::Constants qw[:name];
use Carp;
use List::MoreUtils qw[uniq none any];
use Mail::RFC822::Address qw[valid];
use Time::Local;
###
### Entry points
###
sub all {
    my ( $class, $zone ) = @_;
    my @results;

    # Basic syntax checks on the zone name itself.
    push @results, $class->syntax01( $zone->name ) if Zonemaster->config->should_run( 'syntax01' );
    push @results, $class->syntax02( $zone->name ) if Zonemaster->config->should_run( 'syntax02' );
    push @results, $class->syntax03( $zone->name ) if Zonemaster->config->should_run( 'syntax03' );

    # The remaining tests are only meaningful when syntax01 found no
    # illegal characters in the zone name (tag ONLY_ALLOWED_CHARS).
    if ( any { $_->tag eq q{ONLY_ALLOWED_CHARS} } @results ) {

        # Check every distinct name server name from the delegation
        # (method2) and from the zone's own data (method3).
        foreach my $local_nsname ( uniq map { $_->string } @{ Zonemaster::TestMethods->method2( $zone ) },
            @{ Zonemaster::TestMethods->method3( $zone ) } )
        {
            push @results, $class->syntax04( $local_nsname ) if Zonemaster->config->should_run( 'syntax04' );
        }

        push @results, $class->syntax05( $zone ) if Zonemaster->config->should_run( 'syntax05' );

        # The RNAME/MNAME checks need a SOA answer; skip them when
        # syntax05 got no response on its SOA query.
        if ( none { $_->tag eq q{NO_RESPONSE_SOA_QUERY} } @results ) {
            push @results, $class->syntax06( $zone ) if Zonemaster->config->should_run( 'syntax06' );
            push @results, $class->syntax07( $zone ) if Zonemaster->config->should_run( 'syntax07' );
        }
        push @results, $class->syntax08( $zone ) if Zonemaster->config->should_run( 'syntax08' );
    }

    return @results;
} ## end sub all
###
### Metadata Exposure
###
sub metadata {
    my ( $class ) = @_;

    # Map each test method to the complete list of message tags it can
    # emit.  The framework uses this for documentation and filtering, so
    # any tag a test actually produces must be declared here.
    # Fix: syntax06 emits RNAME_RFC822_VALID on success (see syntax06),
    # but that tag was missing from its list.
    return {
        syntax01 => [
            qw(
              ONLY_ALLOWED_CHARS
              NON_ALLOWED_CHARS
              )
        ],
        syntax02 => [
            qw(
              INITIAL_HYPHEN
              TERMINAL_HYPHEN
              NO_ENDING_HYPHENS
              )
        ],
        syntax03 => [
            qw(
              DISCOURAGED_DOUBLE_DASH
              NO_DOUBLE_DASH
              )
        ],
        syntax04 => [
            qw(
              NAMESERVER_DISCOURAGED_DOUBLE_DASH
              NAMESERVER_NON_ALLOWED_CHARS
              NAMESERVER_NUMERIC_TLD
              NAMESERVER_SYNTAX_OK
              )
        ],
        syntax05 => [
            qw(
              RNAME_MISUSED_AT_SIGN
              RNAME_NO_AT_SIGN
              NO_RESPONSE_SOA_QUERY
              )
        ],
        syntax06 => [
            qw(
              RNAME_RFC822_INVALID
              RNAME_RFC822_VALID
              NO_RESPONSE_SOA_QUERY
              )
        ],
        syntax07 => [
            qw(
              MNAME_DISCOURAGED_DOUBLE_DASH
              MNAME_NON_ALLOWED_CHARS
              MNAME_NUMERIC_TLD
              MNAME_SYNTAX_OK
              NO_RESPONSE_SOA_QUERY
              )
        ],
        syntax08 => [
            qw(
              MX_DISCOURAGED_DOUBLE_DASH
              MX_NON_ALLOWED_CHARS
              MX_NUMERIC_TLD
              MX_SYNTAX_OK
              NO_RESPONSE_MX_QUERY
              )
        ],
    };
} ## end sub metadata
sub translation {
    # Map each message tag to an English message template.  The
    # {name}-style placeholders are substituted by the framework's
    # builtin translation system; the strings themselves are part of
    # the public output and must not be changed casually.
    return {
        'NAMESERVER_DISCOURAGED_DOUBLE_DASH' =>
          'Nameserver ({name}) has a label ({label}) with a double hyphen (\'--\') in position 3 and 4 (with a prefix which is not \'xn--\').',
        'NAMESERVER_NON_ALLOWED_CHARS' => 'Found illegal characters in the nameserver ({name}).',
        'NAMESERVER_NUMERIC_TLD'       => 'Nameserver ({name}) within a \'numeric only\' TLD ({tld}).',
        'NAMESERVER_SYNTAX_OK'         => 'Nameserver ({name}) syntax is valid.',
        'MNAME_DISCOURAGED_DOUBLE_DASH' =>
          'SOA MNAME ({name}) has a label ({label}) with a double hyphen (\'--\') in position 3 and 4 (with a prefix which is not \'xn--\').',
        'MNAME_NON_ALLOWED_CHARS' => 'Found illegal characters in SOA MNAME ({name}).',
        'MNAME_NUMERIC_TLD'       => 'SOA MNAME ({name}) within a \'numeric only\' TLD ({tld}).',
        'MNAME_SYNTAX_OK'         => 'SOA MNAME ({name}) syntax is valid.',
        'MX_DISCOURAGED_DOUBLE_DASH' =>
          'Domain name MX ({name}) has a label ({label}) with a double hyphen (\'--\') in position 3 and 4 (with a prefix which is not \'xn--\').',
        'MX_NON_ALLOWED_CHARS' => 'Found illegal characters in MX ({name}).',
        'MX_NUMERIC_TLD'       => 'Domain name MX ({name}) within a \'numeric only\' TLD ({tld}).',
        'MX_SYNTAX_OK'         => 'Domain name MX ({name}) syntax is valid.',
        'DISCOURAGED_DOUBLE_DASH' =>
          'Domain name ({name}) has a label ({label}) with a double hyphen (\'--\') in position 3 and 4 (with a prefix which is not \'xn--\').',
        'INITIAL_HYPHEN'     => 'Domain name ({name}) has a label ({label}) starting with an hyphen (\'-\').',
        'TERMINAL_HYPHEN'    => 'Domain name ({name}) has a label ({label}) ending with an hyphen (\'-\').',
        'NON_ALLOWED_CHARS'  => 'Found illegal characters in the domain name ({name}).',
        'ONLY_ALLOWED_CHARS' => 'No illegal characters in the domain name ({name}).',
        'RNAME_MISUSED_AT_SIGN' => 'There must be no misused \'@\' character in the SOA RNAME field ({rname}).',
        'RNAME_RFC822_INVALID'  => 'There must be no illegal characters in the SOA RNAME field ({rname}).',
        'RNAME_RFC822_VALID'    => 'The SOA RNAME field ({rname}) is compliant with RFC2822.',
        'NO_ENDING_HYPHENS'     => 'Both ends of all labels of the domain name ({name}) have no hyphens.',
        'NO_DOUBLE_DASH' =>
          'Domain name ({name}) has no label with a double hyphen (\'--\') in position 3 and 4 (with a prefix which is not \'xn--\').',
        'RNAME_NO_AT_SIGN'      => 'There is no misused \'@\' character in the SOA RNAME field ({rname}).',
        'NO_RESPONSE_SOA_QUERY' => 'No response from nameserver(s) on SOA queries.',
        'NO_RESPONSE_MX_QUERY'  => 'No response from nameserver(s) on MX queries.',
    };
} ## end sub translation
sub version {
    # Report the module's version, stringified (package Foo v1.0.1 sets
    # $VERSION to a version object; interpolation yields e.g. "v1.0.1").
    my $module_version = $Zonemaster::Test::Syntax::VERSION;
    return "$module_version";
}
###
### Tests
###
sub syntax01 {
    my ( $class, $item ) = @_;

    # Report whether the domain name consists exclusively of allowed
    # characters (letters, digits and hyphens).
    my $name = get_name( $item );

    my $tag =
        _name_has_only_legal_characters( $name )
      ? q{ONLY_ALLOWED_CHARS}
      : q{NON_ALLOWED_CHARS};

    return info( $tag => { name => $name } );
} ## end sub syntax01
sub syntax02 {
    my ( $class, $item ) = @_;

    # Check that no label of the name starts or ends with a hyphen.
    my $name = get_name( $item );
    my @results;

    for my $label ( @{ $name->labels } ) {
        push @results,
          info( INITIAL_HYPHEN => { label => $label, name => $name } )
          if _label_starts_with_hyphen( $label );
        push @results,
          info( TERMINAL_HYPHEN => { label => $label, name => $name } )
          if _label_ends_with_hyphen( $label );
    }

    # Positive confirmation: at least one label and nothing to report.
    if ( @{ $name->labels } and not @results ) {
        push @results, info( NO_ENDING_HYPHENS => { name => $name } );
    }

    return @results;
} ## end sub syntax02
sub syntax03 {
    my ( $class, $item ) = @_;

    # Flag labels that carry a double hyphen in positions 3-4 without
    # being ACE ("xn--") labels.
    my $name = get_name( $item );
    my @results;

    for my $label ( @{ $name->labels } ) {
        next unless _label_not_ace_has_double_hyphen_in_position_3_and_4( $label );
        push @results,
          info( DISCOURAGED_DOUBLE_DASH => { label => $label, name => $name } );
    }

    # Positive confirmation: at least one label and nothing to report.
    if ( @{ $name->labels } and not @results ) {
        push @results, info( NO_DOUBLE_DASH => { name => $name } );
    }

    return @results;
} ## end sub syntax03
sub syntax04 {
    my ( $class, $item ) = @_;

    # Delegate the nameserver name checks to the shared implementation,
    # tagging the resulting messages with the NAMESERVER prefix.
    return check_name_syntax( q{NAMESERVER}, get_name( $item ) );
}
sub syntax05 {
    my ( $class, $zone ) = @_;
    my @results;

    my $p = $zone->query_one( $zone->name, q{SOA} );

    if ( $p and my ( $soa ) = $p->get_records( q{SOA}, q{answer} ) ) {
        my $rname = $soa->rname;

        # Un-escape escaped dots before looking for a misused '@' sign.
        # The pattern must match a literal backslash followed by a
        # literal dot only; the previous pattern (s/\\./\./) matched a
        # backslash followed by ANY character (unescaped '.' metachar)
        # and replaced the pair with a dot.
        $rname =~ s/\\[.]/./smgx;

        if ( index( $rname, q{@} ) != -1 ) {
            push @results,
              info(
                RNAME_MISUSED_AT_SIGN => {
                    rname => $soa->rname,
                }
              );
        }
        else {
            push @results,
              info(
                RNAME_NO_AT_SIGN => {
                    rname => $soa->rname,
                }
              );
        }
    } ## end if ( $p and my ( $soa ...))
    else {
        # No usable SOA answer; nothing to check.
        push @results, info( NO_RESPONSE_SOA_QUERY => {} );
    }

    return @results;
} ## end sub syntax05
sub syntax06 {
    my ( $class, $zone ) = @_;
    my @results;

    my $p = $zone->query_one( $zone->name, q{SOA} );

    if ( $p and my ( $soa ) = $p->get_records( q{SOA}, q{answer} ) ) {
        my $rname = $soa->rname;

        # Turn the DNS-encoded RNAME back into a mailbox address before
        # validating it with Mail::RFC822::Address::valid().
        $rname =~ s/([^\\])[.]/$1@/smx;    # Replace first non-escaped dot with an at-sign
        $rname =~ s/[\\][.]/./smgx;        # Un-escape dots
        $rname =~ s/[.]\z//smgx;           # Validator does not like final dots

        if ( not valid( $rname ) ) {
            push @results,
              info(
                RNAME_RFC822_INVALID => {
                    rname => $rname,
                }
              );
        }
        else {
            push @results,
              info(
                RNAME_RFC822_VALID => {
                    rname => $rname,
                }
              );
        }
    } ## end if ( $p and my ( $soa ...))
    else {
        # No usable SOA answer; nothing to validate.
        push @results, info( NO_RESPONSE_SOA_QUERY => {} );
    }

    return @results;
} ## end sub syntax06
sub syntax07 {
    my ( $class, $zone ) = @_;

    my $p = $zone->query_one( $zone->name, q{SOA} );

    if ( $p and my ( $soa ) = $p->get_records( q{SOA}, q{answer} ) ) {
        # Apply the shared name checks to the SOA MNAME field, tagging
        # the resulting messages with the MNAME prefix.
        return check_name_syntax( q{MNAME}, $soa->mname );
    }

    # No usable SOA answer; nothing to check.
    return info( NO_RESPONSE_SOA_QUERY => {} );
}
sub syntax08 {
    my ( $class, $zone ) = @_;

    my $p = $zone->query_one( $zone->name, q{MX} );

    # No usable MX answer; nothing to check.
    return info( NO_RESPONSE_MX_QUERY => {} ) if not $p;

    # Deduplicate the exchange names, then check each in sorted order
    # for deterministic output.
    my %seen;
    $seen{ $_->exchange } = 1 for $p->get_records( q{MX}, q{answer} );

    my @results;
    push @results, check_name_syntax( q{MX}, $_ ) for sort keys %seen;

    return @results;
}
###
### Internal Tests with Boolean (0|1) return value.
###
sub _name_has_only_legal_characters {
    my ( $name ) = @_;

    # A label is legal when it consists solely of ASCII letters, digits
    # and hyphens.  Collect offending labels with plain grep: the
    # previous code called List::MoreUtils::all(), which is not in this
    # module's import list (L19 imports only uniq/none/any) and only
    # parsed because the prototype happened to be visible.
    my @illegal = grep { !m/\A[-A-Za-z0-9]+\z/smx } @{ $name->labels };

    # An empty label list counts as legal, matching all() on an empty list.
    return @illegal ? 0 : 1;
}
sub _label_starts_with_hyphen {
    my ( $label ) = @_;

    # Undef, empty string (or the label '0') can never start with '-'.
    return 0 if not $label;

    # Boolean match only.  The original pattern carried a stray /g
    # modifier, a known pitfall for scalar-context matches; /m and /s
    # had no effect on this \A-anchored, dot-free pattern either.
    return ( $label =~ m/\A-/x ) ? 1 : 0;
}
sub _label_ends_with_hyphen {
    my ( $label ) = @_;

    # Undef, empty string (or the label '0') can never end with '-'.
    return 0 if not $label;

    # Boolean match only.  The original pattern carried a stray /g
    # modifier, a known pitfall for scalar-context matches; /m and /s
    # had no effect on this \z-anchored, dot-free pattern either.
    return ( $label =~ m/-\z/x ) ? 1 : 0;
}
sub _label_not_ace_has_double_hyphen_in_position_3_and_4 {
    my ( $label ) = @_;

    # Empty or undefined labels never qualify.
    return 0 if not $label;

    # True when characters 3 and 4 are '--' but the label is not an
    # ACE label (i.e. does not start with 'xn', case-insensitively).
    my $has_double_dash = $label =~ m/\A..--/smx;
    my $looks_like_ace  = $label =~ m/\Axn/ismx;

    return ( $has_double_dash and not $looks_like_ace ) ? 1 : 0;
}
###
### Common part for syntax04, syntax07 and syntax08
###
sub get_name {
    my ( $item ) = @_;

    my $name;
    my $type = ref $item;

    if ( not $type ) {
        # A plain string: build a Zonemaster::DNSName via name().
        $name = name( $item );
    }
    elsif ( $type eq q{Zonemaster::Zone} ) {
        # A zone carries its own name object.
        $name = $item->name;
    }
    elsif ( $type eq q{Zonemaster::DNSName} ) {
        # Already the right type: pass it through unchanged.
        $name = $item;
    }

    # Any other reference type yields undef.
    return $name;
}
sub check_name_syntax {
    my ( $info_label_prefix, $name ) = @_;
    my @results;

    # Accept strings, zones or DNSName objects alike.
    $name = get_name( $name );

    # Character check: same rule as syntax01, with a prefixed tag.
    if ( not _name_has_only_legal_characters( $name ) ) {
        push @results,
          info(
            $info_label_prefix . q{_NON_ALLOWED_CHARS} => {
                name => $name,
            }
          );
    }

    # Double-dash check: same rule as syntax03, with a prefixed tag.
    foreach my $local_label ( @{ $name->labels } ) {
        if ( _label_not_ace_has_double_hyphen_in_position_3_and_4( $local_label ) ) {
            push @results,
              info(
                $info_label_prefix . q{_DISCOURAGED_DOUBLE_DASH} => {
                    label => $local_label,
                    name  => "$name",
                }
              );
        }
    }

    # TLD check: the last label must not be purely numeric.  Guard
    # against an empty label list (e.g. the root name), which previously
    # triggered an uninitialized-value warning in the regex match.  The
    # stray /g modifier on the match has also been dropped.
    my $tld = @{ $name->labels } ? $name->labels->[-1] : undef;
    if ( defined $tld and $tld =~ m/\A\d+\z/smx ) {
        push @results,
          info(
            $info_label_prefix . q{_NUMERIC_TLD} => {
                name => "$name",
                tld  => $tld,
            }
          );
    }

    # Positive confirmation when nothing was reported.
    if ( not scalar @results ) {
        push @results,
          info(
            $info_label_prefix . q{_SYNTAX_OK} => {
                name => "$name",
            }
          );
    }

    return @results;
} ## end sub check_name_syntax
1;
=head1 NAME
Zonemaster::Test::Syntax - test validating the syntax of host names and other data
=head1 SYNOPSIS
my @results = Zonemaster::Test::Syntax->all($zone);
=head1 METHODS
=over
=item all($zone)
Runs the default set of tests and returns a list of log entries made by the tests.
=item translation()
Returns a reference to a hash with translation data. Used by the builtin translation system.
=item metadata()
Returns a reference to a hash, the keys of which are the names of all test methods in the module, and the corresponding values are references to
lists with all the tags that the method can use in log entries.
=item version()
Returns a version string for the module.
=back
=head1 TESTS
=over
=item syntax01($name)
Verifies that the name (Zonemaster::DNSName) given contains only allowed characters.
=item syntax02($name)
Verifies that the name (Zonemaster::DNSName) given does not start or end with a hyphen ('-').
=item syntax03($name)
Verifies that the name (Zonemaster::DNSName) given does not contain a hyphen in 3rd and 4th position (in the exception of 'xn--').
=item syntax04($name)
Verifies that the given nameserver name (Zonemaster::DNSName) conforms to the previous syntax rules. It also verifies the total name length as well as the labels.
=item syntax05($zone)
Verifies that the SOA RNAME (Zonemaster::DNSName) of the given zone uses the at sign (@) correctly.
=item syntax06($zone)
Verify that a SOA rname (Zonemaster::DNSName) given is RFC822 compliant.
=item syntax07($zone)
Verifies that the SOA MNAME of the given zone conforms to the previous syntax rules (syntax01, syntax02, syntax03). It also verifies the total name length as well as the labels.
=item syntax08(@mx_names)
Verifies that each MX name (Zonemaster::DNSName) of the given zone conforms to the previous syntax rules (syntax01, syntax02, syntax03). It also verifies the total name length as well as the labels.
=back
=head1 INTERNAL METHODS
=over
=item get_name($item)
Converts argument to a L<Zonemaster::DNSName> object.
=item check_name_syntax
Implementation of some tests that are used on several kinds of input.
=back
=cut
| dolmen/p5-Zonemaster | lib/Zonemaster/Test/Syntax.pm | Perl | bsd-2-clause | 15,719 |
/* Part of SWI-Prolog
Author: Jan Wielemaker
E-mail: jan@swi-prolog.org
WWW: http://www.swi-prolog.org
Copyright (c) 2021, SWI-Prolog Solutions b.v.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
:- module(build_tools,
[ build_steps/3, % +Steps, +SrcDir, +Options
prolog_install_prefix/1, % -Prefix
run_process/3, % +Executable, +Argv, +Options
has_program/3, % +Spec, -Path, +Env
path_sep/1, % -Separator
ensure_build_dir/3 % +Dir, +State0, -State
]).
:- autoload(library(lists), [selectchk/3, member/2, append/3, last/2]).
:- autoload(library(option), [option/2, option/3, dict_options/2]).
:- autoload(library(pairs), [pairs_values/2]).
:- autoload(library(process), [process_create/3, process_wait/2]).
:- autoload(library(readutil), [read_stream_to_codes/3]).
:- autoload(library(dcg/basics), [string/3]).
:- autoload(library(apply), [foldl/4, maplist/2]).
:- autoload(library(filesex), [directory_file_path/3, make_directory_path/1]).
:- autoload(library(prolog_config), [apple_bundle_libdir/1]).
:- autoload(library(solution_sequences), [distinct/2]).
% The plugins. Load them in the order of preference.
:- use_module(conan).
:- use_module(cmake).
:- use_module(make).
:- multifile
prolog:build_file/2, % ?File, ?Toolchain
prolog:build_step/4, % ?Step, ?Tool, ?SrcDir, ?BuildDir
prolog:build_environment/2, % ?Name, ?Value
prolog_pack:environment/2. % ?Name, ?Value (backward compatibility)
/** <module> Utilities for building foreign resources
This module implements the build system that is used by pack_install/1
and pack_rebuild/1. The build system is a plugin based system where each
plugin knows about a specific build toolchain. The plugins recognise
whether they are applicable based on the existence of files that are
unique to the toolchain. Currently it supports
- [conan](https://conan.io/) for the installation of dependencies
- [cmake](https://cmake.org/) for configuration and building
- [GNU tools](https://www.gnu.org) including `automake` and `autoconf`
for configuration and building
*/
%! build_steps(+Steps:list, SrcDir:atom, +Options) is det.
%
% Run the desired build steps. Normally, Steps is the list below,
% optionally prefixed with `distclean` or `clean`. `[test]` may be
% omited if ``--no-test`` is effective.
%
% [[dependencies], [configure], build, [test], install]
%
% Each step finds an applicable toolchain based on known unique files
% and calls the matching plugin to perform the step. A step may fail,
% which causes the system to try an alternative. A step that wants to
% abort the build process must throw an exception.
%
% @tbd If no tool is willing to execute some step, the step is
% skipped. This is ok for some steps such as `dependencies` or `test`.
% Possibly we should force the `install` step to succeed?
build_steps(Steps, SrcDir, Options) :-
    dict_options(Dict0, Options),
    setup_path,
    build_environment(BuildEnv, Options),
    % Seed the state dict with the build environment and source
    % location; later steps may extend it (e.g. with bin_dir).
    State0 = Dict0.put(#{ env: BuildEnv,
                          src_dir: SrcDir
                        }),
    foldl(build_step, Steps, State0, _State).

%   build_step(+Spec, +State0, -State)
%
%   Execute one build step.  Find a toolchain whose marker file exists
%   in the source directory and run the matching plugin hook.  A step
%   wrapped in a list (e.g. [test]) is optional: its failure is silent.
%   A bare step that fails only prints a warning (see module doc).
build_step(Spec, State0, State) :-
    step_name(Spec, Step),
    prolog:build_file(File, Tool),
    directory_file_path(State0.src_dir, File, Path),
    exists_file(Path),
    prolog:build_step(Step, Tool, State0, State),
    post_step(Step, Tool, State),
    !.
build_step([_], State, State) :-
    !.
build_step(Step, State, State) :-
    print_message(warning, build(step_failed(Step))).

%   step_name(+Spec, -Step): strip the "optional step" list wrapper.
step_name([Step], Step) :-              % options
    !.
step_name(Step, Step).

%   post_step(+Step, +Tool, +State): after configure, record the build
%   environment in a shell script for debugging/out-of-Prolog builds.
post_step(configure, _, State) :-
    !,
    save_build_environment(State.bin_dir, State.env).
post_step(_, _, _).
%! ensure_build_dir(+Dir, +State0, -State) is det.
%
% Create the build directory. Dir is normally either '.' to build in
% the source directory or `build` to create a `build` subdir.
ensure_build_dir(_, State0, State) :-
    % Already resolved: keep the existing bin_dir.
    _ = State0.get(bin_dir),
    !,
    State = State0.
ensure_build_dir(., State0, State) :-
    % '.' requests an in-source build: bin_dir is the source directory.
    !,
    State = State0.put(bin_dir, State0.src_dir).
ensure_build_dir(Dir, State0, State) :-
    % Otherwise create Dir below the source directory.
    directory_file_path(State0.src_dir, Dir, BinDir),
    make_directory_path(BinDir),
    !,
    State = State0.put(bin_dir, BinDir).
/*******************************
* ENVIRONMENT *
*******************************/
%! build_environment(-Env, +Options) is det.
%
% Assemble a clean build environment for creating extensions to
% SWI-Prolog. Env is a list of `Var=Value` pairs. The variable names
% depend on the `pack_version(Version)` term from `pack.pl`. When
% absent or `1`, the old names are used. These names are confusing and
% conflict with some build environments. Using `2` (or later), the new
% names are used. The list below first names the new name and than
% between parenthesis, the new name. Provided variables are:
%
% $ ``PATH`` :
% contains the environment path with the directory
% holding the currently running SWI-Prolog instance prepended
% in front of it. As a result, `swipl` is always present and
% runs the same SWI-Prolog instance as the current Prolog process.
% $ ``SWIPL`` :
% contains the absolute file name of the running executable.
% $ ``SWIPL_PACK_VERSION`` :
% Version of the pack system (1 or 2)
% $ ``SWIPL_VERSION`` (``SWIPLVERSION``) :
% contains the numeric SWI-Prolog version defined as
% _|Major*10000+Minor*100+Patch|_.
% $ ``SWIPL_HOME_DIR`` (``SWIHOME``) :
% contains the directory holding the SWI-Prolog home.
% $ ``SWIPL_ARCH`` (``SWIARCH``) :
% contains the machine architecture identifier.
% $ ``SWIPL_MODULE_DIR`` (``PACKSODIR``) :
% constains the destination directory for shared objects/DLLs
% relative to a Prolog pack, i.e., ``lib/$SWIARCH``.
% $ ``SWIPL_MODULE_LIB`` (``SWISOLIB``) :
% The SWI-Prolog library or an empty string when it is not required
% to link modules against this library (e.g., ELF systems)
% $ ``SWIPL_LIB`` (``SWILIB``) :
% The SWI-Prolog library we need to link to for programs that
% _embed_ SWI-Prolog (normally ``-lswipl``).
% $ ``SWIPL_INCLUDE_DIRS`` :
% CMake style variable that contains the directory holding
% ``SWI-Prolog.h``, ``SWI-Stream.h`` and ``SWI-cpp.h``.
% $ ``SWIPL_LIBRARIES_DIR`` :
% CMake style variable that contains the directory holding `libswipl`
% $ ``SWIPL_CC`` (``CC``) :
% Prefered C compiler
% $ ``SWIPL_LD`` (``LD``) :
% Prefered linker
% $ ``SWIPL_CFLAGS`` (``CLFLAGS``) :
% C-Flags for building extensions. Always contains ``-ISWIPL-INCLUDE-DIR``.
% $ ``SWIPL_MODULE_LDFLAGS`` (``LDSOFLAGS``) :
% Link flags for linking modules.
% $ ``SWIPL_MODULE_EXT`` (``SOEXT``) :
% File name extension for modules (e.g., `so` or `dll`)
% $ ``SWIPL_PREFIX`` (``PREFIX``) :
% Install prefix for global binaries, libraries and include files.
%
% In addition, several environment variables are passes verbatim:
% ``TMP``, ``TEMP``, ``USER``, ``HOME``, ``LANG``, ``CC``,
% ``CXX``, ``LD``, ``CFLAGS``, ``CXXFLAGS`` and ``LDFLAGS`` are
% passed verbatim unless redefined for version 1 packs as described
% above.
build_environment(Env, Options) :-
    % Hook-provided variables take precedence over the defaults;
    % distinct/2 keeps only the first value found for each name.
    findall(Name=Value,
            distinct(Name, user_environment(Name, Value)),
            UserEnv),
    findall(Name=Value,
            (   def_environment(Name, Value, Options),
                \+ memberchk(Name=_, UserEnv)
            ),
            DefEnv),
    append(UserEnv, DefEnv, Env).

%   user_environment(-Name, -Value): environment from hooks, trying the
%   modern prolog:build_environment/2 hook before the backward
%   compatible prolog_pack:environment/2 hook.
user_environment(Name, Value) :-
    prolog:build_environment(Name, Value).
user_environment(Name, Value) :-
    prolog_pack:environment(Name, Value).
%! prolog:build_environment(-Name, -Value) is nondet.
%
% Hook to define the environment for building packs. This
% Multifile hook extends the process environment for building
% foreign extensions. A value provided by this hook overrules
% defaults provided by def_environment/3. In addition to changing
% the environment, this may be used to pass additional values to
% the environment, as in:
%
% ==
% prolog:build_environment('USER', User) :-
% getenv('USER', User).
% ==
%
% @arg Name is an atom denoting a valid variable name
% @arg Value is either an atom or number representing the
% value of the variable.
%! def_environment(-Name, -Value, +Options) is nondet.
%
% True if Name=Value must appear in the environment for building
% foreign extensions.
def_environment('PATH', Value, _) :-
    % Prepend the directory of the running SWI-Prolog executable, so
    % `swipl` on $PATH resolves to the instance running this code.
    getenv('PATH', PATH),
    current_prolog_flag(executable, Exe),
    file_directory_name(Exe, ExeDir),
    prolog_to_os_filename(ExeDir, OsExeDir),
    path_sep(Sep),
    atomic_list_concat([OsExeDir, Sep, PATH], Value).
def_environment('SWIPL', Value, _) :-
    current_prolog_flag(executable, Value).
def_environment('SWIPL_PACK_VERSION', Value, Options) :-
    option(pack_version(Value), Options, 1).
def_environment(VAR, Value, Options) :-
    env_name(version, VAR, Options),
    current_prolog_flag(version, Value).
def_environment(VAR, Value, Options) :-
    env_name(home, VAR, Options),
    current_prolog_flag(home, Value).
def_environment(VAR, Value, Options) :-
    env_name(arch, VAR, Options),
    current_prolog_flag(arch, Value).
def_environment(VAR, Value, Options) :-
    % Module destination inside a pack: lib/$SWIARCH.
    env_name(module_dir, VAR, Options),
    current_prolog_flag(arch, Arch),
    atom_concat('lib/', Arch, Value).
def_environment(VAR, Value, Options) :-
    env_name(module_lib, VAR, Options),
    current_prolog_flag(c_libplso, Value).
def_environment(VAR, '-lswipl', Options) :-
    env_name(lib, VAR, Options).
def_environment(VAR, Value, Options) :-
    % C compiler: an explicit $CC wins, then a known compiler found on
    % PATH, finally the compiler SWI-Prolog itself was built with.
    env_name(cc, VAR, Options),
    (   getenv('CC', Value)
    ->  true
    ;   default_c_compiler(Value)
    ->  true
    ;   current_prolog_flag(c_cc, Value)
    ).
def_environment(VAR, Value, Options) :-
    env_name(ld, VAR, Options),
    (   getenv('LD', Value)
    ->  true
    ;   current_prolog_flag(c_cc, Value)
    ).
def_environment('SWIPL_INCLUDE_DIRS', Value, _) :- % CMake style environment
    current_prolog_flag(home, Home),
    atom_concat(Home, '/include', Value).
def_environment('SWIPL_LIBRARIES_DIR', Value, _) :-
    swipl_libraries_dir(Value).
def_environment(VAR, Value, Options) :-
    % CFLAGS: always add -I<home>/include; preserve any user CFLAGS.
    env_name(cflags, VAR, Options),
    (   getenv('CFLAGS', SystemFlags)
    ->  Extra = [' ', SystemFlags]
    ;   Extra = []
    ),
    current_prolog_flag(c_cflags, Value0),
    current_prolog_flag(home, Home),
    atomic_list_concat([Value0, ' -I"', Home, '/include"' | Extra], Value).
def_environment(VAR, Value, Options) :-
    % Link flags for shared modules.  -L<libdir> is only added on
    % platforms where modules must link against libswipl (Windows,
    % macOS bundles, and anything that is not ELF).
    env_name(module_ldflags, VAR, Options),
    (   getenv('LDFLAGS', SystemFlags)
    ->  Extra = [SystemFlags|System]
    ;   Extra = System
    ),
    (   current_prolog_flag(windows, true)
    ->  current_prolog_flag(home, Home),
        atomic_list_concat(['-L"', Home, '/bin"'], SystemLib),
        System = [SystemLib]
    ;   apple_bundle_libdir(LibDir)
    ->  atomic_list_concat(['-L"', LibDir, '"'], SystemLib),
        System = [SystemLib]
    ;   current_prolog_flag(c_libplso, '')
    ->  System = []                     % ELF systems do not need this
    ;   prolog_library_dir(SystemLibDir),
        atomic_list_concat(['-L"',SystemLibDir,'"'], SystemLib),
        System = [SystemLib]
    ),
    current_prolog_flag(c_ldflags, LDFlags),
    atomic_list_concat([LDFlags, '-shared' | Extra], ' ', Value).
def_environment(VAR, Value, Options) :-
    env_name(module_ext, VAR, Options),
    current_prolog_flag(shared_object_extension, Value).
def_environment('PREFIX', Value, _) :-
    prolog_install_prefix(Value).
def_environment(Pass, Value, _) :-
    % Variables passed through verbatim (see pass_env/1 below).
    pass_env(Pass),
    getenv(Pass, Value).
%   swipl_libraries_dir(-Dir): directory holding libswipl, used for the
%   CMake-style SWIPL_LIBRARIES_DIR variable.
swipl_libraries_dir(Dir) :-
    current_prolog_flag(windows, true),
    !,
    current_prolog_flag(home, Home),
    atom_concat(Home, '/bin', Dir).
swipl_libraries_dir(Dir) :-
    apple_bundle_libdir(Dir),
    !.
swipl_libraries_dir(Dir) :-
    prolog_library_dir(Dir).

%   pass_env(?Name): environment variables copied into the build
%   environment verbatim (unless overruled by a hook or a default).
pass_env('TMP').
pass_env('TEMP').
pass_env('USER').
pass_env('HOME').
pass_env('LANG').
pass_env('CC').
pass_env('CXX').
pass_env('LD').
pass_env('CFLAGS').
pass_env('CXXFLAGS').
pass_env('LDFLAGS').
%   env_name(+Id, -Name, +Options): map an abstract variable id to the
%   concrete environment variable name for the pack version in force
%   (pack_version option; 1 = legacy names, 2 = SWIPL_* names).
env_name(Id, Name, Options) :-
    option(pack_version(V), Options, 1),
    must_be(oneof([1,2]), V),
    env_name_v(Id, V, Name).

%   env_name_v(?Id, ?PackVersion, ?Name): the name table.  Version 1
%   names are legacy; version 2 uses the SWIPL_ prefix consistently.
env_name_v(version, 1, 'SWIPLVERSION').
env_name_v(version, 2, 'SWIPL_VERSION').
env_name_v(home, 1, 'SWIHOME').
env_name_v(home, 2, 'SWIPL_HOME_DIR').
env_name_v(module_dir, 1, 'PACKSODIR').
env_name_v(module_dir, 2, 'SWIPL_MODULE_DIR').
env_name_v(module_lib, 1, 'SWISOLIB').
env_name_v(module_lib, 2, 'SWIPL_MODULE_LIB').
env_name_v(lib, 1, 'SWILIB').
env_name_v(lib, 2, 'SWIPL_LIB').
env_name_v(arch, 1, 'SWIARCH').
env_name_v(arch, 2, 'SWIPL_ARCH').
env_name_v(cc, 1, 'CC').
env_name_v(cc, 2, 'SWIPL_CC').
env_name_v(ld, 1, 'LD').
env_name_v(ld, 2, 'SWIPL_LD').
env_name_v(cflags, 1, 'CFLAGS').
env_name_v(cflags, 2, 'SWIPL_CFLAGS').
env_name_v(module_ldflags, 1, 'LDSOFLAGS').
env_name_v(module_ldflags, 2, 'SWIPL_MODULE_LDFLAGS').
env_name_v(module_ext, 1, 'SOEXT').
env_name_v(module_ext, 2, 'SWIPL_MODULE_EXT').
env_name_v(prefix, 1, 'PREFIX').
env_name_v(prefix, 2, 'SWIPL_PREFIX').
%! prolog_library_dir(-Dir) is det.
%
% True when Dir is the directory holding ``libswipl.$SOEXT``
:- multifile
prolog:runtime_config/2.
prolog_library_dir(Dir) :-
    % A runtime config hook may state the library directory explicitly.
    prolog:runtime_config(c_libdir, Dir),
    !.
prolog_library_dir(Dir) :-
    % Otherwise derive it from the Prolog home, preferring the c_libdir
    % flag over the architecture-specific lib subdirectory.
    current_prolog_flag(home, Home),
    (   current_prolog_flag(c_libdir, Rel)
    ->  atomic_list_concat([Home, Rel], /, Dir)
    ;   current_prolog_flag(arch, Arch)
    ->  atomic_list_concat([Home, lib, Arch], /, Dir)
    ).
%! default_c_compiler(-CC) is semidet.
%
% Try to find a suitable C compiler for compiling packages with
% foreign code.
%
% @tbd Needs proper defaults for Windows. Find MinGW? Find MSVC?
default_c_compiler(CC) :-
    % First compiler from the preference list that exists on PATH.
    preferred_c_compiler(CC),
    has_program(path(CC), _),
    !.

%   Compilers tried, in order of preference.
preferred_c_compiler(gcc).
preferred_c_compiler(clang).
preferred_c_compiler(cc).
%! save_build_environment(+BuildDir, +Env) is det.
%
% Create a shell-script ``buildenv.sh`` that contains the build
% environment. This may be _sourced_ in the build directory to run the
% build steps outside Prolog. It may also be useful for debugging
% purposes.
save_build_environment(BuildDir, Env) :-
    directory_file_path(BuildDir, 'buildenv.sh', EnvFile),
    % setup_call_cleanup/3 guarantees the stream is closed even if
    % writing fails.
    setup_call_cleanup(
        open(EnvFile, write, Out),
        write_env_script(Out, Env),
        close(Out)).

%   write_env_script(+Out, +Env): emit a bourne-shell script that sets
%   and exports every Name=Value pair in Env.
write_env_script(Out, Env) :-
    format(Out,
           '# This file contains the environment that can be used to\n\c
            # build the foreign pack outside Prolog. This file must\n\c
            # be loaded into a bourne-compatible shell using\n\c
            #\n\c
            # $ source buildenv.sh\n\n',
           []),
    forall(member(Var=Value, Env),
           format(Out, '~w=\'~w\'\n', [Var, Value])),
    format(Out, '\nexport ', []),
    forall(member(Var=_, Env),
           format(Out, ' ~w', [Var])),
    format(Out, '\n', []).
%! prolog_install_prefix(-Prefix) is semidet.
%
% Return the directory that can be passed into `configure` or `cmake`
% to install executables and other related resources in a similar
% location as SWI-Prolog itself. Tries these rules:
%
% 1. If the Prolog flag `pack_prefix` at a writable directory, use
% this.
% 2. If the current executable can be found on $PATH and the parent
% of the directory of the executable is writable, use this.
% 3. If the user has a writable ``~/bin`` directory, use ``~``.
prolog_install_prefix(Prefix) :-
    % 1. An explicit pack_prefix flag, provided it is writable.
    current_prolog_flag(pack_prefix, Prefix),
    access_file(Prefix, write),
    !.
prolog_install_prefix(Prefix) :-
    % 2. Parent of the directory holding the running executable,
    %    mapping /usr to /usr/local (see local_prefix/2).
    current_prolog_flag(os_argv, [Name|_]),
    has_program(path(Name), EXE),
    file_directory_name(EXE, Bin),
    file_directory_name(Bin, Prefix0),
    (   local_prefix(Prefix0, Prefix1)
    ->  Prefix = Prefix1
    ;   Prefix = Prefix0
    ),
    access_file(Prefix, write),
    !.
prolog_install_prefix(Prefix) :-
    % 3. The user's home directory, provided ~/bin exists and is
    %    writable.
    expand_file_name(~, [UserHome]),
    directory_file_path(UserHome, bin, BinDir),
    exists_directory(BinDir),
    access_file(BinDir, write),
    !,
    Prefix = UserHome.

local_prefix('/usr', '/usr/local').
/*******************************
* RUN PROCESSES *
*******************************/
%! run_process(+Executable, +Argv, +Options) is det.
%
% Run Executable. Defined options:
%
% - directory(+Dir)
% Execute in the given directory
% - output(-Out)
% Unify Out with a list of codes representing stdout of the
% command. Otherwise the output is handed to print_message/2
% with level =informational=.
% - error(-Error)
% As output(Out), but messages are printed at level =error=.
% - env(+Environment)
% Environment passed to the new process.
%
% If Executable is path(Program) and we have an environment we make
% sure to use the ``PATH`` from this environment for searching
% `Program`.
run_process(path(Exe), Argv, Options) :-
    % With an explicit build environment, search for Exe on the PATH of
    % that environment (via the pack_build_path file-search alias).
    option(env(BuildEnv), Options),
    !,
    setup_call_cleanup(
        b_setval('$build_tool_env', BuildEnv),
        run_process(pack_build_path(Exe), Argv, Options),
        nb_delete('$build_tool_env')).
run_process(Executable, Argv, Options) :-
    % Unix with threads and no output capture requested: relay stdout
    % and stderr incrementally so that long builds show progress.
    \+ option(output(_), Options),
    \+ option(error(_), Options),
    current_prolog_flag(unix, true),
    current_prolog_flag(threads, true),
    !,
    process_create_options(Options, Extra),
    process_create(Executable, Argv,
                   [ stdout(pipe(Out)),
                     stderr(pipe(Error)),
                     process(PID)
                   | Extra
                   ]),
    thread_create(relay_output([output-Out, error-Error]), Id, []),
    process_wait(PID, Status),
    thread_join(Id, _),
    (   Status == exit(0)
    ->  true
    ;   throw(error(process_error(process(Executable, Argv), Status), _))
    ).
run_process(Executable, Argv, Options) :-
    % Portable fallback: collect all output first, then print it or
    % hand it back through the output/error options.
    process_create_options(Options, Extra),
    setup_call_cleanup(
        process_create(Executable, Argv,
                       [ stdout(pipe(Out)),
                         stderr(pipe(Error)),
                         process(PID)
                       | Extra
                       ]),
        ( read_stream_to_codes(Out, OutCodes, []),
          read_stream_to_codes(Error, ErrorCodes, []),
          process_wait(PID, Status)
        ),
        ( close(Out),
          close(Error)
        )),
    print_error(ErrorCodes, Options),
    print_output(OutCodes, Options),
    (   Status == exit(0)
    ->  true
    ;   throw(error(process_error(process(Executable, Argv), Status), _))
    ).

%   process_create_options(+Options, -Extra): map our options onto
%   process_create/3 options (working directory and environment).
process_create_options(Options, Extra) :-
    option(directory(Dir), Options, .),
    (   option(env(Env), Options)
    ->  Extra = [cwd(Dir), env(Env)]
    ;   Extra = [cwd(Dir)]
    ).
relay_output([]) :- !.
relay_output(Output) :-
    % Output is a list of Type-Stream pairs.  Wait until at least one
    % stream has data, relay it, and recurse until all are closed.
    pairs_values(Output, Streams),
    wait_for_input(Streams, Ready, infinite),
    relay(Ready, Output, NewOutputs),
    relay_output(NewOutputs).

%   relay(+ReadyStreams, +Outputs0, -Outputs): copy pending data from
%   each ready stream; remove streams that reached end-of-file.
relay([], Outputs, Outputs).
relay([H|T], Outputs0, Outputs) :-
    selectchk(Type-H, Outputs0, Outputs1),
    (   at_end_of_stream(H)
    ->  close(H),
        relay(T, Outputs1, Outputs)
    ;   read_pending_codes(H, Codes, []),
        relay(Type, Codes),
        relay(T, Outputs0, Outputs)
    ).

%   relay(+Type, +Codes): print one chunk at the level matching Type.
relay(error, Codes) :-
    set_prolog_flag(message_context, []),
    print_error(Codes, []).
relay(output, Codes) :-
    print_output(Codes, []).
print_output(OutCodes, Options) :-
option(output(Codes), Options),
!,
Codes = OutCodes.
print_output(OutCodes, _) :-
print_message(informational, build(process_output(OutCodes))).
%!  print_error(+OutCodes, +Options) is det.
%
%   If the caller asked for stderr via an error(Codes) option, unify
%   it; otherwise print the text at a severity level derived from its
%   content (see classify_message//1).
print_error(OutCodes, Options) :-
    option(error(Codes), Options),
    !,
    Codes = OutCodes.
print_error(OutCodes, _) :-
    phrase(classify_message(Level), OutCodes, _),
    print_message(Level, build(process_output(OutCodes))).
%!  classify_message(-Level)// is det.
%
%   Map process output onto a message severity: text containing
%   "fatal:" or "error:" is an error, "warning:" a warning, anything
%   else informational.  Clause order matters: the most severe match
%   wins; the last clause is the catch-all.
classify_message(error) -->
    string(_), "fatal:",
    !.
classify_message(error) -->
    string(_), "error:",
    !.
classify_message(warning) -->
    string(_), "warning:",
    !.
classify_message(informational) -->
    [].
%   Expose the directories of the ``PATH`` entry of the build
%   environment (stored in the '$build_tool_env' global variable by
%   has_program/3) as the `pack_build_path` file-search alias, so
%   absolute_file_name/3 can locate programs in that environment.
:- multifile user:file_search_path/2.
user:file_search_path(pack_build_path, Dir) :-
    nb_current('$build_tool_env', Env),
    memberchk('PATH'=Path, Env),
    path_sep(Sep),
    atomic_list_concat(Dirs, Sep, Path),
    member(Dir, Dirs),
    Dir \== ''.                             % skip empty PATH components
%! has_program(+Spec) is semidet.
%! has_program(+Spec, -Path) is semidet.
%! has_program(+Spec, -Path, +Env:list) is semidet.
%
% True when the OS has the program Spec at the absolute file location
% Path. Normally called as e.g. has_program(path(cmake), CMakeExe).
% The second allows passing in an environment as Name=Value pairs. If
% this contains a value for ``PATH``, this is used rather than the
% current path variable.
has_program(Prog) :-
    has_program(Prog, _).
has_program(Program, Path) :-
    has_program(Program, Path, []).
% When an environment carrying a PATH is supplied, resolve the program
% against that PATH through the pack_build_path alias.  The global
% variable consulted by user:file_search_path/2 is only set for the
% duration of the lookup.
has_program(path(Program), Path, Env), memberchk('PATH'=_, Env) =>
    setup_call_cleanup(
        b_setval('$build_tool_env', Env),
        has_program(pack_build_path(Program), Path, []),
        nb_delete('$build_tool_env')).
% A bare program name is searched on the (current or supplied) PATH.
has_program(Name, Path, Env), plain_program_name(Name) =>
    has_program(path(Name), Path, Env).
% Anything else is treated as a file spec for absolute_file_name/3.
has_program(Program, Path, _Env) =>
    exe_options(ExeOptions),
    absolute_file_name(Program, Path,
                       [ file_errors(fail)
                       | ExeOptions
                       ]).
%!  plain_program_name(+Name) is semidet.
%
%   True when Name is an atom that contains no directory separator,
%   i.e. a bare program name rather than a path.
plain_program_name(Name) :-
    atom(Name),
    \+ sub_atom(Name, _, _, _, '/').
%!  exe_options(-Options) is det.
%
%   absolute_file_name/3 options used to locate an executable.  On
%   Windows we try the usual executable extensions and only require
%   read access; on other platforms the file must be executable.
exe_options(Options) :-
    (   current_prolog_flag(windows, true)
    ->  Options = [ extensions(['',exe,com]), access(read) ]
    ;   Options = [ access(execute) ]
    ).
%!  path_sep(-Sep) is det.
%
%   Separator used between entries of the ``PATH`` environment
%   variable: `;` on Windows, `:` on POSIX systems.
path_sep(Sep) :-
    current_prolog_flag(windows, true),
    !,
    Sep = ';'.
path_sep(':').
/*******************************
* OS PATHS *
*******************************/
%!  setup_path is det.
%
%   On Windows, make sure `make` and `gcc` are reachable by extending
%   ``PATH`` if needed (see setup_path/1).  A no-op elsewhere.
setup_path :-
    current_prolog_flag(windows, true),
    !,
    setup_path([make, gcc]).
setup_path.
%! setup_path(+Programs) is det.
%
% Deals with specific platforms to add specific directories to
% ``$PATH`` such that we can find the tools. Currently deals with
% MinGW on Windows to provide `make` and `gcc`.
setup_path(Programs) :-
    maplist(has_program, Programs).         % all tools already reachable
setup_path(_) :-
    current_prolog_flag(windows, true),
    !,
    (   mingw_extend_path                   % try to add MinGW/MSYS bin dirs
    ->  true
    ;   print_message(error, build(no_mingw))
    ).
setup_path(_).                              % non-Windows: nothing we can do
%!  mingw_extend_path is semidet.
%
%   Prepend the MinGW and MSYS `bin` directories to ``PATH``.  Fails
%   if no MinGW installation (see mingw_root/1) or no MSYS directory
%   is found.
mingw_extend_path :-
    mingw_root(MinGW),
    directory_file_path(MinGW, bin, MinGWBinDir),
    atom_concat(MinGW, '/msys/*/bin', Pattern),
    expand_file_name(Pattern, MsysDirs),
    last(MsysDirs, MSysBinDir),             % last match; fails if none found
    prolog_to_os_filename(MinGWBinDir, WinDirMinGW),
    prolog_to_os_filename(MSysBinDir, WinDirMSYS),
    getenv('PATH', Path0),
    atomic_list_concat([WinDirMSYS, WinDirMinGW, Path0], ';', Path),
    setenv('PATH', Path).
%!  mingw_root(-MinGwRoot) is semidet.
%
%   Locate a MinGW installation as <Drive>:/MinGW, trying the drive
%   holding the running Prolog executable first, then `c` and `d`.
mingw_root(MinGwRoot) :-
    current_prolog_flag(executable, Exe),
    sub_atom(Exe, 1, _, _, :),              % Exe looks like <Drive>:/...
    sub_atom(Exe, 0, 1, _, PlDrive),        % drive letter of the executable
    Drives = [PlDrive,c,d],
    member(Drive, Drives),
    format(atom(MinGwRoot), '~a:/MinGW', [Drive]),
    exists_directory(MinGwRoot),
    !.
/*******************************
* MESSAGES *
*******************************/
% Hook build(Msg) terms into the Prolog message infrastructure and
% define the individual message bodies for this library.
:- multifile prolog:message//1.
prolog:message(build(Msg)) -->
    message(Msg).
message(no_mingw) -->
    [ 'Cannot find MinGW and/or MSYS.'-[] ].
% Child-process output is rendered one line per message element.
message(process_output(Codes)) -->
    { split_lines(Codes, Lines) },
    process_lines(Lines).
message(step_failed(Step)) -->
    [ 'No build plugin could execute build step ~p'-[Step] ].
%!  split_lines(+Codes, -Lines) is det.
%
%   Split a code list on newline characters.  A final fragment with
%   no trailing newline becomes the last line; input ending in a
%   newline yields no trailing empty line.
split_lines([], []) :- !.
split_lines(All, [Line1|More]) :-
    append(Line1, [0'\n|Rest], All),
    !,
    split_lines(Rest, More).
split_lines(Line, [Line]).
%!  process_lines(+Lines)// is det.
%
%   Emit each line as a ~s message element, separated by `nl`
%   (no newline after the last line).
process_lines([]) --> [].
process_lines([H|T]) -->
    [ '~s'-[H] ],
    (   {T==[]}
    ->  []
    ;   [nl], process_lines(T)
    ).
| josd/eye | eye-wasm/swipl-wasm/home/library/build/tools.pl | Perl | mit | 26,274 |
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Lucy::Index::Posting::ScorePosting;
use Lucy;
# Version string must track the Lucy distribution; the eval numifies
# it so version checks (e.g. 'use MODULE VERSION') compare correctly.
our $VERSION = '0.004000';
$VERSION = eval $VERSION;
# NOTE(review): no Perl-level methods are defined here — the
# implementation is presumably supplied by the compiled Lucy core
# loaded via 'use Lucy'; confirm against the XS bindings.
1;
__END__
| kidaa/lucy | perl/lib/Lucy/Index/Posting/ScorePosting.pm | Perl | apache-2.0 | 904 |
package Venn::CLI::Command::add_container;

# CLI command that defines a new container in Venn.  The container key
# comes from the command line; the location attributes below are all
# optional and are forwarded verbatim to the REST API.

use v5.14;

use Venn::CLI::Dependencies;
use Pod::Usage;
use Mouse;
use Venn::CLI::Types qw( NoEmptyStr );

extends qw(Venn::CLI::Command);
with qw(
    Venn::CLI::Role::GenericVerb
    Venn::CLI::Role::Container
);

# NOTE: 'required' is commented out on all attributes, so every
# location field may be omitted on the command line.
has 'organization' => (
    is            => 'ro',
    isa           => NoEmptyStr,
    #required      => 1,
    documentation => 'Organization, used for location',
);

has 'hub' => (
    is            => 'ro',
    isa           => NoEmptyStr,
    #required      => 1,
    documentation => 'Hub, used for location',
);

has 'continent' => (
    is            => 'ro',
    isa           => NoEmptyStr,
    #required      => 1,
    documentation => 'Continent, used for location',
);

has 'country' => (
    is            => 'ro',
    isa           => NoEmptyStr,
    #required      => 1,
    documentation => 'Country, used for location',
);

has 'campus' => (
    is            => 'ro',
    isa           => NoEmptyStr,
    #required      => 1,
    documentation => 'Campus, used for location',
);

has 'city' => (
    is            => 'ro',
    isa           => NoEmptyStr,
    #required      => 1,
    documentation => 'City, used for location',
);

has 'building' => (
    is            => 'ro',
    isa           => NoEmptyStr,
    #required      => 1,
    documentation => 'Building, used for location',
);

has 'rack_name' => (
    is            => 'ro',
    isa           => NoEmptyStr,
    #required      => 1,
    documentation => 'Rack for the filer',
);

# Create the container via the REST API.
#   $key : container name, appended to <verb>/<container_type>/
# Bails out early (via the Container role) if a container with that
# key already exists; otherwise PUTs the location attributes and
# prints the API response.
sub run {
    my ( $self, $key ) = @_;

    my $path = sprintf "%s/%s/%s", $self->verb_rest_uri, $self->container_type, $key;
    $self->bail_if_exists($path, 'container');

    my $response = $self->submit_to_api($path, 'PUT', {
        organization => $self->organization,
        hub          => $self->hub,
        continent    => $self->continent,
        country      => $self->country,
        campus       => $self->campus,
        city         => $self->city,
        building     => $self->building,
        rack_name    => $self->rack_name,
    });

    return $self->print_response($response);
}

no Mouse;
__PACKAGE__->meta->make_immutable;

__DATA__

=head1 NAME

Venn::CLI::Command::add_container - Defines a new container in Venn

=head1 AUTHOR

Venn Engineering

Josh Arenberg, Norbert Csongradi, Ryan Kupfer, Hai-Long Nguyen

=head1 LICENSE

Copyright 2013,2014,2015 Morgan Stanley

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

=cut
| hlnguyen21/venn-cli | lib/Venn/CLI/Command/add_container.pm | Perl | apache-2.0 | 3,368 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
=head1 NAME
Bio::EnsEMBL::DBSQL::GenomeContainer - Encapsulates all access to
genome related information
=head1 SYNOPSIS
use Bio::EnsEMBL::Registry;
Bio::EnsEMBL::Registry->load_registry_from_db(
-host => 'ensembldb.ensembl.org',
-user => 'anonymous'
);
$genome =
Bio::EnsEMBL::Registry->get_adaptor( "human", "core", "GenomeContainer" );
my $version = $genome->get_version;
my $ref_length = $genome->get_ref_length;
my $coord_systems = $genome->get_coord_systems;
=head1 DESCRIPTION
This module is responsible for fetching and storing genome-wide information.
Genome is an abstract object which contains information linking the species, the assembly and the ensembl annotation.
=head1 METHODS
=cut
package Bio::EnsEMBL::DBSQL::GenomeContainer;
use strict;
use warnings;
use Bio::EnsEMBL::Genome;
use Bio::EnsEMBL::DBSQL::DBAdaptor;
use Bio::EnsEMBL::DBSQL::BaseAdaptor;
use Bio::EnsEMBL::Utils::Exception qw( deprecate throw warning );
use Bio::EnsEMBL::Utils::Scalar qw( assert_ref );
use vars qw(@ISA);
@ISA = qw(Bio::EnsEMBL::DBSQL::BaseAdaptor);
=head2 new
Arg [...] : Superclass args. See Bio::EnsEMBL::DBSQL::BaseAdaptor
Description: Instantiates a Bio::EnsEMBL::DBSQL::GenomeContainer
Returntype : Bio::EnsEMBL::GenomeContainer
Exceptions : none
Caller : DBAdaptor
Status : Stable
=cut
sub new {
  my ( $class, @args ) = @_;
  # Construction is fully delegated to BaseAdaptor; no extra state is
  # initialised here (a cache could be added later).
  return $class->SUPER::new(@args);
}
=head2 store
Arg [1] : Statistic
The type of statistic to store
Arg [2] : Value
The corresponding value for the statistic
Arg [3] : (optional) Attribute
If more than one value exists for the statistics, it will be distinguished by its attribute
Example : $genome_adaptor->store('coding_cnt', 20769);
Description: Stores a genome statistic in the database
Returntype : The database identifier (dbID) of the newly stored genome statistic
Exceptions :
Caller : general
Status : Stable
=cut
sub store {
  my ($self, $statistic, $value, $attribute) = @_;
  # If the statistic already exists for this species, update it in
  # place; otherwise insert a new row.
  my $stats_id = $self->fetch_by_statistic($statistic, $attribute)->dbID;
  if (defined $stats_id) {
    $self->update($statistic, $value, $attribute);
  } else {
    my $db = $self->db();
    my $species_id = $db->species_id();
    # MySQL-specific INSERT ... SET syntax.
    my $store_genome_sql = q{
      INSERT INTO genome_statistics
      SET statistic = ?,
          value = ?,
          species_id = ?,
          timestamp = now()
    };
    if (defined $attribute) {
      $store_genome_sql .= ", attrib_type_id = ?";
    }
    my $sth = $self->prepare($store_genome_sql);
    $sth->bind_param(1, $statistic, SQL_VARCHAR);
    $sth->bind_param(2, $value, SQL_INTEGER);
    $sth->bind_param(3, $species_id, SQL_INTEGER);
    if (defined $attribute) {
      # Ensure the attrib_type row exists and bind its id.
      my $attribute_adaptor = $db->get_AttributeAdaptor();
      my $attribute_object = Bio::EnsEMBL::Attribute->new(-code => $attribute);
      my $attribute_type_id = $attribute_adaptor->_store_type($attribute_object);
      $sth->bind_param(4, $attribute_type_id, SQL_VARCHAR);
    }
    $sth->execute();
    $sth->finish();
    # NOTE(review): mysql_insertid is read after finish(); DBD::mysql
    # appears to retain it, but confirm this is reliable.
    $stats_id = $sth->{'mysql_insertid'};
  }
  return $stats_id;
}
=head2 update
Arg [1] : Statistic
The type of statistic to update
Arg [2] : Value
The corresponding value for the statistic
Arg [3] : (optional) Attribute
If more than one value exists for the statistics, it will be distinguished by its attribute
Example : $genome_adaptor->update('coding_cnt', 20769);
Description: Updates an existing genome statistic in the database
Returntype : none
Exceptions :
Caller : general
Status : Stable
=cut
sub update {
  my ($self, $statistic, $value, $attribute) = @_;
  my $db = $self->db();
  my $update_genome_sql = q{
    UPDATE genome_statistics
    SET value = ?,
        timestamp = now()
  };
  if (defined $attribute) {
    $update_genome_sql .= ', attrib_type_id = ?';
  }
  $update_genome_sql .= ' WHERE statistic = ? and species_id = ?';
  my $sth = $self->prepare($update_genome_sql);
  $sth->bind_param(1, $value, SQL_INTEGER);
  # Bind positions shift by one when the optional attrib_type_id
  # placeholder is present, hence the running $increment counter.
  my $increment = 2;
  if (defined $attribute) {
    my $attribute_adaptor = $db->get_AttributeAdaptor();
    my $attribute_object = Bio::EnsEMBL::Attribute->new(-code => $attribute);
    my $attribute_type_id = $attribute_adaptor->_store_type($attribute_object);
    $sth->bind_param($increment, $attribute_type_id, SQL_VARCHAR);
    $increment++;
  }
  $sth->bind_param($increment++, $statistic, SQL_VARCHAR);
  $sth->bind_param($increment, $db->species_id(), SQL_INTEGER);
  $sth->execute();
  $sth->finish();
}
=head2 _meta_container
Arg [1] : none
Example : $meta_container = $genome->_meta_container();
Description: Internal method to return a MetaContainer object for the genome
Returntype : Bio::EnsEMBL::DBSQL::MetaContainer
Exceptions : none
Caller : general
Status : Stable
=cut
sub _meta_container {
  my ($self) = @_;
  # Convenience accessor for this database's MetaContainer adaptor.
  return $self->db->get_adaptor('MetaContainer');
}
=head2 get_version
Arg [1] : (optional) assembly version
Example : $version = $genome->get_version();
Description: Getter/Setter for the assembly version
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_version {
  my ( $self, $new_version ) = @_;
  # Setter behaviour when a value is supplied.
  $self->{'version'} = $new_version if defined $new_version;
  # Lazily default to the default coord-system version.
  unless ( defined $self->{'version'} ) {
    my $cs_adaptor = $self->db()->get_adaptor('CoordSystem');
    $self->{'version'} = $cs_adaptor->get_default_version;
  }
  return $self->{'version'};
}
=head2 get_accession
Arg [1] : (optional) assembly accession
Example : $accession = $genome->get_accession();
Description: Getter/setter for the accession of the assembly currently used
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_accession {
  my ( $self, $new_value ) = @_;
  # Setter behaviour when a value is supplied.
  $self->{'accession'} = $new_value if defined $new_value;
  # Lazily populate from the 'assembly.accession' meta key.
  $self->{'accession'} =
    $self->_meta_container->single_value_by_key('assembly.accession')
    unless defined $self->{'accession'};
  return $self->{'accession'};
}
=head2 get_assembly_name
Arg [1] : (optional) assembly name
Example : $assembly_name = $genome->get_assembly_name();
Description: Getter/setter for the name of the assembly currently used
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_assembly_name {
  my ( $self, $new_value ) = @_;
  # Setter behaviour when a value is supplied.
  $self->{'assembly_name'} = $new_value if defined $new_value;
  # Lazily populate from the 'assembly.name' meta key.
  $self->{'assembly_name'} =
    $self->_meta_container->single_value_by_key('assembly.name')
    unless defined $self->{'assembly_name'};
  return $self->{'assembly_name'};
}
=head2 get_assembly_date
Arg [1] : (optional) assembly date
Example : $assembly_date = $genome->get_assembly_date();
Description: Getter/setter for the date of the assembly currently used
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_assembly_date {
  my ( $self, $new_value ) = @_;
  # Setter behaviour when a value is supplied.
  $self->{'assembly_date'} = $new_value if defined $new_value;
  # Lazily populate from the 'assembly.date' meta key.
  $self->{'assembly_date'} =
    $self->_meta_container->single_value_by_key('assembly.date')
    unless defined $self->{'assembly_date'};
  return $self->{'assembly_date'};
}
=head2 get_genebuild_start_date
Arg [1] : (optional) genebuild start date
Example : $genebuild_start_date = $genome->get_genebuild_start_date();
Description: Getter/setter for the start date of the genebuild currently used
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_genebuild_start_date {
  my ( $self, $new_value ) = @_;
  # Setter behaviour when a value is supplied.
  $self->{'genebuild_start_date'} = $new_value if defined $new_value;
  # Lazily populate from the 'genebuild.start_date' meta key.
  $self->{'genebuild_start_date'} =
    $self->_meta_container->single_value_by_key('genebuild.start_date')
    unless defined $self->{'genebuild_start_date'};
  return $self->{'genebuild_start_date'};
}
=head2 get_genebuild_method
Arg [1] : (optional) genebuild start date
Example : $genebuild_method = $genome->get_genebuild_method();
Description: Getter/setter for the method of the genebuild currently used
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_genebuild_method {
  my ( $self, $new_value ) = @_;
  # Setter behaviour when a value is supplied.
  $self->{'genebuild_method'} = $new_value if defined $new_value;
  # Lazily populate from the 'genebuild.method' meta key.
  $self->{'genebuild_method'} =
    $self->_meta_container->single_value_by_key('genebuild.method')
    unless defined $self->{'genebuild_method'};
  return $self->{'genebuild_method'};
}
=head2 get_genebuild_initial_release_date
Arg [1] : (optional) genebuild initial release date
Example : $genebuild_initial_release_date = $genome->get_initial_release_date();
Description: Getter/setter for the initial release date of the genebuild currently used
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_genebuild_initial_release_date {
  my ( $self, $new_value ) = @_;
  # Setter behaviour when a value is supplied.
  $self->{'genebuild_initial_release_date'} = $new_value if defined $new_value;
  # Lazily populate from the 'genebuild.initial_release_date' meta key.
  $self->{'genebuild_initial_release_date'} =
    $self->_meta_container->single_value_by_key('genebuild.initial_release_date')
    unless defined $self->{'genebuild_initial_release_date'};
  return $self->{'genebuild_initial_release_date'};
}
=head2 get_genebuild_last_geneset_update
Arg [1] : (optional) genebuild last geneset update
Example : $genebuild_last_geneset_update = $genome->get_last_geneset_update();
Description: Getter/setter for the last geneset update of the genebuild currently used
Returntype : string
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_genebuild_last_geneset_update {
  my ( $self, $new_value ) = @_;
  # Setter behaviour when a value is supplied.
  $self->{'genebuild_last_geneset_update'} = $new_value if defined $new_value;
  # Lazily populate from the 'genebuild.last_geneset_update' meta key.
  $self->{'genebuild_last_geneset_update'} =
    $self->_meta_container->single_value_by_key('genebuild.last_geneset_update')
    unless defined $self->{'genebuild_last_geneset_update'};
  return $self->{'genebuild_last_geneset_update'};
}
=head2 _get_length
Arg [1] : none
Example : $length = $genome->_get_length('toplevel');
Description: Internal method to return the length for a type of slices
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
# Internal: total length (bp) of all slices for coord system $cs_name.
# Returns 0 (not undef) when no slices exist.
sub _get_length {
  my ($self, $cs_name) = @_;
  my $slice_adaptor = $self->db->get_adaptor('Slice');
  my $seqlevel = $slice_adaptor->fetch_all($cs_name);
  # Start from 0 so an empty slice list yields 0 rather than undef.
  my $count = 0;
  foreach my $seq (@$seqlevel) {
    $count += $seq->length();
  }
  return $count;
}
=head2 get_ref_length
Arg [1] : (optional) golden path length
Example : $ref_length = $genome->get_ref_length();
Description: Getter/setter for the golden path of the assembly currently used
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_ref_length {
  my ( $self, $new_value ) = @_;
  # Setter behaviour when a value is supplied.
  $self->{'ref_length'} = $new_value if defined $new_value;
  # Lazily fetch the 'ref_length' statistic on first access.
  $self->{'ref_length'} = $self->fetch_by_statistic('ref_length')->value()
    unless defined $self->{'ref_length'};
  return $self->{'ref_length'};
}
=head2 get_total_length
Arg [1] : (optional) base pair length
Example : $total_length = $genome->get_total_length();
Description: Getter/setter for the total length (number of base pairs) for the assembly currently used
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_total_length {
  my ( $self, $new_value ) = @_;
  # Setter behaviour when a value is supplied.
  $self->{'total_length'} = $new_value if defined $new_value;
  # Lazily fetch the 'total_length' statistic on first access.
  $self->{'total_length'} = $self->fetch_by_statistic('total_length')->value
    unless defined $self->{'total_length'};
  return $self->{'total_length'};
}
=head2 get_toplevel
Arg [1] : none
Example : $toplevel = $genome->get_toplevel();
Description: Returns the toplevel for the assembly currently used
Returntype : ListRef of Bio::EnsEMBL::Slice
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_toplevel {
  my ($self) = @_;
  my $slice_adaptor = $self->db->get_adaptor('Slice');
  # NOTE(review): the trailing arguments mirror the original call;
  # confirm their semantics against Bio::EnsEMBL::DBSQL::SliceAdaptor.
  $self->{'toplevel'} = $slice_adaptor->fetch_all( 'toplevel', undef, undef, 1 );
  return $self->{'toplevel'};
}
=head2 get_karyotype
Arg [1] : none
Example : $karyotype = $genome->get_karyotype();
Description: Returns the karyotype for the assembly currently used
Returntype : ListRef of Bio::EnsEMBL::Slice
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_karyotype {
  my ($self) = @_;
  # Fetch (and cache) the karyotype slices for the current assembly.
  my $slice_adaptor = $self->db->get_adaptor('Slice');
  $self->{'karyotype'} = $slice_adaptor->fetch_all_karyotype;
  return $self->{'karyotype'};
}
=head2 get_coord_systems
Arg [1] : none
Example : $coord_systems = $genome->get_coord_systems();
Description: Returns the coord_systems for the assembly currently used
Returntype : ListRef of Bio::EnsEMBL::CoordSystem
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_coord_systems {
  my ( $self, $all ) = @_;
  my $cs_adaptor = $self->db->get_adaptor('CoordSystem');
  if ($all) {
    # Every coord system, regardless of assembly version.
    $self->{'coord_systems'} = $cs_adaptor->fetch_all();
  }
  else {
    # Restrict to the coord systems of the current assembly version.
    $self->{'coord_systems'} =
      $cs_adaptor->fetch_all_by_version( $self->get_version() );
  }
  return $self->{'coord_systems'};
}
=head2 _get_count
Arg [1] : none
Example : $count = $genome->_get_count('coding_cnt');
Description: Internal method to return a count for a given attribute code
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub _get_count {
  my ($self, $code, $attribute) = @_;
  # Look up the statistic row and return its stored value.
  return $self->fetch_by_statistic($code, $attribute)->value();
}
=head2 get_count
Arg [1] : none
Example : $count = $genome->get_count('coding_cnt');
Description: Retrieve a count for a given attribute code
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
# Public counterpart of _get_count(): retrieve the value stored for a
# given statistic code (optionally disambiguated by attribute code).
# Delegates to _get_count() so the lookup logic lives in one place.
sub get_count {
  my ($self, $code, $attribute) = @_;
  return $self->_get_count($code, $attribute);
}
=head2 fetch_all_statistics
Arg [1] : none
Example : $list = $genome->fetch_all_statistics();
Description: Retrieve all entries stored in the genome_statistics table
Returntype : ArrayRef of Bio::EnsEMBL::Genome
Exceptions : none
Caller : general
Status : Stable
=cut
# Fetch every genome_statistics row for the current species, joined to
# its attrib_type, as Bio::EnsEMBL::Genome objects (via _obj_from_sth).
# Removed the unused 'my @results' and 'my $db' declarations.
sub fetch_all_statistics {
  my ($self) = @_;
  my $species_id = $self->db->species_id();
  my $sql = q{
    SELECT genome_statistics_id, statistic, value, species_id, code, name, description
    FROM genome_statistics, attrib_type
    WHERE genome_statistics.attrib_type_id = attrib_type.attrib_type_id
    AND species_id=?
  };
  my $sth = $self->prepare($sql);
  $sth->bind_param(1, $species_id, SQL_INTEGER);
  $sth->execute();
  my $results = $self->_obj_from_sth($sth);
  $sth->finish();
  return $results;
}
=head2 fetch_by_statistic
Arg [1] : string $statistic
Example : $results = $genome->fetch_by_statistic('coding_cnt');
Description: Returns a Genome object for a given statistic
Returntype : Bio::EnsEMBL::Genome
Exceptions : none
Caller : general
Status : Stable
=cut
sub fetch_by_statistic {
  my ($self, $statistic_name, $attribute) = @_;
  my $db = $self->db;
  my $fetch_sql = q{
    SELECT genome_statistics_id, statistic, value, species_id, code, name, description
    FROM genome_statistics, attrib_type
    WHERE genome_statistics.attrib_type_id = attrib_type.attrib_type_id
    AND statistic = ? AND species_id=?
  };
  # Optionally narrow to a specific attrib_type code.
  if (defined $attribute) {
    $fetch_sql .= " AND code = ?";
  }
  my $sth = $self->prepare($fetch_sql);
  $sth->bind_param(1, $statistic_name, SQL_VARCHAR);
  $sth->bind_param(2, $self->db->species_id, SQL_INTEGER);
  if (defined $attribute) {
    $sth->bind_param(3, $attribute, SQL_VARCHAR);
  }
  $sth->execute();
  my ($dbID, $statistic, $value, $species_id, $code, $name, $desc);
  $sth->bind_columns(\$dbID, \$statistic, \$value, \$species_id, \$code, \$name, \$desc);
  # fetchrow_array fills the bound variables as a side effect; the
  # @results list itself is never used.  If no row matches, all bound
  # variables stay undef and an "empty" Genome object is returned.
  my @results = $sth->fetchrow_array;
  $sth->finish();
  return Bio::EnsEMBL::Genome->new_fast({'dbID' => $dbID,
                                         'statistic' => $statistic,
                                         'code' => $code,
                                         'name' => $name,
                                         'description' => $desc,
                                         'value' => $value});
}
=head2 is_empty
Arg [1] : none
Example : $results = $genome->is_empty;
Description: Boolean to check if there is data in the genome container
Returntype : Boolean
Exceptions : none
Caller : general
Status : Stable
=cut
sub is_empty {
  my $self = shift;
  my $db = $self->db;
  # NOTE(review): $species_id is fetched but never used — the count
  # below spans ALL species in the database; confirm whether a
  # 'WHERE species_id = ?' filter was intended.
  my $species_id = $self->db->species_id();
  my $is_empty = 1;
  my $count_sql = q{
    SELECT count(*) FROM genome_statistics
  };
  my $sth = $self->prepare($count_sql);
  $sth->execute();
  # In boolean context fetchrow yields the count; any row means data.
  if ($sth->fetchrow()) {
    $is_empty = 0;
  }
  $sth->finish();
  return $is_empty;
}
=head2 get_attrib
Arg [1] : statistic
Example : $results = $genome->_get_attrib('coding_cnt');
Description: Returns the attribute object for a given statistic
Returntype : Bio::EnsEMBL::Attrib
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_attrib {
  my ($self, $statistic) = @_;
  my $db = $self->db();
  my $attribute_adaptor = $db->get_adaptor('attribute');
  my @attribs = @{ $attribute_adaptor->fetch_by_code($statistic) };
  # Indices 1..3 are used; index 0 (presumably the attrib_type_id) is
  # skipped — confirm against AttributeAdaptor::fetch_by_code.
  my $attrib = Bio::EnsEMBL::Attribute->new(
    -code => $attribs[1],
    -name => $attribs[2],
    -description => $attribs[3]
  );
  return $attrib;
}
=head2 get_coding_count
Arg [1] : (optional) coding count
Example : $coding_count = $genome->get_coding_count();
Description: Getter/setter for the number of coding genes in the current build
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_coding_count {
  my ( $self, $new_value ) = @_;
  # Setter behaviour when a value is supplied.
  $self->{'coding_count'} = $new_value if defined $new_value;
  # Lazily fetch the 'coding_cnt' statistic on first access.
  $self->{'coding_count'} = $self->_get_count('coding_cnt')
    unless defined $self->{'coding_count'};
  return $self->{'coding_count'};
}
=head2 get_rcoding_count
Arg [1] : (optional) readthrough coding count
Example : $rcoding_count = $genome->get_rcoding_count();
Description: Getter/setter for the number of readthrough coding genes in the current build
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_rcoding_count {
  my ( $self, $new_value ) = @_;
  # Setter behaviour when a value is supplied.
  $self->{'rcoding_count'} = $new_value if defined $new_value;
  # Lazily fetch the 'coding_rcnt' statistic on first access.
  $self->{'rcoding_count'} = $self->_get_count('coding_rcnt')
    unless defined $self->{'rcoding_count'};
  return $self->{'rcoding_count'};
}
=head2 get_snoncoding_count
Arg [1] : (optional) short non coding count
Example : $snoncoding_count = $genome->get_snoncoding_count();
Description: Getter/setter for the number of short non coding genes in the current build
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_snoncoding_count {
  my ( $self, $new_value ) = @_;
  # Setter behaviour when a value is supplied.
  $self->{'snoncoding_count'} = $new_value if defined $new_value;
  # Lazily fetch the 'noncoding_cnt_s' statistic on first access.
  $self->{'snoncoding_count'} = $self->_get_count('noncoding_cnt_s')
    unless defined $self->{'snoncoding_count'};
  return $self->{'snoncoding_count'};
}
=head2 get_rsnoncoding_count
Arg [1] : (optional) readthrough short non coding count
Example : $rsnoncoding_count = $genome->get_rsnoncoding_count();
Description: Getter/setter for the number of readthrough short non coding genes in the current build
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_rsnoncoding_count {
  my ( $self, $new_value ) = @_;
  # Setter behaviour when a value is supplied.
  $self->{'rsnoncoding_count'} = $new_value if defined $new_value;
  # Lazily fetch the 'noncoding_rcnt_s' statistic on first access.
  $self->{'rsnoncoding_count'} = $self->_get_count('noncoding_rcnt_s')
    unless defined $self->{'rsnoncoding_count'};
  return $self->{'rsnoncoding_count'};
}
=head2 get_mnoncoding_count
Arg [1] : (optional) miscellaneous non coding count
Example : $mnoncoding_count = $genome->get_mnoncoding_count();
Description: Getter/setter for the number of miscellaneous non coding genes in the current build
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_mnoncoding_count {
  my ( $self, $new_value ) = @_;
  # Setter behaviour when a value is supplied.
  $self->{'mnoncoding_count'} = $new_value if defined $new_value;
  # Lazily fetch the 'noncoding_cnt_m' statistic on first access.
  $self->{'mnoncoding_count'} = $self->_get_count('noncoding_cnt_m')
    unless defined $self->{'mnoncoding_count'};
  return $self->{'mnoncoding_count'};
}
=head2 get_rmnoncoding_count
Arg [1] : (optional) readthrough miscellaneous non coding count
Example : $rmnoncoding_count = $genome->get_rmnoncoding_count();
Description: Getter/setter for the number of readthrough miscellaneous non coding genes in the current build
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_rmnoncoding_count {
  my ( $self, $new_value ) = @_;
  # Setter behaviour when a value is supplied.
  $self->{'rmnoncoding_count'} = $new_value if defined $new_value;
  # Lazily fetch the 'noncoding_rcnt_m' statistic on first access.
  $self->{'rmnoncoding_count'} = $self->_get_count('noncoding_rcnt_m')
    unless defined $self->{'rmnoncoding_count'};
  return $self->{'rmnoncoding_count'};
}
=head2 get_lnoncoding_count
Arg [1] : (optional) long non coding count
Example : $lnoncoding_count = $genome->get_lnoncoding_count();
Description: Getter/setter for the number of long non coding genes in the current build
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_lnoncoding_count {
  my ( $self, $new_value ) = @_;
  # Setter behaviour when a value is supplied.
  $self->{'lnoncoding_count'} = $new_value if defined $new_value;
  # Lazily fetch the 'noncoding_cnt_l' statistic on first access.
  $self->{'lnoncoding_count'} = $self->_get_count('noncoding_cnt_l')
    unless defined $self->{'lnoncoding_count'};
  return $self->{'lnoncoding_count'};
}
=head2 get_rlnoncoding_count
Arg [1] : (optional) readthrough long non coding count
Example : $rlnoncoding_count = $genome->get_rlnoncoding_count();
Description: Getter/setter for the number of readthrough long non coding genes in the current build
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_rlnoncoding_count {
  my ( $self, $new_value ) = @_;
  # Setter behaviour when a value is supplied.
  $self->{'rlnoncoding_count'} = $new_value if defined $new_value;
  # Lazily fetch the 'noncoding_rcnt_l' statistic on first access.
  $self->{'rlnoncoding_count'} = $self->_get_count('noncoding_rcnt_l')
    unless defined $self->{'rlnoncoding_count'};
  return $self->{'rlnoncoding_count'};
}
=head2 get_pseudogene_count
Arg [1] : (optional) pseudogene count
Example : $pseudogene_count = $genome->get_pseudogene_count();
Description: Getter/setter for the number of pseudogenes in the current build
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_pseudogene_count {
  my ( $self, $new_value ) = @_;
  # Setter behaviour when a value is supplied.
  $self->{'pseudogene_count'} = $new_value if defined $new_value;
  # Lazily fetch the 'pseudogene_cnt' statistic on first access.
  $self->{'pseudogene_count'} = $self->_get_count('pseudogene_cnt')
    unless defined $self->{'pseudogene_count'};
  return $self->{'pseudogene_count'};
}
=head2 get_rpseudogene_count
Arg [1] : (optional) readthrough pseudogene count
Example : $rpseudogene_count = $genome->get_rpseudogene_count();
Description: Getter/setter for the number of readthrough pseudogenes in the current build
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_rpseudogene_count {
  my ( $self, $new_value ) = @_;
  # Setter behaviour when a value is supplied.
  $self->{'rpseudogene_count'} = $new_value if defined $new_value;
  # Lazily fetch the 'pseudogene_rcnt' statistic on first access.
  $self->{'rpseudogene_count'} = $self->_get_count('pseudogene_rcnt')
    unless defined $self->{'rpseudogene_count'};
  return $self->{'rpseudogene_count'};
}
=head2 get_alt_coding_count
Arg [1] : (optional) alt coding count
Example : $alt_coding_count = $genome->get_alt_coding_count();
Description: Getter/setter for the number of coding genes on alternate sequences
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_alt_coding_count {
  my ( $self, $new_value ) = @_;
  # Setter behaviour when a value is supplied.
  $self->{'alt_coding_count'} = $new_value if defined $new_value;
  # Lazily fetch the 'coding_acnt' statistic on first access.
  $self->{'alt_coding_count'} = $self->_get_count('coding_acnt')
    unless defined $self->{'alt_coding_count'};
  return $self->{'alt_coding_count'};
}
=head2 get_alt_rcoding_count
Arg [1] : (optional) alt readthrough coding count
Example : $alt_rcoding_count = $genome->get_alt_rcoding_count();
Description: Getter/setter for the number of readthrough coding genes on alternate sequences
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_alt_rcoding_count {
  my ( $self, $new_value ) = @_;
  # Setter behaviour when a value is supplied.
  $self->{'alt_rcoding_count'} = $new_value if defined $new_value;
  # Lazily fetch the 'coding_racnt' statistic on first access.
  $self->{'alt_rcoding_count'} = $self->_get_count('coding_racnt')
    unless defined $self->{'alt_rcoding_count'};
  return $self->{'alt_rcoding_count'};
}
=head2 get_alt_snoncoding_count
Arg [1] : (optional) alt short non coding count
Example : $alt_snoncoding_count = $genome->get_alt_snoncoding_count();
Description: Getter/setter for the number of short non coding genes on alternate sequences
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_alt_snoncoding_count {
  # Getter/setter for the cached count of short non-coding genes on
  # alternate sequences; lazily fetches the 'noncoding_acnt_s' statistic.
  my $self      = shift;
  my $new_count = shift;
  $self->{'alt_snoncoding_count'} = $new_count if defined $new_count;
  unless (defined $self->{'alt_snoncoding_count'}) {
    $self->{'alt_snoncoding_count'} = $self->_get_count('noncoding_acnt_s');
  }
  return $self->{'alt_snoncoding_count'};
}
=head2 get_alt_rsnoncoding_count
Arg [1] : (optional) alt readthrough short non coding count
Example : $alt_rsnoncoding_count = $genome->get_alt_rsnoncoding_count();
Description: Getter/setter for the number of readthrough short non coding genes on alternate sequences
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_alt_rsnoncoding_count {
  # Getter/setter for the cached count of readthrough short non-coding genes
  # on alternate sequences; lazily fetches the 'noncoding_racnt_s' statistic.
  my $self      = shift;
  my $new_count = shift;
  $self->{'alt_rsnoncoding_count'} = $new_count if defined $new_count;
  unless (defined $self->{'alt_rsnoncoding_count'}) {
    $self->{'alt_rsnoncoding_count'} = $self->_get_count('noncoding_racnt_s');
  }
  return $self->{'alt_rsnoncoding_count'};
}
=head2 get_alt_mnoncoding_count
Arg [1] : (optional) alt miscellaneous non coding count
Example : $alt_mnoncoding_count = $genome->get_alt_mnoncoding_count();
Description: Getter/setter for the number of miscellaneous non coding genes on alternate sequences
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_alt_mnoncoding_count {
  # Getter/setter for the cached count of miscellaneous non-coding genes on
  # alternate sequences; lazily fetches the 'noncoding_acnt_m' statistic.
  my $self      = shift;
  my $new_count = shift;
  $self->{'alt_mnoncoding_count'} = $new_count if defined $new_count;
  unless (defined $self->{'alt_mnoncoding_count'}) {
    $self->{'alt_mnoncoding_count'} = $self->_get_count('noncoding_acnt_m');
  }
  return $self->{'alt_mnoncoding_count'};
}
=head2 get_alt_rmnoncoding_count
Arg [1] : (optional) alt readthrough miscellaneous non coding count
Example : $alt_rmnoncoding_count = $genome->get_alt_rmnoncoding_count();
Description: Getter/setter for the number of readthrough miscellaneous non coding genes on alternate sequences
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_alt_rmnoncoding_count {
  # Getter/setter for the cached count of readthrough miscellaneous
  # non-coding genes on alternate sequences; lazily fetches the
  # 'noncoding_racnt_m' statistic.
  my $self      = shift;
  my $new_count = shift;
  $self->{'alt_rmnoncoding_count'} = $new_count if defined $new_count;
  unless (defined $self->{'alt_rmnoncoding_count'}) {
    $self->{'alt_rmnoncoding_count'} = $self->_get_count('noncoding_racnt_m');
  }
  return $self->{'alt_rmnoncoding_count'};
}
=head2 get_alt_lnoncoding_count
Arg [1] : (optional) alt long non coding count
Example : $alt_lnoncoding_count = $genome->get_alt_lnoncoding_count();
Description: Getter/setter for the number of long non coding genes on alternate sequences
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_alt_lnoncoding_count {
  # Getter/setter for the cached count of long non-coding genes on
  # alternate sequences; lazily fetches the 'noncoding_acnt_l' statistic.
  my $self      = shift;
  my $new_count = shift;
  $self->{'alt_lnoncoding_count'} = $new_count if defined $new_count;
  unless (defined $self->{'alt_lnoncoding_count'}) {
    $self->{'alt_lnoncoding_count'} = $self->_get_count('noncoding_acnt_l');
  }
  return $self->{'alt_lnoncoding_count'};
}
=head2 get_alt_rlnoncoding_count
Arg [1] : (optional) alt readthrough long non coding count
Example : $alt_rlnoncoding_count = $genome->get_alt_rlnoncoding_count();
Description: Getter/setter for the number of readthrough long non coding genes on alternate sequences
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_alt_rlnoncoding_count {
  # Getter/setter for the cached count of readthrough long non-coding genes
  # on alternate sequences; lazily fetches the 'noncoding_racnt_l' statistic.
  my $self      = shift;
  my $new_count = shift;
  $self->{'alt_rlnoncoding_count'} = $new_count if defined $new_count;
  unless (defined $self->{'alt_rlnoncoding_count'}) {
    $self->{'alt_rlnoncoding_count'} = $self->_get_count('noncoding_racnt_l');
  }
  return $self->{'alt_rlnoncoding_count'};
}
=head2 get_alt_pseudogene_count
Arg [1] : (optional) alt pseudogene count
Example : $alt_pseudogene_count = $genome->get_alt_pseudogene_count();
Description: Getter/setter for the number of pseudogenes on alternate sequences
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_alt_pseudogene_count {
  # Getter/setter for the cached count of pseudogenes on alternate
  # sequences; lazily fetches the 'pseudogene_acnt' statistic.
  my $self      = shift;
  my $new_count = shift;
  $self->{'alt_pseudogene_count'} = $new_count if defined $new_count;
  unless (defined $self->{'alt_pseudogene_count'}) {
    $self->{'alt_pseudogene_count'} = $self->_get_count('pseudogene_acnt');
  }
  return $self->{'alt_pseudogene_count'};
}
=head2 get_alt_rpseudogene_count
Arg [1] : (optional) alt readthrough pseudogene count
Example : $alt_rpseudogene_count = $genome->get_alt_rpseudogene_count();
Description: Getter/setter for the number of readthrough pseudogenes on alternate sequences
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_alt_rpseudogene_count {
  # Getter/setter for the cached count of readthrough pseudogenes on
  # alternate sequences; lazily fetches the 'pseudogene_racnt' statistic.
  my $self      = shift;
  my $new_count = shift;
  $self->{'alt_rpseudogene_count'} = $new_count if defined $new_count;
  unless (defined $self->{'alt_rpseudogene_count'}) {
    $self->{'alt_rpseudogene_count'} = $self->_get_count('pseudogene_racnt');
  }
  return $self->{'alt_rpseudogene_count'};
}
=head2 get_short_variation_count
Arg [1] : (optional) short variation count
Example : $short_variation_count = $genome->get_short_variation_count();
Description: Getter/setter for the number of short variants in the current build
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_short_variation_count {
  # Getter/setter for the cached count of short variants in the current
  # build; lazily fetches the 'SNPCount' statistic on first read.
  my $self      = shift;
  my $new_count = shift;
  $self->{'short_variation_count'} = $new_count if defined $new_count;
  unless (defined $self->{'short_variation_count'}) {
    $self->{'short_variation_count'} = $self->_get_count('SNPCount');
  }
  return $self->{'short_variation_count'};
}
=head2 get_prediction_count
Arg [1] : (optional) logic_name
Example : $prediction_count = $genome->get_prediction_count();
Description: Getter for the number of predicted genes in the current build
Can be restricted to a given analysis
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_prediction_count {
  # Count predicted genes in the current build, optionally restricted to a
  # single analysis via its logic_name. Not cached, unlike the other getters.
  my $self       = shift;
  my $logic_name = shift;
  return $self->_get_count('PredictionTranscript', $logic_name);
}
=head2 get_structural_variation_count
Arg [1] : (optional) structural variation count
Example : $structural_variation_count = $genome->get_structural_variation_count();
Description: Getter/setter for the number of structural variations in the current build
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_structural_variation_count {
  # Getter/setter for the cached count of structural variations in the
  # current build; lazily fetches the 'StructuralVariation' statistic.
  my $self      = shift;
  my $new_count = shift;
  $self->{'structural_variation_count'} = $new_count if defined $new_count;
  unless (defined $self->{'structural_variation_count'}) {
    $self->{'structural_variation_count'} = $self->_get_count('StructuralVariation');
  }
  return $self->{'structural_variation_count'};
}
=head2 get_transcript_count
Arg [1] : (optional) transcript count
Example : $transcript_count = $genome->get_transcript_count();
Description: Getter/setter for the number of transcripts in the current build
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_transcript_count {
  # Getter/setter for the cached transcript count of the current build;
  # lazily fetches the 'transcript' statistic on first read.
  my $self      = shift;
  my $new_count = shift;
  $self->{'transcript_count'} = $new_count if defined $new_count;
  unless (defined $self->{'transcript_count'}) {
    $self->{'transcript_count'} = $self->_get_count('transcript');
  }
  return $self->{'transcript_count'};
}
=head2 get_alt_transcript_count
Arg [1] : (optional) alt transcript count
Example : $alt_transcript_count = $genome->get_alt_transcript_count();
Description: Getter/setter for the number of transcripts on alternate sequences in the current build
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_alt_transcript_count {
  # Getter/setter for the cached count of transcripts on alternate
  # sequences; lazily fetches the 'alt_transcript' statistic.
  my $self      = shift;
  my $new_count = shift;
  $self->{'alt_transcript_count'} = $new_count if defined $new_count;
  unless (defined $self->{'alt_transcript_count'}) {
    $self->{'alt_transcript_count'} = $self->_get_count('alt_transcript');
  }
  return $self->{'alt_transcript_count'};
}
=head2 has_karyotype
Arg : None
Example : $has_karyotype = $genome->has_karyotype();
Description: Boolean indicating whether a genome has a karyotype (ie chromosomes)
or not
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub has_karyotype {
  # Return 1 when the genome has at least one karyotype-level slice
  # (i.e. assembled chromosomes), 0 otherwise.
  my $self    = shift;
  my $adaptor = $self->db()->get_SliceAdaptor();
  my $slices  = $adaptor->fetch_all_karyotype;
  return scalar(@$slices) ? 1 : 0;
}
=head2 is_high_coverage
Arg : None
Example : $is_high_coverage = $genome->is_high_coverage();
Description: Boolean indicating whether an assembly is high coverage
or not
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub is_high_coverage {
  # Return 1 when the assembly's declared coverage depth qualifies as high:
  # the literal string 'high', or a numeric depth of at least 6x.
  # 'low', 'medium', an unparseable value or a missing meta key all yield 0.
  my $self  = shift;
  my $depth = $self->_meta_container->single_value_by_key('assembly.coverage_depth');
  return 0 unless $depth;
  $depth = lc $depth;
  return 1 if $depth eq 'high';
  return 0 if $depth eq 'low' || $depth eq 'medium';
  if ($depth =~ /^([0-9]+)x$/) {
    return ($1 >= 6) ? 1 : 0;
  }
  return 0;
}
=head2 is_polyploid
Arg : None
Example : $is_polyploid = $genome->is_polyploid();
Description: Returns whether the genome is or is not polyploid.
Returntype : integer
Exceptions : none
Caller : general
Status : Stable
=cut
sub is_polyploid {
  # A genome counts as polyploid when its 'ploidy' meta value exceeds 2.
  my $self   = shift;
  my $ploidy = $self->_meta_container->single_value_by_key('ploidy');
  # The 'ploidy' meta key is optional, so an absent value means not polyploid.
  return 0 unless defined $ploidy;
  return $ploidy > 2;
}
=head2 get_genome_components
Arg : None
Example : $components = $genome->get_genome_components();
Description: Returns the list of (diploid) components, for a
polyploid genome
Returntype : Arrayref
Exceptions : none
Caller : general
Status : Stable
=cut
sub get_genome_components {
# Return the distinct 'genome_component' seq_region attribute values, i.e.
# the diploid components of a polyploid genome. Returns an arrayref of
# strings; empty when no such attributes exist.
# NOTE(review): relies on $self->dbc->sql_helper being available — defined
# elsewhere in this class; verify against the full module.
my $self = shift;
my $sql_helper = $self->dbc->sql_helper;
# SQL kept verbatim: distinct attribute values whose attrib_type code is
# 'genome_component'.
my $sql =
"SELECT DISTINCT value
FROM seq_region_attrib JOIN attrib_type
USING (attrib_type_id) WHERE attrib_type.code='genome_component'";
return $sql_helper->execute_simple(-SQL => $sql);
}
sub _obj_from_sth {
  # Build Bio::EnsEMBL::Genome statistic objects from an executed statement
  # handle and return them as an arrayref. The SELECT feeding $sth must
  # produce columns in the order bound below.
  my ($self, $sth) = @_;
  my ($dbID, $statistic, $value, $species_id, $code, $name, $desc);
  $sth->bind_columns(\$dbID, \$statistic, \$value, \$species_id, \$code, \$name, \$desc);
  my $results = [];
  while ($sth->fetch()) {
    push @$results, Bio::EnsEMBL::Genome->new_fast({
      'dbID'        => $dbID,
      'statistic'   => $statistic,
      'code'        => $code,
      'name'        => $name,
      'description' => $desc,
      'value'       => $value,
    });
  }
  return $results;
}
1;
| mjg17/ensembl | modules/Bio/EnsEMBL/DBSQL/GenomeContainer.pm | Perl | apache-2.0 | 39,307 |
package #
Date::Manip::TZ::eet00;
# Copyright (c) 2008-2014 Sullivan Beck. All rights reserved.
# This program is free software; you can redistribute it and/or modify it
# under the same terms as Perl itself.
# This file was automatically generated. Any changes to this file will
# be lost the next time 'tzdata' is run.
# Generated on: Fri Nov 21 10:41:41 EST 2014
# Data version: tzdata2014j
# Code version: tzcode2014j
# This module contains data from the zoneinfo time zone database. The original
# data was obtained from the URL:
# ftp://ftp.iana.org/tz
use strict;
use warnings;
require 5.010000;
# Package globals holding the zone's transition data, populated below by the
# generator; freed at interpreter shutdown to release the large structures.
our (%Dates,%LastRule);
END {
undef %Dates;
undef %LastRule;
}
# Module version, also cleaned up at shutdown (matches Date::Manip release).
our ($VERSION);
$VERSION='6.48';
END { undef $VERSION; }
%Dates = (
1 =>
[
[ [1,1,2,0,0,0],[1,1,2,2,0,0],'+02:00:00',[2,0,0],
'EET',0,[1977,4,3,0,59,59],[1977,4,3,2,59,59],
'0001010200:00:00','0001010202:00:00','1977040300:59:59','1977040302:59:59' ],
],
1977 =>
[
[ [1977,4,3,1,0,0],[1977,4,3,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1977,9,25,0,59,59],[1977,9,25,3,59,59],
'1977040301:00:00','1977040304:00:00','1977092500:59:59','1977092503:59:59' ],
[ [1977,9,25,1,0,0],[1977,9,25,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[1978,4,2,0,59,59],[1978,4,2,2,59,59],
'1977092501:00:00','1977092503:00:00','1978040200:59:59','1978040202:59:59' ],
],
1978 =>
[
[ [1978,4,2,1,0,0],[1978,4,2,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1978,10,1,0,59,59],[1978,10,1,3,59,59],
'1978040201:00:00','1978040204:00:00','1978100100:59:59','1978100103:59:59' ],
[ [1978,10,1,1,0,0],[1978,10,1,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[1979,4,1,0,59,59],[1979,4,1,2,59,59],
'1978100101:00:00','1978100103:00:00','1979040100:59:59','1979040102:59:59' ],
],
1979 =>
[
[ [1979,4,1,1,0,0],[1979,4,1,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1979,9,30,0,59,59],[1979,9,30,3,59,59],
'1979040101:00:00','1979040104:00:00','1979093000:59:59','1979093003:59:59' ],
[ [1979,9,30,1,0,0],[1979,9,30,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[1980,4,6,0,59,59],[1980,4,6,2,59,59],
'1979093001:00:00','1979093003:00:00','1980040600:59:59','1980040602:59:59' ],
],
1980 =>
[
[ [1980,4,6,1,0,0],[1980,4,6,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1980,9,28,0,59,59],[1980,9,28,3,59,59],
'1980040601:00:00','1980040604:00:00','1980092800:59:59','1980092803:59:59' ],
[ [1980,9,28,1,0,0],[1980,9,28,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[1981,3,29,0,59,59],[1981,3,29,2,59,59],
'1980092801:00:00','1980092803:00:00','1981032900:59:59','1981032902:59:59' ],
],
1981 =>
[
[ [1981,3,29,1,0,0],[1981,3,29,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1981,9,27,0,59,59],[1981,9,27,3,59,59],
'1981032901:00:00','1981032904:00:00','1981092700:59:59','1981092703:59:59' ],
[ [1981,9,27,1,0,0],[1981,9,27,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[1982,3,28,0,59,59],[1982,3,28,2,59,59],
'1981092701:00:00','1981092703:00:00','1982032800:59:59','1982032802:59:59' ],
],
1982 =>
[
[ [1982,3,28,1,0,0],[1982,3,28,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1982,9,26,0,59,59],[1982,9,26,3,59,59],
'1982032801:00:00','1982032804:00:00','1982092600:59:59','1982092603:59:59' ],
[ [1982,9,26,1,0,0],[1982,9,26,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[1983,3,27,0,59,59],[1983,3,27,2,59,59],
'1982092601:00:00','1982092603:00:00','1983032700:59:59','1983032702:59:59' ],
],
1983 =>
[
[ [1983,3,27,1,0,0],[1983,3,27,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1983,9,25,0,59,59],[1983,9,25,3,59,59],
'1983032701:00:00','1983032704:00:00','1983092500:59:59','1983092503:59:59' ],
[ [1983,9,25,1,0,0],[1983,9,25,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[1984,3,25,0,59,59],[1984,3,25,2,59,59],
'1983092501:00:00','1983092503:00:00','1984032500:59:59','1984032502:59:59' ],
],
1984 =>
[
[ [1984,3,25,1,0,0],[1984,3,25,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1984,9,30,0,59,59],[1984,9,30,3,59,59],
'1984032501:00:00','1984032504:00:00','1984093000:59:59','1984093003:59:59' ],
[ [1984,9,30,1,0,0],[1984,9,30,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[1985,3,31,0,59,59],[1985,3,31,2,59,59],
'1984093001:00:00','1984093003:00:00','1985033100:59:59','1985033102:59:59' ],
],
1985 =>
[
[ [1985,3,31,1,0,0],[1985,3,31,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1985,9,29,0,59,59],[1985,9,29,3,59,59],
'1985033101:00:00','1985033104:00:00','1985092900:59:59','1985092903:59:59' ],
[ [1985,9,29,1,0,0],[1985,9,29,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[1986,3,30,0,59,59],[1986,3,30,2,59,59],
'1985092901:00:00','1985092903:00:00','1986033000:59:59','1986033002:59:59' ],
],
1986 =>
[
[ [1986,3,30,1,0,0],[1986,3,30,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1986,9,28,0,59,59],[1986,9,28,3,59,59],
'1986033001:00:00','1986033004:00:00','1986092800:59:59','1986092803:59:59' ],
[ [1986,9,28,1,0,0],[1986,9,28,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[1987,3,29,0,59,59],[1987,3,29,2,59,59],
'1986092801:00:00','1986092803:00:00','1987032900:59:59','1987032902:59:59' ],
],
1987 =>
[
[ [1987,3,29,1,0,0],[1987,3,29,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1987,9,27,0,59,59],[1987,9,27,3,59,59],
'1987032901:00:00','1987032904:00:00','1987092700:59:59','1987092703:59:59' ],
[ [1987,9,27,1,0,0],[1987,9,27,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[1988,3,27,0,59,59],[1988,3,27,2,59,59],
'1987092701:00:00','1987092703:00:00','1988032700:59:59','1988032702:59:59' ],
],
1988 =>
[
[ [1988,3,27,1,0,0],[1988,3,27,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1988,9,25,0,59,59],[1988,9,25,3,59,59],
'1988032701:00:00','1988032704:00:00','1988092500:59:59','1988092503:59:59' ],
[ [1988,9,25,1,0,0],[1988,9,25,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[1989,3,26,0,59,59],[1989,3,26,2,59,59],
'1988092501:00:00','1988092503:00:00','1989032600:59:59','1989032602:59:59' ],
],
1989 =>
[
[ [1989,3,26,1,0,0],[1989,3,26,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1989,9,24,0,59,59],[1989,9,24,3,59,59],
'1989032601:00:00','1989032604:00:00','1989092400:59:59','1989092403:59:59' ],
[ [1989,9,24,1,0,0],[1989,9,24,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[1990,3,25,0,59,59],[1990,3,25,2,59,59],
'1989092401:00:00','1989092403:00:00','1990032500:59:59','1990032502:59:59' ],
],
1990 =>
[
[ [1990,3,25,1,0,0],[1990,3,25,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1990,9,30,0,59,59],[1990,9,30,3,59,59],
'1990032501:00:00','1990032504:00:00','1990093000:59:59','1990093003:59:59' ],
[ [1990,9,30,1,0,0],[1990,9,30,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[1991,3,31,0,59,59],[1991,3,31,2,59,59],
'1990093001:00:00','1990093003:00:00','1991033100:59:59','1991033102:59:59' ],
],
1991 =>
[
[ [1991,3,31,1,0,0],[1991,3,31,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1991,9,29,0,59,59],[1991,9,29,3,59,59],
'1991033101:00:00','1991033104:00:00','1991092900:59:59','1991092903:59:59' ],
[ [1991,9,29,1,0,0],[1991,9,29,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[1992,3,29,0,59,59],[1992,3,29,2,59,59],
'1991092901:00:00','1991092903:00:00','1992032900:59:59','1992032902:59:59' ],
],
1992 =>
[
[ [1992,3,29,1,0,0],[1992,3,29,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1992,9,27,0,59,59],[1992,9,27,3,59,59],
'1992032901:00:00','1992032904:00:00','1992092700:59:59','1992092703:59:59' ],
[ [1992,9,27,1,0,0],[1992,9,27,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[1993,3,28,0,59,59],[1993,3,28,2,59,59],
'1992092701:00:00','1992092703:00:00','1993032800:59:59','1993032802:59:59' ],
],
1993 =>
[
[ [1993,3,28,1,0,0],[1993,3,28,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1993,9,26,0,59,59],[1993,9,26,3,59,59],
'1993032801:00:00','1993032804:00:00','1993092600:59:59','1993092603:59:59' ],
[ [1993,9,26,1,0,0],[1993,9,26,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[1994,3,27,0,59,59],[1994,3,27,2,59,59],
'1993092601:00:00','1993092603:00:00','1994032700:59:59','1994032702:59:59' ],
],
1994 =>
[
[ [1994,3,27,1,0,0],[1994,3,27,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1994,9,25,0,59,59],[1994,9,25,3,59,59],
'1994032701:00:00','1994032704:00:00','1994092500:59:59','1994092503:59:59' ],
[ [1994,9,25,1,0,0],[1994,9,25,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[1995,3,26,0,59,59],[1995,3,26,2,59,59],
'1994092501:00:00','1994092503:00:00','1995032600:59:59','1995032602:59:59' ],
],
1995 =>
[
[ [1995,3,26,1,0,0],[1995,3,26,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1995,9,24,0,59,59],[1995,9,24,3,59,59],
'1995032601:00:00','1995032604:00:00','1995092400:59:59','1995092403:59:59' ],
[ [1995,9,24,1,0,0],[1995,9,24,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[1996,3,31,0,59,59],[1996,3,31,2,59,59],
'1995092401:00:00','1995092403:00:00','1996033100:59:59','1996033102:59:59' ],
],
1996 =>
[
[ [1996,3,31,1,0,0],[1996,3,31,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1996,10,27,0,59,59],[1996,10,27,3,59,59],
'1996033101:00:00','1996033104:00:00','1996102700:59:59','1996102703:59:59' ],
[ [1996,10,27,1,0,0],[1996,10,27,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[1997,3,30,0,59,59],[1997,3,30,2,59,59],
'1996102701:00:00','1996102703:00:00','1997033000:59:59','1997033002:59:59' ],
],
1997 =>
[
[ [1997,3,30,1,0,0],[1997,3,30,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1997,10,26,0,59,59],[1997,10,26,3,59,59],
'1997033001:00:00','1997033004:00:00','1997102600:59:59','1997102603:59:59' ],
[ [1997,10,26,1,0,0],[1997,10,26,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[1998,3,29,0,59,59],[1998,3,29,2,59,59],
'1997102601:00:00','1997102603:00:00','1998032900:59:59','1998032902:59:59' ],
],
1998 =>
[
[ [1998,3,29,1,0,0],[1998,3,29,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1998,10,25,0,59,59],[1998,10,25,3,59,59],
'1998032901:00:00','1998032904:00:00','1998102500:59:59','1998102503:59:59' ],
[ [1998,10,25,1,0,0],[1998,10,25,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[1999,3,28,0,59,59],[1999,3,28,2,59,59],
'1998102501:00:00','1998102503:00:00','1999032800:59:59','1999032802:59:59' ],
],
1999 =>
[
[ [1999,3,28,1,0,0],[1999,3,28,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[1999,10,31,0,59,59],[1999,10,31,3,59,59],
'1999032801:00:00','1999032804:00:00','1999103100:59:59','1999103103:59:59' ],
[ [1999,10,31,1,0,0],[1999,10,31,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2000,3,26,0,59,59],[2000,3,26,2,59,59],
'1999103101:00:00','1999103103:00:00','2000032600:59:59','2000032602:59:59' ],
],
2000 =>
[
[ [2000,3,26,1,0,0],[2000,3,26,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2000,10,29,0,59,59],[2000,10,29,3,59,59],
'2000032601:00:00','2000032604:00:00','2000102900:59:59','2000102903:59:59' ],
[ [2000,10,29,1,0,0],[2000,10,29,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2001,3,25,0,59,59],[2001,3,25,2,59,59],
'2000102901:00:00','2000102903:00:00','2001032500:59:59','2001032502:59:59' ],
],
2001 =>
[
[ [2001,3,25,1,0,0],[2001,3,25,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2001,10,28,0,59,59],[2001,10,28,3,59,59],
'2001032501:00:00','2001032504:00:00','2001102800:59:59','2001102803:59:59' ],
[ [2001,10,28,1,0,0],[2001,10,28,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2002,3,31,0,59,59],[2002,3,31,2,59,59],
'2001102801:00:00','2001102803:00:00','2002033100:59:59','2002033102:59:59' ],
],
2002 =>
[
[ [2002,3,31,1,0,0],[2002,3,31,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2002,10,27,0,59,59],[2002,10,27,3,59,59],
'2002033101:00:00','2002033104:00:00','2002102700:59:59','2002102703:59:59' ],
[ [2002,10,27,1,0,0],[2002,10,27,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2003,3,30,0,59,59],[2003,3,30,2,59,59],
'2002102701:00:00','2002102703:00:00','2003033000:59:59','2003033002:59:59' ],
],
2003 =>
[
[ [2003,3,30,1,0,0],[2003,3,30,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2003,10,26,0,59,59],[2003,10,26,3,59,59],
'2003033001:00:00','2003033004:00:00','2003102600:59:59','2003102603:59:59' ],
[ [2003,10,26,1,0,0],[2003,10,26,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2004,3,28,0,59,59],[2004,3,28,2,59,59],
'2003102601:00:00','2003102603:00:00','2004032800:59:59','2004032802:59:59' ],
],
2004 =>
[
[ [2004,3,28,1,0,0],[2004,3,28,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2004,10,31,0,59,59],[2004,10,31,3,59,59],
'2004032801:00:00','2004032804:00:00','2004103100:59:59','2004103103:59:59' ],
[ [2004,10,31,1,0,0],[2004,10,31,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2005,3,27,0,59,59],[2005,3,27,2,59,59],
'2004103101:00:00','2004103103:00:00','2005032700:59:59','2005032702:59:59' ],
],
2005 =>
[
[ [2005,3,27,1,0,0],[2005,3,27,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2005,10,30,0,59,59],[2005,10,30,3,59,59],
'2005032701:00:00','2005032704:00:00','2005103000:59:59','2005103003:59:59' ],
[ [2005,10,30,1,0,0],[2005,10,30,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2006,3,26,0,59,59],[2006,3,26,2,59,59],
'2005103001:00:00','2005103003:00:00','2006032600:59:59','2006032602:59:59' ],
],
2006 =>
[
[ [2006,3,26,1,0,0],[2006,3,26,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2006,10,29,0,59,59],[2006,10,29,3,59,59],
'2006032601:00:00','2006032604:00:00','2006102900:59:59','2006102903:59:59' ],
[ [2006,10,29,1,0,0],[2006,10,29,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2007,3,25,0,59,59],[2007,3,25,2,59,59],
'2006102901:00:00','2006102903:00:00','2007032500:59:59','2007032502:59:59' ],
],
2007 =>
[
[ [2007,3,25,1,0,0],[2007,3,25,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2007,10,28,0,59,59],[2007,10,28,3,59,59],
'2007032501:00:00','2007032504:00:00','2007102800:59:59','2007102803:59:59' ],
[ [2007,10,28,1,0,0],[2007,10,28,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2008,3,30,0,59,59],[2008,3,30,2,59,59],
'2007102801:00:00','2007102803:00:00','2008033000:59:59','2008033002:59:59' ],
],
2008 =>
[
[ [2008,3,30,1,0,0],[2008,3,30,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2008,10,26,0,59,59],[2008,10,26,3,59,59],
'2008033001:00:00','2008033004:00:00','2008102600:59:59','2008102603:59:59' ],
[ [2008,10,26,1,0,0],[2008,10,26,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2009,3,29,0,59,59],[2009,3,29,2,59,59],
'2008102601:00:00','2008102603:00:00','2009032900:59:59','2009032902:59:59' ],
],
2009 =>
[
[ [2009,3,29,1,0,0],[2009,3,29,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2009,10,25,0,59,59],[2009,10,25,3,59,59],
'2009032901:00:00','2009032904:00:00','2009102500:59:59','2009102503:59:59' ],
[ [2009,10,25,1,0,0],[2009,10,25,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2010,3,28,0,59,59],[2010,3,28,2,59,59],
'2009102501:00:00','2009102503:00:00','2010032800:59:59','2010032802:59:59' ],
],
2010 =>
[
[ [2010,3,28,1,0,0],[2010,3,28,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2010,10,31,0,59,59],[2010,10,31,3,59,59],
'2010032801:00:00','2010032804:00:00','2010103100:59:59','2010103103:59:59' ],
[ [2010,10,31,1,0,0],[2010,10,31,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2011,3,27,0,59,59],[2011,3,27,2,59,59],
'2010103101:00:00','2010103103:00:00','2011032700:59:59','2011032702:59:59' ],
],
2011 =>
[
[ [2011,3,27,1,0,0],[2011,3,27,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2011,10,30,0,59,59],[2011,10,30,3,59,59],
'2011032701:00:00','2011032704:00:00','2011103000:59:59','2011103003:59:59' ],
[ [2011,10,30,1,0,0],[2011,10,30,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2012,3,25,0,59,59],[2012,3,25,2,59,59],
'2011103001:00:00','2011103003:00:00','2012032500:59:59','2012032502:59:59' ],
],
2012 =>
[
[ [2012,3,25,1,0,0],[2012,3,25,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2012,10,28,0,59,59],[2012,10,28,3,59,59],
'2012032501:00:00','2012032504:00:00','2012102800:59:59','2012102803:59:59' ],
[ [2012,10,28,1,0,0],[2012,10,28,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2013,3,31,0,59,59],[2013,3,31,2,59,59],
'2012102801:00:00','2012102803:00:00','2013033100:59:59','2013033102:59:59' ],
],
2013 =>
[
[ [2013,3,31,1,0,0],[2013,3,31,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2013,10,27,0,59,59],[2013,10,27,3,59,59],
'2013033101:00:00','2013033104:00:00','2013102700:59:59','2013102703:59:59' ],
[ [2013,10,27,1,0,0],[2013,10,27,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2014,3,30,0,59,59],[2014,3,30,2,59,59],
'2013102701:00:00','2013102703:00:00','2014033000:59:59','2014033002:59:59' ],
],
2014 =>
[
[ [2014,3,30,1,0,0],[2014,3,30,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2014,10,26,0,59,59],[2014,10,26,3,59,59],
'2014033001:00:00','2014033004:00:00','2014102600:59:59','2014102603:59:59' ],
[ [2014,10,26,1,0,0],[2014,10,26,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2015,3,29,0,59,59],[2015,3,29,2,59,59],
'2014102601:00:00','2014102603:00:00','2015032900:59:59','2015032902:59:59' ],
],
2015 =>
[
[ [2015,3,29,1,0,0],[2015,3,29,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2015,10,25,0,59,59],[2015,10,25,3,59,59],
'2015032901:00:00','2015032904:00:00','2015102500:59:59','2015102503:59:59' ],
[ [2015,10,25,1,0,0],[2015,10,25,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2016,3,27,0,59,59],[2016,3,27,2,59,59],
'2015102501:00:00','2015102503:00:00','2016032700:59:59','2016032702:59:59' ],
],
2016 =>
[
[ [2016,3,27,1,0,0],[2016,3,27,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2016,10,30,0,59,59],[2016,10,30,3,59,59],
'2016032701:00:00','2016032704:00:00','2016103000:59:59','2016103003:59:59' ],
[ [2016,10,30,1,0,0],[2016,10,30,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2017,3,26,0,59,59],[2017,3,26,2,59,59],
'2016103001:00:00','2016103003:00:00','2017032600:59:59','2017032602:59:59' ],
],
2017 =>
[
[ [2017,3,26,1,0,0],[2017,3,26,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2017,10,29,0,59,59],[2017,10,29,3,59,59],
'2017032601:00:00','2017032604:00:00','2017102900:59:59','2017102903:59:59' ],
[ [2017,10,29,1,0,0],[2017,10,29,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2018,3,25,0,59,59],[2018,3,25,2,59,59],
'2017102901:00:00','2017102903:00:00','2018032500:59:59','2018032502:59:59' ],
],
2018 =>
[
[ [2018,3,25,1,0,0],[2018,3,25,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2018,10,28,0,59,59],[2018,10,28,3,59,59],
'2018032501:00:00','2018032504:00:00','2018102800:59:59','2018102803:59:59' ],
[ [2018,10,28,1,0,0],[2018,10,28,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2019,3,31,0,59,59],[2019,3,31,2,59,59],
'2018102801:00:00','2018102803:00:00','2019033100:59:59','2019033102:59:59' ],
],
2019 =>
[
[ [2019,3,31,1,0,0],[2019,3,31,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2019,10,27,0,59,59],[2019,10,27,3,59,59],
'2019033101:00:00','2019033104:00:00','2019102700:59:59','2019102703:59:59' ],
[ [2019,10,27,1,0,0],[2019,10,27,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2020,3,29,0,59,59],[2020,3,29,2,59,59],
'2019102701:00:00','2019102703:00:00','2020032900:59:59','2020032902:59:59' ],
],
2020 =>
[
[ [2020,3,29,1,0,0],[2020,3,29,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2020,10,25,0,59,59],[2020,10,25,3,59,59],
'2020032901:00:00','2020032904:00:00','2020102500:59:59','2020102503:59:59' ],
[ [2020,10,25,1,0,0],[2020,10,25,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2021,3,28,0,59,59],[2021,3,28,2,59,59],
'2020102501:00:00','2020102503:00:00','2021032800:59:59','2021032802:59:59' ],
],
2021 =>
[
[ [2021,3,28,1,0,0],[2021,3,28,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2021,10,31,0,59,59],[2021,10,31,3,59,59],
'2021032801:00:00','2021032804:00:00','2021103100:59:59','2021103103:59:59' ],
[ [2021,10,31,1,0,0],[2021,10,31,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2022,3,27,0,59,59],[2022,3,27,2,59,59],
'2021103101:00:00','2021103103:00:00','2022032700:59:59','2022032702:59:59' ],
],
2022 =>
[
[ [2022,3,27,1,0,0],[2022,3,27,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2022,10,30,0,59,59],[2022,10,30,3,59,59],
'2022032701:00:00','2022032704:00:00','2022103000:59:59','2022103003:59:59' ],
[ [2022,10,30,1,0,0],[2022,10,30,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2023,3,26,0,59,59],[2023,3,26,2,59,59],
'2022103001:00:00','2022103003:00:00','2023032600:59:59','2023032602:59:59' ],
],
2023 =>
[
[ [2023,3,26,1,0,0],[2023,3,26,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2023,10,29,0,59,59],[2023,10,29,3,59,59],
'2023032601:00:00','2023032604:00:00','2023102900:59:59','2023102903:59:59' ],
[ [2023,10,29,1,0,0],[2023,10,29,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2024,3,31,0,59,59],[2024,3,31,2,59,59],
'2023102901:00:00','2023102903:00:00','2024033100:59:59','2024033102:59:59' ],
],
2024 =>
[
[ [2024,3,31,1,0,0],[2024,3,31,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2024,10,27,0,59,59],[2024,10,27,3,59,59],
'2024033101:00:00','2024033104:00:00','2024102700:59:59','2024102703:59:59' ],
[ [2024,10,27,1,0,0],[2024,10,27,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2025,3,30,0,59,59],[2025,3,30,2,59,59],
'2024102701:00:00','2024102703:00:00','2025033000:59:59','2025033002:59:59' ],
],
2025 =>
[
[ [2025,3,30,1,0,0],[2025,3,30,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2025,10,26,0,59,59],[2025,10,26,3,59,59],
'2025033001:00:00','2025033004:00:00','2025102600:59:59','2025102603:59:59' ],
[ [2025,10,26,1,0,0],[2025,10,26,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2026,3,29,0,59,59],[2026,3,29,2,59,59],
'2025102601:00:00','2025102603:00:00','2026032900:59:59','2026032902:59:59' ],
],
2026 =>
[
[ [2026,3,29,1,0,0],[2026,3,29,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2026,10,25,0,59,59],[2026,10,25,3,59,59],
'2026032901:00:00','2026032904:00:00','2026102500:59:59','2026102503:59:59' ],
[ [2026,10,25,1,0,0],[2026,10,25,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2027,3,28,0,59,59],[2027,3,28,2,59,59],
'2026102501:00:00','2026102503:00:00','2027032800:59:59','2027032802:59:59' ],
],
2027 =>
[
[ [2027,3,28,1,0,0],[2027,3,28,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2027,10,31,0,59,59],[2027,10,31,3,59,59],
'2027032801:00:00','2027032804:00:00','2027103100:59:59','2027103103:59:59' ],
[ [2027,10,31,1,0,0],[2027,10,31,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2028,3,26,0,59,59],[2028,3,26,2,59,59],
'2027103101:00:00','2027103103:00:00','2028032600:59:59','2028032602:59:59' ],
],
2028 =>
[
[ [2028,3,26,1,0,0],[2028,3,26,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2028,10,29,0,59,59],[2028,10,29,3,59,59],
'2028032601:00:00','2028032604:00:00','2028102900:59:59','2028102903:59:59' ],
[ [2028,10,29,1,0,0],[2028,10,29,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2029,3,25,0,59,59],[2029,3,25,2,59,59],
'2028102901:00:00','2028102903:00:00','2029032500:59:59','2029032502:59:59' ],
],
2029 =>
[
[ [2029,3,25,1,0,0],[2029,3,25,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2029,10,28,0,59,59],[2029,10,28,3,59,59],
'2029032501:00:00','2029032504:00:00','2029102800:59:59','2029102803:59:59' ],
[ [2029,10,28,1,0,0],[2029,10,28,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2030,3,31,0,59,59],[2030,3,31,2,59,59],
'2029102801:00:00','2029102803:00:00','2030033100:59:59','2030033102:59:59' ],
],
2030 =>
[
[ [2030,3,31,1,0,0],[2030,3,31,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2030,10,27,0,59,59],[2030,10,27,3,59,59],
'2030033101:00:00','2030033104:00:00','2030102700:59:59','2030102703:59:59' ],
[ [2030,10,27,1,0,0],[2030,10,27,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2031,3,30,0,59,59],[2031,3,30,2,59,59],
'2030102701:00:00','2030102703:00:00','2031033000:59:59','2031033002:59:59' ],
],
2031 =>
[
[ [2031,3,30,1,0,0],[2031,3,30,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2031,10,26,0,59,59],[2031,10,26,3,59,59],
'2031033001:00:00','2031033004:00:00','2031102600:59:59','2031102603:59:59' ],
[ [2031,10,26,1,0,0],[2031,10,26,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2032,3,28,0,59,59],[2032,3,28,2,59,59],
'2031102601:00:00','2031102603:00:00','2032032800:59:59','2032032802:59:59' ],
],
2032 =>
[
[ [2032,3,28,1,0,0],[2032,3,28,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2032,10,31,0,59,59],[2032,10,31,3,59,59],
'2032032801:00:00','2032032804:00:00','2032103100:59:59','2032103103:59:59' ],
[ [2032,10,31,1,0,0],[2032,10,31,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2033,3,27,0,59,59],[2033,3,27,2,59,59],
'2032103101:00:00','2032103103:00:00','2033032700:59:59','2033032702:59:59' ],
],
2033 =>
[
[ [2033,3,27,1,0,0],[2033,3,27,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2033,10,30,0,59,59],[2033,10,30,3,59,59],
'2033032701:00:00','2033032704:00:00','2033103000:59:59','2033103003:59:59' ],
[ [2033,10,30,1,0,0],[2033,10,30,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2034,3,26,0,59,59],[2034,3,26,2,59,59],
'2033103001:00:00','2033103003:00:00','2034032600:59:59','2034032602:59:59' ],
],
2034 =>
[
[ [2034,3,26,1,0,0],[2034,3,26,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2034,10,29,0,59,59],[2034,10,29,3,59,59],
'2034032601:00:00','2034032604:00:00','2034102900:59:59','2034102903:59:59' ],
[ [2034,10,29,1,0,0],[2034,10,29,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2035,3,25,0,59,59],[2035,3,25,2,59,59],
'2034102901:00:00','2034102903:00:00','2035032500:59:59','2035032502:59:59' ],
],
2035 =>
[
[ [2035,3,25,1,0,0],[2035,3,25,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2035,10,28,0,59,59],[2035,10,28,3,59,59],
'2035032501:00:00','2035032504:00:00','2035102800:59:59','2035102803:59:59' ],
[ [2035,10,28,1,0,0],[2035,10,28,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2036,3,30,0,59,59],[2036,3,30,2,59,59],
'2035102801:00:00','2035102803:00:00','2036033000:59:59','2036033002:59:59' ],
],
2036 =>
[
[ [2036,3,30,1,0,0],[2036,3,30,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2036,10,26,0,59,59],[2036,10,26,3,59,59],
'2036033001:00:00','2036033004:00:00','2036102600:59:59','2036102603:59:59' ],
[ [2036,10,26,1,0,0],[2036,10,26,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2037,3,29,0,59,59],[2037,3,29,2,59,59],
'2036102601:00:00','2036102603:00:00','2037032900:59:59','2037032902:59:59' ],
],
2037 =>
[
[ [2037,3,29,1,0,0],[2037,3,29,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2037,10,25,0,59,59],[2037,10,25,3,59,59],
'2037032901:00:00','2037032904:00:00','2037102500:59:59','2037102503:59:59' ],
[ [2037,10,25,1,0,0],[2037,10,25,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2038,3,28,0,59,59],[2038,3,28,2,59,59],
'2037102501:00:00','2037102503:00:00','2038032800:59:59','2038032802:59:59' ],
],
2038 =>
[
[ [2038,3,28,1,0,0],[2038,3,28,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2038,10,31,0,59,59],[2038,10,31,3,59,59],
'2038032801:00:00','2038032804:00:00','2038103100:59:59','2038103103:59:59' ],
[ [2038,10,31,1,0,0],[2038,10,31,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2039,3,27,0,59,59],[2039,3,27,2,59,59],
'2038103101:00:00','2038103103:00:00','2039032700:59:59','2039032702:59:59' ],
],
2039 =>
[
[ [2039,3,27,1,0,0],[2039,3,27,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2039,10,30,0,59,59],[2039,10,30,3,59,59],
'2039032701:00:00','2039032704:00:00','2039103000:59:59','2039103003:59:59' ],
[ [2039,10,30,1,0,0],[2039,10,30,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2040,3,25,0,59,59],[2040,3,25,2,59,59],
'2039103001:00:00','2039103003:00:00','2040032500:59:59','2040032502:59:59' ],
],
2040 =>
[
[ [2040,3,25,1,0,0],[2040,3,25,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2040,10,28,0,59,59],[2040,10,28,3,59,59],
'2040032501:00:00','2040032504:00:00','2040102800:59:59','2040102803:59:59' ],
[ [2040,10,28,1,0,0],[2040,10,28,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2041,3,31,0,59,59],[2041,3,31,2,59,59],
'2040102801:00:00','2040102803:00:00','2041033100:59:59','2041033102:59:59' ],
],
2041 =>
[
[ [2041,3,31,1,0,0],[2041,3,31,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2041,10,27,0,59,59],[2041,10,27,3,59,59],
'2041033101:00:00','2041033104:00:00','2041102700:59:59','2041102703:59:59' ],
[ [2041,10,27,1,0,0],[2041,10,27,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2042,3,30,0,59,59],[2042,3,30,2,59,59],
'2041102701:00:00','2041102703:00:00','2042033000:59:59','2042033002:59:59' ],
],
2042 =>
[
[ [2042,3,30,1,0,0],[2042,3,30,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2042,10,26,0,59,59],[2042,10,26,3,59,59],
'2042033001:00:00','2042033004:00:00','2042102600:59:59','2042102603:59:59' ],
[ [2042,10,26,1,0,0],[2042,10,26,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2043,3,29,0,59,59],[2043,3,29,2,59,59],
'2042102601:00:00','2042102603:00:00','2043032900:59:59','2043032902:59:59' ],
],
2043 =>
[
[ [2043,3,29,1,0,0],[2043,3,29,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2043,10,25,0,59,59],[2043,10,25,3,59,59],
'2043032901:00:00','2043032904:00:00','2043102500:59:59','2043102503:59:59' ],
[ [2043,10,25,1,0,0],[2043,10,25,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2044,3,27,0,59,59],[2044,3,27,2,59,59],
'2043102501:00:00','2043102503:00:00','2044032700:59:59','2044032702:59:59' ],
],
2044 =>
[
[ [2044,3,27,1,0,0],[2044,3,27,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2044,10,30,0,59,59],[2044,10,30,3,59,59],
'2044032701:00:00','2044032704:00:00','2044103000:59:59','2044103003:59:59' ],
[ [2044,10,30,1,0,0],[2044,10,30,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2045,3,26,0,59,59],[2045,3,26,2,59,59],
'2044103001:00:00','2044103003:00:00','2045032600:59:59','2045032602:59:59' ],
],
2045 =>
[
[ [2045,3,26,1,0,0],[2045,3,26,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2045,10,29,0,59,59],[2045,10,29,3,59,59],
'2045032601:00:00','2045032604:00:00','2045102900:59:59','2045102903:59:59' ],
[ [2045,10,29,1,0,0],[2045,10,29,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2046,3,25,0,59,59],[2046,3,25,2,59,59],
'2045102901:00:00','2045102903:00:00','2046032500:59:59','2046032502:59:59' ],
],
2046 =>
[
[ [2046,3,25,1,0,0],[2046,3,25,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2046,10,28,0,59,59],[2046,10,28,3,59,59],
'2046032501:00:00','2046032504:00:00','2046102800:59:59','2046102803:59:59' ],
[ [2046,10,28,1,0,0],[2046,10,28,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2047,3,31,0,59,59],[2047,3,31,2,59,59],
'2046102801:00:00','2046102803:00:00','2047033100:59:59','2047033102:59:59' ],
],
2047 =>
[
[ [2047,3,31,1,0,0],[2047,3,31,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2047,10,27,0,59,59],[2047,10,27,3,59,59],
'2047033101:00:00','2047033104:00:00','2047102700:59:59','2047102703:59:59' ],
[ [2047,10,27,1,0,0],[2047,10,27,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2048,3,29,0,59,59],[2048,3,29,2,59,59],
'2047102701:00:00','2047102703:00:00','2048032900:59:59','2048032902:59:59' ],
],
2048 =>
[
[ [2048,3,29,1,0,0],[2048,3,29,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2048,10,25,0,59,59],[2048,10,25,3,59,59],
'2048032901:00:00','2048032904:00:00','2048102500:59:59','2048102503:59:59' ],
[ [2048,10,25,1,0,0],[2048,10,25,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2049,3,28,0,59,59],[2049,3,28,2,59,59],
'2048102501:00:00','2048102503:00:00','2049032800:59:59','2049032802:59:59' ],
],
2049 =>
[
[ [2049,3,28,1,0,0],[2049,3,28,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2049,10,31,0,59,59],[2049,10,31,3,59,59],
'2049032801:00:00','2049032804:00:00','2049103100:59:59','2049103103:59:59' ],
[ [2049,10,31,1,0,0],[2049,10,31,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2050,3,27,0,59,59],[2050,3,27,2,59,59],
'2049103101:00:00','2049103103:00:00','2050032700:59:59','2050032702:59:59' ],
],
2050 =>
[
[ [2050,3,27,1,0,0],[2050,3,27,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2050,10,30,0,59,59],[2050,10,30,3,59,59],
'2050032701:00:00','2050032704:00:00','2050103000:59:59','2050103003:59:59' ],
[ [2050,10,30,1,0,0],[2050,10,30,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2051,3,26,0,59,59],[2051,3,26,2,59,59],
'2050103001:00:00','2050103003:00:00','2051032600:59:59','2051032602:59:59' ],
],
2051 =>
[
[ [2051,3,26,1,0,0],[2051,3,26,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2051,10,29,0,59,59],[2051,10,29,3,59,59],
'2051032601:00:00','2051032604:00:00','2051102900:59:59','2051102903:59:59' ],
[ [2051,10,29,1,0,0],[2051,10,29,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2052,3,31,0,59,59],[2052,3,31,2,59,59],
'2051102901:00:00','2051102903:00:00','2052033100:59:59','2052033102:59:59' ],
],
2052 =>
[
[ [2052,3,31,1,0,0],[2052,3,31,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2052,10,27,0,59,59],[2052,10,27,3,59,59],
'2052033101:00:00','2052033104:00:00','2052102700:59:59','2052102703:59:59' ],
[ [2052,10,27,1,0,0],[2052,10,27,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2053,3,30,0,59,59],[2053,3,30,2,59,59],
'2052102701:00:00','2052102703:00:00','2053033000:59:59','2053033002:59:59' ],
],
2053 =>
[
[ [2053,3,30,1,0,0],[2053,3,30,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2053,10,26,0,59,59],[2053,10,26,3,59,59],
'2053033001:00:00','2053033004:00:00','2053102600:59:59','2053102603:59:59' ],
[ [2053,10,26,1,0,0],[2053,10,26,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2054,3,29,0,59,59],[2054,3,29,2,59,59],
'2053102601:00:00','2053102603:00:00','2054032900:59:59','2054032902:59:59' ],
],
2054 =>
[
[ [2054,3,29,1,0,0],[2054,3,29,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2054,10,25,0,59,59],[2054,10,25,3,59,59],
'2054032901:00:00','2054032904:00:00','2054102500:59:59','2054102503:59:59' ],
[ [2054,10,25,1,0,0],[2054,10,25,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2055,3,28,0,59,59],[2055,3,28,2,59,59],
'2054102501:00:00','2054102503:00:00','2055032800:59:59','2055032802:59:59' ],
],
2055 =>
[
[ [2055,3,28,1,0,0],[2055,3,28,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2055,10,31,0,59,59],[2055,10,31,3,59,59],
'2055032801:00:00','2055032804:00:00','2055103100:59:59','2055103103:59:59' ],
[ [2055,10,31,1,0,0],[2055,10,31,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2056,3,26,0,59,59],[2056,3,26,2,59,59],
'2055103101:00:00','2055103103:00:00','2056032600:59:59','2056032602:59:59' ],
],
2056 =>
[
[ [2056,3,26,1,0,0],[2056,3,26,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2056,10,29,0,59,59],[2056,10,29,3,59,59],
'2056032601:00:00','2056032604:00:00','2056102900:59:59','2056102903:59:59' ],
[ [2056,10,29,1,0,0],[2056,10,29,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2057,3,25,0,59,59],[2057,3,25,2,59,59],
'2056102901:00:00','2056102903:00:00','2057032500:59:59','2057032502:59:59' ],
],
2057 =>
[
[ [2057,3,25,1,0,0],[2057,3,25,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2057,10,28,0,59,59],[2057,10,28,3,59,59],
'2057032501:00:00','2057032504:00:00','2057102800:59:59','2057102803:59:59' ],
[ [2057,10,28,1,0,0],[2057,10,28,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2058,3,31,0,59,59],[2058,3,31,2,59,59],
'2057102801:00:00','2057102803:00:00','2058033100:59:59','2058033102:59:59' ],
],
2058 =>
[
[ [2058,3,31,1,0,0],[2058,3,31,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2058,10,27,0,59,59],[2058,10,27,3,59,59],
'2058033101:00:00','2058033104:00:00','2058102700:59:59','2058102703:59:59' ],
[ [2058,10,27,1,0,0],[2058,10,27,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2059,3,30,0,59,59],[2059,3,30,2,59,59],
'2058102701:00:00','2058102703:00:00','2059033000:59:59','2059033002:59:59' ],
],
2059 =>
[
[ [2059,3,30,1,0,0],[2059,3,30,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2059,10,26,0,59,59],[2059,10,26,3,59,59],
'2059033001:00:00','2059033004:00:00','2059102600:59:59','2059102603:59:59' ],
[ [2059,10,26,1,0,0],[2059,10,26,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2060,3,28,0,59,59],[2060,3,28,2,59,59],
'2059102601:00:00','2059102603:00:00','2060032800:59:59','2060032802:59:59' ],
],
2060 =>
[
[ [2060,3,28,1,0,0],[2060,3,28,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2060,10,31,0,59,59],[2060,10,31,3,59,59],
'2060032801:00:00','2060032804:00:00','2060103100:59:59','2060103103:59:59' ],
[ [2060,10,31,1,0,0],[2060,10,31,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2061,3,27,0,59,59],[2061,3,27,2,59,59],
'2060103101:00:00','2060103103:00:00','2061032700:59:59','2061032702:59:59' ],
],
2061 =>
[
[ [2061,3,27,1,0,0],[2061,3,27,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2061,10,30,0,59,59],[2061,10,30,3,59,59],
'2061032701:00:00','2061032704:00:00','2061103000:59:59','2061103003:59:59' ],
[ [2061,10,30,1,0,0],[2061,10,30,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2062,3,26,0,59,59],[2062,3,26,2,59,59],
'2061103001:00:00','2061103003:00:00','2062032600:59:59','2062032602:59:59' ],
],
2062 =>
[
[ [2062,3,26,1,0,0],[2062,3,26,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2062,10,29,0,59,59],[2062,10,29,3,59,59],
'2062032601:00:00','2062032604:00:00','2062102900:59:59','2062102903:59:59' ],
[ [2062,10,29,1,0,0],[2062,10,29,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2063,3,25,0,59,59],[2063,3,25,2,59,59],
'2062102901:00:00','2062102903:00:00','2063032500:59:59','2063032502:59:59' ],
],
2063 =>
[
[ [2063,3,25,1,0,0],[2063,3,25,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2063,10,28,0,59,59],[2063,10,28,3,59,59],
'2063032501:00:00','2063032504:00:00','2063102800:59:59','2063102803:59:59' ],
[ [2063,10,28,1,0,0],[2063,10,28,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2064,3,30,0,59,59],[2064,3,30,2,59,59],
'2063102801:00:00','2063102803:00:00','2064033000:59:59','2064033002:59:59' ],
],
2064 =>
[
[ [2064,3,30,1,0,0],[2064,3,30,4,0,0],'+03:00:00',[3,0,0],
'EEST',1,[2064,10,26,0,59,59],[2064,10,26,3,59,59],
'2064033001:00:00','2064033004:00:00','2064102600:59:59','2064102603:59:59' ],
[ [2064,10,26,1,0,0],[2064,10,26,3,0,0],'+02:00:00',[2,0,0],
'EET',0,[2065,3,29,0,59,59],[2065,3,29,2,59,59],
'2064102601:00:00','2064102603:00:00','2065032900:59:59','2065032902:59:59' ],
],
);
# %LastRule holds the open-ended "last <weekday> of <month>" transition
# rules used to extrapolate this zone's DST behaviour for years beyond
# the precomputed table above.  This is generated Date::Manip::TZ data;
# do not hand-edit the values.
%LastRule = (
# Zone-wide UT offsets while DST is in effect ('dstoff') and during
# standard time ('stdoff').
'zone' => {
'dstoff' => '+03:00:00',
'stdoff' => '+02:00:00',
},
'rules' => {
# March rule: DST begins on the last occurrence of dow 7 in the month
# (flag 'last', num 0) at 01:00, time type 'u' (UT); the zone then uses
# abbreviation EEST with the isdst flag set.
# NOTE(review): dow 7 presumably means Sunday per Date::Manip's
# convention -- confirm against Date::Manip::TZ docs.
'03' => {
'flag' => 'last',
'dow' => '7',
'num' => '0',
'type' => 'u',
'time' => '01:00:00',
'isdst' => '1',
'abb' => 'EEST',
},
# October rule: DST ends on the last dow-7 day at 01:00 UT; the zone
# returns to standard time EET (isdst cleared).
'10' => {
'flag' => 'last',
'dow' => '7',
'num' => '0',
'type' => 'u',
'time' => '01:00:00',
'isdst' => '0',
'abb' => 'EET',
},
},
);
# A module must end with a true value.
1;
| nriley/Pester | Source/Manip/TZ/eet00.pm | Perl | bsd-2-clause | 42,393 |
:- module(sdrt,[mergeSDRS/2]).
:- use_module(library(lists),[member/2,append/3]).
/* =========================================================================
Subordinate SDRT relations
========================================================================= */
%% subordinate(?Rel)
%  Succeeds when Rel is a subordinating SDRT discourse relation, i.e.
%  one whose second constituent is attached *below* the first in the
%  discourse structure (handled by subDRS/4 in mergeSDRS/2).
subordinate(elaboration).
subordinate(instance).
subordinate(topic).
subordinate(explanation).
subordinate(precondition).
subordinate(commentary).
subordinate(correction).
/* =========================================================================
Coordinate SDRT relations
========================================================================= */
%% coordinate(?Rel)
%  Succeeds when Rel is a coordinating SDRT discourse relation, i.e.
%  one whose second constituent is attached at the *same* level as the
%  first (handled by cooDRS/4 in mergeSDRS/2).
coordinate(continuation).
coordinate(narration).
coordinate(background).
coordinate(result).
/* =========================================================================
Insert DRS into SDRS
========================================================================= */
%% mergeSDRS(+SMerge, -SDRS)
%  Resolves a pending merge term smerge(Old,New,Rel,_Pops) by inserting
%  constituent New into SDRS Old under discourse relation Rel.
%  Subordinating relations extend the structure downwards via subDRS/4;
%  coordinating relations extend it sideways via cooDRS/4.  The cuts
%  commit to the first relation class that matches.
mergeSDRS(smerge(S1,S2,Rel,_Pops),S3):-
subordinate(Rel), !,
subDRS(S1,Rel,S2,S3).
mergeSDRS(smerge(S1,S2,Rel,_Pops),S3):-
coordinate(Rel), !,
cooDRS(S1,Rel,S2,S3).
/* =========================================================================
Insert subordinate DRS into SDRS
========================================================================= */
%% subDRS(+Old, +Rel, +New, -SDRS)
%  Inserts New below the most recent label of Old via the subordinating
%  relation Rel.  Clause order matters: base cases first, then a
%  recursive clause that walks to the last labelled segment.
%
% DRS + (S)DRS: Old is a bare (unlabelled) DRS, alfa or merge term --
% wrap both arguments in fresh labels K1/K2 and record rel(K1,K2,Rel).
%
subDRS(DRS,Rel,B2,SDRS):-
   member(DRS,[_:drs(_,_),alfa(_,_,_),merge(_,_)]), !,
   SDRS = sdrs([sub(lab(K1,DRS),lab(K2,B2))],[[]:rel(K1,K2,Rel)]).
% SUB + (S)DRS: descend into the subordinate part of an existing
% sub/2 node and attach there.
%
subDRS(sdrs([sub(L1,lab(L2,S1))],R),Rel,B,sdrs([sub(L1,lab(L2,S2))],R)):- !,
   subDRS(S1,Rel,B,S2).
% Non-SUB + (S)DRS: turn the last plain label into a sub/2 node
% dominating the new constituent, adding the relation to the front.
%
subDRS(sdrs([lab(K1,B1)],R),Rel,B2,SDRS):- !,
   SDRS = sdrs([sub(lab(K1,B1),lab(K2,B2))],[[]:rel(K1,K2,Rel)|R]).
% Recurse down to last label of first argument
%
subDRS(sdrs([X|L1],R1),Rel,New,sdrs([X|L2],R2)):-
   subDRS(sdrs(L1,R1),Rel,New,sdrs(L2,R2)).
/* =========================================================================
Insert coordinate DRS into SDRS
========================================================================= */
%% cooDRS(+Old, +Rel, +New, -SDRS)
%  Attaches New at the same level as the last constituent of Old via
%  the coordinating relation Rel.  The first two clauses normalise bare
%  DRS/merge/alfa arguments into singleton SDRSs; the remaining clauses
%  enumerate the SUB/non-SUB combinations.  Clause order and the cuts
%  are significant.
%
% Make SDRSs of both DRSs (if needed)
%
cooDRS(B1,Rel,B2,S):-
   member(B1,[_:_,merge(_,_),alfa(_,_,_)]), !,
   cooDRS(sdrs([lab(_,B1)],[]),Rel,B2,S).
cooDRS(B1,Rel,B2,S):-
   member(B2,[_:_,merge(_,_),alfa(_,_,_)]), !,
   cooDRS(B1,Rel,sdrs([lab(_,B2)],[]),S).
% Non-SUB + Non-SUB: append the segments and record rel(K1,K2,Rel)
% between the two adjacent labels.
%
cooDRS(sdrs([lab(K1,B1)],R1),Rel,sdrs([lab(K2,B2)|L],R2),SDRS):- !,
   append(R1,[[]:rel(K1,K2,Rel)|R2],R3),
   SDRS = sdrs([lab(K1,B1),lab(K2,B2)|L],R3).
% SUB (no pop): continue inside the subordinate part of a sub/2 node.
%
cooDRS(sdrs([sub(B1,lab(K3,B3))],R),Rel,B2,sdrs([sub(B1,lab(K3,SDRS))],R)):- !,
   cooDRS(B3,Rel,B2,SDRS).
% SUB (pop) + Non-SUB
%
cooDRS(sdrs([sub(lab(K1,B1),B3)],R1),Rel,sdrs([lab(K2,B2)|L],R2),SDRS):- !,
   append(R1,[[]:rel(K1,K2,Rel)|R2],R3),
   SDRS = sdrs([sub(lab(K1,B1),B3),lab(K2,B2)|L],R3).
% SUB (pop) + SUB
%
cooDRS(sdrs([sub(lab(K1,B1),B3)],R1),Rel,sdrs([sub(lab(K2,B2),B4)|L],R2),SDRS):- !,
   append(R1,[[]:rel(K1,K2,Rel)|R2],R3),
   SDRS = sdrs([sub(lab(K1,B1),B3),sub(lab(K2,B2),B4)|L],R3).
% Non-SUB + SUB
%
cooDRS(sdrs([lab(K1,B1)],R1),Rel,sdrs([sub(lab(K2,B2),B4)|L],R2),SDRS):- !,
   append(R1,[[]:rel(K1,K2,Rel)|R2],R3),
   SDRS = sdrs([lab(K1,B1),sub(lab(K2,B2),B4)|L],R3).
% Recurse down to last label of first argument
%
cooDRS(sdrs([X|L1],R1),Rel,New,sdrs([X|L2],R2)):- !,
   cooDRS(sdrs(L1,R1),Rel,New,sdrs(L2,R2)).
| TeamSPoon/logicmoo_workspace | packs_sys/logicmoo_nlu/ext/candc/src/prolog/boxer/sdrt.pl | Perl | mit | 3,350 |
#
# Copyright 2017 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::hp::vc::snmp::mode::components::resources;

use strict;
use warnings;

use Exporter;

our @ISA = qw(Exporter);
our @EXPORT_OK = qw(
    $map_managed_status
    $map_reason_code
    $map_moduleport_loop_status
    $map_moduleport_protection_status
);

# Managed-status SNMP values mapped to their severity labels.
our $map_managed_status = {
    1 => 'unknown',
    2 => 'normal',
    3 => 'warning',
    4 => 'minor',
    5 => 'major',
    6 => 'critical',
    7 => 'disabled',
    8 => 'info',
};

# Reason codes from the HP Virtual Connect MIB mapped to symbolic names.
# Codes are grouped by hundreds: 1xx network, 2xx fabric, 3xx profile,
# 4xx modules, 5xx physical server, 6xx enclosure, 7xx-9xx domain.
our $map_reason_code = {
    100 => 'vcNetworkOk',
    101 => 'vcNetworkUnknown',
    102 => 'vcNetworkDisabled',
    104 => 'vcNetworkAbnormal',
    105 => 'vcNetworkFailed',
    106 => 'vcNetworkDegraded',
    109 => 'vcNetworkNoPortsAssignedToPrivateNetwork',
    200 => 'vcFabricOk',
    202 => 'vcFabricNoPortsConfigured',
    203 => 'vcFabricSomePortsAbnormal',
    204 => 'vcFabricAllPortsAbnormal',
    205 => 'vcFabricWwnMismatch',
    206 => 'vcFabricUnknown',
    300 => 'vcProfileOk',
    301 => 'vcProfileServerAbnormal',
    304 => 'vcProfileAllConnectionsFailed',
    309 => 'vcProfileSomeConnectionsUnmapped',
    310 => 'vcProfileAllConnectionsAbnormal',
    311 => 'vcProfileSomeConnectionsAbnormal',
    312 => 'vcProfileUEFIBootmodeIncompatibleWithServer',
    313 => 'vcProfileUnknown',
    400 => 'vcEnetmoduleOk',
    401 => 'vcEnetmoduleEnclosureDown',
    402 => 'vcEnetmoduleModuleMissing',
    404 => 'vcEnetmodulePortprotect',
    405 => 'vcEnetmoduleIncompatible',
    406 => 'vcEnetmoduleHwDegraded',
    407 => 'vcEnetmoduleUnknown',
    408 => 'vcFcmoduleOk',
    409 => 'vcFcmoduleEnclosureDown',
    410 => 'vcFcmoduleModuleMissing',
    412 => 'vcFcmoduleHwDegraded',
    413 => 'vcFcmoduleIncompatible',
    414 => 'vcFcmoduleUnknown',
    500 => 'vcPhysicalServerOk',
    501 => 'vcPhysicalServerEnclosureDown',
    502 => 'vcPhysicalServerFailed',
    503 => 'vcPhysicalServerDegraded',
    504 => 'vcPhysicalServerUnknown',
    600 => 'vcEnclosureOk',
    601 => 'vcEnclosureAllEnetModulesFailed',
    602 => 'vcEnclosureSomeEnetModulesAbnormal',
    603 => 'vcEnclosureSomeModulesOrServersIncompatible',
    604 => 'vcEnclosureSomeFcModulesAbnormal',
    605 => 'vcEnclosureSomeServersAbnormal',
    606 => 'vcEnclosureUnknown',
    700 => 'vcDomainOk',
    701 => 'vcDomainAbnormalEnclosuresAndProfiles',
    702 => 'vcDomainSomeEnclosuresAbnormal',
    703 => 'vcDomainUnmappedProfileConnections',
    706 => 'vcDomainStackingFailed',
    707 => 'vcDomainStackingNotRedundant',
    709 => 'vcDomainSomeProfilesAbnormal',
    712 => 'vcDomainUnknown',
    713 => 'vcDomainOverProvisioned',
    801 => 'vcDomainSflowIndirectlyDisabled',
    802 => 'vcDomainSflowFailed',
    803 => 'vcDomainSflowDegraded',
    901 => 'vcDomainPortMonitorIndirectlyDisabled',
};

# Module-port protection status values.
our $map_moduleport_protection_status = {
    1 => 'ok',
    2 => 'pause-flood-detected',
    3 => 'in-pause-condition',
};

# Module-port loop status values.
# NOTE(review): 'loop-dectected' looks like a typo for 'loop-detected',
# but it is kept verbatim for compatibility -- user-defined threshold
# regexes may already match the misspelled string.
our $map_moduleport_loop_status = {
    1 => 'ok',
    2 => 'loop-dectected',
};

1;
=pod
=head1 NAME
OSSL_ENCODER,
OSSL_ENCODER_fetch,
OSSL_ENCODER_up_ref,
OSSL_ENCODER_free,
OSSL_ENCODER_get0_provider,
OSSL_ENCODER_get0_properties,
OSSL_ENCODER_is_a,
OSSL_ENCODER_get0_name,
OSSL_ENCODER_get0_description,
OSSL_ENCODER_do_all_provided,
OSSL_ENCODER_names_do_all,
OSSL_ENCODER_gettable_params,
OSSL_ENCODER_get_params
- Encoder method routines
=head1 SYNOPSIS
#include <openssl/encoder.h>
typedef struct ossl_encoder_st OSSL_ENCODER;
OSSL_ENCODER *OSSL_ENCODER_fetch(OSSL_LIB_CTX *ctx, const char *name,
const char *properties);
int OSSL_ENCODER_up_ref(OSSL_ENCODER *encoder);
void OSSL_ENCODER_free(OSSL_ENCODER *encoder);
const OSSL_PROVIDER *OSSL_ENCODER_get0_provider(const OSSL_ENCODER *encoder);
const char *OSSL_ENCODER_get0_properties(const OSSL_ENCODER *encoder);
int OSSL_ENCODER_is_a(const OSSL_ENCODER *encoder, const char *name);
const char *OSSL_ENCODER_get0_name(const OSSL_ENCODER *encoder);
const char *OSSL_ENCODER_get0_description(const OSSL_ENCODER *encoder);
void OSSL_ENCODER_do_all_provided(OSSL_LIB_CTX *libctx,
void (*fn)(OSSL_ENCODER *encoder, void *arg),
void *arg);
int OSSL_ENCODER_names_do_all(const OSSL_ENCODER *encoder,
void (*fn)(const char *name, void *data),
void *data);
const OSSL_PARAM *OSSL_ENCODER_gettable_params(OSSL_ENCODER *encoder);
int OSSL_ENCODER_get_params(OSSL_ENCODER_CTX *ctx, const OSSL_PARAM params[]);
=head1 DESCRIPTION
B<OSSL_ENCODER> is a method for encoders, which know how to
encode an object of some kind to an encoded form, such as PEM,
DER, or even human-readable text.
OSSL_ENCODER_fetch() looks for an algorithm within the provider that
has been loaded into the B<OSSL_LIB_CTX> given by I<ctx>, having the
name given by I<name> and the properties given by I<properties>.
The I<name> determines what type of object the fetched encoder
method is expected to be able to encode, and the properties are
used to determine the expected output type.
For known properties and the values they may have, please have a look
in L<provider-encoder(7)/Names and properties>.
OSSL_ENCODER_up_ref() increments the reference count for the given
I<encoder>.
OSSL_ENCODER_free() decrements the reference count for the given
I<encoder>, and when the count reaches zero, frees it.
OSSL_ENCODER_get0_provider() returns the provider of the given
I<encoder>.
OSSL_ENCODER_get0_properties() returns the property definition associated
with the given I<encoder>.
OSSL_ENCODER_is_a() checks if I<encoder> is an implementation of an
algorithm that's identifiable with I<name>.
OSSL_ENCODER_get0_name() returns the name used to fetch the given I<encoder>.
OSSL_ENCODER_get0_description() returns a description of the I<loader>, meant
for display and human consumption. The description is at the discretion of the
I<loader> implementation.
OSSL_ENCODER_names_do_all() traverses all names for the given
I<encoder>, and calls I<fn> with each name and I<data> as arguments.
OSSL_ENCODER_do_all_provided() traverses all encoder
implementations by all activated providers in the library context
I<libctx>, and for each of the implementations, calls I<fn> with the
implementation method and I<arg> as arguments.
OSSL_ENCODER_gettable_params() returns an L<OSSL_PARAM(3)>
array of parameter descriptors.
OSSL_ENCODER_get_params() attempts to get parameters specified
with an L<OSSL_PARAM(3)> array I<params>. Parameters that the
implementation doesn't recognise should be ignored.
=head1 RETURN VALUES
OSSL_ENCODER_fetch() returns a pointer to the key management
implementation represented by an OSSL_ENCODER object, or NULL on
error.
OSSL_ENCODER_up_ref() returns 1 on success, or 0 on error.
OSSL_ENCODER_free() doesn't return any value.
OSSL_ENCODER_get0_provider() returns a pointer to a provider object, or
NULL on error.
OSSL_ENCODER_get0_properties() returns a pointer to a property
definition string, or NULL on error.
OSSL_ENCODER_is_a() returns 1 if I<encoder> was identifiable,
otherwise 0.
OSSL_ENCODER_get0_name() returns the algorithm name from the provided
implementation for the given I<encoder>. Note that the I<encoder> may have
multiple synonyms associated with it. In this case the first name from the
algorithm definition is returned. Ownership of the returned string is retained
by the I<encoder> object and should not be freed by the caller.
OSSL_ENCODER_get0_description() returns a pointer to a description, or NULL if
there isn't one.
OSSL_ENCODER_names_do_all() returns 1 if the callback was called for all
names. A return value of 0 means that the callback was not called for any names.
=head1 SEE ALSO
L<provider(7)>, L<OSSL_ENCODER_CTX(3)>, L<OSSL_ENCODER_to_bio(3)>,
L<OSSL_ENCODER_CTX_new_for_pkey(3)>, L<OSSL_LIB_CTX(3)>
=head1 HISTORY
The functions described here were added in OpenSSL 3.0.
=head1 COPYRIGHT
Copyright 2019-2021 The OpenSSL Project Authors. All Rights Reserved.
Licensed under the Apache License 2.0 (the "License"). You may not use
this file except in compliance with the License. You can obtain a copy
in the file LICENSE in the source distribution or at
L<https://www.openssl.org/source/license.html>.
=cut
| jens-maus/amissl | openssl/doc/man3/OSSL_ENCODER.pod | Perl | bsd-3-clause | 5,323 |
package Carp;
our $VERSION = '1.09';
# This file is an ultra-lightweight stub.  The first time a function is
# called, Carp::Heavy is loaded, and the real shortmess/longmess
# implementations are installed over the *_jmp trampolines below.
our $MaxEvalLen = 0;    # chars of a string-eval shown in traces; 0 = all
our $Verbose = 0;       # true => carp/croak act like cluck/confess
our $CarpLevel = 0;     # extra call frames to skip when reporting
our $MaxArgLen = 64; # How much of each argument to print. 0 = all.
our $MaxArgNums = 8; # How many arguments to print. 0 = all.
require Exporter;
our @ISA = ('Exporter');
our @EXPORT = qw(confess croak carp);
our @EXPORT_OK = qw(cluck verbose longmess shortmess);
our @EXPORT_FAIL = qw(verbose); # hook to enable verbose mode
# if the caller specifies verbose usage ("perl -MCarp=verbose script.pl")
# then the following method will be called by the Exporter which knows
# to do this thanks to @EXPORT_FAIL, above. $_[0] will contain the word
# 'verbose'.
sub export_fail { shift; $Verbose = shift if $_[0] eq 'verbose'; @_ }
# fixed hooks for stashes to point to
# These permanent entry points simply goto() whatever the *_jmp slots
# currently hold, so a saved \&Carp::longmess reference keeps working
# after Carp::Heavy swaps in the real implementations.  goto &sub
# preserves @_ and replaces this stack frame.
sub longmess { goto &longmess_jmp }
sub shortmess { goto &shortmess_jmp }
# these two are replaced when Carp::Heavy is loaded
# One-shot trampoline: load Carp::Heavy (which installs the real
# implementation) and tail-jump to longmess_real with @_ intact.
# $@ and $! are localised so the require cannot clobber the caller's
# error state.  If Carp::Heavy itself fails to load, the load error is
# returned as the message instead.
sub longmess_jmp {
local($@, $!);
eval { require Carp::Heavy };
return $@ if $@;
goto &longmess_real;
}
# Same trampoline pattern as longmess_jmp, but for the short
# (caller-perspective) message: loads Carp::Heavy on first use and
# tail-jumps to shortmess_real, preserving @_.
sub shortmess_jmp {
local($@, $!);
eval { require Carp::Heavy };
return $@ if $@;
goto &shortmess_real;
}
# Public interface.  croak/carp report the error from the caller's
# perspective (shortmess); confess/cluck attach a full stack backtrace
# (longmess).  croak/confess are fatal (die); carp/cluck warn only.
sub croak { die shortmess @_ }
sub confess { die longmess @_ }
sub carp { warn shortmess @_ }
sub cluck { warn longmess @_ }
1;  # module must return a true value
__END__
=head1 NAME
carp - warn of errors (from perspective of caller)
cluck - warn of errors with stack backtrace
(not exported by default)
croak - die of errors (from perspective of caller)
confess - die of errors with stack backtrace
=head1 SYNOPSIS
use Carp;
croak "We're outta here!";
use Carp qw(cluck);
cluck "This is how we got here!";
=head1 DESCRIPTION
The Carp routines are useful in your own modules because
they act like die() or warn(), but with a message which is more
likely to be useful to a user of your module. In the case of
cluck, confess, and longmess that context is a summary of every
call in the call-stack. For a shorter message you can use C<carp>
or C<croak> which report the error as being from where your module
was called. There is no guarantee that that is where the error
was, but it is a good educated guess.
You can also alter the way the output and logic of C<Carp> works, by
changing some global variables in the C<Carp> namespace. See the
section on C<GLOBAL VARIABLES> below.
Here is a more complete description of how C<carp> and C<croak> work.
What they do is search the call-stack for a function call stack where
they have not been told that there shouldn't be an error. If every
call is marked safe, they give up and give a full stack backtrace
instead. In other words they presume that the first likely looking
potential suspect is guilty. Their rules for telling whether
a call shouldn't generate errors work as follows:
=over 4
=item 1.
Any call from a package to itself is safe.
=item 2.
Packages claim that there won't be errors on calls to or from
packages explicitly marked as safe by inclusion in C<@CARP_NOT>, or
(if that array is empty) C<@ISA>. The ability to override what
@ISA says is new in 5.8.
=item 3.
The trust in item 2 is transitive. If A trusts B, and B
trusts C, then A trusts C. So if you do not override C<@ISA>
with C<@CARP_NOT>, then this trust relationship is identical to,
"inherits from".
=item 4.
Any call from an internal Perl module is safe. (Nothing keeps
user modules from marking themselves as internal to Perl, but
this practice is discouraged.)
=item 5.
Any call to Perl's warning system (eg Carp itself) is safe.
(This rule is what keeps it from reporting the error at the
point where you call C<carp> or C<croak>.)
=item 6.
C<$Carp::CarpLevel> can be set to skip a fixed number of additional
call levels. Using this is not recommended because it is very
difficult to get it to behave correctly.
=back
=head2 Forcing a Stack Trace
As a debugging aid, you can force Carp to treat a croak as a confess
and a carp as a cluck across I<all> modules. In other words, force a
detailed stack trace to be given. This can be very helpful when trying
to understand why, or from where, a warning or error is being generated.
This feature is enabled by 'importing' the non-existent symbol
'verbose'. You would typically enable it by saying
perl -MCarp=verbose script.pl
or by including the string C<-MCarp=verbose> in the PERL5OPT
environment variable.
Alternately, you can set the global variable C<$Carp::Verbose> to true.
See the C<GLOBAL VARIABLES> section below.
=head1 GLOBAL VARIABLES
=head2 $Carp::MaxEvalLen
This variable determines how many characters of a string-eval are to
be shown in the output. Use a value of C<0> to show all text.
Defaults to C<0>.
=head2 $Carp::MaxArgLen
This variable determines how many characters of each argument to a
function to print. Use a value of C<0> to show the full length of the
argument.
Defaults to C<64>.
=head2 $Carp::MaxArgNums
This variable determines how many arguments to each function to show.
Use a value of C<0> to show all arguments to a function call.
Defaults to C<8>.
=head2 $Carp::Verbose
This variable makes C<carp> and C<croak> generate stack backtraces
just like C<cluck> and C<confess>. This is how C<use Carp 'verbose'>
is implemented internally.
Defaults to C<0>.
=head2 %Carp::Internal
This says what packages are internal to Perl. C<Carp> will never
report an error as being from a line in a package that is internal to
Perl. For example:
$Carp::Internal{ __PACKAGE__ }++;
# time passes...
sub foo { ... or confess("whatever") };
would give a full stack backtrace starting from the first caller
outside of __PACKAGE__. (Unless that package was also internal to
Perl.)
=head2 %Carp::CarpInternal
This says which packages are internal to Perl's warning system. For
generating a full stack backtrace this is the same as being internal
to Perl, the stack backtrace will not start inside packages that are
listed in C<%Carp::CarpInternal>. But it is slightly different for
the summary message generated by C<carp> or C<croak>. There errors
will not be reported on any lines that are calling packages in
C<%Carp::CarpInternal>.
For example C<Carp> itself is listed in C<%Carp::CarpInternal>.
Therefore the full stack backtrace from C<confess> will not start
inside of C<Carp>, and the short message from calling C<croak> is
not placed on the line where C<croak> was called.
=head2 $Carp::CarpLevel
This variable determines how many additional call frames are to be
skipped that would not otherwise be when reporting where an error
occurred on a call to one of C<Carp>'s functions. It is fairly easy
to count these call frames on calls that generate a full stack
backtrace. However it is much harder to do this accounting for calls
that generate a short message. Usually people skip too many call
frames. If they are lucky they skip enough that C<Carp> goes all of
the way through the call stack, realizes that something is wrong, and
then generates a full stack backtrace. If they are unlucky then the
error is reported from somewhere misleading very high in the call
stack.
Therefore it is best to avoid C<$Carp::CarpLevel>. Instead use
C<@CARP_NOT>, C<%Carp::Internal> and C<%Carp::CarpInternal>.
Defaults to C<0>.
=head1 BUGS
The Carp routines don't handle exception objects currently.
If called with a first argument that is a reference, they simply
call die() or warn(), as appropriate.
| leighpauls/k2cro4 | third_party/cygwin/lib/perl5/5.10/Carp.pm | Perl | bsd-3-clause | 7,607 |
package DDG::Spice::Expatistan;
# ABSTRACT: Compare cities based on cost of living

use strict;
use DDG::Spice;

primary_example_queries "cost of living in Philadelphia";
secondary_example_queries "cost of living barcelona vs madrid";
description "See and compare costs of living via Expatistan";
# FIX: the plugin name and source were misspelled "Expatisan"; the
# service is spelled "Expatistan" everywhere else in this module.
name "Expatistan";
icon_url "/i/www.expatistan.com.ico";
source "Expatistan";
code_url "https://github.com/duckduckgo/zeroclickinfo-spice/blob/master/lib/DDG/Spice/Expatistan.pm";
topics "economy_and_finance";
category "facts";
attribution github => ['https://github.com/hunterlang','Hunter Lang'];

triggers any => "cost of living";
spice to => 'http://www.expatistan.com/api/spice?q=$1&api_key={{ENV{DDG_SPICE_EXPATISTAN_APIKEY}}}';

# Pass the full lower-cased query through to the API endpoint;
# return nothing (no spice call) when the query is empty/false.
handle query_lc => sub {
    return $_ if $_;
    return;
};

1;
| dachinzo/zeroclickinfo-spice | lib/DDG/Spice/Expatistan.pm | Perl | apache-2.0 | 797 |
package DDG::Spice::Launchbug;
# ABSTRACT: Bug search on launchpad

use strict;
use DDG::Spice;

spice is_cached => 1;

name "Launchbug";
description "Returns infos about a given bug-id on Launchpad.net";
primary_example_queries "LP: 7", "LP: #2983", "(LP: #1234)", "launchbug 23", "bugid 234";
category "programming";
topics "programming";
code_url "https://github.com/duckduckgo/zeroclickinfo-spice/blob/master/lib/DDG/Spice/Launchbug.pm";
attribution github => ["https://github.com/puskin94/", "puskin"];

triggers any => "launchbug", "lp", "bugid", "(lp";

spice to => 'https://api.launchpad.net/devel/bugs/$1?ws.accept=application%2Fjson';
spice wrap_jsonp_callback => 1;

# Extract the numeric bug id from the query remainder. Accepts an
# optional leading '#' and an optional trailing ')', e.g. "#1234" or
# "1234)". Anything else (or an empty remainder) is ignored.
handle remainder => sub {
    my $remainder = $_;
    return unless $remainder;
    if ( $remainder =~ /^\#?(\d+)\)?$/ ) {
        return $1;
    }
    return;
};

1;
| GrandpaCardigan/zeroclickinfo-spice | lib/DDG/Spice/Launchbug.pm | Perl | apache-2.0 | 778 |
use warnings;
use strict;
use Glib;
use Irssi;
use Net::DBus;
use Net::DBus::GLib;
use POSIX;
# Script metadata displayed by Irssi's /script commands.
our %IRSSI = (
    name => "sleep_disconnect",
    description => "Disconnects when system goes to sleep",
    contact => "Mantas Mikulėnas <grawity\@gmail.com>",
    license => "MIT License <https://spdx.org/licenses/MIT>",
);
our $VERSION = '0.2';
# Shared state for the whole script:
my $bus = Net::DBus::GLib->system();   # connection to the system D-Bus
my $logind_mgr = undef;                # org.freedesktop.login1 manager proxy (set in connect_signals)
my $inhibit_fd = undef;                # fd of the held sleep-delay inhibitor lock, if any
my %restart_servers = ();              # tags of servers we quit and should reconnect after wakeup
# Debug logging helper; output only appears when the DEBUG environment
# variable is set.
sub _trace {
    return unless $ENV{DEBUG};
    Irssi::print("$IRSSI{name}: @_");
}

# Error logging helper; always printed, flagged as a client error.
sub _err {
    my $message = "$IRSSI{name}: @_";
    Irssi::print($message, MSGLEVEL_CLIENTERROR);
}
# Send QUIT to every connected server, remembering each server's tag in
# %restart_servers so reconnect_all() can restore the same connections
# after wakeup. The quit message comes from the sleep_quit_message setting.
sub disconnect_all {
    my $quit_msg = Irssi::settings_get_str("sleep_quit_message");
    %restart_servers = ();
    for my $srv (Irssi::servers()) {
        next unless $srv->{connected};
        _trace(" - disconnecting from $srv->{tag}");
        $restart_servers{$srv->{tag}} = 1;
        $srv->send_raw_now("QUIT :$quit_msg");
    }
}
# Reconnect to every server recorded by disconnect_all(), then clear
# the bookkeeping hash.
sub reconnect_all {
    foreach my $tag (sort keys %restart_servers) {
        _trace(" - reconnecting to $tag");
        my $srv = Irssi::server_find_tag($tag);
        unless ($srv) {
            _err("could not find tag '$tag'!");
            next;
        }
        $srv->command("reconnect");
    }
    %restart_servers = ();
}
# Acquire a "delay" inhibitor lock from systemd-logind so the system
# waits for us (up to its configured delay limit) before sleeping,
# giving us time to disconnect cleanly. The lock is represented by a
# file descriptor stored in $inhibit_fd; sleep proceeds once it is closed.
sub take_inhibit {
    if (!$logind_mgr) {
        # connect_signals() has not run (or failed); nothing to inhibit on.
        _err("BUG: (take_inhibit) no manager object");
        return;
    }
    elsif (defined $inhibit_fd) {
        # Guard against double-acquisition, which would leak the old fd.
        _err("BUG: (take_inhibit) already has an inhibit fd ($inhibit_fd)");
        return;
    }
    # Inhibit(what, who, why, mode) -> lock file descriptor.
    my $fd = $logind_mgr->Inhibit("sleep",
                    "Irssi",
                    "Irssi needs to disconnect from IRC",
                    "delay");
    if ($fd) {
        # NOTE(review): an fd of 0 would be treated as failure here;
        # unlikely in practice, but 'defined $fd' would be stricter.
        _trace("(take_inhibit) got inhibit fd $fd");
        $inhibit_fd = $fd;
    } else {
        _err("BUG: (take_inhibit) could not take an inhibitor");
    }
}
# Release the sleep-delay inhibitor lock, if held, by closing its file
# descriptor (closing the fd is how logind inhibitors are released).
sub drop_inhibit {
    return unless defined $inhibit_fd;
    _trace("(drop_inhibit) closing fd $inhibit_fd");
    POSIX::close($inhibit_fd);
    $inhibit_fd = undef;
}
# Wire up logind's PrepareForSleep signal and take the initial delay
# lock. On suspend: quit all servers first, then release the lock so
# the system may actually sleep. On resume: re-take the lock first so
# we are protected against the next suspend, then reconnect.
sub connect_signals {
    # Start from a clean state in case the script is being reloaded.
    drop_inhibit();
    my $logind_svc = $bus->get_service("org.freedesktop.login1");
    $logind_mgr = $logind_svc->get_object("/org/freedesktop/login1");
    # PrepareForSleep fires with TRUE before suspend and FALSE after resume.
    $logind_mgr->connect_to_signal("PrepareForSleep", sub {
        my ($suspending) = @_;
        if ($suspending) {
            _trace("suspending...");
            _trace("* disconnecting");
            disconnect_all();
            _trace("* dropping inhibit lock");
            drop_inhibit();
            # system goes to sleep at this point
        } else {
            _trace("waking up...");
            _trace("* taking inhibit lock");
            take_inhibit();
            _trace("* reconnecting");
            reconnect_all();
        }
    });
    take_inhibit();
}
# Called by Irssi when the script is unloaded; make sure we do not
# keep holding the sleep-delay lock past our lifetime.
sub UNLOAD {
    drop_inhibit();
}
# User-configurable quit message sent when the system suspends.
Irssi::settings_add_str("misc", "sleep_quit_message", "Computer going to sleep");
connect_signals();
# vim: ts=4:sw=4:et:
| grawity/hacks | irc/irssi/sleep_disconnect.pl | Perl | mit | 3,073 |
#!/usr/bin/perl
# Sample: list every subscription on a queuemanager by POSTing an
# inquire request (SubName wildcard '*') to the MQWeb REST API.

use strict;
use warnings;
use feature qw(say);

use JSON;
use LWP::UserAgent;
use HTTP::Request::Common;

# This sample will show all subscriptions from the given queuemanager.
my $qmgr = shift;
die("Please pass me the name of a queuemanager as argument")
    unless defined($qmgr);

my $json = JSON->new;

# Request body: inquire every subscription.
my $content = $json->encode({ 'SubName' => '*' });

my $agent   = LWP::UserAgent->new;
my $request = POST 'http://localhost:8081/api/sub/inquire/' . $qmgr;
$request->header(
    'Content-Type'   => 'application/json',
    'Content-length' => length($content)
);
$request->content($content);

my $response = $agent->request($request);
die $response->status_line unless $response->is_success;

my $mqweb = $json->decode($response->content());

if ( exists($mqweb->{error}) ) {
    # The API reports MQ failures via an 'error' object with a reason.
    say 'An MQ error occurred while inquiring subscriptions.';
    say 'Reason Code: '
        . $mqweb->{error}->{reason}->{code}
        . ' - '
        . $mqweb->{error}->{reason}->{desc};
}
else {
    # Print the name of each subscription returned.
    say $_->{SubName}->{value} for @{ $mqweb->{data} };
}
| fbraem/mqweb | samples/perl/sub_inq.pl | Perl | mit | 1,024 |
#!/usr/bin/perl

=head1 NAME

unique_fastq_reads.pl - create a Fastq file with only unique examples of reads

=cut

use strict;
use warnings;
use Getopt::Long;
use Pod::Usage;
use File::Basename;
use IO::Uncompress::Gunzip qw(gunzip $GunzipError);

my $fastq;
my $out = 'unique.fastq';
my $VERBOSE = 1;
my $DEBUG = 0;
my $help;
my $man;
my $version = 0.1;

GetOptions (
   'in=s'     => \$fastq,
   'out=s'    => \$out,
   'verbose!' => \$VERBOSE,
   'debug!'   => \$DEBUG,
   'man'      => \$man,
   'help|?'   => \$help,
) or pod2usage();

pod2usage(-verbose => 2) if ($man);
pod2usage(-verbose => 1) if ($help);
pod2usage(-msg => 'Please supply a valid filename.') unless ($fastq && -s $fastq);

print "Running version: $version\n\n";
print "Reading '$fastq'...\n" if $VERBOSE;

my $header = '';
my $seq = '';
# Map sequence -> [header, quality] for the FIRST occurrence of each
# distinct read. An array ref is used rather than the old "$header|$qual"
# joined string: headers routinely contain '|' (e.g. NCBI 'gi|...' ids),
# which broke the later split and corrupted the output.
my %uniqReads;
my $n = 0;
# Gunzip transparently handles both gzipped and plain files.
my $z = IO::Uncompress::Gunzip->new($fastq)
    or die " ERROR - gunzip failed on '$fastq': $GunzipError\n";
while (my $line = $z->getline()) {
   chomp($line);
   # FASTQ records are exactly 4 lines; use the line counter to find our
   # place. (IO::Uncompress handles keep $. up to date for getline().)
   if ($. % 4 == 1) { # read header
      $header = $line;
      ++$n;
   }
   if ($. % 4 == 2) { # sequence
      $seq = $line;
   }
   if ($. % 4 == 0) { # quality string
      $uniqReads{$seq} = [ $header, $line ];
      $seq = '';
      $header = '';
   }
}
$z->close();
printf "Read $n reads of which %d are unique\n", scalar keys %uniqReads if $VERBOSE;

print "Writing output to '$out'...\n" if $VERBOSE;
open(my $OUT, ">", $out) or die "ERROR - unable to open '$out' for write: ${!}\nDied";
foreach my $s (keys %uniqReads) {
   my ($head, $qual) = @{ $uniqReads{$s} };
   print $OUT "$head\n$s\n+\n$qual\n";
}
close($OUT);
print "Done!\n" if $VERBOSE;
=head1 SYNOPSIS
unique_fastq_reads.pl --in <file> [--out <file>] [--verbose|--no-verbose] [--debug|--no-debug] [--man] [--help]
=head1 DESCRIPTION
Takes a Fastq file and writes out only unique examples of reads therein. Keeps header and quality information for the first instance of each read.
=head1 OPTIONS
=over 5
=item B<--in>
Input fastq file (can be gzipped).
=item B<--out>
Output filename. [default: unique.fastq]
=item B<--verbose|--no-verbose>
Toggle verbosity. [default:none]
=item B<--debug|--no-debug>
Toggle debugging output. [default:none]
=item B<--help>
Brief help.
=item B<--man>
Full manpage of program.
=back
=head1 AUTHOR
Chris Cole <christian@cole.name>
=head1 COPYRIGHT
Copyright 2012, Chris Cole. All rights reserved.
This program is free software; you can redistribute it and/or modify it under the same terms as Perl itself.
=cut | bartongroup/profDGE48 | Mapping/unique_fastq_reads.pl | Perl | mit | 2,559 |
#
# $Header: svn://svn/SWM/trunk/web/Mail/ExternalMailer_BulkMail.pm 9569 2013-09-20 05:51:06Z tcourt $
#
# Bulk-mail backend for the external mailer: sends one message to a
# list of BCC recipients through Mail::Bulkmail via a local SMTP server.
package Mail::ExternalMailer_BulkMail;
use strict;
use lib ".", "..", "../..";
# Inherit common behaviour from the mailer base object.
our @ISA =qw(Mail::ExternalMailer_BaseObj);
use Mail::ExternalMailer_BaseObj;
use Mail::Bulkmail;
use Mail::Bulkmail::Server;
# Construct an (empty) mailer object. May be called as a class or
# instance method. Any key/value arguments are currently accepted but
# not stored.
sub new {
    my $invocant = shift;
    my %params = @_;    # accepted for interface compatibility; unused
    my $class = ref($invocant) || $invocant;
    return bless {}, $class;
}
# Send a bulk mailing via Mail::Bulkmail through a local SMTP server.
#
# Named parameters:
#   MessageID, Subject, FromAddress, ToAddress,
#   BCCRecipients (array ref of addresses)  - required
#   HTMLMessage and/or TextMessage          - at least one required
#   FromName, ToName, ReplyToAddress        - optional
#
# Returns 1 on success, or a (0, $reason) list on validation failure.
sub send {
    my $self = shift;
    my %params=@_;

    # Default every known option to '' so later string ops don't warn.
    # FIX: this list previously said 'TEXTMessage' while the rest of the
    # code reads $params{'TextMessage'}, so the text part was never
    # defaulted (and could trigger undef warnings).
    my @options = (qw(
        HTMLMessage
        TextMessage
        MessageID
        Subject
        FromName
        FromAddress
        ReplyToAddress
        BCCRecipients
        ToAddress
        ToName
    ));
    for my $o (@options) {
        $params{$o} ||= '';
    }

    my %compulsory = (
        MessageID => 1,
        Subject => 1,
        FromAddress => 1,
        BCCRecipients => 1,
        ToAddress => 1,
    );

    # At least one body variant must be supplied.
    if (!$params{'HTMLMessage'}
        and !$params{'TextMessage'}
    ) {
        return (0,'Need Message');
    }
    for my $k (keys %compulsory) {
        return (0,'Need '.$k) if !$params{$k};
    }

    # Normalise the recipient list: strip surrounding single quotes and
    # drop anything without an @domain part. Keying on the address also
    # de-duplicates. (Note: the s/// edits alias the caller's array.)
    my %Emails=();
    for my $email (@{$params{'BCCRecipients'}}) {
        $email =~s/^'//g;
        $email =~s/'$//g;
        my $domain=$email;
        $domain=~s/.*\@//g or next;
        $Emails{$email}=$domain;
    }
    # Sort recipients by domain so envelope batching groups them.
    my @Emails;
    foreach my $key (sort {$Emails{$a} cmp $Emails{$b}} keys %Emails) {
        push @Emails, $key;
    }

    my $server = Mail::Bulkmail::Server->new(
        'Smtp' => "localhost",
        'Port' => 25,
        'Tries' => 5,
        'Domain' => 'sportingpulse.com',
    ) || die Mail::Bulkmail::Server->error();
    $server->Domain('sportingpulse.com');
    $server->Tries(5);

    my $message = getMessageContent($params{'HTMLMessage'},$params{'TextMessage'});

    # FIX: on construction failure the original merely *called* error()
    # and carried on with an undef $bulk (crashing below with a useless
    # message); now we die with the actual reason.
    my $bulk = Mail::Bulkmail->new(
        "LIST" => \@Emails,
        "From" => qq["$params{'FromName'}" <$params{'FromAddress'}>],
        "To" => qq["$params{'ToName'}" <$params{'ToAddress'}>],
        "Reply-To" => $params{'ReplyToAddress'},
        "Subject" => $params{'Subject'},
        "use_envelope" => '1',
        "envelope_limit" => '200',
        "Message" => $message,
        servers => [$server],
    ) or die Mail::Bulkmail->error();

    # Boundary must match the part separators built by getMessageContent().
    $bulk->header("Content-type", 'multipart/alternative; boundary="----=_NextPart_001_002D_01C21C53.27797610"');
    $bulk->bulkmail;
    return 1;
}
# Build the multipart/alternative MIME body from the HTML and plain-text
# variants of the message. If only one variant is supplied, the other
# part falls back to it (or to the "sent as HTML" notice for text).
#
# FIX: previously this returned '' whenever the text part was empty,
# silently discarding HTML-only messages; now only an entirely empty
# message yields ''. (The old "$text ||= ''" after that early return
# was dead code.)
sub getMessageContent {
    my($html, $text)=@_;
    return '' if !$text && !$html;
    $text ||= '';
    $html ||= $text;
    # Part boundary below must match the Content-type header set in send().
    my $message =qq[
------=_NextPart_001_002D_01C21C53.27797610
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 8bit
This email has been sent as HTML.
Your email client seems to be having problems with viewing it.
$text
------=_NextPart_001_002D_01C21C53.27797610
Content-Type: text/html; charset="us-ascii"
Content-Transfer-Encoding: 8bit
$html
------=_NextPart_001_002D_01C21C53.27797610--
];
    return $message;
}
1;
| facascante/slimerp | fifs/web/Mail/ExternalMailer_BulkMail.pm | Perl | mit | 2,804 |
#!/bin/perl -w
# nonforker - server who multiplexes without forking
use POSIX;
use IO::Socket;
use IO::Select;
use Socket;
use Fcntl;
use Tie::RefHash;
$port = 1685; # change this at will
# Listen on the chosen TCP port; clients are accepted in the main loop.
$server = IO::Socket::INET->new(
    LocalPort => $port,
    Listen => 10,
) or die "Can't make server socket: $@\n";
# begin with empty buffers
my %inbuffer = ();   # partial input read so far, keyed by client handle
my %outbuffer = ();  # pending output waiting to be flushed, per client
my %ready = ();      # complete newline-terminated requests per client
# Tie::RefHash lets the hash be keyed by the socket references themselves.
tie %ready, 'Tie::RefHash';
# The listener must be non-blocking too so accept()/recv() never stall
# the single-threaded event loop.
nonblock($server);
$select = IO::Select->new($server);
# Main event loop: (1) accept/read from readable sockets, (2) process
# complete requests, (3) flush writable output buffers, (4) poll for
# out-of-band data. Runs forever.
while (1) {
    my $client;
    my $rv;
    my $data;
    # Check for new information on the connections we have:
    # anything to read or accept?
    foreach $client ($select->can_read(1)) {
        if ($client == $server) {
            # accept a new connection
            $client = $server->accept();
            $select->add($client);
            nonblock($client);
        } else {
            # read data
            $data = '';
            $rv = $client->recv($data, POSIX::BUFSIZ, 0);
            unless (defined($rv) && length $data) {
                # This is end of file, so close the client and forget
                # all of its buffered state.
                delete $inbuffer{$client};
                delete $outbuffer{$client};
                delete $ready{$client};
                $select->remove($client);
                close $client;
                next;
            }
            $inbuffer{$client} .= $data;
            # Test whether the data in the buffer or the data we just
            # read means there is a complete request waiting to be
            # fulfilled. If there is, queue it in $ready{$client}.
            while ($inbuffer{$client} =~ s/(.*\n)//) {
                push( @{$ready{$client}}, $1);
            }
        }
    }
    # Any complete requests to process?
    foreach $client (keys %ready) {
        handle($client);
    }
    # Buffers to flush?
    foreach $client ($select->can_write(1)) {
        # skip this client if we have nothing to say
        next unless exists $outbuffer{$client};
        $rv = $client->send($outbuffer{$client}, 0);
        unless (defined $rv) {
            # whine, but move on.
            warn "I was told I could write, but I can't.\n";
            next;
        }
        # FIX: the original tested "$1 == POSIX::EWOULDBLOCK" -- $1 is a
        # regex capture variable; the errno from send() lives in $!.
        if ($rv == length $outbuffer{$client} || $! == POSIX::EWOULDBLOCK) {
            # Drop whatever was actually sent; keep the remainder.
            substr($outbuffer{$client}, 0, $rv) = '';
            delete $outbuffer{$client} unless length $outbuffer{$client};
        } else {
            # Couldn't write all the data, and it wasn't because
            # it would have blocked. Shutdown and move on.
            delete $inbuffer{$client};
            delete $outbuffer{$client};
            delete $ready{$client};
            $select->remove($client);
            close($client);
            next;
        }
    }
    # Out of band data?
    foreach $client ($select->has_exception(0)) { # arg is timeout
        # Deal with out-of-band data here, if you want to.
    }
}
# handle($socket) deals with all pending requests for $client.
# Requests arrive in @{ $ready{$client} }; replies should be appended
# to $outbuffer{$client}. This skeleton simply drains the queue.
sub handle {
    my $client = shift;
    for my $request (@{ $ready{$client} }) {
        # $request is the text of one complete request.
        # Put the text of the reply into $outbuffer{$client} here.
    }
    delete $ready{$client};
}
# nonblock($socket) puts the socket into non-blocking mode by setting
# O_NONBLOCK on top of its current flags.
sub nonblock {
    my $socket = shift;
    my $flags;
    # Perl's fcntl returns "0 but true" when the flag word is zero,
    # so the 'or die' only fires on a genuine failure.
    $flags = fcntl($socket, F_GETFL, 0) or die "Can't get flags for socket; $!\n";
    fcntl($socket, F_SETFL, $flags | O_NONBLOCK) or die "Can't make socket nonblocking: $!\n";
} | jmcveigh/perl-cookbook-bins | nonforker.pl | Perl | mit | 3843 |
#
# Copyright (c) 2003-2006 University of Chicago and Fellowship
# for Interpretations of Genomes. All Rights Reserved.
#
# This file is part of the SEED Toolkit.
#
# The SEED Toolkit is free software. You can redistribute
# it and/or modify it under the terms of the SEED Toolkit
# Public License.
#
# You should have received a copy of the SEED Toolkit Public License
# along with this program; if not write to the University of Chicago
# at info@ci.uchicago.edu or the Fellowship for Interpretation of
# Genomes at veronika@thefig.info or download a copy from
# http://www.theseed.org/LICENSE.TXT.
#
#
# This is a SAS component.
#
package SAP;
use strict;
use ERDB;
use Tracer;
use SeedUtils;
use ServerThing;
=head1 Sapling Server Function Object
This file contains the functions and utilities used by the Sapling Server
(B<sap_server.cgi>). The various methods listed in the sections below represent
function calls direct to the server. These all have a signature similar to the
following.
my $results = $sapObject->function_name($args);
where C<$sapObject> is an object created by this module,
C<$args> is a parameter structure, and C<function_name> is the Sapling
Server function name. The output $results is a scalar, generally a hash
reference, but sometimes a string or a list reference.
=head2 Location Strings
Several methods deal with gene locations. Location information from the Sapling
server is expressed as I<location strings>. A location string consists of a
contig ID (which includes the genome ID), an underscore, a starting location, a
strand indicator (C<+> or C<->), and a length. The first location on the contig
is C<1>.
For example, C<100226.1:NC_003888_3766170+612> indicates contig C<NC_003888> in
genome C<100226.1> (I<Streptomyces coelicolor A3(2)>) beginning at location
3766170 and proceeding forward on the plus strand for 612 bases.
=head2 Constructor
Use
my $sapObject = SAPserver->new();
to create a new sapling server function object. The server function object
is used to invoke the L</Primary Methods> listed below. See L<SAPserver> for
more information on how to create this object and the options available.
=cut
#
# Actually, if you are using SAP.pm, you should do SAP->new(), not SAPserver->new()
# That comment above is for the benefit of the pod doc stuff on how to use SAPserver
# that is generated from this file.
#
sub new {
my ($class, $sap) = @_;
# Create the sapling object.
if (! defined $sap) {
$sap = ERDB::GetDatabase('Sapling');
}
# Create the server object.
my $retVal = { db => $sap };
# Bless and return it.
bless $retVal, $class;
return $retVal;
}
sub _set_memcache
{
my($self, $mcache) = @_;
$self->{memcache} = $mcache;
}
=head1 Primary Methods
=head2 Server Utility Methods
You will not use the methods in this section very often. Some are used by the
server framework for maintenance and control purposes (L</methods>), while others
(L</query> and L</get>) provide access to data in the database in case you need
data not available from one of the standard methods.
=head3 methods
my $methodList = $sapObject->methods();
Return a reference to a list of the methods allowed on this object.
=cut
use constant METHODS => [qw(
all_complexes
all_experiments
all_features
all_figfams
all_genomes
all_models
all_proteins
all_reactions
all_roles_used_in_models
all_subsystems
atomic_regulon_vectors
atomic_regulons
classification_of
close_genomes
clusters_containing
co_occurrence_evidence
compared_regions
complex_data
conserved_in_neighborhood
contig_lengths
contig_sequences
coregulated_correspondence
coregulated_fids
coupled_reactions
discriminating_figfams
dlits_for_ids
equiv_ids_for_sequences
equiv_precise_assertions
equiv_sequence_assertions
equiv_sequence_ids
exists
experiment_fid_levels
experiment_regulon_levels
expressed_genomes
feature_assignments
fid_correspondences
fid_experiments
fid_locations
fid_map_for_genome
fid_possibly_truncated
fid_vectors
fids_expressed_in_range
fids_to_ids
fids_to_proteins
fids_to_regulons
fids_with_evidence_code
fids_with_evidence_codes
figfam_fids
figfam_fids_batch
figfam_function
find_closest_genes
genes_in_region
gene_correspondence_map
genome_contigs
genome_contig_md5s
genome_data
genome_domain
genome_fid_md5s
genome_experiments
genome_experiment_levels
genome_figfams
genome_ids
genome_metrics
genome_names
genomes_to_subsystems
genomes_by_md5
get
get_subsystems
ids_in_subsystems
ids_to_annotations
ids_to_assertions
ids_to_data
ids_to_fids
ids_to_figfams
ids_to_functions
ids_to_genomes
ids_to_lengths
ids_to_publications
ids_to_sequences
ids_to_subsystems
intergenic_regions
is_in_subsystem
is_in_subsystem_with
is_prokaryotic
locs_to_dna
make_runs
mapped_genomes
models_to_reactions
occ_of_role
otu_members
pairsets
pegs_implementing_roles
pegs_in_subsystem
pegs_in_subsystems
pegs_in_variants
proteins_to_fids
query
reaction_neighbors
reaction_path
reaction_strings
reactions_to_complexes
reactions_to_roles
regulons_to_fids
related_clusters
related_figfams
representative
representative_genomes
role_neighbors
role_reactions
roles_exist_in_subsystem
roles_to_complexes
roles_to_figfams
roles_to_proteins
roles_to_subsystems
rows_of_subsystems
scenario_names
select
submit_gene_correspondence
subsystem_data
subsystem_genomes
subsystem_names
subsystem_roles
subsystem_spreadsheet
subsystem_type
subsystems_for_role
taxonomy_of
upstream
)];
sub methods {
# Get the parameters.
my ($self) = @_;
# Return the result.
return METHODS;
}
=head3 exists
my $idHash = $sapObject->exists({
-type => 'Genome',
-ids => [$id1, $id2, ...]
});
Return a hash indicating which of the specified objects of the given type exist
in the database. This method is used as a general mechanism for finding what
exists and what doesn't exist when you know the ID. In particular, you can use
it to check for the presence or absence of subsystems, genomes, features,
or FIGfams.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -type
The type of object whose existence is being queried. The type specification is
case-insensitive: C<genome> and C<Genome> are treated the same. The permissible
types are
=over 12
=item Genome
Genomes, identified by taxon ID: C<100226.1>, C<83333.1>, C<360108.3>
=item Feature
Features (genes), identified by FIG ID: C<fig|100226.1.peg.3361>, C<fig|360108.3.rna.4>
=item Subsystem
Subsystem, identified by subsystem name: C<Arginine biosynthesis extended>
=item FIGfam
FIGfam protein family, identified by ID: C<FIG000171>, C<FIG001501>
=back
=item -ids
Reference to a list of identifiers for objects of the specified type.
=back
=item RETURN
Returns a reference to a hash keyed by ID. For each incoming ID, it maps
to C<1> if an object of the specified type with that ID exists, else C<0>.
$idHash = { $id1 => $flag1, $id2 => $flag2, ... };
=back
=cut
use constant EXIST_OBJECT_TYPES => { genome => 'Genome', feature => 'Feature',
subsystem => 'Subsystem', figfam => 'Family'
};
sub exists {
# Get the parameters.
my ($self, $args) = @_;
# Get the sapling database.
my $sap = $self->{db};
# Get the list of identifiers.
my $ids = ServerThing::GetIdList(-ids => $args);
# Get the object type.
my $type = $args->{-type};
Confess("No -type parameter specified.") if ! defined $type;
my $objectType = EXIST_OBJECT_TYPES->{lc $type};
Confess("Invalid object type \"$type\".") if ! defined $objectType;
# Declare the return variable.
my $retVal = {};
# Loop through the identifiers, checking existence.
for my $id (@$ids) {
$retVal->{$id} = ($sap->Exists($objectType, $id) ? 1 : 0);
}
# Return the result.
return $retVal;
}
=head3 get
my $hashList = $sapObject->get({
-objects => $objectNameString,
-filter => { $label1 => $criterion1, $label2 => $criterion2, ... },
-limit => $maxRows,
-fields => { $label1 => $name1, $label2 => $name2, ... },
-multiples => 'list',
-firstOnly => 1
});
Query the Sapling database. This is a variant of the L</query> method in
which a certain amount of power is sacrificed for ease of use. Instead of
a full-blown filter clause, the caller specifies a filter hash that maps
field identifiers to values.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -objects
The object name string listing all the entities and relationships in the
query. See L<ERDB/Object Name List> for more details.
=item -filter (optional)
Reference to a hash that maps field identifiers in L<ERDB/Standard Field Name Format>
to criteria. A criterion is either an object or scalar value (which is asserted
as the value of the field), a 2-tuple consisting of a relational operator and
a value (which is asserted to be in the appropriate relation to the field), or a
sub-list consisting of the word C<IN> and two or more values (which asserts that
the field has one of the listed values). A record satisfies the filter if it satisfies
all the criteria in the hash.
=item -limit (optional)
Maximum number of rows to return for this query. The default is no limit.
=item -fields (optional)
Reference to a hash mapping field identifiers to field names. In this case,
the field identifier is a field name in L<ERDB/Standard Field Name Format>
and the field name is the key value that will be used for the field in the
returned result hashes. If this parameter is omitted, then instead of a
returning the results, this method will return a count of the number of
records found.
=item -multiples (optional)
Rule for handling field values in the result hashes. The default option is
C<smart>, which maps single-valued fields to scalars and multi-valued fields
to list references. If C<primary> is specified, then all fields are mapped
to scalars-- only the first value of a multi-valued field is retained. If
C<list> is specified, then all fields are mapped to lists.
=item -firstOnly (optional)
If TRUE, only the first result will be returned. In this case, the return
value will be a hash reference instead of a list of hash references. The
default is FALSE.
=back
=item RETURN
Returns a reference to a list of hashes. Each hash represents a single record in
the result set, and maps the output field names to the field values for that
record. Note that if a field is multi-valued, it will be represented as a
list reference.
$hashList = [{ $label1 => $row1value1, $label2 => $row1value2, ... },
{ $label1 => $row2value1, $label2 => $row2value2, ... },
... ];
=back
=cut
sub get {
# Get the parameters.
my ($self, $args) = @_;
# Get the Sapling database.
my $sap = $self->{db};
# Get the filter hash, the flags, and the limit. All of these
# are optional.
my $filter = $args->{-filter} || {};
my $limit = $args->{-limit} || 0;
my $multiples = $args->{-multiples} || 'smart';
my $firstOnly = $args->{-firstOnly} || 0;
# Initialize the return variable. In first-only mode, it starts
# undefined; otherwise, it's an empty list.
my $retVal = ($firstOnly ? undef : []);
# Get the object name string and the result field hash.
my $objects = $args->{-objects};
my $fields = $args->{-fields};
# Insure the object name list is present.
if (! $objects) {
Confess("Object name string not specified.");
} else {
# Get the default object name from the object name list.
my ($defaultObject) = split m/\s+/, $objects, 2;
# We'll build the filter elements and the parameter list in
# these lists. The filter string will be formed by ANDing
# together the filter elements.
my (@filters, @parms);
# Loop through the filter hash.
for my $filterField (keys %$filter) {
# Compute the field type.
my $fieldType = $sap->FieldType($filterField);
# Get this field's criterion.
my $criterion = $filter->{$filterField};
# If the criterion is a not a list, make it one.
if (! defined $criterion) {
Confess("Invalid (missing) criterion for field \"$filterField\".");
} elsif (ref $criterion ne 'ARRAY') {
$criterion = ['=', $criterion];
}
# Determine the criterion type.
if ($criterion->[0] eq 'LIKE') {
# For a LIKE, we don't convert the value.
push @filters, "$filterField LIKE ?";
push @parms, $criterion->[1];
} elsif ($criterion->[0] eq 'IN') {
# For an IN, we have to deal with multiple field values. We'll
# stash one question mark per value in this list.
my @marks;
# Process the criterion elements.
for (my $i = 1; $i < scalar @$criterion; $i++) {
push @marks, "?";
push @parms, $fieldType->encode($criterion->[$i]);
}
# Form the filter element from the collected marks.
push @filters, "$filterField IN (" . join(", ", @marks) . ")";
} else {
# here we have a normal comparison.
push @filters, "$filterField $criterion->[0] ?";
push @parms, $fieldType->encode($criterion->[1]);
}
}
# Create the filter string.
my $filterString = join(" AND ", @filters);
# Add the limit clause.
if ($firstOnly) {
$filterString .= " LIMIT 1";
} elsif ($limit > 0) {
$filterString .= " LIMIT $limit";
}
# Is this a query or a count?
if (! defined $fields) {
# It's a count. Do a GetCount call.
$retVal = $sap->GetCount($objects, $filterString, \@parms);
} else {
# Here we have a real query. Now we run it.
my $query = $sap->Get($objects, $filterString, \@parms);
# Loop through the results.
while (my $record = $query->Fetch()) {
# Create the result hash for this record.
my %results;
# Loop through the fields.
for my $outputField (keys %$fields) {
# Get the value.
my @values = $record->Value($outputField);
# Get the output field name.
my $outputName = $fields->{$outputField};
# Process according to the output type.
if ($multiples eq 'list') {
$results{$outputName} = \@values;
} elsif ($multiples eq 'primary' || scalar(@values) == 1) {
$results{$outputName} = $values[0];
} else {
$results{$outputName} = \@values;
}
}
# Add the result hash to the output. In first-only mode,
# we store it; otherwise, we push it in.
if ($firstOnly) {
$retVal = \%results;
} else {
push @$retVal, \%results;
}
}
}
}
# Return the result.
return $retVal;
}
=head3 query
my $rowList = $sapObject->query({
-objects => $objectNameString,
-filterString => $whereString,
-limit => $maxRows,
-parameters => [$parm1, $parm2, ...],
-fields => [$name1, $name2, ...]
});
This method queries the Sapling database and returns a reference to a list of
lists. The query is specified in the form of an object name string, a filter
string, an optional list of parameter values, and a list of desired output
fields. The result document can be thought of as a two-dimensional array, with
each row being a record returned by the query and each column representing an
output field.
This function buys a great deal of flexibility as the cost of ease of use.
Before attempting to formulate a query, you will need to look at the
L<ERDB> documentation.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -objects
The object name string listing all the entities and relationships in the
query. See L<ERDB/Object Name List> for more details.
=item -filterString
The filter string for the query. It cannot contain a C<LIMIT> clause, but
can otherwise be anything described in L<ERDB/Filter Clause>.
=item -limit (optional)
Maximum number of rows to return for this query. The default is C<1000>. To
make an unlimited query, specify C<none>.
=item -parameters (optional)
Reference to a list of parameter values. These should be numbers or strings,
and are substituted for any parameter marks in the query on a one-for-one
basis. See also L<ERDB/Parameter List>.
=item -fields
Reference to a list containing the names of the desired output fields.
=back
=item RETURN
Returns a reference to a list of lists. Each row corresponds to a database
result row, and each column corresponds to one of the incoming output fields.
Note that some fields contain complex PERL data structures, and fields that
are multi-valued will contain sub-lists.
$rowList = [[$row1field1, $row1field2, ...],
[$row2field1, $row2field2, ...],
[$row3field1, $row3field2, ...],
... ];
=back
=cut
sub query {
    my ($self, $args) = @_;
    # Unpack the query specification, applying defaults.
    my $objectNames = $args->{-objects} || '';
    my $paramValues = $args->{-parameters} || [];
    my $whereClause = $args->{-filterString} || '';
    my $rowLimit = $args->{-limit} || 1000;
    my $outputFields = $args->{-fields} || [];
    # A limit of 'none' means unlimited, which the console expresses as 0.
    $rowLimit = 0 if $rowLimit eq 'none';
    # The result rows accumulate in here.
    my @rows;
    # Pull in the query console on demand.
    require ERDBQueryConsole;
    Trace("Submitting query.") if T(3);
    # Build the console object in secure, raw mode: the user is allowed
    # unlimited queries, and results come back as raw data rather than HTML.
    my $console = ERDBQueryConsole->new($self->{db}, secure => 1, raw => 1);
    # Submit the query; a false result indicates failure.
    my $submitted = $console->Submit($objectNames, $whereClause, $paramValues,
                                     $outputFields, $rowLimit);
    die $console->Messages() if ! $submitted;
    Trace("Processing query results.") if T(3);
    # Harvest every result row as a list reference.
    while (my @row = $console->GetRow()) {
        push @rows, \@row;
    }
    return \@rows;
}
=head3 select
my $listList = $sapObject->select({
-path => $objectNameString,
-filter => { $field1 => $list1, $field2 => $list2, ... },
-fields => [$fieldA, $fieldB, ... ],
-limit => $maxRows,
-multiples => 'list'
});
Query the Sapling database. This is a variant of the L</get> method in
which a further amount of power is sacrificed for ease of use. The
return is a list of lists, and the criteria are always in the form of
lists of possible values.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -path
The object name string listing all the entities and relationships in the
query. See L<ERDB/Object Name List> for more details.
=item -filter (optional)
Reference to a hash that maps field identifiers in L<ERDB/Standard Field Name Format>
to lists of permissible values. A record matches the filter if the field value
matches at least one element of the list.
=item -fields
Reference to a list of field names in L<ERDB/Standard Field Name Format>.
=item -limit (optional)
Maximum number of rows to return for this query. The default is no limit.
=item -multiples (optional)
Rule for handling field values in the result hashes. The default option is
C<smart>, which maps single-valued fields to scalars and multi-valued fields
to list references. If C<primary> is specified, then all fields are mapped
to scalars-- only the first value of a multi-valued field is retained. If
C<list> is specified, then all fields are mapped to lists.
=back
=item RETURN
Returns a reference to a list of lists. Each sub-list represents a single record
in the result set, and contains the field values in the order the fields were
listed in the C<-fields> parameter. Note that if a field is multi-valued, it will
be represented as a list reference.
$listList = [[$row1value1, $row1value2, ... ], [$row2value1, $row2value2, ...], ... ];
=back
=cut
sub select {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # Get the filter hash, the row limit, and the multi-value disposition.
    # All of these are optional.
    my $filter = $args->{-filter} || {};
    my $limit = $args->{-limit} || 0;
    my $multiples = $args->{-multiples} || 'smart';
    # Initialize the return variable.
    my $retVal = [];
    # Get the object name string and the result field list.
    my $objects = $args->{-path};
    my $fields = ServerThing::GetIdList(-fields => $args);
    # Insure the object name list is present.
    if (! $objects) {
        Confess("Object name string not specified.");
    } else {
        # We'll build the filter elements and the parameter list in
        # these lists. The filter string will be formed by ANDing
        # together the filter elements.
        my (@filters, @parms);
        # Loop through the filter hash.
        for my $filterField (keys %$filter) {
            # Compute the field type, which knows how to encode criterion
            # values for the database.
            my $fieldType = $sap->FieldType($filterField);
            # Get this field's criterion.
            my $criterion = $filter->{$filterField};
            # Insure the criterion exists.
            if (! defined $criterion) {
                Confess("Invalid (missing) criterion for field \"$filterField\".");
            } elsif (ref $criterion ne 'ARRAY') {
                # Here we have a scalar criterion. It is encoded as
                # an equality clause.
                push @parms, $fieldType->encode($criterion);
                push @filters, "$filterField = ?";
            } else {
                # Here we have to deal with multiple field values. We'll
                # stash one question mark per value in this list.
                my @marks;
                # Process the criterion elements.
                for (my $i = 0; $i < scalar @$criterion; $i++) {
                    push @marks, "?";
                    push @parms, $fieldType->encode($criterion->[$i]);
                }
                # Form the filter element from the collected marks.
                push @filters, "$filterField IN (" . join(", ", @marks) . ")";
            }
        }
        # Create the filter string.
        my $filterString = join(" AND ", @filters);
        # Add the limit clause.
        if ($limit > 0) {
            $filterString .= " LIMIT $limit";
        }
        # Run the query.
        my $query = $sap->Get($objects, $filterString, \@parms);
        # Loop through the results.
        while (my $record = $query->Fetch()) {
            # Create the result list for this record.
            my @results;
            # Loop through the fields.
            for my $outputField (@$fields) {
                # Get the value.
                my @values = $record->Value($outputField);
                # Process according to the output type: 'list' always yields
                # a list; 'primary' always yields a scalar (first value);
                # 'smart' (the default) yields a scalar for single values and
                # a list otherwise.
                if ($multiples eq 'list') {
                    push @results, \@values;
                } elsif ($multiples eq 'primary' || scalar(@values) == 1) {
                    push @results, $values[0];
                } else {
                    push @results, \@values;
                }
            }
            # Add this record's result list to the output.
            push @$retVal, \@results;
        }
    }
    # Return the result.
    return $retVal;
}
=head2 Annotation and Assertion Data Methods
=head3 equiv_precise_assertions
my $idHash = $sapObject->equiv_precise_assertions({
-ids => [$id1, $id2, ...]
});
Return the assertions for all genes in the database that match the
identified gene. The gene can be specified by any prefixed gene
identifier (e.g. C<uni|AYQ44>, C<gi|85841784>, or
C<fig|360108.3.peg.1041>).
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of gene identifiers.
=back
For backward compatibility, the parameter can also be a reference to a list
of gene identifiers.
=item RETURN
Returns a reference to a hash that maps each incoming ID to a list of 4-tuples.
Each 4-tuple contains (0) an identifier that is for the same gene as the input
identifier, (1) the asserted function of that identifier, (2) the source of
the assertion, and (3) a flag that is TRUE if the assertion is by a human expert.
    $idHash = { $id1 => [[$otherID1a, $function1a, $source1a, $flag1a], ...],
                $id2 => [[$otherID2a, $function2a, $source2a, $flag2a], ...],
                ... };
In backward-compatibility mode, returns a reference to a list of 2-tuples. Each
2-tuple consists of an incoming ID and the list of 4-tuples with the asserted
function information.
=back
=cut
sub equiv_precise_assertions {
    my ($self, $args) = @_;
    # Sapling database handle.
    my $sap = $self->{db};
    # Results accumulate here, keyed by incoming identifier.
    my $retVal = {};
    # A bare list reference instead of a hash means the caller is using the
    # old-style interface; normalize the arguments and remember to convert
    # the output back at the end.
    my $backwardMode = 0;
    unless (ref $args eq 'HASH') {
        $args = { -ids => $args };
        $backwardMode = 1;
    }
    # Pull out the identifier list.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # For each identifier, collect every assertion attached to it.
    for my $id (@$ids) {
        my @assertionRows = $sap->GetAll("Identifier HasAssertionFrom Source",
                                         'Identifier(id) = ? ',
                                         [$id], [qw(Identifier(id)
                                                    HasAssertionFrom(function)
                                                    Source(id)
                                                    HasAssertionFrom(expert))]);
        $retVal->{$id} = \@assertionRows;
    }
    # Old-style callers get a list of [id, rows] pairs instead of a hash.
    if ($backwardMode) {
        $retVal = [ map { [$_, $retVal->{$_}] } @$ids ];
    }
    return $retVal;
}
=head3 equiv_sequence_assertions
my $idHash = $sapObject->equiv_sequence_assertions({
-ids => [$id1, $id2, ...]
});
Return the assertions for all genes in the database that match the
identified protein sequences. A protein sequence can be identified by a
protein MD5 code or any prefixed gene identifier (e.g. C<uni|AYQ44>,
C<gi|85841784>, or C<fig|360108.3.peg.1041>).
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of protein identifiers. Each identifier should be a prefixed
gene identifier or the (optionally) prefixed MD5 of a protein sequence.
=back
=item RETURN
Returns a reference to a hash mapping each incoming protein identifier to a list
of 5-tuples, consisting of (0) an identifier that is sequence-equivalent to the
input identifier, (1) the asserted function of that identifier, (2) the source
of the assertion, (3) a flag that is TRUE if the assertion is by an expert, and
(4) the name of the genome relevant to the identifer (if any).
    $idHash = { $id1 => [[$otherID1a, $function1a, $source1a, $flag1a, $genome1a], ...],
                $id2 => [[$otherID2a, $function2a, $source2a, $flag2a, $genome2a], ...],
                ... };
=back
=cut
sub equiv_sequence_assertions {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # Convert a list to a hash (backward compatibility).
    if (ref $args ne 'HASH') {
        $args = { -ids => $args };
    }
    # Declare the return variable.
    my $retVal = {};
    # Get the list of IDs.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Loop through the IDs in the list.
    for my $id (@$ids) {
        # This hash will contain the set of relevant protein sequence IDs.
        my %prots;
        # All assertions found for this ID accumulate in here.
        my @results;
        # Determine the ID type.
        if (my $prot = $sap->IsProteinID($id)) {
            # Here we have a protein sequence MD5 ID. In this case, we just
            # strip the prefix to get a Sapling protein sequence ID.
            $prots{$prot} = 1;
        } else {
            # Here we have a gene ID. Start by asking for all of the
            # protein sequences it identifies directly.
            my @prots = $sap->GetFlat("Identifier Names ProteinSequence",
                                      'Identifier(id) = ?', [$id],
                                      'ProteinSequence(id)');
            # Add the ones it identifies through a feature.
            push @prots, $sap->GetFlat("Identifier Identifies Feature Produces ProteinSequence",
                                       'Identifier(id) = ?', [$id],
                                       'ProteinSequence(id)');
            # Put all the proteins found in the hash.
            for my $prot (@prots) {
                $prots{$prot} = 1;
            }
        }
        # Loop through the protein sequences, finding assertions. For each
        # protein, we make two queries. Note that we expect the number of
        # protein sequences to be small, despite the large amount of work
        # performed above.
        for my $prot (sort keys %prots) {
            # Get the assertions on the protein's identifiers. Note that we
            # push (rather than assign) so that assertions found for earlier
            # proteins in this loop are not discarded.
            push @results, $sap->GetAll("ProteinSequence IsNamedBy Identifier HasAssertionFrom Source",
                                        "ProteinSequence(id) = ?", [$prot],
                                        [qw(Identifier(id) HasAssertionFrom(function)
                                            Source(id) HasAssertionFrom(expert))]);
            # Add the assertions on the identifiers for the protein's features.
            push @results, $sap->GetAll("ProteinSequence IsProteinFor Feature IsIdentifiedBy Identifier HasAssertionFrom Source AND Feature IsOwnedBy Genome",
                                        "ProteinSequence(id) = ?", [$prot],
                                        [qw(Identifier(id) HasAssertionFrom(function)
                                            Source(id) HasAssertionFrom(expert)
                                            Genome(scientific-name))]);
        }
        # Store everything found for this ID in the return object.
        Trace(scalar(@results) . " results found for $id.") if T(3);
        $retVal->{$id} = \@results;
    }
    # Return the result.
    return $retVal;
}
=head3 feature_assignments
my $featureHash = $sapObject->feature_assignments({
-genome => $genomeID,
-type => 'peg',
-hypothetical => 1
});
Return all features of the specified type for the specified genome along
with their assignments.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -genome
ID of the genome whose features are desired.
=item -type (optional)
If specified, the type of feature desired (C<peg>, C<rna>, etc.). If omitted,
all features will be returned.
=item -hypothetical (optional)
If C<1>, only hypothetical genes will be returned; if C<0>, only non-hypothetical
genes will be returned. If undefined or not specified, all genes will be
returned.
=back
=item RETURN
Returns a hash mapping the ID of each feature in the specified genome to
its assignment.
$featureHash = { $fid1 => $function1, $fid2 => $function2, ... };
=back
=cut
sub feature_assignments {
    # Get the parameters.
    my ($self, $args) = @_;
    # Initialize the return variable to an empty hash so callers always get
    # a hash reference, even when the hypothetical filter matches nothing.
    my $retVal = {};
    # Get the Sapling database.
    my $sap = $self->{db};
    # Get the ID of the desired genome.
    my $genomeID = $args->{-genome};
    if (! $genomeID) {
        Confess("No genome ID specified for feature_assignments.");
    } else {
        # Start the feature ID search pattern.
        my $pattern = "fig|$genomeID.";
        # Add the feature type (if any).
        my $type = $args->{-type};
        if ($type) {
            $pattern .= "$type.";
        }
        # Append a wild card so we match everything in the genome.
        $pattern .= "%";
        # Ask for all the features.
        my @rows = $sap->GetAll("Feature", 'Feature(id) LIKE ?', [$pattern],
                                [qw(id function)]);
        # Are we checking for hypotheticals?
        my $hypothetical = $args->{-hypothetical};
        if (! defined $hypothetical) {
            # No. Put everything in the return hash.
            $retVal = { map { $_->[0] => $_->[1] } @rows };
        } else {
            # Yes. We need to check every functional assignment.
            for my $row (@rows) {
                # Get the ID and assignment.
                my ($fid, $assignment) = @$row;
                # Check to see if the assignment is hypothetical.
                my $rowHypothetical = (hypo($assignment) ? 1 : 0);
                Trace("Assignment \"$assignment\" has hypo = $rowHypothetical.") if T(3);
                # Include it if it matches the criterion specified by the caller.
                if ($rowHypothetical == $hypothetical) {
                    $retVal->{$fid} = $assignment;
                }
            }
        }
    }
    # Return the result.
    return $retVal;
}
=head3 ids_to_assertions
my $idHash = $sapObject->ids_to_assertions({
-ids => [$id1, $id2, ...]
});
Return the assertions associated with each prefixed ID.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of prefixed feature IDs (e.g. C<gi|17017961>,
C<NP_625335.1>, C<fig|360108.3.peg.1041>). The assertions associated with
each particular identifier will be returned. In this case, there will be
no processing for equivalent IDs. For that, you should use
L<equiv_sequence_assertions> or L<equiv_precise_assertions>.
=back
=item RETURN
Returns a reference to a hash mapping every incoming ID to a list of
3-tuples, each consisting of (0) an asserted function, (1)
the source of the assertion, and (2) a flag that is TRUE if the assertion
was made by an expert.
$idHash = { $id1 => [[$assertion1a, $source1a, $expert1a],
[$assertion1b, $source1b, $expert1b], ...],
$id2 => [[$assertion2a, $source2a, $expert2a],
[$assertion2b, $source2b, $expert2b], ...],
... };
=back
=cut
sub ids_to_assertions {
    my ($self, $args) = @_;
    # Sapling database handle.
    my $sap = $self->{db};
    # Output hash, keyed by incoming identifier.
    my $retVal = {};
    # Extract the identifier list from the argument hash.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Map each identifier to the full list of assertions attached to it.
    foreach my $identifier (@$ids) {
        my @assertions = $sap->GetAll("Identifier HasAssertionFrom Source",
                                      'Identifier(id) = ? ',
                                      [$identifier], [qw(HasAssertionFrom(function)
                                                         Source(id)
                                                         HasAssertionFrom(expert))]);
        $retVal->{$identifier} = \@assertions;
    }
    return $retVal;
}
=head3 ids_to_annotations
my $idHash = $sapObject->ids_to_annotations({
-ids => [$id1, $id2, ...]
});
Return the annotations associated with each prefixed ID. Annotations are
comments attached to each feature (gene), and include past functional
assignments as well as more general information.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of feature IDs.
=item -source (optional)
Database source of the IDs specified-- C<SEED> for FIG IDs, C<GENE> for standard
gene identifiers, or C<LocusTag> for locus tags. In addition, you may specify
C<RefSeq>, C<CMR>, C<NCBI>, C<Trembl>, or C<UniProt> for IDs from those databases.
Use C<mixed> to allow mixed ID types (though this may cause problems when the same
ID has different meanings in different databases). Use C<prefixed> to allow IDs with
prefixing indicating the ID type (e.g. C<uni|P00934> for a UniProt ID, C<gi|135813> for
an NCBI identifier, and so forth). The default is C<SEED>.
=back
=item RETURN
Returns a reference to a hash mapping every incoming ID to a list of
3-tuples, each consisting of (0) annotation text, (1) the name of the
annotator, and (2) the timestamp of the annotation (as a number of seconds
since the epoch).
$idHash = { $id1 => [[$annotation1a, $name1a, $time1a],
[$annotation1b, $name1b, $time1b], ...],
$id2 => [[$annotation2a, $name2a, $time2a],
[$annotation2b, $name2b, $time2b], ...],
... };
=back
=cut
sub ids_to_annotations {
    my ($self, $args) = @_;
    # Sapling database handle.
    my $sap = $self->{db};
    # Output hash, keyed by incoming identifier.
    my $retVal = {};
    # Extract the identifier list and the optional ID-type indicator.
    my $ids = ServerThing::GetIdList(-ids => $args);
    my $source = $args->{-source};
    # The source determines how identifiers are resolved to features; the
    # database computes the object path, filter string, and bound parameters.
    my ($objects, $filter, @parms) = $sap->ComputeFeatureFilter($source);
    # Collect the annotations for each identifier.
    foreach my $identifier (@$ids) {
        my @annotations = $sap->GetAll("$objects IsAnnotatedBy Annotation",
                                       $filter, [@parms, $identifier],
                                       [qw(Annotation(comment)
                                           Annotation(annotator)
                                           Annotation(annotation-time))]);
        $retVal->{$identifier} = \@annotations;
    }
    return $retVal;
}
=head3 ids_to_functions
my $featureHash = $sapObject->ids_to_functions({
-ids => [$id1, $id2, ...],
-source => 'CMR',
-genome => $genome
});
Return the functional assignment for each feature in the incoming list.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of feature IDs.
=item -source (optional)
Database source of the IDs specified-- C<SEED> for FIG IDs, C<GENE> for standard
gene identifiers, or C<LocusTag> for locus tags. In addition, you may specify
C<RefSeq>, C<CMR>, C<NCBI>, C<Trembl>, or C<UniProt> for IDs from those databases.
Use C<mixed> to allow mixed ID types (though this may cause problems when the same
ID has different meanings in different databases). Use C<prefixed> to allow IDs with
prefixing indicating the ID type (e.g. C<uni|P00934> for a UniProt ID, C<gi|135813> for
an NCBI identifier, and so forth). The default is C<SEED>.
=item -genome (optional)
ID of a specific genome. If specified, results will only be returned for genes in the
specified genome. The default is to return genes for all genomes.
=back
=item RETURN
Returns a reference to a hash mapping each feature ID to the feature's current
functional assignment. Features that do not exist in the database will not be
present in the hash. For IDs that correspond to multiple features, only one
functional assignment will be returned.
$featureHash = { $id1 => $function1,
$id2 => $function2,
...};
=back
=cut
sub ids_to_functions {
    my ($self, $args) = @_;
    # Output hash, keyed by incoming identifier.
    my $retVal = {};
    # Sapling database handle.
    my $sap = $self->{db};
    # Extract the identifier list and the optional source/genome filters.
    my $ids = ServerThing::GetIdList(-ids => $args);
    my $source = $args->{-source};
    my $genome = $args->{-genome};
    # SEED IDs with no genome filter can be served entirely from the
    # optimized, cache-backed lookup path.
    if ((!defined($source) || $source eq 'SEED') && !defined($genome)) {
        return $self->_ids_to_functions_opt1($ids);
    }
    # Otherwise have the database compute the object path, filter string,
    # and bound parameters appropriate to the ID type and genome.
    my ($objects, $filter, @parms) = $sap->ComputeFeatureFilter($source, $genome);
    # Look up each identifier's functional assignment.
    for my $fid (@$ids) {
        my ($assignment) = $sap->GetFlat($objects, $filter, [@parms, $fid],
                                         "Feature(function)");
        # Identifiers with no assignment are simply omitted from the output.
        next if ! $assignment;
        Trace("Function found for $fid.") if T(3);
        $retVal->{$fid} = $assignment;
    }
    return $retVal;
}
use Data::Dumper;
# Optimized lookup of functional assignments for SEED (fig) feature IDs,
# bypassing ERDB and issuing direct SQL, with memcache acceleration via
# _memcache_accelerate (key prefix "f").
#
# $ids is a reference to a list of feature IDs. Returns a hash reference
# mapping each ID found in the Feature table to its function string.
sub _ids_to_functions_opt1
{
    my($self, $ids) = @_;
    # NOTE(review): quote() is called with no argument; presumably the
    # underlying database handle returns the identifier quote character in
    # that case (it is used below to quote the table name) -- confirm
    # against the DBKernel/DBI layer.
    my $q = $self->{db}->{_dbh}->quote();
    my $out = $self->_memcache_accelerate($ids, "f", sub {
        # $id_hash holds the IDs not satisfied from the cache; results go
        # into $out, and [key, value, timeout] triples pushed onto $upd
        # (when defined) are written back to the cache by the caller.
        my($self, $id_hash, $out, $upd) = @_;
        my @ids = keys %$id_hash;
        # One placeholder per ID for the IN clause.
        my $qs = join(", ", map { "?" } 0..$#ids);
        my $res = $self->{db}->{_dbh}->SQL(qq(SELECT id, function
FROM ${q}Feature$q
WHERE id IN ($qs)), undef, @ids);
        for my $ent (@$res)
        {
            my($id, $fn) = @$ent;
            $out->{$id} = $fn;
            # Cache each hit for 12 hours when an update list was supplied.
            push(@$upd, ["f:$id", $fn, 12 * 60 * 60]) if $upd;
        }
    });
    return $out;
}
# Generic memcache-accelerated lookup for scalar values.
#
# $ids is a list reference of keys to resolve; $prefix namespaces the
# memcache keys; $lookup_code is invoked as
#     $lookup_code->($self, \%remaining_ids, $out_hash, $update_list)
# and must fill $out_hash with results for the IDs it resolves, optionally
# pushing [$key, $value, $timeout] triples onto $update_list for caching.
# $update_list is undef when no memcache client is configured.
#
# Returns a hash reference mapping each resolved ID to its value.
sub _memcache_accelerate
{
    my($self, $ids, $prefix, $lookup_code) = @_;
    my $mc = $self->{memcache};
    my $out = {};
    $prefix .= ":";
    # The set of IDs still needing resolution.
    my %ids = map { $_ => 1 } @$ids;
    my $update;
    if (defined($mc))
    {
        # Satisfy what we can from the cache, stripping the key prefix
        # from each hit.
        my $mcout = $mc->get_multi(map { $prefix . $_ } keys %ids);
        # print STDERR "memcache get " . Dumper($mcout);
        for my $fid (keys %$mcout)
        {
            my $k = $fid;
            $fid =~ s/^$prefix//;
            $out->{$fid} = $mcout->{$k};
            delete $ids{$fid};
        }
        # Collect cache updates only when a cache is actually available.
        $update = [];
    }
    #
    # Look up the remaining fids
    #
    if (%ids)
    {
        # print STDERR "lookup " . Dumper(\%ids);
        $lookup_code->($self, \%ids, $out, $update);
    }
    # Write any newly computed values back to the cache.
    if ($update && @$update)
    {
        if ($mc->can('set_multi'))
        {
            $mc->set_multi(@$update);
        }
        else
        {
            $mc->set(@$_) foreach @$update;
        }
    }
    return $out;
}
# Generic memcache-accelerated lookup for list values. Works like
# _memcache_accelerate, but values are list references; they are stored in
# the cache joined with the subscript separator $; and split back on read.
# Here $lookup_code receives a hash reference ($update) and should store
# $update->{$key} = \@values for entries it wants cached.
#
# Returns a hash reference mapping each resolved ID to a list reference.
sub _memcache_accelerate_list
{
    my($self, $ids, $prefix, $lookup_code) = @_;
    my $mc = $self->{memcache};
    my $out = {};
    $prefix .= ":";
    # The set of IDs still needing resolution.
    my %ids = map { $_ => 1 } @$ids;
    my $update;
    if (defined($mc))
    {
        my $mcout = $mc->get_multi(map { $prefix . $_ } keys %ids);
        # print STDERR "memcache get " . Dumper($mcout);
        for my $fid (keys %$mcout)
        {
            my $k = $fid;
            $fid =~ s/^$prefix//;
            # Cached values are lists joined with $; -- split them back out.
            $out->{$fid} = [split(/$;/, $mcout->{$k})];
            delete $ids{$fid};
        }
        $update = {};
    }
    #
    # Look up the remaining fids
    #
    if (%ids)
    {
        # print STDERR "lookup " . Dumper(\%ids);
        $lookup_code->($self, \%ids, $out, $update);
    }
    if ($update && %$update)
    {
        # All cache entries expire after 12 hours.
        my $timeout = 12 * 60 * 60;
        if ($mc->can('set_multi'))
        {
            my @sets;
            # BUG FIX: each() yields only (key, value); the original also
            # unpacked a third variable named $timeout, which shadowed the
            # real timeout above with undef.
            while (my($k, $vlist) = each %$update)
            {
                push(@sets, [$k, join($;, @$vlist), $timeout]);
            }
            $mc->set_multi(@sets);
        }
        else
        {
            while (my($k, $vlist) = each %$update)
            {
                $mc->set($k, join($;, @$vlist), $timeout);
            }
        }
    }
    return $out;
}
=head3 occ_of_role
my $roleHash = $sapObject->occ_of_role({
-roles => [$role1, $role2, ...],
-functions => [$function3, $function4, ...],
-genomes => [$genome1, $genome2, ...],
});
Search for features in a specified genome with the indicated roles or
functions.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -roles (optional)
Reference to a list of the roles to search for.
=item -functions (optional)
Reference to a list of the functional assignments to search for.
=item -genomes (optional)
ID of the genomes whose genes are to be searched for the specified roles and
assignments.
=back
=item RETURN
Returns a reference to a hash that maps each specified role ID or functional
assignment to a list of the FIG IDs of genes that have that role or assignment.
$roleHash = { $role1 => [$fid1a, $fid1b, ...],
$role2 => [$fid2a, $fid2b, ...],
$function3 => [$fid3a, $fid3b, ...],
$function4 => [$fid4a, $fid4b, ...],
... };
=back
=cut
# Find the features possessing each of a set of roles and/or functional
# assignments, optionally restricted to a set of genomes. Returns a hash
# mapping each incoming role or function to a list of FIG feature IDs.
sub occ_of_role {
    # Get the parameters.
    my ($self, $args) = @_;
    # Declare the return variable.
    my $retVal = {};
    # Get the sapling database.
    my $sap = $self->{db};
    # Get the list of roles. (The extra argument presumably marks the
    # parameter as optional -- confirm against ServerThing::GetIdList.)
    my $roles = ServerThing::GetIdList(-roles => $args, 1);
    # Get the list of functions.
    my $functions = ServerThing::GetIdList(-functions => $args, 1);
    # Insure we have something to look for.
    Confess("No -roles or -functions specified for occ_of_role.") if ! (@$roles + @$functions);
    # These hashes will be used to keep the results. Roles map to lists of
    # feature IDs; functions map to sets so duplicates can be filtered.
    my %roleHash = map { $_ => [] } @$roles;
    my %functionHash = map { $_ => {} } @$functions;
    # Get the IDs of the relevant genomes.
    my $genomes = ServerThing::GetIdList(-genomes => $args, 1);
    # For backward compatibility, we support the old -genome parameter.
    if (exists $args->{-genome}) {
        push @$genomes, $args->{-genome};
    }
    # Create a hash and a flag to help us filter by genome.
    my $genomeFilter = (@$genomes > 0);
    my %genomeHash = map { $_ => 1 } @$genomes;
    # We'll build the filter clause and parameters in here.
    my ($filter, @parms) = ('IsFunctionalIn(from-link) = ?');
    # If the number of genomes is small, push the genome restriction into
    # the query itself via LIKE patterns on the feature IDs; otherwise the
    # genome check happens in the loop below.
    if ($genomeFilter && @$genomes < 10) {
        # For each genome, accumulate a LIKE filter and a feature ID pattern.
        my @filters;
        for my $genome (@$genomes) {
            push @filters, 'IsFunctionalIn(to-link) LIKE ?';
            push @parms, "fig|$genome.%";
        }
        # Add the feature ID patterns to the filter string.
        $filter .= " AND (" . join(" OR ", @filters) . ")";
    }
    # We now need a list of all the roles to find in the role index. Get a hash of
    # the ones coming in directly.
    my %searchRoles = map { $_ => 1 } @$roles;
    # Add any roles inside the included functions.
    for my $function (@$functions) {
        for my $functionRole (roles_of_function($function)) {
            $searchRoles{$functionRole} = 1;
        }
    }
    # Now loop through all the roles in the search hash.
    for my $role (keys %searchRoles) {
        # Get this role's feature list. This is undef when the role came
        # only from a function, not from the -roles list.
        my $fidList = $roleHash{$role};
        # Get all of the features and their assignments for the specified role.
        my @fidData = $sap->GetAll("IsFunctionalIn Feature", $filter, [$role, @parms],
        [qw(Feature(id) Feature(function))]);
        # Loop through the features.
        for my $fidDatum (@fidData) {
            # Get this feature's ID and assignment.
            my ($id, $function) = @$fidDatum;
            # Check to see if it's in one of our genomes.
            my $genomeID = genome_of($id);
            if (! $genomeFilter || $genomeHash{$genomeID}) {
                # Here we want to keep this feature. If this role is in the
                # role hash, add it to the role's list.
                if (defined $fidList) {
                    push @$fidList, $id;
                }
                # If this feature's function is in the function hash, add it
                # to the function's feature hash. We're using a hash instead of
                # a list because we may see the function multiple times and need
                # to prevent duplicates.
                if (exists $functionHash{$function}) {
                    $functionHash{$function}{$id} = 1;
                }
            }
        }
    }
    # Loop through the roles, putting them in the return hash.
    for my $role (keys %roleHash) {
        $retVal->{$role} = $roleHash{$role};
    }
    # Loop through the functions, putting them in the return hash. We flatten
    # each hash of feature IDs to a list when we do this.
    for my $function (keys %functionHash) {
        $retVal->{$function} = [sort keys %{$functionHash{$function}}];
    }
    # Return the result.
    return $retVal;
}
=head2 Chemistry Methods
=head3 all_complexes
my $complexList = $sapObject->all_complexes();
Return a list of all the complexes in the database.
=over 4
=item RETURN
Returns a reference to a list of complex IDs.
$complexList = [$cpx1, $cpx2, ...]
=back
=cut
sub all_complexes {
    # The argument hash is accepted for interface consistency but unused.
    my ($self, $args) = @_;
    # Every complex ID in the database, unfiltered.
    my @complexIDs = $self->{db}->GetFlat("Complex", "", [], 'id');
    return \@complexIDs;
}
=head3 all_models
my $modelHash = $sapObject->all_models();
Return a hash of all the models in the database, mapping each one to the relevant
genome.
=over 4
=item RETURN
Returns a reference to a hash that maps each model ID to a genome ID.
$modelHash = { $model1 => $genome1, $model2 => $genome2, ... };
=back
=cut
sub all_models {
    my ($self) = @_;
    my $sap = $self->{db};
    # Each Models record relates a model (from-link) to its genome (to-link);
    # accumulate them into a model-to-genome map.
    my %modelMap;
    for my $pair ($sap->GetAll('Models', "", [], "from-link to-link")) {
        $modelMap{$pair->[0]} = $pair->[1];
    }
    return \%modelMap;
}
=head3 all_reactions
my $reactions = $sapObject->all_reactions();
Return a list of all the reactions in the database.
=over 4
=item RETURN
Returns a reference to a list of all the reactions.
$reactions = [$rx1, $rx2, ...];
=back
=cut
sub all_reactions {
    my ($self) = @_;
    # Every reaction ID in the database, returned as a list reference.
    my $reactionList = [ $self->{db}->GetFlat('Reaction', "", [], 'id') ];
    return $reactionList;
}
=head3 all_roles_used_in_models
my $rolesList = $sapObject->all_roles_used_in_models();
Return a list of all the roles used in models.
=over 4
=item RETURN
Returns a reference to a list of role names. Each named role
triggers a complex used in at least one reaction belonging to
a model.
$rolesList = [$role1, $role2, ...]
=back
=cut
sub all_roles_used_in_models {
    my ($self, $args) = @_;
    my $sap = $self->{db};
    # Every complex appears in at least one model, so the roles triggering
    # complexes are exactly the roles used in models. A hash removes the
    # duplicates.
    my %seen;
    $seen{$_} = 1 for $sap->GetFlat("IsTriggeredBy", "", [], 'IsTriggeredBy(to-link)');
    return [ keys %seen ];
}
=head3 complex_data
my $complexHash = $sapObject->complex_data({
-ids => [$cpx1, $cpx2, ...],
-data => [$fieldA, $fieldB, ...]
});
Return the specified data items for each incoming reaction complex.
=over 4
=item parameter
Reference to hash with the following keys.
=over 8
=item -ids
Reference to a list of the IDs of reaction complexes of interest.
=item -data
Reference to a list of the names of the data items desired for each of the
specified complexes.
=over 12
=item name
Name of the complex (or C<undef> if the complex is nameless).
=item reactions
Reference to a list of the reactions in the complex.
=item roles
Reference to a list of 2-tuples for the roles in the complex, each
containing (0) the role name, and (1) a flag that is TRUE if the
role is optional to trigger the complex and FALSE if it is necessary.
=back
=back
=item RETURN
Returns a reference to a hash mapping each incoming complex to an n-tuple
containing the desired data fields in the order specified.
$complexHash = { $cpx1 => [$data1A, $data1B, ...],
$cpx2 => [$data2A, $data2B, ...]
... };
=back
=cut
sub complex_data {
    my ($self, $args) = @_;
    # Sapling database handle.
    my $sap = $self->{db};
    # Output hash, keyed by complex ID.
    my $retVal = {};
    # Extract the complex ID list and the requested data field names.
    my $ids = ServerThing::GetIdList(-ids => $args);
    my $fields = ServerThing::GetIdList(-data => $args);
    for my $cpxID (@$ids) {
        # Collect this complex's requested values, in field order.
        my @tuple;
        for my $field (@$fields) {
            if ($field eq 'name') {
                # The name lives on the Complex entity itself.
                my ($name) = $sap->GetFlat("Complex", 'Complex(id) = ?', [$cpxID], 'name');
                push @tuple, $name;
            } elsif ($field eq 'reactions') {
                # Reactions come from the IsSetOf relationship.
                my @reactionIDs = $sap->GetFlat("IsSetOf", 'IsSetOf(from-link) = ?',
                                                [$cpxID], 'to-link');
                push @tuple, \@reactionIDs;
            } elsif ($field eq 'roles') {
                # Roles (with their optional flags) come from IsTriggeredBy.
                my @rolePairs = $sap->GetAll("IsTriggeredBy", 'IsTriggeredBy(from-link) = ?',
                                             [$cpxID], [qw(to-link optional)]);
                push @tuple, \@rolePairs;
            } else {
                # Unknown field names are fatal.
                Confess("Invalid field name $field specified in complex_data.");
            }
        }
        $retVal->{$cpxID} = \@tuple;
    }
    return $retVal;
}
=head3 coupled_reactions
my $reactionHash = $sapObject->coupled_reactions({
-ids => [$rx1, $irx2, ...]
});
For each of a set of reactions, get the adjacent reactions in the metabolic network.
Two reactions are considered I<adjacent> if they share at least one compound that
is neither a cofactor or a ubiquitous compound (like water or oxygen). The compounds
that relate the adjacent reactions are called the I<connecting compounds>. In most cases,
each pair of adjacent reactions will have only one connecting compound, but this is
not guaranteed to be true.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of reaction IDs.
=back
=item RETURN
Returns a reference to a hash mapping each reaction ID to a sub-hash. Each sub-hash maps
adjacent reactions to the relevant connecting compounds.
$reactionHash = { $rx1 => { $rx1a => [$cpd1ax, $cpd1ay, ...],
$rx1b => [$cpd1bx, $cpd1by, ...],
                            ... } };
=back
=cut
sub coupled_reactions {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # Extract the list of reaction IDs from the parameter hash.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # The results will be accumulated in this hash.
    my $retVal = {};
    # Process each incoming reaction ID.
    for my $rxnID (@$ids) {
        # Maps each adjacent reaction to its list of connecting compounds.
        # This becomes the sub-hash stored in the return value.
        my %adjacent;
        # Find every (adjacent reaction, connecting compound) pair reachable
        # from this reaction through a shared compound that is neither
        # ubiquitous nor a cofactor.
        my @pairs = $sap->GetAll('Involves Compound IsInvolvedIn',
                "Involves(from-link) = ? AND Compound(ubiquitous) = 0 AND Involves(cofactor) = 0 AND IsInvolvedIn(cofactor) = 0",
                [$rxnID], "IsInvolvedIn(to-link) Involves(to-link)");
        for my $pair (@pairs) {
            my ($otherRxn, $compound) = @$pair;
            # Discard the reflexive connection of the reaction to itself.
            next if $otherRxn eq $rxnID;
            # Add the compound to this adjacent reaction's list, taking care
            # not to record the same compound twice.
            my $compounds = $adjacent{$otherRxn} ||= [];
            push @$compounds, $compound
                unless grep { $_ eq $compound } @$compounds;
        }
        # Record the connection data for this reaction.
        $retVal->{$rxnID} = \%adjacent;
    }
    # Return the accumulated results.
    return $retVal;
}
=head3 models_to_reactions
my $modelHash = $sapObject->models_to_reactions({
-ids => [$model1, $model2, ...]
});
Return the list of reactions in each specified model.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of model IDs, indicating the models of interest.
=back
=item RETURN
Returns a reference to a hash that maps each model ID to a list of the reactions in the model.
$modelHash = { $model1 => [$rx1a, $rx1b, ...],
$model2 => [$rx2a, $rx2b, ...],
... };
=back
=cut
sub models_to_reactions {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # Extract the model IDs from the parameter hash.
    my $modelIDs = ServerThing::GetIdList(-ids => $args);
    # The result hash maps each model ID to its reaction list.
    my $retVal = {};
    for my $modelID (@$modelIDs) {
        # The Requires relationship connects each model to its reactions.
        $retVal->{$modelID} =
            [ $sap->GetFlat('Requires', "Requires(from-link) = ?",
                            [$modelID], 'to-link') ];
    }
    # Return the results.
    return $retVal;
}
=head3 reaction_neighbors
    my $reactionHash = $sapObject->reaction_neighbors({
-ids => [$rx1, $rx2, ...],
-depth => 1
});
Return a list of the reactions in the immediate neighborhood of the specified reactions.
A separate neighborhood list will be generated for each incoming reaction; the neighborhood
will consist of reactions connected to the incoming reaction and reactions connected to those
reactions up to the specified depth. (Two reactions are I<connected> if they have a compound
in common that is not a cofactor or a ubiquitous chemical like water or ATP).
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys:
=over 8
=item -ids
Reference to a list of IDs for the reactions of interest.
=item -depth (optional)
Number of levels to which the neighborhood search should take place. If the depth is I<n>, then
the neighborhood will consist of the original reaction and every other reaction for which there is
a sequence of I<n+1> or fewer reactions starting with the original and ending with the other
reaction. Thus, if I<n> is zero, the original reaction is returned as a singleton. If I<n> is 1,
then the neighborhood is the original reaction and every reaction connected to it. The default
is C<2>.
=back
=item RETURN
Returns a reference to a hash mapping each incoming reaction to a sub-hash. The sub-hash
maps each reaction in the neighborhood to its distance from the original reaction.
$reactionHash = { $rx1 => { $rx1a => $dist1a, $rx1b => $dist1b, ... },
$rx2 => { $rx2a => $dist2a, $rx2b => $dist2b, ... },
... };
=back
=cut
sub reaction_neighbors {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sap = $self->{db};
    # Extract the reaction IDs from the parameter hash.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Get the search depth. The default is 2. Note that a depth of 0 is
    # meaningful (each reaction is returned as a singleton), so we test
    # for definedness rather than truth.
    my $depth = $args->{-depth};
    $depth = 2 unless defined $depth;
    # Declare the return hash.
    my $retVal = {};
    # Process each incoming reaction.
    for my $id (@$ids) {
        # Maps each reaction in the neighborhood to its distance from the
        # original reaction, which itself sits at distance 0.
        my %distanceOf = ($id => 0);
        # Reactions discovered in the most recent layer; primes the loop.
        my @currentLayer = ($id);
        for my $layer (1 .. $depth) {
            # New reactions found at this depth accumulate here.
            my @nextLayer;
            for my $rxn (@currentLayer) {
                # Find the reactions sharing a compound with this one,
                # excluding ubiquitous compounds and cofactors.
                my @connected = $sap->GetFlat("Involves Compound IsInvolvedIn",
                        "Involves(from-link) = ? AND Compound(ubiquitous) = 0 AND Involves(cofactor) = 0 AND IsInvolvedIn(cofactor) = 0",
                        [$rxn], 'IsInvolvedIn(to-link)');
                # Record any reactions we have not yet seen.
                for my $connectedRxn (@connected) {
                    if (! exists $distanceOf{$connectedRxn}) {
                        $distanceOf{$connectedRxn} = $layer;
                        push @nextLayer, $connectedRxn;
                    }
                }
                Trace(scalar(@connected) . " neighbors found for $rxn.") if T(4);
            }
            Trace(scalar(@nextLayer) . " reactions at depth $layer.") if T(3);
            # The new layer becomes the current layer for the next pass.
            @currentLayer = @nextLayer;
        }
        # Save the neighborhood found for this reaction.
        $retVal->{$id} = \%distanceOf;
    }
    # Return the result hash.
    return $retVal;
}
=head3 reaction_path
my $reactionList = $sapObject->reaction_path({
-roles => [$role1, $role2, ...],
-maxLength => 10
});
Find the shortest reaction path that represents as many of the specified roles
as possible. Note that since the a reaction may be associated with multiple
roles, it is possible for a single role to be represented more than once in
the path.
The search is artificially limited to paths under a maximum length that can
be specified in the parameters.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -roles
Reference to a list of the roles to be covered by the reaction path.
=item -maxLength (optional)
Maximum number of reactions to allow in the reaction path. The default is two more than
the number of roles.
=back
=item RETURN
Returns a reference to a list of the best reaction paths. Each reaction path is
represented by a list of lists, the sub-lists containing the reaction IDs followed
by the roles represented by the reaction. The paths returned will be the shortest ones
found with the minimal number of missing roles.
$reactionList = [
[[$rxn1a, $role1ax, $role1ay, ...], [$rxn1b, $role1bx, $role1by, ...], ...],
[[$rxn2a, $role2ax, $role2ay, ...], [$rxn2b, $role2bx, $role2by, ...], ...],
... ];
=back
=cut
sub reaction_path {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sap = $self->{db};
    # Get the list of roles to cover.
    my $roles = ServerThing::GetIdList(-roles => $args);
    my $roleCount = scalar(@$roles);
    # Get the maximum length. NOTE: because of the ||, an explicit -maxLength
    # of 0 falls back to the default (role count plus 2).
    my $maxLength = $args->{-maxLength} || $roleCount + 2;
    # To start, we need a hash that maps each reaction that relates to one of our roles
    # to a list of the related roles.
    my %rxnHash;
    for my $role (@$roles) {
        my @rxns = $sap->RoleReactions($role);
        for my $rxn (@rxns) {
            # A reaction may appear under several roles; accumulate them all.
            push @{$rxnHash{$rxn}}, $role;
        }
    }
    # We need to be able to create reaction paths.
    require ReactionPath;
    # Our strategy will be to build a queue of reaction paths. At any given time we will
    # track the lowest missing-roles number and the length of each path (all paths will
    # have the same length at the end of each iteration). When the missing-roles number
    # hits 0 or the length hits the maximum, we stop. This first list contains the queue
    # of paths in progress.
    my $pathList = [];
    # This hash will map each reaction we've encountered to its neighboring reactions.
    # If a reaction is not found in here, we get its information from the database.
    # It acts as a cache so each reaction's neighborhood is queried at most once.
    my %rxnNeighbors;
    # This will track the best (lowest) missing-roles number. It starts at the
    # worst possible value: every role missing.
    my $leastMissing = $roleCount;
    # This will track the best paths found so far.
    my @bestPaths;
    # Now we prime the list with the reactions found so far. Each becomes a
    # length-1 path covering at least one role.
    for my $rxn (keys %rxnHash) {
        # Get the roles covered by this reaction. There will always be at least one.
        my $foundRoles = $rxnHash{$rxn};
        # Compute the missing roles. The number of found roles is almost always one, so
        # we don't bother doing fancy hash stuff.
        my @missingRoles;
        for my $role (@$roles) {
            if (! grep { $_ eq $role } @$foundRoles) {
                push @missingRoles, $role;
            }
        }
        # Compute this path.
        my $path = ReactionPath->new($rxn, $foundRoles, \@missingRoles);
        # Compute the number of missing roles and merge them into the least-missing indicator.
        my $missing = scalar @missingRoles;
        if ($leastMissing > $missing) {
            # Here we have a new least-missing limit. All previously saved best
            # paths are discarded.
            @bestPaths = ($path);
            $leastMissing = $missing;
        } elsif ($leastMissing == $missing) {
            # Here we have a comparable path to the ones we've saved.
            push @bestPaths, $path;
        }
        # Add this path to the queue.
        push @$pathList, $path;
    }
    # Now we're ready to start the main loop. The current length of all the paths is 1.
    my $pathLength = 1;
    # This will remember the length of the shortest best-quality paths.
    my $bestPathLength = 1;
    # Loop until we hit the maximum path length or have found a path with no missing roles.
    while ($pathLength < $maxLength && $leastMissing > 0) {
        # The queue for the next iteration will be built in here.
        my @newList;
        # Update the path length. This is the length for the paths we'll be putting
        # in the new queue.
        $pathLength++;
        # Loop through the current path list, extending each path by one reaction.
        for my $path (@$pathList) {
            # Get the last reaction in this path.
            my $rxn0 = $path->lastReaction();
            # Look for the reactions in its neighborhood. We try to get these from the hash,
            # but if they aren't there, we query the database.
            my $neighbors = $rxnNeighbors{$rxn0};
            if (! defined $neighbors) {
                # Neighbors share a non-ubiquitous, non-cofactor compound; the
                # extra filter clause excludes the reaction itself.
                my @neighbors = $sap->GetFlat("Involves Compound IsInvolvedIn",
                        "Involves(from-link) = ? AND Compound(ubiquitous) = 0 AND Involves(cofactor) = 0 AND IsInvolvedIn(cofactor) = 0 AND IsInvolvedIn(to-link) <> ?",
                        [$rxn0, $rxn0], 'IsInvolvedIn(to-link)');
                $rxnNeighbors{$rxn0} = \@neighbors;
                $neighbors = \@neighbors;
            }
            # Compute the current number of missing roles for this path.
            my $currentMissing = $path->missing();
            # Loop through the neighbors, creating longer paths.
            for my $rxn1 (@$neighbors) {
                # Try to create a longer path. (AddReaction may decline, e.g.
                # returning a false value, in which case we skip this neighbor.)
                my $newPath = $path->AddReaction($rxn1, $rxnHash{$rxn1});
                if ($newPath) {
                    # Here the longer path was found. We'll set this to TRUE if we want to keep
                    # working with this path.
                    my $keep = 0;
                    # Compute the quality of this path (number of missing roles).
                    my $missing = $newPath->missing();
                    if ($leastMissing > $missing) {
                        # Here we have the new best path. Save the missing-role
                        # count and add the path to the queue.
                        $leastMissing = $missing;
                        # Re-establish the best-paths queue.
                        @bestPaths = ($newPath);
                        $bestPathLength = $pathLength;
                        # We want to keep this one.
                        $keep = 1;
                    } elsif ($leastMissing == $missing && $missing < $currentMissing &&
                             $bestPathLength == $pathLength) {
                        # Here the path is better than it used to be and is as good as
                        # and as short as the current best path. Denote it's one of the best.
                        push @bestPaths, $newPath;
                        # We want to keep it.
                        $keep = 1;
                    } elsif ($missing - $leastMissing <= $maxLength - $pathLength) {
                        # Here it's not one of the best paths, but there's a chance
                        # it can catch up if we expand to the full path length, so
                        # we keep the path, but it's not worth saving it to return to
                        # the caller.
                        $keep = 1;
                    }
                    # Put this path in the new list if we're keeping it.
                    if ($keep) {
                        push @newList, $newPath;
                    }
                }
            }
        }
        # Replace the old path list with the new one.
        $pathList = \@newList;
    }
    # We're done. Return all of the paths that have the minimal number of missing roles.
    my @retVal;
    for my $path (@bestPaths) {
        # We want to output this path. Convert the path to a list.
        my @listedPath;
        # Loop through the reactions in the path.
        for my $rxn ($path->path()) {
            # Get the roles for this reaction. If it has no roles associated with it,
            # we use an empty list.
            my $roles = $rxnHash{$rxn} || [];
            # Put the reaction and its roles into the output list for this path.
            push @listedPath, [$rxn, @$roles];
        }
        # Put the path information in the output.
        push @retVal, \@listedPath;
    }
    # Return the results found.
    return \@retVal;
}
=head3 reaction_strings
my $reactionHash = $sapObject->reaction_strings({
-ids => [$rx1, $rx2, ...],
-roles => 1,
-names => 1
});
Return the display string for each reaction. The display string contains the compound IDs
(as opposed to the atomic formulas) and the associated stoichiometries, with the
substrates on the left of the arrow and the products on the right.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of IDs for the reactions of interest.
=item -roles (optional)
If TRUE, then each reaction string will be associated with a list of the reaction's roles
in the result. The default is FALSE.
=item -names (optional)
If C<1>, then the compound name will be included with the ID in the output. If C<only>, the
compound name will be included instead of the ID. If C<0>, only the ID will be included. The
default is C<0>.
=back
=item RETURN
Returns a reference to a hash mapping each reaction ID to a displayable string describing the
reaction. If C<-roles> is TRUE, then instead of a string, the hash will map each reaction ID to
a list consisting of the string followed by the roles associated with the reaction.
=over 8
=item -roles FALSE
$reactionHash = { $rx1 => $string1, $rx2 => $string2, ... }
=item -roles TRUE
$reactionHash = { $rx1 => [$string1, $role1a, $role1b, ...],
$rx2 => [$string2, $role2a, $role2b, ...],
...
}
=back
=back
=cut
sub reaction_strings {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # Get the list of reaction IDs.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Check the options. Both default to 0 (FALSE) as documented. Defaulting
    # -names here is also a bug fix: without it, an omitted -names leaves
    # $names undef and the "eq 'only'" comparison below emits an
    # uninitialized-value warning under "use warnings".
    my $roles = $args->{-roles} || 0;
    my $names = $args->{-names} || 0;
    # Declare the return variable.
    my $retVal = {};
    # Loop through the reactions.
    for my $id (@$ids) {
        # Get this reaction's equation: one row per participating compound.
        my @components = $sap->GetAll("Involves Compound", "Involves(from-link) = ?",
                [$id], "Involves(cofactor) Involves(stoichiometry) Involves(product) Compound(id) Compound(label)");
        # We'll build the substrate and product lists in here.
        my (@substrate, @product);
        # Loop through the components.
        for my $component (@components) {
            # Get the information about this component.
            my ($cofactor, $stoich, $product, $compound, $label) = @$component;
            # Compute the compound label: ID only, name only, or both,
            # depending on the -names option.
            if ($names eq 'only') {
                $compound = $label;
            } elsif ($names) {
                $compound .= ": $label";
            }
            # Surround it with parentheses or brackets, depending on whether or not it's a cofactor.
            if ($cofactor) {
                $compound = "[$compound]";
            } else {
                $compound = "($compound)";
            }
            # Add the stoichiometry prefix when it isn't the trivial 1.
            if ($stoich != 1) {
                $compound = $stoich . $compound;
            }
            # Push the result into the appropriate list.
            if ($product) {
                push @product, $compound;
            } else {
                push @substrate, $compound;
            }
        }
        # Form the components into the result string: substrates => products.
        my $reactionData = join(" + ", @substrate) . " => " . join(" + ", @product);
        # Do we want roles?
        if ($roles) {
            # Get the roles and create a list out of them.
            $reactionData = [$reactionData, $sap->ReactionRoles($id)];
        }
        # Store the reaction data in the result hash.
        $retVal->{$id} = $reactionData;
    }
    # Return the result.
    return $retVal;
}
=head3 reactions_to_complexes
my $reactionHash = $sapObject->reactions_to_complexes({
-ids => [$rxn1, $rxn2, ...]
});
Return the complexes containing each reaction. Note that most reactions
are in more than one complex, so the complexes for each reaction are returned
as a list.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of reaction IDs for the reactions of interest.
=back
=item RETURN
Returns a reference to a hash mapping each incoming reaction to a list of the
associated complexes.
$reactionHash = { $rxn1 => [$cpx1a, $cpx1b, ...],
$rxn2 => [$cpx2a, $cpx2b, ...],
...
};
=back
=cut
sub reactions_to_complexes {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sap = $self->{db};
    # Extract the reaction IDs from the parameter hash.
    my $reactionIDs = ServerThing::GetIdList(-ids => $args);
    # The results accumulate in this hash.
    my $retVal = {};
    for my $reactionID (@$reactionIDs) {
        # The IsElementOf relationship links each reaction to its complexes.
        $retVal->{$reactionID} =
            [ $sap->GetFlat("IsElementOf", 'IsElementOf(from-link) = ?',
                            [$reactionID], 'to-link') ];
    }
    # Return the result.
    return $retVal;
}
=head3 reactions_to_roles
my $reactionHash = $sapObject->reactions_to_roles({
-ids => [$rx1, $rx2,...]
});
Return the roles associated with each reaction.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of reaction IDs for the reactions of interest.
=back
=item RETURN
Returns a reference to a hash mapping each incoming reaction to a list of the
associated roles.
$reactionHash = { $rx1 => [$role1a, $role1b, ...],
$rx2 => [$role2a, $role2b, ...],
...
};
=back
=cut
sub reactions_to_roles {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sap = $self->{db};
    # Extract the reaction IDs from the parameter hash.
    my $reactionIDs = ServerThing::GetIdList(-ids => $args);
    # The results accumulate in this hash.
    my $retVal = {};
    for my $reactionID (@$reactionIDs) {
        # Map this reaction to the list of its functional roles.
        $retVal->{$reactionID} = [ $sap->ReactionRoles($reactionID) ];
    }
    # Return the result.
    return $retVal;
}
=head3 role_neighbors
    my $roleHash = $sapObject->role_neighbors({
-ids => [$role1, $role2, ...]
});
For each role, return a list of roles in the immediate chemical neighborhood. A role is
in the immediate chemical neighborhood of another role if the two roles are associated with
reactions that share a compound that is not ubiquitous or a cofactor.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys:
=over 8
=item -ids
Reference to a list of role names.
=back
=item RETURN
Returns a reference to a hash that maps each incoming role name to a list of the names
of the neighboring roles.
$roleHash = { $role1 => [$role1a, $role1b, ...],
$role2 => [$role2a, $role2b, ...],
... };
=back
=cut
sub role_neighbors {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # Extract the role names from the parameter hash.
    my $roleNames = ServerThing::GetIdList(-ids => $args);
    # Declare the return hash.
    my $retVal = {};
    for my $roleName (@$roleNames) {
        # Traverse from the role through its reactions to the compounds they
        # involve (excluding ubiquitous compounds and cofactors) and back out
        # to the other roles. The hash eliminates duplicate role names.
        my %neighborSet;
        $neighborSet{$_} = 1
            for $sap->GetFlat("Role Triggers IsSetOf Involves Compound IsInvolvedIn IsElementOf IsTriggeredBy Role2",
                'Role(id) = ? AND Compound(ubiquitous) = 0 AND Involves(cofactor) = 0 AND IsInvolvedIn(cofactor) = 0',
                [$roleName], 'Role2(id)');
        # A role is not considered its own neighbor.
        delete $neighborSet{$roleName};
        # Store the sorted neighbor list in the result hash.
        $retVal->{$roleName} = [sort keys %neighborSet];
    }
    # Return the result.
    return $retVal;
}
=head3 role_reactions
my $roleHash = $sapObject->role_reactions({
-ids => [$role1, $role2, ...],
-formulas => 1
});
Return a list of all the reactions associated with each incoming role.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of role IDs for the roles of interest.
=item -formulas (optional)
If TRUE, then each reaction will be associated with its formula. The default is
FALSE, in which case for each role a simple list of reactions is returned.
=back
=item RETURN
Returns a reference to a hash, keyed by role ID. If C<-formulas> is FALSE, then
each role will map to a list of reaction IDs. If C<-formulas> is TRUE, then
each role maps to a sub-hash keyed by reaction ID. The sub-hash maps each
reaction to a chemical formula string with compound IDs in place of the
chemical labels.
=over 8
=item -formulas FALSE
$roleHash = { $role1 => [$rxn1a, $rxn1b, ...],
                  $role2 => [$rxn2a, $rxn2b, ...],
... };
=item -formulas TRUE
$roleHash = { $role1 => { $rx1a => "$s1a1*$cpd1a1 + $s1a2*$cpd1a2 + ... => $s1ax*$cpd1ax + $s1ay*$cpd1ay + ...",
$rx1b => "$s1b1*$cpd1b1 + $s1b2*$cpd1b2 + ... => $s1bx*$cpd1bx + $s1by*$cpd1by + ...",
... },
$role2 => { $rx2a => "$s2a1*$cpd2a1 + $s2a2*$cpd2a2 + ... => $s2ax*$cpd2ax + $s2ay*$cpd2ay + ...",
$rx2b => "$s2b1*$cpd2b1 + $s2b2*$cpd2b2 + ... => $s2bx*$cpd2bx + $s2by*$cpd2by + ...",
... },
... };
=back
=back
=cut
sub role_reactions {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sap = $self->{db};
    # Extract the role IDs from the parameter hash.
    my $roleIDs = ServerThing::GetIdList(-ids => $args);
    # Determine whether reaction formulas were requested.
    my $wantFormulas = $args->{-formulas};
    # Declare the return hash.
    my $retVal = {};
    for my $roleID (@$roleIDs) {
        # Find the reactions associated with this role.
        my @reactions = $sap->RoleReactions($roleID);
        if ($wantFormulas) {
            # Formulas requested: map each reaction to its display string.
            $retVal->{$roleID} = $self->reaction_strings({ -ids => \@reactions });
        } else {
            # No formulas: a simple list of reaction IDs suffices.
            $retVal->{$roleID} = \@reactions;
        }
    }
    # Return the result.
    return $retVal;
}
=head3 roles_to_complexes
my $roleHash = $sapObject->roles_to_complexes({
-ids => [$role1, $role2, ...],
});
Return the complexes (sets of related reactions) associated with each role in
the incoming list. Roles trigger many complexes, and a complex may be triggered
by many roles. A given role is considered either I<optional> or I<necessary>
to the complex, and an indication of this will be included in the output.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys:
=over 8
=item -ids
Reference to a list of the IDs for the roles of interest.
=back
=item RETURN
Returns a reference to a hash mapping each incoming role ID to a list of 2-tuples,
each consisting of (0) a complex ID, and (1) a flag that is TRUE if the role is
optional and FALSE if the role is necessary for the complex to trigger.
$roleHash = { $role1 => [[$complex1a, $flag1a], [$complex1b, $flag1b], ...],
$role2 => [[$complex2a, $flag2a], [$complex2b, $flag2b], ...],
... };
=back
=cut
sub roles_to_complexes {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # Declare the return hash.
    my $retVal = {};
    # Extract the role IDs from the parameter hash.
    my $roleIDs = ServerThing::GetIdList(-ids => $args);
    for my $roleID (@$roleIDs) {
        # Each Triggers record yields a [complex-id, optional-flag] pair.
        $retVal->{$roleID} =
            [ $sap->GetAll("Triggers", 'Triggers(from-link) = ?', [$roleID],
                           [qw(to-link optional)]) ];
    }
    # Return the result hash.
    return $retVal;
}
=head2 DNA and Protein Sequence Methods
=head3 dlits_for_ids
my $idHash = $sapObject->dlits_for_ids({
-ids => [id1,id2,...],
-full => 1
});
Find the PUBMED literature references for a list of proteins. The
proteins can be specified either was FIG feature IDs or protein
sequence MD5s.
=over 4
=item parameter
The parameter should be a reference to a hash with the following
keys.
=over 8
=item -ids
Reference to a list of gene and protein IDs. For each gene,
literature references will be returned for the feature's protein.
For each protein, the literature references for the protein will
be returned. Genes should be specified using FIG feature IDs and
proteins using the MD5 of the protein sequence.
=item -full (optional)
If TRUE, then in addition to each literature article's PUBMED
ID, the article title and URL will be returned. (NOTE: these will
not always be available). The default is FALSE.
=back
=item RETURN
Returns a reference to a hash that maps each incoming ID to a list
of publications. The publications will normally be represented by
PUBMED IDs, but if C<-full> is TRUE, then each will be represented
by a 3-tuple consisting of (0) the PUBMED ID, (1) the article title,
and (2) the article URL.
=over 8
=item -full = FALSE
$idHash = { $id1 => [$pubmed1a, $pubmed1b, ...],
$id2 => [$pubmed2a, $pubmed2b, ...],
...
};
=item -full = TRUE
$idHash = { $id1 => [[$pubmed1a, $title1a, $url1a],
[$pubmed1b, $title1b, $url1b], ...],
$id2 => [[$pubmed2a, $title2a, $url2a],
[$pubmed2b, $title2b, $url2b], ...],
...
};
=back
=back
=cut
sub dlits_for_ids {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sap = $self->{db};
    # Declare the return hash.
    my $retVal = {};
    # Determine whether full publication data (title and URL) was requested.
    my $full = $args->{-full} || 0;
    # Extract the gene/protein IDs from the parameter hash.
    my $ids = ServerThing::GetIdList(-ids => $args);
    for my $id (@$ids) {
        # A FIG feature ID must first be crossed over to its protein via
        # Produces; a protein MD5 connects directly through IsATopicOf.
        my ($objects, $filter) =
            ($id =~ /^fig/) ? ("Produces IsATopicOf", "Produces(from-link) = ?")
                            : ("IsATopicOf", "IsATopicOf(from-link) = ?");
        # Get the PUBMED IDs for this protein.
        my @pubmeds = $sap->GetFlat($objects, $filter, [$id], "IsATopicOf(to-link)");
        # In full-results mode, convert each PUBMED ID to a 3-tuple.
        if ($full && @pubmeds) {
            # Build a filter with one parameter mark per PUBMED ID.
            my $pubFilter = "Publication(id) IN (" . join(", ", map { "?" } @pubmeds) .
                ")";
            # Collect title/link data for the publications we can find,
            # keyed by PUBMED ID.
            my %pubData;
            my $query = $sap->Get("Publication", $pubFilter, \@pubmeds);
            while (my $pub = $query->Fetch()) {
                my $pubmed = $pub->PrimaryValue("id");
                my $citation = $pub->PrimaryValue("citation");
                $pubData{$pubmed} = [$pubmed, $citation->text, $citation->link];
            }
            # Replace each bare PUBMED ID with its 3-tuple, falling back to a
            # placeholder title and the standard PUBMED URL when no data exists.
            @pubmeds = map {
                $pubData{$_} || [$_, "<unknown>", "http://www.ncbi.nlm.nih.gov/pubmed/$_"]
            } @pubmeds;
        }
        # Store the publications found in the result hash.
        $retVal->{$id} = \@pubmeds;
    }
    # Return the result.
    return $retVal;
}
=head3 equiv_ids_for_sequences
my $labelHash = $sapObject->equiv_ids_for_sequences({
-seqs => [[$label1, $comment1, $sequence1],
[$label2, $comment2, $sequence2], ...]
});
Find all the identifiers in the database that produce the specified proteins.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -seqs
Reference to a list of protein specifications. A protein specification can be
a FASTA string, a 3-tuple consisting of (0) a label, (1) a comment,
and (2) a protein sequence, OR a 2-tuple consisting of (0) a label and (1)
a protein sequence. In other words, each specification can be a raw FASTA
string, a parsed FASTA string, or a simple [id, sequence] pair. In every case,
the protein sequence will be used to find identifiers and the label will be used
to identify the results.
=back
=item RETURN
Returns a hash mapping each incoming label to a list of identifiers from the
database that name the protein or a feature that produces the protein.
$labelHash = { $label1 => [$id1a, $id1b, ...],
$label2 => [$id2a, $id2b, ...],
... };
=back
=cut
sub equiv_ids_for_sequences {
    my ($self, $args) = @_;
    # Sapling database object.
    my $sap = $self->{db};
    # Pull the sequence specifier list from the arguments.
    my $seqs = ServerThing::GetIdList(-seqs => $args);
    # A single raw specifier (e.g. one [label, sequence] pair) would be
    # mistaken for a list of FASTA strings. If the first element is a
    # plain string that does not start with ">", wrap the whole thing
    # so it is treated as one specifier.
    my $head = $seqs->[0];
    if (defined $head && ! ref $head && substr($head, 0, 1) ne '>') {
        $seqs = [$seqs];
    }
    # Output hash: label -> list of equivalent identifiers.
    my $retVal = {};
    for my $spec (@$seqs) {
        # Decompose the specifier into its FASTA components; the comment
        # portion is not needed here.
        my ($label, undef, $sequence) = parse_fasta_record($spec);
        # Convert the sequence to its MD5 protein ID.
        my $protID = $sap->ProteinID($sequence);
        # Collect every identifier attached to that protein.
        $retVal->{$label} = [ $sap->IdsForProtein($protID) ];
    }
    return $retVal;
}
=head3 find_closest_genes
my $nameHash = $sapObject->find_closest_genes({
-genome => $genome1,
-seqs => { $name1 => $seq1,
$name2 => $seq2,
... },
-protein => 1
});
Find the closest genes to the specified sequences in the specified genome.
Each indicated sequence will be converted to a DNA sequence and then the contigs
of the specified genome will be searched for the sequence. The genes in closest
proximity to the sequence will be returned. The sequences are named; in the return
hash, the genes found will be associated with the appropriate sequence name.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -genome
ID of the genome to search.
=item -seqs
Reference to a hash mapping names to sequences. The names will be used to associate
the genes found with the incoming sequences. DNA sequences should not contain ambiguity
characters.
=item protein (optional)
If TRUE, the sequences will be interpreted as protein sequences. If FALSE, the
sequences will be interpreted as DNA sequences.
=back
=item RETURN
Returns a reference to a hash mapping each sequence name to a list of 3-tuples, each
consisting of (0) a gene ID, (1) the location of the gene, and (2) the location of the
matching sequence.
$nameHash = { $name1 => [[$fid1a, $loc1a, $match1a],
[$fid1b, $loc1b, $match1b], ...],
$name2 => [[$fid2a, $loc2a, $match2a],
[$fid2b, $loc2b, $match2b], ...],
... }
=back
=cut
sub find_closest_genes {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sap = $self->{db};
    # Get the genome ID.
    my $genome = $args->{-genome};
    Confess("No genome specified for \"find_closest_genes\".") if ! $genome;
    # Determine whether or not we have a protein sequence.
    my $protein = $args->{-protein} || 0;
    # Get the hash of sequences.
    my $seqHash = $args->{-seqs};
    Confess("No sequences specified for \"find_closest_genes\".") if ! $seqHash;
    Confess("Invalid sequence hash specified for \"find_closest_genes\".") if ref $seqHash ne 'HASH';
    # Declare the return variable. It maps each input sequence name to a
    # list of [gene-id, gene-location, match-location] 3-tuples.
    my $retVal = {};
    # How we process this request depends entirely on the type of search.
    if ($protein) {
        # For a protein search, we start by reading in all the pegs.
        my $pattern = "fig|$genome.peg.%";
        my %pegSequences = map { $_->[0] => $_->[1] }
            $sap->GetAll("Produces ProteinSequence",
                         'Produces(from-link) LIKE ?', [$pattern], [qw(from-link
                         ProteinSequence(sequence))]);
        # Loop through the incoming sequences.
        for my $name (keys %$seqHash) {
            # Get the sequence. We convert it to uppercase to match what's in the database.
            # NOTE(review): this "my $protein" shadows the -protein option flag
            # declared above; inside this loop $protein is the query sequence.
            my $protein = uc $seqHash->{$name};
            # We'll put the hits in here.
            my @hits;
            # Loop through the pegs.
            for my $peg (keys %pegSequences) {
                # Look for the protein. This is an exact substring scan, so
                # every occurrence in the peg's protein sequence is reported.
                my $pegSequence = $pegSequences{$peg};
                my $loc = 0;
                while (($loc = index($pegSequence, $protein, $loc)) >= 0) {
                    # Here we have a hit, so record it. Note that we bump
                    # the location, which both converts it to a position from
                    # an offset and insures we don't find the same subsequence
                    # again.
                    $loc++;
                    push @hits, [$peg, join(",", map { $_->String() } $sap->GetLocations($peg)),
                                 $peg . "_$loc+" . length($protein)];
                }
            }
            # If we found anything, store it in the return hash.
            if (@hits) {
                $retVal->{$name} = \@hits;
            }
        }
    } else {
        # For a DNA search, we start by reading in all the contigs.
        my @contigs = $sap->GetFlat("IsMadeUpOf", "IsMadeUpOf(from-link) = ?",
                                    [$genome], 'to-link');
        my $contigSequences = $self->contig_sequences({ -ids => \@contigs });
        # Loop through the incoming sequences.
        for my $name (keys %$seqHash) {
            # Get the sequence. We convert it to lowercase to match what's in the database
            # and fold Us to Ts. Because the match below is an exact index()
            # scan, ambiguity characters in the input will never match.
            my $dna = lc $seqHash->{$name};
            $dna =~ tr/u/t/;
            # We'll put the hits in here.
            my @hits;
            # We need to search twice. Set up a hash to drive it: the '-'
            # entry holds the reverse complement so hits on either strand
            # are found.
            my %dirs = ('+' => $dna, '-' => rev_comp($dna));
            # Save its length.
            my $dnaLen = length($dna);
            # Loop through the contigs.
            for my $contig (keys %$contigSequences) {
                # Loop through the directions.
                for my $dir (keys %dirs) {
                    my $dnaSequence = $dirs{$dir};
                    Trace("Searching $contig in direction $dir for $name.") if T(3);
                    # Look for a hit in the contig.
                    my $loc = 0;
                    while (($loc = index($contigSequences->{$contig}, $dnaSequence, $loc)) >= 0) {
                        # Here we found one. Compute its location. For a minus-strand
                        # hit the location string starts at the far end of the match.
                        my $hitLocation = $contig . "_";
                        if ($dir eq '+') {
                            $hitLocation .= ($loc + 1) . "+";
                        } else {
                            $hitLocation .= ($loc + $dnaLen) . "-";
                        }
                        $hitLocation .= $dnaLen;
                        Trace("Hit found at $hitLocation.") if T(3);
                        # Find genes that overlap the hit region.
                        my @hitPegs = $sap->GenesInRegion($hitLocation);
                        # Loop through them, producing output.
                        for my $hitPeg (@hitPegs) {
                            # Get the peg's location.
                            my $hitPegLocation = join(",", map { $_->String() } $sap->GetLocations($hitPeg));
                            Trace("Hit peg $hitPeg is at $hitPegLocation.") if T(3);
                            # Only proceed if it goes in the correct direction
                            # (the strand character appears in the location string).
                            if (index($hitPegLocation, $dir) >= 0) {
                                push @hits, [$hitPeg, $hitPegLocation, $hitLocation];
                            }
                        }
                        # Bump the location point to insure we don't find the same
                        # sequence again.
                        $loc++;
                    }
                }
            }
            # If any hits were found, store them in the return hash.
            if (@hits) {
                $retVal->{$name} = \@hits;
            }
        }
    }
    # Return the result hash.
    return $retVal;
}
=head3 ids_to_sequences
my $idHash = $sapObject->ids_to_sequences({
-ids => [$id1, $id2, ...],
-protein => 1,
-fasta => 1,
-source => 'LocusTag',
-genome => $genome,
-comments => { $id1 => $comment1,
$id2 => $comment2,
... }
});
Compute a DNA or protein string for each incoming feature ID.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of feature IDs.
=item -source (optional)
Database source of the IDs specified-- C<SEED> for FIG IDs, C<GENE> for standard
gene identifiers, or C<LocusTag> for locus tags. In addition, you may specify
C<RefSeq>, C<CMR>, C<NCBI>, C<Trembl>, or C<UniProt> for IDs from those databases.
Use C<mixed> to allow mixed ID types (though this may cause problems when the same
ID has different meanings in different databases). Use C<prefixed> to allow IDs with
prefixing indicating the ID type (e.g. C<uni|P00934> for a UniProt ID, C<gi|135813> for
an NCBI identifier, and so forth). The default is C<SEED>.
=item -genome (optional)
ID of a specific genome. If specified, results will only be returned for genes in the
specified genome. The default is to return results for all genomes.
=item -protein (optional)
If TRUE, the output FASTA sequences will be protein sequences; otherwise, they
will be DNA sequences. The default is FALSE.
=item -fasta (optional)
If TRUE, the output sequences will be multi-line FASTA strings instead of sequences.
The default is FALSE, meaning the output sequences will be ordinary strings.
=item -comments (optional)
Allows the user to add a label or description to each FASTA formatted sequence.
The values is a reference to a hash whose keys are the ids, and the values are
the desired labels. This parameter is only used when the C<-fasta> option is
specified.
=back
=item RETURN
Returns a hash mapping the incoming IDs to sequence strings. IDs that
are not found in the database will not appear in the hash.
$idHash = { $id1 => $sequence1, $id2 => $sequence2, ... };
=back
=cut
sub ids_to_sequences {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sap = $self->{db};
    # Return hash: incoming ID -> sequence string (raw or FASTA).
    my $retVal = {};
    # Get the ID source type. The default is SEED (FIG) IDs.
    my $source = $args->{-source} || 'SEED';
    # Compute the ID conversion query for the chosen source, optionally
    # restricted to a single genome.
    my ($idObjects, $idFilter, @idParms) = $sap->ComputeFeatureFilter($source,
                                                                      $args->{-genome});
    # Get the comment hash used for FASTA labels.
    my $comments = $args->{-comments} || {};
    # Get the strip flag. The default is stripped (raw sequence strings);
    # -fasta turns stripping off.
    my $stripped = ($args->{-fasta} ? 0 : 1);
    # Get the sequence type: TRUE for protein, FALSE for DNA.
    my $protein = $args->{-protein} || 0;
    # Extract the list of feature IDs.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Loop through the feature IDs.
    for my $id (@$ids) {
        # Get the FIG IDs for the specified incoming ID. SEED IDs are
        # already FIG IDs; everything else goes through the conversion query.
        my @fids;
        if ($source eq 'SEED') {
            push @fids, $id;
        } else {
            push @fids, $sap->GetFlat($idObjects, $idFilter, [@idParms, $id],
                                      'Feature(id)');
        }
        # We'll put the sequence data we find in here.
        my @sequences;
        # Did we find any features?
        if (! @fids) {
            # No. Are we looking for proteins?
            if ($protein) {
                # Yes. Check to see if this is a protein identifier. Note that
                # in this case we're guaranteed the identifier is not a FIG ID,
                # so we can use part of the information we get from
                # ComputeFeatureFilter without worry.
                @sequences = $sap->GetFlat("Identifier Names ProteinSequence",
                                           $idFilter, [@idParms, $id],
                                           'ProteinSequence(sequence)');
            }
        }
        # Loop through the feature IDs.
        for my $fid (@fids) {
            # If the original ID and the FIG ID are different, add the
            # FIG ID to the comments.
            my $comment = $comments->{$id} || '';
            if ($id ne $fid) {
                $comment = join(" ", "[$fid]", $comment);
            }
            # Are we looking for DNA or protein data?
            if ($protein) {
                # Look for this feature's protein sequence. There should only be
                # one, so we only keep the first.
                my ($sequence) = $sap->GetFlat("Produces ProteinSequence",
                                               'Produces(from-link) = ?', [$fid],
                                               'ProteinSequence(sequence)');
                # Only proceed if we found something.
                if (defined $sequence) {
                    Trace("Creating FASTA for feature $fid.") if T(3);
                    # Create a sequence string.
                    push @sequences, create_fasta_record($id, $comment, $sequence,
                                                         $stripped);
                }
            } else {
                # We're looking for DNA, so get this feature's locations.
                my @locs = $sap->GetLocations($fid);
                # Only proceed if at least one location was found.
                if (scalar @locs) {
                    # Loop through the locations, getting DNA.
                    my $dna = join("", map { $sap->ComputeDNA($_) } @locs);
                    # Form everything into a FASTA string and store it as the result.
                    push @sequences, create_fasta_record($id, $comment, $dna, $stripped);
                }
            }
        }
        # Store the first sequence found (usually there is only one). IDs
        # with no sequences are omitted entirely, per the documented
        # contract. (The previous version had identical THEN and ELSE
        # branches on $stripped here, and stored undef for missing IDs.)
        if (@sequences) {
            $retVal->{$id} = $sequences[0];
        }
    }
    # Return the result.
    return $retVal;
}
=head3 locs_to_dna
my $locHash = $sapObject->locs_to_dna({
-locations => {
$label1 => $loc1,
$label2 => $loc2,
... },
-fasta => 1
});
Return the DNA sequences for the specified locations.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -locations
Reference to a hash that maps IDs to locations. A location can be in the
form of a L</Location String>, a reference to a list of location strings,
a FIG feature ID, or a contig ID.
=item -fasta (optional)
If TRUE, the DNA sequences will be returned in FASTA format instead of
raw format. The default is FALSE.
=back
=item RETURN
Returns a reference to a hash that maps the incoming IDs to FASTA sequences for
the specified DNA locations. The FASTA ID will be the ID specified in the incoming
hash.
$locHash = { $label1 => $sequence1,
$label2 => $sequence2,
... };
=back
=cut
sub locs_to_dna {
    my ($self, $args) = @_;
    # Result hash: input label -> DNA (raw or FASTA).
    my $retVal = {};
    # Sapling database object.
    my $sap = $self->{db};
    # The caller must supply the ID-to-location map.
    my $locs = $args->{-locations};
    Confess("No locations specified.") if ! defined $locs;
    # In raw mode (the default) the FASTA wrapper is stripped off.
    my $stripped = ($args->{-fasta} ? 0 : 1);
    for my $label (keys %$locs) {
        # The location specifier may be a FIG ID, one or more location
        # strings, or a contig ID.
        my $locData = $locs->{$label};
        my $dna = "";
        if ($locData =~ /^fig\|/) {
            # FIG feature ID: concatenate the DNA for all its locations.
            $dna = join("", map { $sap->ComputeDNA($_) } $sap->GetLocations($locData));
        } elsif (ref $locData eq 'ARRAY' || $locData =~ /^\S+_\d+[+\-_]\d+$/) {
            # Location string(s): normalize to a list reference first.
            if (ref $locData ne 'ARRAY') {
                $locData = [$locData];
            }
            # Accumulate the DNA for each location in order.
            for my $locString (@$locData) {
                $dna .= $sap->ComputeDNA(BasicLocation->new($locString));
            }
        } else {
            # Otherwise assume a contig ID: stitch together its DNA
            # sections in sequence order.
            my @pieces = $sap->GetFlat("Contig HasSection DNASequence",
                'Contig(id) = ? ORDER BY DNASequence(id)', [$locData],
                'DNASequence(sequence)');
            $dna = join("", @pieces);
        }
        # Package the DNA (optionally as FASTA) under the incoming label.
        $retVal->{$label} = create_fasta_record($label, undef, $dna, $stripped);
    }
    return $retVal;
}
=head3 roles_to_proteins
my $roleHash = $sapObject->roles_to_proteins({
-roles => [$role1, $role2, ...]
});
Return a list of the proteins associated with each of the incoming functional
roles.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -roles
Reference to a list of functional roles.
=back
=item RETURN
Returns a reference to a hash mapping each incoming role to a list of the
proteins generated by features that implement the role. The proteins will
be represented by MD5 protein IDs.
$roleHash = { $role1 => [$prot1a, $prot1b, ...],
$role2 => [$prot2a, $prot2b, ...],
... };
=back
=cut
sub roles_to_proteins {
    my ($self, $args) = @_;
    # Sapling database handle.
    my $sap = $self->{db};
    # Incoming functional roles.
    my $roles = ServerThing::GetIdList(-roles => $args);
    # Result hash: role -> list of MD5 protein IDs.
    my $retVal = {};
    for my $role (@$roles) {
        # Pull the proteins produced by features implementing this role,
        # discarding duplicates while preserving first-seen order.
        my %seen;
        $retVal->{$role} = [
            grep { ! $seen{$_}++ }
                $sap->GetFlat("IsFunctionalIn Produces",
                              'IsFunctionalIn(from-link) = ?', [$role],
                              'Produces(to-link)')
        ];
    }
    return $retVal;
}
=head3 upstream
my $featureHash = $sapObject->upstream({
-ids => [$fid1, $fid2, ...],
-size => 200,
-skipGene => 1,
-fasta => 1,
-comments => { $fid1 => $comment1,
$fid2 => $comment2, ...}
});
Return the DNA sequences for the upstream regions of the specified
features. The nucleotides inside coding regions are displayed in upper
case; others are displayed in lower case.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of FIG feature IDs of interest.
=item -size (optional)
Number of upstream nucleotides to include in the output. The default is
C<200>.
=item -skipGene (optional)
If TRUE, only the upstream region is included. Otherwise, the content
of the feature is included in the output.
=item -fasta (optional)
If TRUE, the output sequences will be multi-line FASTA strings instead of sequences.
The default is FALSE, meaning the output sequences will be ordinary strings.
=item -comments (optional)
Allows the user to add a label or description to each FASTA formatted sequence.
The values is a reference to a hash whose keys are the ids, and the values are
the desired labels. This parameter is only used when the C<-fasta> option is
specified.
=back
=item RETURN
Returns a hash mapping each incoming feature ID to the DNA sequence of
its upstream region.
$featureHash = { $fid1 => $sequence1, $fid2 => $sequence2, ... };
=back
=cut
sub upstream {
    # Get the parameters.
    my ($self, $args) = @_;
    # Return hash: FIG ID -> upstream DNA (raw or FASTA).
    my $retVal = {};
    # Sapling database handle.
    my $sap = $self->{db};
    # Features of interest.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Option processing: -skipGene excludes the gene body, -size is the
    # number of upstream base pairs (default 200), -comments supplies
    # FASTA labels, and -fasta selects FASTA (unstripped) output.
    my $skipGene = $args->{-skipGene} || 0;
    my $size = $args->{-size} || 200;
    my $comments = $args->{-comments} || {};
    my $stripped = ($args->{-fasta} ? 0 : 1);
    # Loop through the IDs.
    for my $fid (@$ids) {
        # Only the feature's first location is used.
        my ($loc) = $sap->GetLocations($fid);
        if (! defined $loc) {
            ### DEBUG: features with no location are skipped entirely.
            print "Location undefined for $fid.\n";
        } else {
            # We found a location. Get its length.
            my $locLen = $loc->Length();
            Trace("Location is " . $loc->String . " length $locLen.") if T(3);
            # The contig length bounds the upstream extension.
            my $contigID = $loc->Contig;
            my $contigLen = $sap->ContigLength($contigID);
            # Extend the location over the specified upstream region.
            if ($skipGene) {
                # In skip-gene mode, we get a pure upstream location.
                $loc = $loc->Upstream($size, $contigLen);
            } else {
                # Otherwise, we simply extend upstream over the gene too.
                $loc->ExtendUpstream($size, $contigLen);
            }
            Trace("Upstream location is " . $loc->String) if T(3);
            # Get the DNA for this location. It comes back in lower case;
            # coding regions are upper-cased below.
            my $dna = $sap->ComputeDNA($loc);
            Trace("DNA prefix is " . substr($dna, 0, 100) . ".") if T(3);
            # Get the direction of this location.
            my $locDir = $loc->Dir;
            # Find the other genes in this region, keeping only locations
            # that run in the same direction on the same contig.
            my @pegs = $sap->GenesInRegion($loc);
            Trace("Overlapping pegs are: " . join(", ", @pegs)) if T(3);
            my @locs;
            for my $peg (@pegs) {
                push @locs, grep { $_->Dir eq $locDir &&
                                   $_->Contig eq $contigID } $sap->GetLocations($peg);
            }
            # Upper-case the portion of the DNA covered by each such gene.
            for my $overLoc (@locs) {
                my ($start, $len) = $loc->OverlapRegion($overLoc);
                # (Typo fix: trace message previously read "Ovelap".)
                Trace("Overlap with " . $overLoc->String . " is at $start for length $len.") if T(3);
                if ($len) {
                    $dna = substr($dna, 0, $start) . uc(substr($dna, $start, $len)) .
                        substr($dna, $start + $len);
                }
            }
            # Store the result, optionally FASTA-formatted.
            $retVal->{$fid} = create_fasta_record($fid, $comments->{$fid}, $dna, $stripped);
        }
    }
    # Return the result.
    return $retVal;
}
=head2 Expression Data Methods
=head3 all_experiments
my $expList = $sapObject->all_experiments();
Return a list of all the experiment names.
=over 4
=item RETURN
Returns a reference to a list of experiment names.
$expList = [$exp1, $exp2, ...];
=back
=cut
sub all_experiments {
    my ($self, $args) = @_;
    # Query the Sapling database for every Experiment ID and return the
    # list by reference.
    my @experiments = $self->{db}->GetFlat('Experiment', '', [], 'id');
    return \@experiments;
}
=head3 atomic_regulon_vectors
my $regulonHash = $sapObject->atomic_regulon_vectors({
-ids => [$ar1, $ar2, ...],
-raw => 0
});
Return a map of the expression levels for each specified atomic regulon. The
expression levels will be returned in the form of vectors with values C<-1>
(suppressed), C<1> (expressed), or C<0> (unknown) in each position. The positions
will correspond to the experiments in the order returned by L</genome_experiments>.
=over 4
=item parameter
The parameter should be a reference to a hash with the following key.
=over 8
=item -ids
Reference to a list of atomic regulon IDs.
=item -raw (optional)
If TRUE, then the vectors will be returned in the form of strings. Each string will
have the character C<+>, C<->, or space for the values 1, -1, and 0 respectively.
=back
=item RETURN
Returns a reference to a hash mapping the incoming atomic regulon IDs to the desired
vectors. The vectors will normally be references to lists of values of 1, 0, and -1,
but they can also be represented as strings.
=over 8
=item Normal Output
$regulonHash = { $ar1 => [$level1a, $level2a, ...],
$ar2 => [$level2a, $level2b, ...],
... };
=item Output if -raw is TRUE
$regulonHash = { $ar1 => $string1, $ar2 => $string2, ... };
=back
=back
=cut
sub atomic_regulon_vectors {
    my ($self, $args) = @_;
    # Sapling database handle.
    my $sap = $self->{db};
    # Result hash: regulon ID -> expression-level vector.
    my $retVal = {};
    # In raw mode the vector is returned as a +/-/space string rather
    # than a list of 1/0/-1 values.
    my $rawFlag = $args->{-raw} || 0;
    # Atomic regulons to process.
    my $ids = ServerThing::GetIdList(-ids => $args);
    for my $regulon (@$ids) {
        # Read the level vectors for this regulon, ordered by experiment.
        my $query = $sap->Get("WasGeneratedFrom",
            'WasGeneratedFrom(from-link) = ? ORDER BY WasGeneratedFrom(to-link)',
            [$regulon]);
        # Fold them into a single vector and attach it to the regulon.
        $retVal->{$regulon} = ServerThing::ReadCountVector($query, 'level-vector',
                                                           $rawFlag);
    }
    return $retVal;
}
=head3 atomic_regulons
my $regulonHash = $sapObject->atomic_regulons({
-id => $genome1
});
Return a map of the atomic regulons for the specified genome. Each atomic
regulon is a set of genes that are always regulated together. The map will
connect each regulon ID to a list of those genes. A given gene can only be
in one atomic regulon.
=over 4
=item parameter
The parameter should be a reference to a hash with the following key.
=over 8
=item -id
The ID of the genome of interest.
=back
=item RETURN
Returns a reference to a hash that maps each atomic regulon ID to a list of
the FIG IDs of its constituent genes.
$regulonHash = { $regulon1 => [$fid1a, $fid1b, ...],
$regulon2 => [$fid2a, $fid2b, ...],
... };
=back
=cut
sub atomic_regulons {
    my ($self, $args) = @_;
    # Sapling database handle.
    my $sap = $self->{db};
    # Result hash: regulon ID -> list of member FIG feature IDs.
    my $retVal = {};
    # The target genome is required.
    my $genomeID = $args->{-id};
    Confess("No genome ID specified for atomic_regulons.") if ! $genomeID;
    # Read the (regulon, feature) pairs for this genome's regulons.
    my $qh = $sap->Get("IsConfiguredBy IsFormedOf",
                       'IsConfiguredBy(from-link) = ?', [$genomeID]);
    while (my $row = $qh->Fetch()) {
        # Accumulate each feature under its regulon.
        my $regulonID = $row->PrimaryValue('IsFormedOf(from-link)');
        my $featureID = $row->PrimaryValue('IsFormedOf(to-link)');
        push @{$retVal->{$regulonID}}, $featureID;
    }
    return $retVal;
}
=head3 coregulated_correspondence
my $fidHash = $sapObject->coregulated_correspondence({
-ids => [$fid1, $fid2, ...],
-pcLevel => 0.8,
-genomes => [$genome1, $genome2, ...]
});
Given a gene, return genes that may be coregulated because they correspond to
coregulated genes in genomes for which we have expression data (an
I<expression-analyzed genome>). For each incoming gene, a corresponding
gene will be found in each expression-analyzed genome. The coregulated
genes for the corresponding gene will be determined, and then these will be
mapped back to the original genome. The resulting genes can be considered
likely candidates for coregulation in the original genome.
=over 4
=item parameter
The parameter should be a reference to a hash with the following key.
=over 8
=item -ids
Reference to a list of FIG feature IDs.
=item -pcLevel (optional)
Minimum pearson coefficient level for a gene to be considered coregulated.
The default is C<0.5>.
=item -genomes (optional)
Reference to a list of genome IDs. If specified, only expression data from the
listed genomes will be used in the analysis; otherwise, all genomes with
expression data will be used.
=back
=item RETURN
Returns a reference to a hash that maps each incoming gene to a list of 4-tuples,
each 4-tuple consisting of (0) a hypothetical coregulated gene in this genome,
(1) a gene in an expression-analyzed genome corresponding to the input gene,
(2) a gene in the expression-analyzed genome coregulated with it (and that
corresponds to the hypothetical coregulated gene), and (3) the correlation
score.
$fidHash = { $fid1 => [[$fid1a, $fid1ax, $fid1ay, $score1a],
[$fid1b, $fid1bx, $fid1by, $score1b],
...],
$fid2 => [[$fid2a, $fid2ax, $fid2ay, $score2a],
[$fid2b, $fid2bx, $fid2by, $score2b],
...],
... };
=back
=cut
# Maximum number of maps to keep in memory.
# NOTE(review): MAX_MAPPINGS is not referenced anywhere in this method;
# presumably it relates to CorrespondenceCache sizing -- confirm before
# removing.
use constant MAX_MAPPINGS => 500;
sub coregulated_correspondence {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # Get the pearson coefficient level. Candidates below this threshold
    # are excluded.
    my $pcLevel = $args->{-pcLevel} || 0.5;
    # Get the list of expression-data genomes. If the caller did not
    # supply one, every genome with expression data is used.
    my $genomes;
    if (exists $args->{-genomes}) {
        $genomes = ServerThing::GetIdList(-genomes => $args);
    } else {
        $genomes = $self->expressed_genomes();
    }
    # Get the list of genes to process.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Declare the return variable.
    my $retVal = {};
    # Create a correspondence cache. The require is deferred to here so the
    # module is only loaded when this method is actually invoked.
    require CorrespondenceCache;
    my $corrCache = CorrespondenceCache->new();
    # Loop through the incoming genes.
    for my $fid (@$ids) {
        # Compute the genome for this gene.
        my $idGenome = genome_of($fid);
        # The correspondence data for this gene will be put in here.
        my @coregs;
        # Loop through the expression-analyzed genomes.
        for my $xGenome (@$genomes) {
            # Get the corresponding gene in this expression-analyzed genome.
            my $xFid = $corrCache->get_correspondent($fid, $xGenome);
            # Only proceed if one was found.
            if (defined $xFid) {
                # Loop through the coregulated genes with sufficiently high pearson coefficients.
                my $qh = $sap->Get("IsCoregulatedWith",
                    'IsCoregulatedWith(from-link) = ? AND IsCoregulatedWith(coefficient) >= ?',
                    [$xFid, $pcLevel]);
                while (my $resultRow = $qh->Fetch()) {
                    # Get the coregulated gene in the expression-analyzed genome.
                    # NOTE(review): the field names below are unqualified, unlike
                    # the qualified form (e.g. 'IsCoregulatedWith(to-link)') used
                    # elsewhere in this file -- verify both forms are accepted by
                    # PrimaryValue.
                    my $xFid2 = $resultRow->PrimaryValue('to-link');
                    # Get the pearson coefficient for the coregulation.
                    my $coefficient = $resultRow->PrimaryValue('coefficient');
                    # Look for a corresponding gene in the original genome.
                    my $fid2 = $corrCache->get_correspondent($xFid2, $idGenome);
                    # If we found one, put it in the output.
                    if (defined $fid2) {
                        push @coregs, [$fid2, $xFid, $xFid2, $coefficient];
                    }
                }
            }
        }
        # Store the hypothetically-coregulated genes found in the return hash.
        $retVal->{$fid} = \@coregs;
    }
    # Return the result.
    return $retVal;
}
=head3 coregulated_fids
my $fidHash = $sapObject->coregulated_fids({
-ids => [$fid1, $fid2, ...]
});
Given a gene, return the coregulated genes and their pearson coefficients.
Two genes are considered coregulated if there is some experimental evidence
that their expression levels are related: the pearson coefficient indicates
the strength of the relationship.
=over 4
=item parameter
The parameter should be a reference to a hash with the following key.
=over 8
=item -ids
Reference to a list of FIG feature IDs.
=back
=item RETURN
Returns a reference to a hash that maps each incoming FIG ID to a sub-hash.
The sub-hash in turn maps each related feature's FIG ID to its pearson
coefficient with the incoming FIG ID.
$fidHash = { $fid1 => { $fid1a => $coeff1a, $fid1b => $coeff1b, ...},
$fid2 => { $fid2a => $coeff2a, $fid2b => $coeff2b, ...},
... };
=back
=cut
sub coregulated_fids {
    my ($self, $args) = @_;
    # Sapling database handle.
    my $sap = $self->{db};
    # Result hash: FIG ID -> { coregulated FIG ID => pearson coefficient }.
    my $retVal = {};
    # Features to process.
    my $ids = ServerThing::GetIdList(-ids => $args);
    for my $fid (@$ids) {
        # Build the coefficient map from the coregulation relationship rows.
        my %coeffMap;
        for my $row ($sap->GetAll("IsCoregulatedWith",
                                  'IsCoregulatedWith(from-link) = ?', [$fid],
                                  [qw(to-link coefficient)])) {
            $coeffMap{$row->[0]} = $row->[1];
        }
        $retVal->{$fid} = \%coeffMap;
    }
    return $retVal;
}
=head3 experiment_fid_levels
my $expHash = $sapObject->experiment_fid_levels({
-ids => [$exp1, $exp2, ...]
});
Given an experiment, return the on/off levels for all genes in that
experiment. An on/off level is either C<1> (expressed), C<-1> (inhibited),
or C<0> (unknown).
=over 4
=item parameter
The parameter should be a reference to a hash with the following key.
=over 8
=item -ids
Reference to a list of experiment IDs.
=back
=item RETURN
Returns a reference to a hash that maps each experiment ID to a sub-hash
that indicates the expression level of each gene for which the experiment
showed a result.
$expHash = { $exp1 => { $fid1a => $level1a, $fid1b => $level1b, ... },
$exp2 => { $fid2a => $level2a, $fid2b => $level2b, ... },
... };
=back
=cut
sub experiment_fid_levels {
    my ($self, $args) = @_;
    # Sapling database handle.
    my $sap = $self->{db};
    # Result hash: experiment ID -> { FIG ID => on/off level }.
    my $retVal = {};
    # Experiments to process.
    my $ids = ServerThing::GetIdList(-ids => $args);
    for my $exp (@$ids) {
        # Build the gene-to-level map for this experiment.
        my %levelMap;
        for my $row ($sap->GetAll("IndicatesSignalFor",
                                  'IndicatesSignalFor(from-link) = ?', [$exp],
                                  [qw(to-link level)])) {
            $levelMap{$row->[0]} = $row->[1];
        }
        $retVal->{$exp} = \%levelMap;
    }
    return $retVal;
}
=head3 experiment_regulon_levels
my $expHash = $sapObject->experiment_regulon_levels({
-ids => [$exp1, $exp2, ...]
});
Given an experiment, return the on/off levels for all atomic regulons
affected by that experiment. An on/off level is either C<1> (expressed), C<-1>
(inhibited), or C<0> (unknown).
=over 4
=item parameter
The parameter should be a reference to a hash with the following key.
=over 8
=item -ids
Reference to a list of experiment IDs.
=back
=item RETURN
Returns a reference to a hash that maps each experiment ID to a sub-hash
that indicates the expression level of each atomic regulon for which the
experiment showed a result.
$expHash = { $exp1 => { $regulon1a => $level1a, $regulon1b => $level1b, ... },
$exp2 => { $regulon2a => $level2a, $regulon2b => $level2b, ... },
... };
=back
=cut
sub experiment_regulon_levels {
    my ($self, $args) = @_;
    # Sapling database handle.
    my $sap = $self->{db};
    # Result hash: experiment ID -> { atomic regulon ID => on/off level }.
    my $retVal = {};
    # Experiments to process.
    my $ids = ServerThing::GetIdList(-ids => $args);
    for my $exp (@$ids) {
        # Build the regulon-to-level map for this experiment.
        my %levelMap;
        for my $row ($sap->GetAll("AffectsLevelOf",
                                  'AffectsLevelOf(from-link) = ?', [$exp],
                                  [qw(to-link level)])) {
            $levelMap{$row->[0]} = $row->[1];
        }
        $retVal->{$exp} = \%levelMap;
    }
    return $retVal;
}
=head3 expressed_genomes
my $genomeList = $sapObject->expressed_genomes({
-names => 1
});
List the IDs of genomes for which expression data exists in the database.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -names (optional)
If TRUE, then the return will be a reference to a hash mapping the genome IDs to
genome names; if FALSE, the return will be a reference to a list of genome IDs.
The default is FALSE.
=back
=item RETURN
Returns a reference to a list of genome IDs or a hash mapping genome IDs to genome names.
=over 8
=item -names FALSE
$genomeList = [$genome1, $genome2, ...];
=item -names TRUE
$genomeList = { $genome1 => $name1, $genome2 => $name2, ... };
=back
=back
=cut
sub expressed_genomes {
    # Unpack the invocant and the argument hash.
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # Map every genome that has expression results to its scientific name.
    my %nameMap = map { $_->[0] => $_->[1] }
        $sap->GetAll('ProducedResultsFor Genome', '', [],
                     'to-link Genome(scientific-name)');
    # If the caller asked for names, return the ID-to-name hash directly;
    # otherwise return just the sorted list of genome IDs.
    return $args->{-names} ? \%nameMap : [ sort keys %nameMap ];
}
=head3 fid_experiments
my $fidHash = $sapObject->fid_experiments({
-ids => [$fid1, $fid2, ...],
-experiments => [$exp1, $exp2, ...]
});
Return the expression levels for the specified features in all experiments for which they
have results.
=over 4
=item parameter
The parameter should be a reference to a hash with the following key.
=over 8
=item -ids
Reference to a list of FIG feature IDs.
=item -experiments (optional)
A list of experiments. If specified, only levels from the indicated experiments will be
returned.
=back
=item RETURN
Returns a reference to a hash mapping each incoming feature ID to a list of 3-tuples,
each 3-tuple containing (0) an experiment ID, (1) the expression on/off indication (1/0/-1),
and (2) the normalized rma-value.
$fidHash = { $fid1 => [[$exp1a, $level1a, $rma1a],
[$exp1b, $level1b, $rma1b], ...],
$fid2 => [[$exp2a, $level2a, $rma2a],
[$exp2b, $level2b, $rma2b], ...],
... };
=back
=cut
sub fid_experiments {
    # Unpack the invocant and the argument hash.
    my ($self, $args) = @_;
    # Get access to the Sapling database.
    my $sap = $self->{db};
    # The per-feature result lists accumulate in here.
    my %retVal;
    # Pull the feature IDs from the argument hash.
    my $fids = ServerThing::GetIdList(-ids => $args);
    # The experiment list is optional: an empty list means no filtering.
    my $exps = ServerThing::GetIdList(-experiments => $args, 1);
    # Build the experiment filter set and remember whether filtering applies.
    my %wanted = map { $_ => 1 } @$exps;
    my $filtering = scalar @$exps;
    for my $fid (@$fids) {
        # Retrieve the (experiment, level, rma-value) tuples for this feature.
        my @tuples = $sap->GetAll("HasIndicatedSignalFrom",
                                  'HasIndicatedSignalFrom(from-link) = ?', [$fid],
                                  [qw(to-link level rma-value)]);
        # When an experiment filter is in effect, keep only tuples whose
        # experiment ID (first position) is in the filter set.
        if ($filtering) {
            @tuples = grep { $wanted{$_->[0]} } @tuples;
        }
        $retVal{$fid} = \@tuples;
    }
    # Return a reference to the result map.
    return \%retVal;
}
=head3 fid_vectors
my $regulonHash = $sapObject->fid_vectors({
-ids => [$fid1, $fid2, ...],
-raw => 0
});
Return a map of the expression levels for each specified feature (gene). The
expression levels will be returned in the form of vectors with values C<-1>
(suppressed), C<1> (expressed), or C<0> (unknown) in each position. The positions
will correspond to the experiments in the order returned by L</genome_experiments>.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of FIG feature IDs.
=item -raw (optional)
If TRUE, then the vectors will be returned in the form of strings. Each string will
have the character C<+>, C<->, or space for the values 1, -1, and 0 respectively.
=back
=item RETURN
Returns a reference to a hash mapping the incoming atomic regulon IDs to the desired
vectors. The vectors will normally be references to lists of values pf 1, 0, and -1,
but they can also be represented as strings.
=over 8
=item Normal Output
$regulonHash = { $fid1 => [$level1a, $level2a, ...],
$fid2 => [$level2a, $level2b, ...],
... };
=item Output if -raw is TRUE
$regulonHash = { $fid1 => $string1, $fid2 => $string2, ... };
=back
=back
=cut
sub fid_vectors {
    # Unpack the invocant and the argument hash.
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # Determine whether the caller wants raw (string) vectors.
    my $rawFlag = $args->{-raw} || 0;
    # Pull the feature IDs from the argument hash.
    my $fids = ServerThing::GetIdList(-ids => $args);
    # The per-feature vectors accumulate in here.
    my %retVal;
    for my $fid (@$fids) {
        # Query the level data for this feature, ordered by experiment.
        my $query = $sap->Get("HasLevelsFrom",
            'HasLevelsFrom(from-link) = ? ORDER BY HasLevelsFrom(to-link)',
            [$fid]);
        # Collapse the query results into a single vector (list or string,
        # depending on the raw flag) and attach it to the feature.
        $retVal{$fid} = ServerThing::ReadCountVector($query, 'level-vector', $rawFlag);
    }
    # Return a reference to the result map.
    return \%retVal;
}
=head3 fids_expressed_in_range
my $genomeHash = $sapObject->fids_expressed_in_range({
-ids => [$genome1, $genome2, ...],
-minLevel => $min,
-maxLevel => $max
});
Return for each genome the genes that are expressed in a given fraction of the experiments
for that genome.
=over 4
=item parameter
The parameter should be a reference to a hash containing the following keys.
=over 8
=item -ids
Reference to a list of IDs for the genomes of interest.
=item -minLevel (optional)
Minimum expression level. Only genes expressed at least this fraction of the time will be
output. Must be between C<0> and C<1> (inclusive) to be meaningful. The default
is C<0>, which gets everything less than or equal to the maximum level.
=item -maxLevel (optional)
Maximum expression level. Only genes expressed no more than this fraction of the time will be
output. Must be between C<0> and C<1> (inclusive) to be meaningful. The default is C<1>,
which gets everything greater than or equal to the minimum level.
=back
=item RETURN
Returns a hash that maps each incoming genome ID to a sub-hash. The sub-hash maps the FIG ID for
each qualifying feature to the level (as a fraction of the total experiments recorded) that it
is expressed.
$genomeHash = { $genome1 => { $fid1a => $level1a, $fid1b => $level1b, ...},
$genome2 => { $fid2a => $level2a, $fid2b => $level2b, ...},
};
=back
=cut
sub fids_expressed_in_range {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # Get the list of genome IDs.
    my $genomes = ServerThing::GetIdList(-ids => $args);
    # Get the minimum and maximum levels, defaulting to the full [0,1] range.
    my $minLevel = $args->{-minLevel}; $minLevel = 0.00 if ! defined $minLevel;
    my $maxLevel = $args->{-maxLevel}; $maxLevel = 1.00 if ! defined $maxLevel;
    # Declare the result hash.
    my $retVal = {};
    # Loop through the incoming genomes.
    for my $genome (@$genomes) {
        # Create the sub-hash for this genome.
        my $subHash = {};
        # Create a query to get the expression levels for this genome. The
        # ORDER BY groups all vectors for the same feature together, which the
        # streaming-average logic below depends on.
        my $qh = $sap->Get("HasLevelsFrom",
                           'HasLevelsFrom(from-link) LIKE ? ORDER BY HasLevelsFrom(from-link)',
                           ["fig|$genome%"], [qw(from-link level-vector)]);
        # There is a possibility we will get multiple results for a single gene. In that case
        # we average them together. These variables remember the previous gene, its total
        # fraction, and the number of vectors.
        my ($fid, $total, $count) = ('', 0, 0);
        # Loop through the results.
        while (my $row = $qh->Fetch()) {
            # Get the feature ID.
            my $newFid = $row->PrimaryValue('from-link');
            # Get the raw level vector. This will be a string of +, 0, and - characters.
            # We want the fraction of + characters over the total count of + and -
            # characters. If both counts are 0, then the result is ignored.
            my ($vector) = $row->Value('level-vector', 1);
            # tr/// in scalar context counts matching characters without modifying.
            my $plusses = ($vector =~ tr/+//);
            my $minuses = ($vector =~ tr/-//);
            # Only proceed if we had at least one experiment with a definite result.
            if ($plusses > 0 || $minuses > 0) {
                # Expression fraction for this vector: expressed / decided.
                my $level = $plusses / ($plusses + $minuses);
                # Is this a new feature?
                if ($newFid ne $fid) {
                    # Yes. Store the previous feature (if any), provided its
                    # averaged level falls inside the requested range.
                    if ($count > 0) {
                        my $oldLevel = $total / $count;
                        if ($oldLevel >= $minLevel && $oldLevel <= $maxLevel) {
                            $subHash->{$fid} = $oldLevel;
                        }
                    }
                    # Initialize for the new feature.
                    ($fid, $total, $count) = ($newFid, 0, 0);
                }
                # Add this data to the existing information about the feature.
                $total += $level;
                $count++;
            }
        }
        # If we have data left over, write it out to the sub-hash. This flushes
        # the final feature, which the loop above never reaches.
        if ($count > 0) {
            my $oldLevel = $total / $count;
            if ($oldLevel >= $minLevel && $oldLevel <= $maxLevel) {
                $subHash->{$fid} = $oldLevel;
            }
        }
        # Save this genome's results.
        $retVal->{$genome} = $subHash;
    }
    # Return the results.
    return $retVal;
}
=head3 fids_to_regulons
my $fidHash = $sapObject->fids_to_regulons({
-ids => [$fid1, $fid2, ...]
});
Return the atomic regulons associated with each incoming gene.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of FIG feature IDs for the genes of interest.
=back
=item RETURN
Returns a reference to a hash of hashes, keyed on FIG feature ID.
Each feature is mapped to a sub-hash that maps the feature's atomic
regulons to the number of features in each regulon.
$fidHash = { $fid1 => { $regulon1a => $size1a, $regulon1b => $size1b, ...},
$fid2 => { $regulon2a => $size2a, $regulon2b => $size2b, ...},
... };
=back
=cut
sub fids_to_regulons {
    # Unpack the invocant and the argument hash.
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # Pull the feature IDs from the argument hash.
    my $fids = ServerThing::GetIdList(-ids => $args);
    # The per-feature regulon maps accumulate in here.
    my %retVal;
    # Cache of regulon sizes, so each regulon is counted against the
    # database at most once no matter how many features reference it.
    my %sizeCache;
    for my $fid (@$fids) {
        # Find the atomic regulons containing this feature.
        my @regulons = $sap->GetFlat("IsFormedInto",
                'IsFormedInto(from-link) = ?', [$fid], 'to-link');
        # Make sure every regulon's size is in the cache.
        for my $regulon (@regulons) {
            unless (exists $sizeCache{$regulon}) {
                $sizeCache{$regulon} = $sap->GetCount("IsFormedOf",
                        "IsFormedOf(from-link) = ?", [$regulon]);
            }
        }
        # Build this feature's regulon-to-size map from the cache.
        my %sizeMap;
        $sizeMap{$_} = $sizeCache{$_} for @regulons;
        $retVal{$fid} = \%sizeMap;
    }
    # Return a reference to the result map.
    return \%retVal;
}
=head3 genome_experiments
my $genomeHash = $sapObject->genome_experiments({
-ids => [$genome1, $genome2, ...]
});
Return a list of the experiments for each indicated genome.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of genome IDs. For each genome ID, a list of relevant
experiments will be produced.
=back
=item RETURN
Returns a hash mapping each incoming genome ID to a list of experiments related
to that genome ID.
$featureHash = { $id1 => [$exp1a, $exp1b, ...],
$id2 => [$exp2a, $exp2b, ...] };
=back
=cut
sub genome_experiments {
    # Unpack the invocant and the argument hash.
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # Pull the genome IDs from the argument hash.
    my $genomeIDs = ServerThing::GetIdList(-ids => $args);
    # The per-genome experiment lists accumulate in here.
    my %retVal;
    for my $genomeID (@$genomeIDs) {
        # Fetch this genome's experiments in chip/sequence order and
        # store the list directly in the result map.
        $retVal{$genomeID} = [ $sap->GetFlat("HadResultsProducedBy HasResultsIn",
                'HadResultsProducedBy(from-link) = ? ORDER BY HasResultsIn(from-link), HasResultsIn(sequence)',
                [$genomeID], 'HasResultsIn(to-link)') ];
    }
    # Return a reference to the result map.
    return \%retVal;
}
=head3 genome_experiment_levels
my $fidHash = $sapObject->genome_experiment_levels({
-genome => $genome1,
-experiments => [$exp1, $exp2, ...]
});
Return the expression levels for the specified features in all experiments for which they
have results.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -genome
ID of a genome for which expression data is present.
=item -experiments (optional)
A list of experiments. If specified, only levels from the indicated experiments will be
returned.
=back
=item RETURN
Returns a reference to a hash mapping each of the genome's feature IDs to a list of 3-tuples,
each 3-tuple containing (0) an experiment ID, (1) the expression on/off indication (1/0/-1),
and (2) the normalized rma-value.
$fidHash = { $fid1 => [[$exp1a, $level1a, $rma1a],
[$exp1b, $level1b, $rma1b], ...],
$fid2 => [[$exp2a, $level2a, $rma2a],
[$exp2b, $level2b, $rma2b], ...],
... };
=back
=cut
sub genome_experiment_levels {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get access to the Sapling database.
    my $sap = $self->{db};
    # Create the return hash.
    my $retVal = {};
    # Get the genome ID. This parameter is required.
    my $genome = $args->{-genome};
    Confess("No genome specified for genome_experiment_levels") if ! defined $genome;
    # Get the experiment list. This is optional, so it will return an empty list
    # if the parameter is unspecified.
    my $exps = ServerThing::GetIdList(-experiments => $args, 1);
    # Set up the experiment filter. An empty experiment list means no filtering.
    my $unFiltered = (@$exps == 0);
    my %expFilter = map { $_ => 1 } @$exps;
    Trace("Retrieving experiment data for $genome.") if T(3);
    # Get the experiment data for this genome. The LIKE pattern restricts the
    # query to features of the target genome.
    my @params = ("fig|$genome.%");
    # If there is experiment filtering, add the experiment IDs to the condition.
    my $cond = "";
    if (@$exps) {
        push(@params, @$exps);
        my $qs = join(", ", map { "?" } @$exps);
        $cond = " AND HasIndicatedSignalFrom(to-link) IN ($qs)";
    }
    my @rows = $sap->GetAll("HasIndicatedSignalFrom",
                            "HasIndicatedSignalFrom(from-link) LIKE ? $cond ORDER BY HasIndicatedSignalFrom(from-link)",
                            \@params,
                            [qw(from-link to-link level rma-value)]);
    Trace(scalar(@rows) . " of experiment data found.") if T(SAP => 3);
    # Now we loop through the results, organizing them by feature ID. The current
    # feature ID will be kept in here. It starts as the empty string rather than
    # undef so that the first "eq" comparison below does not raise an
    # uninitialized-value warning; no real feature ID is ever empty.
    my $fid = '';
    # This will contain the rows for the current feature.
    my $fidRows = [];
    # Finally, we add a trailer row to insure the last feature's data gets stored
    # in the loop. Its experiment ID is taken from the filter list so the trailer
    # survives filtering.
    push @rows, ["fig|TRAILER", $exps->[0]];
    for my $row (@rows) {
        # Is this row for an experiment we care about?
        if ($unFiltered || $expFilter{$row->[1]}) {
            # Yes. Split out the feature ID.
            my ($rowFid, @rowData) = @$row;
            # Is this a new feature or the same old one?
            if ($fid eq $rowFid) {
                # Same old one: queue the data.
                push @$fidRows, \@rowData;
            } else {
                # New feature. If we have data for the previous feature, write it out.
                if (@$fidRows) {
                    $retVal->{$fid} = $fidRows;
                }
                # Initialize for the new feature. (The trailer row's data lands
                # here and is deliberately discarded when the loop ends.)
                $fidRows = [\@rowData];
                $fid = $rowFid;
            }
        }
    }
    # Return the result.
    return $retVal;
}
=head3 regulons_to_fids
my $regHash = $sapObject->regulons_to_fids({
-ids => [$regulon1, $regulon2, ...]
});
Return the list of genes in each specified atomic regulon.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of atomic regulon IDs.
=back
=item RETURN
Returns a reference to a hash mapping each incoming atomic regulon ID
to a list of the FIG feature IDs for the genes found in the regulon.
$regHash = { $regulon1 => [$fid1a, $fid1b, ...],
$regulon2 => [$fid2a, $fid2b, ...],
... };
=back
=cut
sub regulons_to_fids {
    # Unpack the invocant and the argument hash.
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # Pull the regulon IDs from the argument hash.
    my $regulonIDs = ServerThing::GetIdList(-ids => $args);
    # The per-regulon feature lists accumulate in here.
    my %retVal;
    for my $regulonID (@$regulonIDs) {
        # Fetch the features forming this regulon and store the list
        # directly in the result map.
        $retVal{$regulonID} = [ $sap->GetFlat("IsFormedOf",
                "IsFormedOf(from-link) = ?", [$regulonID], "to-link") ];
    }
    # Return a reference to the result map.
    return \%retVal;
}
=head2 Feature (Gene) Data Methods
NOTE: To get the functional assignment for a feature, see
L</Annotation and Assertion Data Methods>.
=head3 compared_regions
my $result = $sapObject->compared_regions({
-focus => $fid1,
-genomes => [$genome1, $genome2, ... ],
-extent => 16000
});
Return information about the context of a focus gene and the corresponding genes in
other genomes (known as I<pinned genes>). The information returned can be used to
create a compare-regions display.
The return information will be in the form of a reference to a list of contexts,
each context containing genes in a region surrounding the pinned gene on a particular
genome. The genome containing the focus gene will always be the first in the list.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -focus
The FIG ID of the focus gene.
=item -count (optional)
The number of pinned genes desired. If specified, the closest genes to the focus
gene will be located, at most one per genome. The default is C<4>.
=item -genomes (optional)
Reference to a list of genomes. If specified, only genes in the specified
genomes will be considered pinned.
=item -pins (optional)
Reference to a list of FIG feature IDs. The listed genes will be used as the pinned
genes. If this option is specified, it overrides C<-count> and C<-genomes>.
=item -extent (optional)
The number of base pairs to show in the context for each particular genome. The
default is C<16000>.
=back
=item RETURN
Returns a hash that maps each focus gene to the compared regions view for that gene.
Each compared regions view is a list of hashes, one hash per genome.
Each genome has the following keys:
genome_id => this genome's id
genome_name => this genome's name
row_id => the row number for this genome
features => the features for this genome.
The features lists will consist of one or more 9-tuples, one per gene in the context. Each
9-tuple will contain (0) the gene's FIG feature ID, (1) its functional assignment,
(2) its FIGfam ID, (3) the contig ID, (4) the start location, (5) the end location,
(6) the direction (C<+> or C<->), (7) the row index, and (8) the color index. All
genes with the same color have similar functions.
$result = { focus_fid =>
[
{ row_id => 0, genome_name => "g1name", genome_id => "g1id",
features => [[$fid1a, $function1a, $figFam1a, $contig1a, $start1a, $end1a, $dir1a, 0, $color1a],
[$fid1b, $function1b, $figFam1b, $contig1b, $start1b, $end1b, $dir1b, 0, $color1b],
... ],
},
{ row_id => 1, genome_name => "g2name", genome_id => "g2id",
features => [[$fid2a, $function2a, $figFam2a, $contig2a, $start2a, $end2a, $dir2a, 1, $color2a],
[$fid2b, $function2b, $figFam2b, $contig2b, $start2b, $end2b, $dir2b, 1, $color2b],
... ],
},
...
]
};
=back
=cut
sub compared_regions {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the compared-region module. Loaded lazily since most server calls
    # never need it.
    require SapCompareRegions;
    # Get the focus gene ID. This parameter is required.
    my $focus = $args->{-focus};
    Confess("No focus gene specified for compared_regions.") if ! $focus;
    # Now we need to determine the pinned genes. First, we check for an explicit list.
    my $pins = ServerThing::GetIdList(-pins => $args, 1);
    if (! @$pins) {
        # Here no explicit list was specified, so we have to compute the pins. We'll
        # use this argument hash to do it.
        my %pinArgs = (-focus => $focus);
        # Check for a genome list. If present, it restricts which genomes may
        # contribute pinned genes.
        my $genomes = ServerThing::GetIdList(-genomes => $args, 1);
        if (@$genomes) {
            $pinArgs{-genomes} = $genomes;
            Trace("Using genome list.") if T(SapCompareRegions => 3);
        }
        # Get the count of pins desired, defaulting to 4.
        $pinArgs{-count} = $args->{-count} || 4;
        # Ask for the pins.
        $pins = SapCompareRegions::get_pin($self, \%pinArgs);
        Trace(scalar(@$pins) . " pins found.") if T(SapCompareRegions => 3);
    }
    # Now we have the pins and the focus gene. Get the extent (region width in
    # base pairs), defaulting to 16000.
    my $extent = $args->{-extent} || 16000;
    # Compute the context list: the genes surrounding each pin.
    my $ctxList = SapCompareRegions::get_context($self, { -focus => $focus,
                                                          -pin => $pins,
                                                          -extent => $extent });
    # Add the colors by clustering the context genes on function.
    my $retVal = SapCompareRegions::cluster_by_function($self, {-context => $ctxList});
    # Wrap the view in a hash keyed by the focus gene, per the documented return shape.
    $retVal = { $focus => $retVal };
    # Return the result.
    return $retVal;
}
=head3 equiv_sequence_ids
my $idHash = $sapObject->equiv_sequence_ids({
-ids => [$id1, $id2, ...],
-precise => 1
});
Return all identifiers for genes in the database that are
protein-sequence-equivalent to the specified identifiers. In this case, the
identifiers are assumed to be in their natural form (without prefixes). For
each identifier, the identified protein sequences will be found and then
for each protein sequence, all identifiers for that protein sequence or for
genes that produce that protein sequence will be returned.
Alternatively, you can ask for identifiers that are precisely equivalent, that is,
that identify the same location on the same genome.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of identifiers of interest. These can be normal feature
identifiers in prefixed form (e.g. C<cmr|NT03SD3201>, C<gi|90022544>,
C<fig|100226.1.peg.3361>) or their natural, un-prefixed form (C<NT03SD3201>,
C<90022544>). In addition, they can be protein sequence IDs formed by taking the
hexadecimal MD5 hash of the protein sequence with an optional C<md5> or
C<gnl|md5> prefix (C<500009d8cf094fa4e6a1ebb15295c60f>,
C<gnl|md5|6a00b57a9facf5056c68e5d7fe157814>).
=item -precise
If TRUE, then only identifiers that refer to the same location on the same
genome will be returned. The default is FALSE (return all sequence-equivalent
IDs). If this option is specified, identifiers that refer to proteins rather
than features will return no result.
=item -assertions
If TRUE, then instead of returning a hash of lists, this method will return
a hash of sub-hashes. Each sub-hash will be keyed by the equivalent IDs, and
will map each ID to a list of 3-tuples describing assertions about the ID,
each 3-tuple consisting of (0) an assertion of function, (1) the source of the
assertion, and (2) a flag that is TRUE for an expert assertion and FALSE
otherwise. IDs in a sub-hash which are not associated with assertions will map
to an empty list.
=back
=item RETURN
Returns a reference to a hash that maps each incoming identifier to a list
of sequence-equivalent identifiers.
=over 4
=item Normal Output
$idHash = { $id1 => [$id1a, $id1b, ...],
$id2 => [$id2a, $id2b, ...],
... };
=item Output with -assertions = 1
$idHash = { $id1 => { $id1a => [[$assert1ax, $source1ax, $flag1ax],
[$assert1ay, $source1ay, $flag1ay], ...],
$id1b => [[$assert1bx, $source1bx, $flag1bx],
[$assert1by, $source1by, $flag1by], ...]},
... },
$id2 => { $id2a => [[$assert2ax, $source2ax, $flag2ax],
[$assert2ay, $source2ay, $flag2ay], ...],
$id2b => [[$assert2bx, $source2bx, $flag2bx],
[$assert2by, $source2by, $flag2by], ...]},
... },
... };
=back
The output identifiers will not include protein sequence IDs: these are
allowed on input only as a convenience.
=back
=cut
sub equiv_sequence_ids {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # Declare the return variable.
    my $retVal = {};
    # Get the list of IDs.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Check for a precise-equivalence request.
    my $precise = $args->{-precise} || 0;
    # Check for an assertions request.
    my $assertions = $args->{-assertions} || 0;
    # Construct the filter clause we'll use for identifiers. Each incoming ID is
    # matched against both the natural (un-prefixed) and prefixed forms, so the
    # same ID value is bound twice in every query below.
    my $filter = 'Identifier(natural-form) = ? OR Identifier(id) = ?';
    # Loop through the IDs in the list.
    for my $id (@$ids) {
        # We'll store the equivalent IDs we find in here.
        my @results;
        # Is this precise equivalence?
        if ($precise) {
            # Ask for all identifiers that connect to a feature identified by this ID.
            # Protein-sequence IDs have no feature link, so they produce no results
            # in this mode (as documented).
            @results = $sap->GetFlat("Identifier Identifies Feature IsIdentifiedBy Identifier2",
                                     $filter, [$id, $id],
                                     'Identifier2(id)');
        } else {
            # We'll put the proteins of interest in here.
            my @prots;
            # Is this a protein sequence ID (an MD5 form, optionally prefixed)?
            if (my $prot = $sap->IsProteinID($id)) {
                # Use it unmodified.
                push @prots, $prot;
            } else {
                # Here we have a database ID. Ask for all the protein sequences
                # this ID identifies directly.
                push @prots, $sap->GetFlat("Identifier Names ProteinSequence", $filter,
                                           [$id, $id], 'ProteinSequence(id)');
                # Add the ones it identifies through a feature.
                push @prots, $sap->GetFlat("Identifier Identifies Feature Produces ProteinSequence",
                                           $filter, [$id, $id], 'ProteinSequence(id)');
            }
            # Loop through the protein sequences, finding equivalent IDs.
            for my $prot (@prots) {
                push @results, $sap->IdsForProtein($prot);
            }
        }
        # Loop through the results, removing duplicates.
        my %results;
        for my $result (@results) {
            $results{$result} = 1;
        }
        # Format the output according to the assertions option.
        if ($assertions) {
            # Assertion mode: replace each ID's marker value with its list of
            # (function, source, expert-flag) assertion tuples.
            for my $result (keys %results) {
                # Get the assertion data for this ID.
                my @assertRows = $sap->GetAll("Identifier HasAssertionFrom Source",
                                              'Identifier(id) = ? ',
                                              [$result],
                                              [qw(HasAssertionFrom(function)
                                                  Source(id)
                                                  HasAssertionFrom(expert))]);
                # Store them in the hash. IDs with no assertions map to an empty list.
                $results{$result} = \@assertRows;
            }
            # Attach the hash to this ID.
            $retVal->{$id} = \%results;
        } else {
            # Normal mode. Store the IDs found as a sorted list.
            $retVal->{$id} = [ sort keys %results ];
        }
    }
    # Return the result.
    return $retVal;
}
=head3 fid_correspondences
my $featureHash = $sapObject->fid_correspondences({
-ids => [$fid1, $fid2, ...],
-genomes => [$genome1, $genome2, ...]
});
Return the corresponding genes for the specified features in the specified genomes.
The correspondences are determined in the same way as used by L</gene_correspondence_map>,
but this method returns substantially less data.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of FIG feature IDs.
=item -genomes
Reference to a list of genome IDs. For each incoming feature ID, the corresponding
features in the specified genomes will be returned.
=back
=item RETURN
Returns a reference to a hash that maps each incoming feature ID to a list of
corresponding feature IDs in the specified genomes. If no sufficiently corresponding
feature is found in any of the genomes, the feature ID will map to an empty list.
$featureHash = { $fid1 => [$fid1a, $fid1b, ...],
$fid2 => [$fid2a, $fid2b, ...],
... };
=back
=cut
sub fid_correspondences {
    # Unpack the invocant and the argument hash.
    my ($self, $args) = @_;
    # The result map accumulates in here.
    my %retVal;
    # Pull the feature and genome ID lists from the argument hash.
    my $fids = ServerThing::GetIdList(-ids => $args);
    my $targets = ServerThing::GetIdList(-genomes => $args);
    # Bucket the incoming features by their source genome so each
    # correspondence map is computed only once per genome pair.
    my %byGenome;
    push @{$byGenome{genome_of($_)}}, $_ for @$fids;
    # Process one source genome at a time.
    for my $source (keys %byGenome) {
        my $groupFids = $byGenome{$source};
        # Look for correspondences in each target genome.
        for my $target (@$targets) {
            # A genome has no correspondence map against itself.
            next if $source eq $target;
            # Get the gene correspondences from the source to the target.
            my $mapHash = $self->gene_correspondence_map({ -genome1 => $source,
                                                           -genome2 => $target });
            # Accumulate the corresponding gene for each incoming feature
            # that has one in this target genome.
            for my $fid (@$groupFids) {
                if (exists $mapHash->{$fid}) {
                    push @{$retVal{$fid}}, $mapHash->{$fid};
                }
            }
        }
    }
    # Return a reference to the result map.
    return \%retVal;
}
=head3 fid_locations
my $featureHash = $sapObject->fid_locations({
-ids => [$fid1, $fid2, ...],
-boundaries => 1
});
Return the DNA locations for the specified features.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of FIG feature IDs.
=item -boundaries (optional)
If TRUE, then for any multi-location feature, a single location encompassing all the
location segments will be returned instead of a list of all the segments. If the
segments cross between contigs, then the behavior in this mode is undefined
(something will come back, but it may not be what you're expecting). The default
is FALSE, in which case the locations for each feature will be presented in a list.
=back
=item RETURN
Returns a reference to a hash mapping each feature ID to a list of location strings
representing the feature locations in sequence order.
$featureHash = { $fid1 => [$loc1a, $loc1b, ...],
$fid2 => [$loc2a, $loc2b, ...],
... };
=back
=cut
sub fid_locations {
    # Unpack the invocant and the argument hash.
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # The per-feature locations accumulate in here.
    my %retVal;
    # Determine the operating mode: a single bounding location per feature,
    # or the full list of location segments.
    my $wantBoundaries = $args->{-boundaries} || 0;
    # Pull the feature IDs from the argument hash.
    my $fids = ServerThing::GetIdList(-ids => $args);
    for my $fid (@$fids) {
        # Get this feature's location objects; skip features with none.
        my @locObjects = $sap->GetLocations($fid);
        next unless @locObjects;
        # Convert the locations to location strings.
        my @strings = map { $_->String() } @locObjects;
        if ($wantBoundaries) {
            # Collapse the segments into a single bounding region.
            my ($contig, $min, $max) = boundaries_of(\@strings);
            # The overall direction is taken from the first segment.
            my $dir = $locObjects[0]->Dir;
            my $span = $max - $min + 1;
            # Format the bounding location: forward strands anchor at the
            # minimum position, reverse strands at the maximum.
            $retVal{$fid} = ($dir eq '+') ? "${contig}_$min+$span"
                                          : "${contig}_$max-$span";
        } else {
            # List mode: store the segment strings in sequence order.
            $retVal{$fid} = \@strings;
        }
    }
    # Return a reference to the result map.
    return \%retVal;
}
=head3 fid_map_for_genome
my $idHash = $sapObject->get_map_for_genome({
-idHash => { $myID1 => [$id1a, $id1b, ...],
$myID2 => [$id2a, $id2b, ...],
... },
-genome => $genome1
});
Find FIG IDs corresponding to caller-provided genes in a specific genome.
In some situations you may have multiple external identifiers for
various genes in a genome without knowing which ones are present in the Sapling
database and which are not. The external identifiers present in the Sapling
database are culled from numerous sources, but different genomes will tend to
have coverage from different identifier types: some genomes are represented
heavily by CMR identifiers and have no Locus Tags, others have lots of Locus
Tags but no CMR identifiers, and so forth. This method allows you to throw everything
you have at the database in hopes of finding a match.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -idHash
Reference to a hash that maps caller-specified identifiers to lists of external
identifiers in prefixed form (e.g. C<LocusTag:SO1103>, C<uni|QX8I1>, C<gi|4808340>).
Each external identifier should be an alternate name for the same gene.
=item -genome (optional)
ID of a target genome. If specified, only genes in the specified target genome
will be returned.
=back
=item RETURN
Returns a hash mapping the original caller-specified identifiers to FIG IDs in the
target genome. If the identifier list is ambiguous, the first matching FIG ID will
be used. If no matching FIG ID is found, an undefined value will be used.
$idHash = { $myID1 => $fid1, $myID2 => $fid2, ... };
=back
=cut
sub fid_map_for_genome {
    # Unpack the invocant and the argument hash.
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # The caller-ID-to-FIG-ID map accumulates in here.
    my %retVal;
    # Get the optional target genome ID.
    my $genome = $args->{-genome};
    # Validate the caller's ID hash.
    my $idHash = $args->{-idHash};
    if (! defined $idHash) {
        Confess("No id hash specified to fid_map_for_genome.");
    } elsif (ref $idHash ne 'HASH') {
        Confess("Invalid id hash specified to fid_map_for_genome.");
    } else {
        # Compute the object path, filter clause, and bound parameters used
        # to look up prefixed external IDs (restricted to the target genome,
        # if one was given).
        my ($idObjects, $idFilter, @idParms) = $sap->ComputeFeatureFilter('prefixed',
                                                                          $genome);
        # Resolve each of the caller's preferred IDs in turn.
        while (my ($callerID, $aliases) = each %$idHash) {
            # Tolerate a bare scalar in place of a one-element alias list.
            my @aliasList = (ref $aliases eq 'ARRAY') ? @$aliases : ($aliases);
            # Probe each alias until one resolves to a FIG ID.
            my $figID;
            for my $alias (@aliasList) {
                ($figID) = $sap->GetFlat($idObjects, $idFilter,
                                         [@idParms, $alias], 'Feature(id)');
                last if $figID;
            }
            # Record the FIG ID found, or undef when none matched.
            $retVal{$callerID} = $figID;
        }
    }
    # Return a reference to the result map.
    return \%retVal;
}
=head3 fid_possibly_truncated
my $featureHash = $sapObject->fid_possibly_truncated({
-ids => [$fid1, $fid2, ...],
-limit => 300
});
For each specified gene, return C<stop> if its end is possibly truncated,
C<start> if its beginning is possibly truncated, and an empty string
otherwise. Truncation occurs if the gene is located near either edge of a
contig.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of FIG gene IDs.
=item -limit (optional)
The distance from the end of a contig considered to be at risk for truncation.
The default is 300.
=back
=item RETURN
Returns a hash mapping each incoming gene ID to the appropriate value (C<start>
if it has a possibly-truncated start, C<stop> if it has a possibly-truncated stop,
or the empty string otherwise). Note that the empty string is expected to be the
most common result.
$featureHash = { $fid1 => $note1, $fid2 => $note2, ... };
=back
=cut
sub fid_possibly_truncated {
    # Determine, for each incoming gene, whether its start or stop may be
    # truncated by the edge of its contig. A gene within -limit base pairs
    # of a contig edge is considered at risk.
    my ($self, $args) = @_;
    # The output hash maps each feature ID to '', 'start', or 'stop'.
    my $retVal = {};
    # Get the Sapling database.
    my $sap = $self->{db};
    # Distance from a contig edge considered at risk (default 300).
    my $limit = $args->{-limit} || 300;
    # Get the incoming feature IDs.
    my $fids = ServerThing::GetIdList(-ids => $args);
    for my $fid (@$fids) {
        # Compute this feature's overall bounds on its contig.
        my @locations = $sap->GetLocations($fid);
        my ($contig, $leftEdge, $rightEdge) =
            boundaries_of([map { $_->String } @locations]);
        # Skip features whose location could not be resolved; they are
        # omitted from the output entirely.
        next if ! defined $contig;
        # The strand tells us which physical end is the start and which
        # is the stop.
        my $strand = $locations[0]->Dir;
        # We need the contig length to measure distance to its right end.
        my ($contigLen) = $sap->GetEntityValues(Contig => $contig, ['length']);
        Confess("Database error: contig $contig not found.") if ! defined $contigLen;
        # Check proximity to each contig edge.
        my $nearLeft  = $leftEdge < $limit;
        my $nearRight = $rightEdge > $contigLen - $limit;
        # Translate edge proximity plus strand into the truncation note.
        # A possibly-truncated STOP takes priority over a truncated START.
        my $note = '';
        if ($nearLeft && $strand eq '-' || $nearRight && $strand eq '+') {
            $note = 'stop';
        } elsif ($nearLeft && $strand eq '+' || $nearRight && $strand eq '-') {
            $note = 'start';
        }
        # Record the indicator for this feature.
        $retVal->{$fid} = $note;
    }
    # Return the feature-to-indicator map.
    return $retVal;
}
=head3 fids_to_ids
my $featureHash = $sapObject->fids_to_ids({
-ids => [$fid1, $fid2, ...],
-types => [$typeA, $typeB, ...],
-protein => 1
});
Find all aliases and/or synonyms for the specified FIG IDs. For each FIG
ID, a hash will be returned that maps each ID type to a list of the IDs
of that type.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of FIG feature IDs of interest.
=item -types (optional)
Reference to a list of permissible ID types. Only ID types in this list will
be present in the output. If omitted, all ID types are permissible.
=item -protein (optional)
If TRUE, then IDs for features with equivalent protein sequences will be
returned; otherwise, only IDs for precisely equivalent genes will be returned.
The default is FALSE.
=item -natural (optional)
If TRUE, then the IDs will be returned in their natural form; otherwise, the
IDs are returned in prefixed form. The default is FALSE.
=back
=item RETURN
Returns a reference to a hash that maps each feature ID to a sub-hash. Each
sub-hash maps an ID type to a list of equivalent IDs of that type.
$featureHash = { $fid1 => { $typeA => [$id1A1, $id1A2, ...],
$typeB => [$id1B1, $id1B2, ...],
... },
$fid2 => { $typeA => [$id2A1, $id2A2, ...],
$typeB => [$id2B1, $id2B2, ...],
... },
... };
=back
=cut
sub fids_to_ids {
# Return all aliases/synonyms for each incoming FIG feature ID, grouped by
# ID source type. When -protein is TRUE, identifiers of protein-equivalent
# features are included as well; -natural selects natural-form IDs over
# prefixed form; -types restricts the output to specific source types.
# Get the parameters.
my ($self, $args) = @_;
# Declare the return variable. It maps each FIG ID to a sub-hash of
# { source-type => [ids] }; FIG IDs with no identifiers found are omitted.
my $retVal = {};
# Get the sapling database.
my $sap = $self->{db};
# Get the protein mode flag (normalized to 1 or 0).
my $protein = ($args->{-protein} ? 1 : 0);
# Find out if we're natural or prefixed; this selects which Identifier
# field is returned to the caller.
my $idField = ($args->{-natural} ? "Identifier(natural-form)" : "Identifier(id)");
# Create the list of permissible ID types. We will set $restrictedTypes
# to TRUE if type restrictions matter.
my ($restrictedTypes, %permissibleTypes);
if (defined $args->{-types}) {
$restrictedTypes = 1;
my $types = $args->{-types};
if (! ref $types) {
# A scalar was specified, so we treat it as the only allowed type.
$permissibleTypes{$types} = 1;
} elsif (ref $types eq 'ARRAY'){
# Here we have an array reference, which is what we are expecting.
%permissibleTypes = map { $_ => 1 } @$types;
} else {
# This is an error.
Confess("Invalid \"types\" parameter specified: it must be a scalar or a list, but found " .
ref $types . " instead.");
}
}
# Finally, we get the ID list.
my $ids = ServerThing::GetIdList(-ids => $args);
# Loop through the IDs.
for my $fid (@$ids) {
Trace("Processing $fid.") if T(3);
# Get the aliases for this ID. We'll put them in this hash, with each
# alias mapped to its type.
my %aliasHash;
# First, we get the list of exact matches (identifiers attached
# directly to this feature).
my @idPairs = $sap->GetAll('IsIdentifiedBy Identifier',
'IsIdentifiedBy(from-link) = ?',
[$fid], "$idField Identifier(source)");
# If we are doing protein equivalence, get the protein aliases as well.
if ($protein && (my $protID = $sap->IdentifiedProtein($fid))) {
# We have a protein ID, so we want to get all the protein identifiers
# AND all the feature identifiers for features with this protein.
push @idPairs, $sap->GetAll('IsNamedBy Identifier',
'IsNamedBy(from-link) = ?', [$protID],
"$idField Identifier(source)");
push @idPairs, $sap->GetAll('IsProteinFor IsIdentifiedBy Identifier',
'IsProteinFor(from-link) = ?', [$protID],
"$idField Identifier(source)");
}
# Only proceed if we found something.
my $count = scalar @idPairs;
Trace("$count identifiers found for $fid with protein flag $protein.") if T(3);
if ($count) {
# Now we have a list of (identifier, source) pairs for this feature.
# We want to convert this into a hash of lists, mapping source types to
# identifiers from that source. This hash is used to prevent duplicates.
my %idsFound;
# This hash will contain the ID lists.
my %idLists;
# Loop through the pairs.
for my $idPair (@idPairs) {
# Get the ID and its source type.
my ($id, $source) = @$idPair;
# Only proceed if this ID is new (the protein queries above can
# return the same identifier more than once).
if (! exists $idsFound{$id}) {
$idsFound{$id} = 1;
# Do we want to keep IDs of this type?
if (! $restrictedTypes || $permissibleTypes{$source}) {
# Yes. Put it in the type's list.
push @{$idLists{$source}}, $id;
}
}
}
# Put our hash of IDs in the return value.
$retVal->{$fid} = \%idLists;
}
}
# Return the result.
return $retVal;
}
=head3 fids_to_proteins
my $fidHash = $sapObject->fids_to_proteins({
-ids => [$fid1, $fid2, ...],
-sequence => 1
});
Return the ID or amino acid sequence associated with each specified gene's protein. If the gene
does not produce a protein, it will not be included in the output.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of FIG feature IDs, representing the features of interest.
=item -sequence (optional)
If TRUE, then the output will include protein sequences; otherwise, the output will include
MD5 protein IDs. The default is FALSE.
=back
=item RETURN
Returns a reference to a hash keyed by feature ID. If C<-sequence> is FALSE, then the hash
maps each feature ID to the MD5 ID of the relevant gene's protein sequence. If C<-sequence>
is TRUE, then the hash maps each feature ID to the relevant protein sequence itself.
=over 8
=item -sequence TRUE
$fidHash = { $fid1 => $sequence1, $fid2 => $sequence2, ... };
=item -sequence FALSE
$fidHash = { $fid1 => $md5id1, $fid2 => $md5id2, ... };
=back
=back
=cut
sub fids_to_proteins {
    # Return, for each incoming gene, either the MD5 ID of its protein or
    # the protein's amino acid sequence (when -sequence is TRUE). Genes
    # that do not produce a protein are omitted from the result.
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # Get the incoming feature IDs.
    my $fids = ServerThing::GetIdList(-ids => $args);
    # Find out whether the caller wants sequences or MD5 IDs.
    my $wantSequence = $args->{-sequence};
    # The MD5-only case has a memcache-accelerated fast path.
    if ($self->{memcache} && ! $wantSequence) {
        return $self->_fids_to_proteins_opt1($fids);
    }
    # Choose the query path and output field: traverse all the way to the
    # protein sequence when sequences are wanted, otherwise stop at the
    # Produces relationship and return its target (the MD5 ID).
    my $path  = $wantSequence ? "Produces ProteinSequence" : "Produces";
    my $field = $wantSequence ? 'ProteinSequence(sequence)' : 'to-link';
    # Build the result hash.
    my $retVal = {};
    for my $fid (@$fids) {
        # A feature produces at most one protein, so keep the first result.
        my ($protein) = $sap->GetFlat($path, "Produces(from-link) = ?", [$fid], $field);
        # Only record genes that actually have a protein.
        $retVal->{$fid} = $protein if $protein;
    }
    # Return the feature-to-protein map.
    return $retVal;
}
sub _fids_to_proteins_opt1
{
# Memcache-accelerated resolution of feature IDs to protein MD5 IDs.
# Hits are served from the cache under the "f2md5" key prefix; misses are
# looked up in bulk with one SQL query and written back with a 12-hour
# expiration.
my($self, $ids) = @_;
my $out = $self->_memcache_accelerate($ids, "f2md5", sub {
# Callback for the cache misses: $id_hash holds the unresolved IDs,
# results go into $out, and cache updates are queued on @$upd when the
# accelerator asks for them.
my($self, $id_hash, $out, $upd) = @_;
my @ids = keys %$id_hash;
# One placeholder per ID for a single bulk IN-list query.
my $qs = join(", ", map { "?" } 0..$#ids);
# IsProteinFor runs from protein MD5 (from_link) to feature (to_link) --
# see the "ProteinSequence IsProteinFor Feature" path used elsewhere in
# this file -- so selecting to_link first pairs each feature with its
# protein MD5.
my $res = $self->{db}->{_dbh}->SQL(qq(SELECT to_link, from_link
FROM IsProteinFor
WHERE to_link IN ($qs)), undef, @ids);
for my $ent (@$res)
{
my($id, $md5) = @$ent;
$out->{$id} = $md5;
# Cache the mapping for 12 hours.
push(@$upd, ["f2md5:$id", $md5, 12 * 60 * 60]) if $upd;
}
});
return $out;
}
=head3 fids_with_evidence_codes
my $featureHash = $sapObject->fids_with_evidence_codes({
-codes => [$code1, $code2, ...],
-genomes => [$genome1, $genome2, ...]
});
Return the ID, assignment, and evidence for all features having an
evidence code of one of the specified types. The output can be restricted
to one or more specified genomes.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -codes
Reference to a list of evidence code types. This is only the prefix, not a
full-blown code. So, for example, C<ilit> would be used for indirect literature
references, C<dlit> for direct literature references, and so forth.
=item -genomes (optional)
Reference to a list of genome IDs. If no genome IDs are specified, all features
in all genomes will be processed.
=back
=item RETURN
Returns a hash mapping each feature to a list containing the function followed by
all of the feature's evidence codes.
$featureHash = { $fid1 => [$function1, $code1A, $code1B, ...],
$fid2 => [$function2, $code2A, $code2B, ...],
... };
=back
=cut
sub fids_with_evidence_codes {
# Return, for every feature whose evidence codes begin with one of the
# requested prefixes (e.g. "dlit", "ilit"), the feature's functional
# assignment followed by all of its evidence codes. The search can be
# restricted to specific genomes via -genomes.
# Get the parameters.
my ($self, $args) = @_;
# Get the sapling database.
my $sap = $self->{db};
# Determine the genome list.
my $genomes = $args->{-genomes};
if (! defined $genomes) {
# No genomes were specified, so we do them all.
$genomes = [ $sap->GetFlat("Genome", "", [], "id") ];
} elsif (! ref $genomes) {
# A scalar genome ID is converted to a list for convenience.
$genomes = [$genomes];
}
Trace(scalar(@$genomes) . " genomes selected.") if T(3);
# Get the evidence code list.
my $codes = ServerThing::GetIdList(-codes => $args);
Trace(scalar(@$codes) . " evidence code types selected.") if T(3);
# Declare the return variable.
my $retVal = {};
# Loop through the genomes.
for my $genomeID (@$genomes) {
# Loop through the evidence codes.
for my $code (@$codes) {
Trace("Processing $code for $genomeID.") if T(3);
# Query the database for this genome and code. The LIKE pattern does
# a prefix match, so any evidence code of the requested type hits.
my $qh = $sap->Get("Genome IsOwnerOf Feature",
'Genome(id) = ? AND Feature(evidence-code) LIKE ?',
[$genomeID, "$code%"]);
# Loop through the results.
while (my $resultRow = $qh->Fetch()) {
# Get the data for this feature.
my $featureId = $resultRow->PrimaryValue('Feature(id)');
my $featureFunction = $resultRow->PrimaryValue('Feature(function)');
my @featureEvidenceCodes = $resultRow->Value('Feature(evidence-code)');
# Put the data in the return hash: function first, then the codes.
$retVal->{$featureId} = [$featureFunction, @featureEvidenceCodes];
}
}
}
# Return the result.
return $retVal;
}
=head3 genes_in_region
my $locHash = $sapObject->genes_in_region({
-locations => [$loc1, $loc2, ...],
-includeLocation => 1
});
Return a list of the IDs for the features that overlap the specified
regions on a contig.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -locations
Reference to a list of location strings (e.g. C<360108.3:NZ_AANK01000002_264528_264007>
or C<100226.1:NC_003888_3766170+612>). A location string consists of a contig ID
(which includes the genome ID), an underscore, a begin offset, and either an underscore
followed by an end offset or a direction (C<+> or C<->) followed by a length.
=item -includeLocation
If TRUE, then instead of mapping each location to a list of IDs, the hash will map
each location to a hash reference that maps the IDs to their locations.
=back
=item RETURN
Returns a reference to a hash mapping each incoming location string
to a list of the IDs for the features that overlap that location.
$locHash = { $loc1 => [$fid1A, $fid1B, ...],
$loc2 => [$fid2A, $fid2B, ...],
... };
=back
=cut
sub genes_in_region {
    # For each incoming location string, find the IDs of the features
    # overlapping that region. With -includeLocation, each region maps to
    # a hash of feature ID => location-string list instead of a plain list.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sap = $self->{db};
    # The result maps each location string to its overlapping features.
    my $retVal = {};
    # TRUE if the caller wants each feature's own locations included.
    my $withLocations = $args->{-includeLocation} || 0;
    # Get the incoming location strings.
    my $regions = ServerThing::GetIdList(-locations => $args);
    for my $region (@$regions) {
        # Find the features overlapping this region.
        my @found = $sap->GenesInRegion($region);
        if (! $withLocations) {
            # Simple mode: the output is just the feature ID list.
            $retVal->{$region} = \@found;
        } else {
            # Location mode: map each feature found to the string forms of
            # its locations.
            my %locMap;
            for my $fid (@found) {
                $locMap{$fid} = [ map { $_->String() } $sap->GetLocations($fid) ];
            }
            $retVal->{$region} = \%locMap;
        }
    }
    # Return the region-to-features map.
    return $retVal;
}
=head3 ids_to_data
my $featureHash = $sapObject->ids_to_data({
-ids => [$id1, $id2, ...],
-data => [$fieldA, $fieldB, ...],
-source => 'UniProt'
});
Return the specified data items for the specified features.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of gene identifiers. Normally, these would be FIG
feature IDs, but other identifier types can be specified if you use the
C<-source> option.
=item -data
Reference to a list of data field names. The possible data field names are
given below.
=over 12
=item evidence
Comma-delimited list of evidence codes indicating the reason for the gene's
current assignment.
=item fig-id
The FIG ID of the gene.
=item function
Current functional assignment.
=item genome-name
Name of the genome containing the gene.
=item length
Number of base pairs in the gene.
=item location
Comma-delimited list of location strings indicating the location of the gene
in the genome. A location string consists of a contig ID, an underscore, the
starting offset, the strand (C<+> or C<->), and the number of base pairs.
=item publications
Comma-delimited list of PUBMED IDs for publications related to the gene.
=back
=item -source (optional)
Database source of the IDs specified-- e.g. C<SEED> for FIG IDs, C<GENE> for standard
gene identifiers, or C<LocusTag> for locus tags. In addition, you may specify
C<RefSeq>, C<CMR>, C<NCBI>, C<Trembl>, or C<UniProt> for IDs from those databases.
Use C<mixed> to allow mixed ID types (though this may cause problems when the same
ID has different meanings in different databases). Use C<prefixed> to allow IDs with
prefixing indicating the ID type (e.g. C<uni|P00934> for a UniProt ID, C<gi|135813> for
an NCBI identifier, and so forth). The default is C<SEED>.
=item -genome (optional)
ID of a specific genome. If specified, results will only be returned for genes in the
specified genome. The default is to return results for genes in all genomes.
=back
=item RETURN
Returns a hash mapping each incoming ID to a list of tuples. There will be one
tuple for each feature identified by the incoming ID (because some IDs are
ambiguous there may be more than one), and the tuple will contain the
specified data fields for the computed gene in the specified order.
$featureHash = { $id1 => [$tuple1A, $tuple1B, ...],
$id2 => [$tuple2A, $tuple2B, ...],
... };
=back
=cut
sub ids_to_data {
# Return the requested data fields (-data) for the genes identified by the
# incoming IDs. Each ID maps to a list of tuples, one per matching gene,
# with tuple elements in the same order as the requested field names.
# Get the parameters.
my ($self, $args) = @_;
# Get the sapling database.
my $sap = $self->{db};
# Declare the return variable.
my $retVal = {};
# This hash is used to cache genome names for performance, since many
# incoming genes typically share a genome.
my %genomes;
# Get the list of fields and perform a basic validation so we know we have a
# list of data items.
my $fields = $args->{-data};
Confess("No data fields specified in \"ids_to_data\".") if ! defined $fields;
Confess("Invalid data field list in \"ids_to_data\".") if ref $fields ne 'ARRAY';
# Get the list of feature IDs.
my $ids = ServerThing::GetIdList(-ids => $args);
# Create the feature filter for IDs of this type (and the optional
# target genome).
my ($objects, $filter, @parms) = $sap->ComputeFeatureFilter($args->{-source},
$args->{-genome});
# Loop through the identifiers.
for my $id (@$ids) {
# The output tuples for this identifier will be put in here.
my @tuples;
# Get the features for this identifier. An ambiguous ID can match
# more than one feature.
my @dbObjects = $sap->GetList($objects, $filter, [@parms, $id]);
# Loop through the features found.
for my $feature (@dbObjects) {
# The current tuple will be built in here.
my @tuple;
# Get the current feature ID.
my $fid = $feature->PrimaryValue('Feature(id)');
# Loop through the fields we need, appending one tuple element each.
for my $field (@$fields) {
if ($field eq 'evidence') {
# We do a join here because there may be multiple evidence codes.
push @tuple, join(", ", $feature->Value('Feature(evidence-code)'));
} elsif ($field eq 'fig-id') {
# The FIG ID was extracted above.
push @tuple, $fid;
} elsif ($field eq 'function') {
# The assignment is a field in the Feature record.
push @tuple, $feature->Value('Feature(function)');
} elsif ($field eq 'genome-name') {
# For genome names, we need to parse the feature ID.
my $genomeID = genome_of($fid);
# If we already have this genome's name, we reuse it;
# otherwise, we query the database.
if (! $genomes{$genomeID}) {
($genomes{$genomeID}) = $sap->GetEntityValues(Genome => $genomeID,
['scientific-name']);
}
push @tuple, $genomes{$genomeID};
} elsif ($field eq 'length') {
# This is the sequence-length field from the feature record.
push @tuple, $feature->Value('Feature(sequence-length)');
} elsif ($field eq 'location') {
# Sapling has a custom method for getting locations.
my @locs = $sap->GetLocations($fid);
# We translate the location objects to location strings and
# join them with commas before returning them. Note that in
# most cases, however, there will only be one.
push @tuple, join(", ", map { $_->String } @locs);
} elsif ($field eq 'publications') {
# The publication data is kept in the evidence codes. For each
# publication, there will be a "dlit" evidence code relating to
# it. Immediately after the "dlit" will be a PUBMED number
# enclosed in parentheses. Note that in array context, the match
# operator obligingly returns an empty list if a match fails and
# a list of the parenthesized matched text if the match works.
push @tuple, join(", ", map { $_ =~ /dlit\((\d+)/ } $feature->Value('Feature(evidence-code)'));
} else {
Confess("Invalid data field name \"$field\" in \"ids_to_data\".");
}
}
# Add this feature's data to the output list for this ID.
push @tuples, \@tuple;
}
# Store this ID's results.
$retVal->{$id} = \@tuples;
}
# Return the result.
return $retVal;
}
=head3 ids_to_fids
my $idHash = $sapObject->ids_to_fids({
-ids => [$id1, $id2, ...],
-protein => 1,
-genomeName => $genusSpeciesString,
-source => 'UniProt'
});
Return a list of the FIG IDs corresponding to each of the specified
identifiers. The correspondence can either be gene-based (same feature)
or sequence-based (same protein).
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of identifiers.
=item -source
Database source of the IDs specified-- C<SEED> for FIG IDs, C<GENE> for standard
gene identifiers, or C<LocusTag> for locus tags. In addition, you may specify
C<RefSeq>, C<CMR>, C<NCBI>, C<Trembl>, or C<UniProt> for IDs from those databases.
Use C<mixed> to allow mixed ID types (though this may cause problems when the same
ID has different meanings in different databases). Use C<prefixed> to allow IDs with
prefixing indicating the ID type (e.g. C<uni|P00934> for a UniProt ID, C<gi|135813> for
an NCBI identifier, and so forth).
=item -protein (optional)
If TRUE, then all FIG IDs for equivalent proteins will be returned. The default is
FALSE, meaning that only FIG IDs for the same gene will be returned.
=item -genomeName (optional)
The full or partial name of a genome or a comma-delimited list of genome IDs.
This parameter is useful for narrowing the results when a protein match is
specified. If it is omitted, no genome filtering is performed.
=back
=item RETURN
Returns a reference to a hash mapping each incoming identifier to a list
of equivalent FIG IDs.
$idHash = { $id1 => [$fid1A, $fid1B, ...],
$id2 => [$fid2A, $fid2B, ...],
... };
=back
=cut
sub ids_to_fids {
# Return the FIG IDs equivalent to each incoming identifier, either by
# direct gene correspondence or (when -protein is TRUE) by protein
# equivalence. Results may be narrowed by -genomeName.
# Get the parameters.
my ($self, $args) = @_;
# Declare the return variable.
my $retVal = {};
# Get the sapling database.
my $sap = $self->{db};
# Get the list of IDs.
my $ids = ServerThing::GetIdList(-ids => $args);
# Get the source. Unlike most methods in this file, -source is required
# here.
my $source = $args->{-source};
Confess("No -source specified on \"ids_to_fids\".") if ! defined $source;
# Compute the feature filter.
my ($objects, $filter, @parms) = $sap->ComputeFeatureFilter($source);
# Determine whether we are looking for protein equivalence or feature
# equivalence.
my $protFlag = ($args->{-protein} ? 1 : 0);
Trace("Protein flag is $protFlag.") if T(3);
# Loop through the IDs.
for my $id (@$ids) {
Trace("Retrieving features for identifier $id.") if T(3);
# We'll put the FIG IDs for this identifier in here. Using a hash
# keeps the result set free of duplicates.
my %fidsFound;
# Are we looking for proteins or genes?
if (! $protFlag) {
# Genes are fairly simple. We use the feature filter to get feature IDs.
%fidsFound = map { $_ => 1 } $sap->GetFlat($objects, $filter, [@parms, $id],
'Feature(id)');
} else {
# Here we're looking for proteins. We first need to check for a
# direct protein ID. How we do this depends on the source.
my ($filter2, @parms2);
if ($source eq 'mixed') {
$filter2 = "Identifier(natural-form) = ?";
} elsif ($source eq 'prefixed') {
$filter2 = "Identifier(id) = ?"
} else {
# A specific source: filter on it, with the ID appended at query
# time as the second parameter.
$filter2 = "Identifier(source) = ? AND Identifier(natural-form) = ?";
@parms2 = $source;
}
my @prots = $sap->GetFlat("Identifier Names ProteinSequence",
$filter2, [@parms2, $id], 'ProteinSequence(id)');
# Check for proteins related to feature IDs.
push @prots, $sap->GetFlat("$objects Produces ProteinSequence",
$filter, [@parms, $id], 'ProteinSequence(id)');
# Now find all the features for these proteins.
for my $prot (@prots) {
my @fids = $sap->GetFlat("ProteinSequence IsProteinFor Feature",
"ProteinSequence(id) = ?", [$prot],
'Feature(id)');
for my $fid (@fids) {
$fidsFound{$fid} = 1;
}
}
}
# Now we apply the genome filter.
my @results = $sap->FilterByGenome([ keys %fidsFound ], $args->{-genomeName});
# Put the IDs found into the return hash, sorted in standard FIG order.
$retVal->{$id} = [ sort { SeedUtils::by_fig_id($a, $b) } @results ];
}
# Return the result.
return $retVal;
}
=head3 ids_to_genomes
my $featureHash = $sapObject->ids_to_genomes({
-ids => [$id1, $id2, ...],
-source => 'SwissProt',
-name => 1
});
Return the genome information for each incoming gene ID.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of gene IDs.
=item -source (optional)
Database source of the IDs specified-- C<SEED> for FIG IDs, C<GENE> for standard
gene identifiers, or C<LocusTag> for locus tags. In addition, you may specify
C<RefSeq>, C<CMR>, C<NCBI>, C<Trembl>, or C<UniProt> for IDs from those databases.
Use C<mixed> to allow mixed ID types (though this may cause problems when the same
ID has different meanings in different databases). Use C<prefixed> to allow IDs with
prefixing indicating the ID type (e.g. C<uni|P00934> for a UniProt ID, C<gi|135813> for
an NCBI identifier, and so forth). The default is C<SEED>.
=item -name (optional)
If TRUE, the genomes names will be returned; if FALSE, the genome IDs will be
returned. The default is FALSE.
=back
=item RETURN
Returns a reference to a hash mapping each incoming ID to the associated genome
ID, or alternatively to the associated genome name.
$featureHash = { $id1 => $genome1, $id2 => $genome2, ... };
=back
=cut
sub ids_to_genomes {
    # Map each incoming gene identifier to the ID of its owning genome, or
    # to the genome's scientific name when -name is TRUE.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sap = $self->{db};
    # Declare the return hash.
    my $retVal = {};
    # Build the query components appropriate to the caller's ID type.
    my ($pathObjects, $pathFilter, @pathParms) =
        $sap->ComputeFeatureFilter($args->{-source});
    # Select the output field: scientific name or genome ID.
    my $outField = $args->{-name} ? 'Genome(scientific-name)' : 'Genome(id)';
    # Get the incoming IDs.
    my $ids = ServerThing::GetIdList(-ids => $args);
    for my $id (@$ids) {
        # Chase the identifier through to its owning genome. At most one
        # value is kept; an unmatched ID maps to undef.
        ($retVal->{$id}) = $sap->GetFlat("$pathObjects IsOwnedBy Genome",
                                         $pathFilter, [@pathParms, $id], $outField);
    }
    # Return the ID-to-genome map.
    return $retVal;
}
sub _ids_to_genomes_opt1
{
# NOTE(review): despite its name, this helper maps feature IDs to the
# Feature "function" field (cached under the "f:" key prefix), not to
# genomes -- and nothing visible in this file calls it; ids_to_genomes
# above queries the database directly. It looks like a copy/paste of a
# fids-to-functions accelerator; confirm intent before relying on it.
my($self, $ids) = @_;
my $out = $self->_memcache_accelerate($ids, "f", sub {
# Callback for the cache misses: resolve them in bulk via SQL and queue
# cache updates on @$upd when requested.
my($self, $id_hash, $out, $upd) = @_;
my @ids = keys %$id_hash;
# One placeholder per uncached ID for a single IN-list query.
my $qs = join(", ", map { "?" } 0..$#ids);
my $res = $self->{db}->{_dbh}->SQL(qq(SELECT id, function
FROM Feature
WHERE id IN ($qs)), undef, @ids);
for my $ent (@$res)
{
my($id, $fn) = @$ent;
$out->{$id} = $fn;
# Cache each result for 12 hours.
push(@$upd, ["f:$id", $fn, 12 * 60 * 60]) if $upd;
}
});
return $out;
}
=head3 ids_to_lengths
my $geneHash = $sapObjects->ids_to_lengths({
-ids => [$id1, $id2, ...],
-protein => 1,
-source => 'NCBI'
});
Return the DNA or protein length of each specified gene.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of gene IDs.
=item -source (optional)
Database source of the IDs specified-- C<SEED> for FIG IDs, C<GENE> for standard
gene identifiers, or C<LocusTag> for locus tags. In addition, you may specify
C<RefSeq>, C<CMR>, C<NCBI>, C<Trembl>, or C<UniProt> for IDs from those databases.
Use C<mixed> to allow mixed ID types (though this may cause problems when the same
ID has different meanings in different databases). Use C<prefixed> to allow IDs with
prefixing indicating the ID type (e.g. C<uni|P00934> for a UniProt ID, C<gi|135813> for
an NCBI identifier, and so forth). The default is C<SEED>.
=item -genome (optional)
ID of a specific genome. If specified, results will only be returned for genes in the
specified genome. The default is to return results for genes in all genomes.
=item -protein (optional)
If TRUE, then the length of each gene's protein will be returned. Otherwise, the
DNA length of each gene will be returned. The default is FALSE (DNA lengths).
=back
=item RETURN
Returns a reference to a hash mapping each incoming ID to the length of the
associated gene. If no gene is found, or B<-protein> is TRUE and the gene is
not a protein-encoding gene, the ID will not be present in the return hash.
$geneHash = { $id1 => $length1, $id2 => $length2, ... };
=back
=cut
sub ids_to_lengths {
    # Return the DNA length of each incoming gene, or its protein length
    # when -protein is TRUE. IDs with no matching gene (or no protein, in
    # protein mode) are omitted from the result.
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # Declare the return hash.
    my $retVal = {};
    # Get the incoming gene IDs.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Build the query components for IDs of the caller's type, possibly
    # restricted to a single genome.
    my ($pathObjects, $pathFilter, @pathParms) =
        $sap->ComputeFeatureFilter($args->{-source}, $args->{-genome});
    # TRUE for protein lengths, FALSE for DNA lengths.
    my $wantProtein = $args->{-protein} || 0;
    for my $id (@$ids) {
        # Collect the length of every gene matching this ID.
        my @found;
        if ($wantProtein) {
            # Protein mode: pull each translated sequence and measure it.
            @found = map { length $_ }
                     $sap->GetFlat("$pathObjects Produces ProteinSequence",
                                   $pathFilter, [@pathParms, $id],
                                   'ProteinSequence(sequence)');
        } else {
            # DNA mode: the length is stored directly on the feature.
            @found = $sap->GetFlat($pathObjects, $pathFilter, [@pathParms, $id],
                                   'Feature(sequence-length)');
        }
        # An ambiguous ID may match several genes; report the mean length.
        # (Usually there is exactly one match.)
        if (@found) {
            my $sum = 0;
            $sum += $_ for @found;
            $retVal->{$id} = int($sum / @found);
        }
    }
    # Return the ID-to-length map.
    return $retVal;
}
=head3 make_runs
my $groupHash = $sapObject->make_runs({
-groups => ["$fid0a, $fid0b, ...",
"$fid1a, $fid1b, ...",
...],
-maxGap => 200,
                                 -justFirst => 1,
-operonSize => 10000
});
Look at sequences of feature IDs and separate them into operons. An
operon contains features that are close together on the same contig going
in the same direction.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -groups
Reference to a list of strings. Each string will contain a comma-separated list of
FIG feature IDs for the features in a group. Alternatively, this can be a
reference to a list of lists, in which each sub-list contains the feature IDs in
a group.
=item -maxGap (optional)
Maximum number of base pairs that can be between two genes in order for them
to be considered as part of the same operon. The default is 200.
=item -justFirst (optional)
If TRUE, then only the first feature in an operon will be included in the
output operon strings. The default is FALSE.
=item -operonSize (optional)
Estimate of the typical size of an operon. This is a tuning parameter; the
default is C<10000>.
=back
=item RETURN
Returns a hash mapping group numbers to lists of operons. In other words,
for each incoming group, the hash will map the group's (zero-based) index number
to a list of operon strings. Each operon string is a comma-separated list of
feature IDs in operon order.
$groupHash = { 0 => [[$fid1op1, $fid2op1, ...],
[$fid1op2, $fid2op2, ...], ... ],
1 => [[$fid1opA, $fid2opB, ...],
[$fid1opB, $fid2opB, ...], ... ],
... };
=back
=cut
sub make_runs {
    # Partition groups of features into operons: runs of features that are
    # close together on the same contig and pointed in the same direction.
    # Returns a hash mapping each group's zero-based index to a list of
    # operon strings (or of first-feature IDs when -justFirst is set).
    # Get the parameters.
    my ($self, $args) = @_;
    # Declare the return variable.
    my $retVal = {};
    # Get the sapling database.
    my $sap = $self->{db};
    # Get the list of group strings.
    my $groups = ServerThing::GetIdList(-groups => $args);
    my $groupCount = scalar @$groups;
    # Get the just-first flag (TRUE => emit only the first feature of each operon).
    my $justFirst = $args->{-justFirst} || 0;
    # Get the maximum gap size: two genes farther apart than this are in
    # different operons.
    my $maxGap = $args->{-maxGap} || 200;
    # Get the operon size. This is a tuning parameter for the gap search.
    my $operonSize = $args->{-operonSize} || 10000;
    # Loop through the groups. We use an index because it is going to be
    # the key of the output hash.
    for (my $gidx = 0; $gidx < $groupCount; $gidx++) {
        Trace("Processing group $gidx.") if T(3);
        # Get the features in this group. We allow the option of a comma-delimited
        # string or a sub-list.
        my $group = $groups->[$gidx];
        if (ref $group ne 'ARRAY') {
            $group = [split m/\s*,\s*/, $group];
        }
        # This hash is used to remove duplicates.
        my %fids = map { $_ => 1 } @$group;
        # We have our initial set of features. We are now going to loop through
        # all the locations and convert them into operons. Each operon is a list
        # of features. We will collect the completed operons in a list. If at
        # any point we find a feature that's been in a previous operon, we quit.
        # A hash is used to contain the features previously found.
        my %oldFidHash;
        my @operons;
        # This is the main location loop. Note a feature may have multiple
        # locations, each of which seeds a candidate operon.
        for my $loc (map { $sap->GetLocations($_) } keys %fids) {
            Trace("Computing operon for " . $loc->String . ".") if T(3);
            # We query the database and loop through the locations found
            # until we find a feature from an old operon (which means this
            # operon is discarded) or encounter a gap (which means this operon
            # is complete). We need to go in both directions; the following
            # flag will stop us if it is set in either direction.
            my $redundant = 0;
            # Locations found will be stored in here as [fid, begin, dir, len] tuples.
            my @operonData;
            # Search to the left for a gap.
            push @operonData, $sap->FindGapLeft($loc, $maxGap, $operonSize,
                                                \%oldFidHash, \$redundant);
            # Search to the right for a gap.
            push @operonData, $sap->FindGapRight($loc, $maxGap, $operonSize,
                                                 \%oldFidHash, \$redundant);
            # Only proceed if what we found was not redundant.
            if (! $redundant) {
                Trace("Nonredundant operon found.") if T(3);
                # We need to sort the features found into operon order.
                # For a forward operon, we want to sort by leftmost
                # point; for a backward operon, we want to do a reverse
                # sort by rightmost point. The following loop creates
                # a hash we can use for a sort by value.
                my %sortHash;
                for my $operonDatum (@operonData) {
                    # Get the location data from the current tuple.
                    my ($fid, $begin, $dir, $len) = @$operonDatum;
                    # The sort value is something we want to favor when its
                    # numerically low. For a forward operon, this is the
                    # begin point. For a backward operon, we take the end point
                    # and negate it. The negation makes it sort correctly.
                    my $sortValue = ($dir eq '+' ? $begin : -($begin + $len));
                    # Merge this datum into the hash.
                    if (! exists $sortHash{$fid}) {
                        # Here we have a new feature. Save its sort value.
                        $sortHash{$fid} = $sortValue;
                        # Add it to the redundancy hash for future use.
                        $oldFidHash{$fid} = 1;
                    } else {
                        # Here we have a second location for an existing feature.
                        # Save the minimum sort value.
                        $sortHash{$fid} = Tracer::Min($sortValue, $sortHash{$fid});
                    }
                }
                # Create the operon by sorting the features on their sort values.
                my @operon = sort { $sortHash{$a} <=> $sortHash{$b} } keys %sortHash;
                # Add it to the return list, in the format indicated by the
                # "justFirst" flag: either the first feature ID alone or a
                # comma-separated string of all the feature IDs.
                if ($justFirst) {
                    push @operons, $operon[0];
                } else {
                    push @operons, join(", ", @operon);
                }
            }
        }
        # Put the operons found into the return hash.
        $retVal->{$gidx} = \@operons;
    }
    # Return the result.
    return $retVal;
}
=head3 proteins_to_fids
my $protHash = $sapObject->proteins_to_fids({
-prots => [$prot1, $prot2, ...]
});
Return the FIG feature IDs associated with each incoming protein. The protein can be
specified as an amino acid sequence or MD5 protein ID.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -prots
Reference to a list of proteins. Each protein can be specified as either an amino acid
sequence or an MD5 protein ID. The method will assume a sequence of 32 hex characters is
an MD5 ID and anything else is an amino acid sequence. Amino acid sequences should be
in upper-case only.
=back
=item RETURN
Returns a hash mapping each incoming protein to a list of FIG feature IDs for the genes that
produce the protein.
$protHash = { $prot1 => [$fid1a, $fid1b, ...],
$prot2 => [$fid2a, $fid2b, ...],
... };
=back
=cut
sub proteins_to_fids {
    # Map each incoming protein (amino acid sequence or MD5 protein ID) to
    # the list of FIG feature IDs that produce it.
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sap = $self->{db};
    # Get the list of incoming proteins.
    my $prots = ServerThing::GetIdList(-prots => $args);
    # Declare the return hash.
    my $retVal = {};
    # Loop through the proteins, normalizing each to an MD5 ID. @ids holds
    # [original-input, md5-id] pairs; $have_prot counts how many inputs were
    # raw sequences (rather than MD5 IDs).
    my @ids;
    my $have_prot;
    for my $prot (@$prots) {
        # If this is a protein sequence, convert it to an MD5 ID.
        # Heuristic: exactly 32 characters, all lowercase hex => already an
        # MD5 ID. (Amino acid sequences are expected in upper case.)
        my $id;
        if (length($prot) == 32 && $prot !~ /[^0-9a-f]/) {
            $id = $prot;
        } else {
            $id = $sap->ProteinID($prot);
            $have_prot++;
        }
        push(@ids, [$prot, $id]);
    }
    # Fast path: when a memcache is configured and every input was already an
    # MD5 ID, try the cache-accelerated batch lookup.
    if ($self->{memcache} && !$have_prot)
    {
        my $opt_out = $self->_proteins_to_fids_opt1($prots);
        # Sanity check: the first input should have produced output.
        # NOTE(review): this compares with 'ne' against an array ref or undef;
        # it appears to work as a defined-and-nonempty test, but emits a
        # warning when the slot is undef — confirm intended.
        if ($opt_out->{$prots->[0]} ne '')
        {
            return $opt_out;
        }
        else
        {
            # Fast path produced nothing; log diagnostics and fall through to
            # the per-ID query loop below.
            print STDERR "proteins_to_fids: missing output " . Dumper($prots, $opt_out);
        }
    }
    # Slow path: one IsProteinFor query per protein.
    for my $id_ent (@ids)
    {
        my($prot, $id) = @$id_ent;
        # Find the features that produce the specified protein.
        my @fids = $sap->GetFlat('IsProteinFor', 'IsProteinFor(from-link) = ?', [$id], "to-link");
        # Put the results in the return hash, keyed by the caller's original input.
        $retVal->{$prot} = \@fids;
    }
    # Return the results.
    return $retVal;
}
sub _proteins_to_fids_opt1
{
    # Memcache-accelerated lookup of feature IDs for a list of MD5 protein IDs.
    #
    # $ids is a reference to a list of MD5 protein identifiers. Returns a hash
    # reference mapping each MD5 ID to a list of the FIG feature IDs that
    # produce the protein. IDs missing from the cache are resolved with a
    # single batched SQL query against IsProteinFor, and the fresh results are
    # queued (via $upd) for insertion into the cache.
    my($self, $ids) = @_;
    my $out = $self->_memcache_accelerate_list($ids, "md52f", sub {
        my($self, $id_hash, $out, $upd) = @_;
        my @ids = keys %$id_hash;
        # Guard: an empty cache-miss set would generate "IN ()", which is
        # invalid SQL. Nothing to fetch in that case.
        return if ! @ids;
        # Build one placeholder per ID for the IN-list.
        my $qs = join(", ", map { "?" } 0..$#ids);
        my $res = $self->{db}->{_dbh}->SQL(qq(SELECT from_link, to_link
                                              FROM IsProteinFor
                                              WHERE from_link IN ($qs)), undef, @ids);
        for my $ent (@$res)
        {
            my($md5, $id) = @$ent;
            # Record the feature both in the output and in the cache-update queue.
            push(@{$out->{$md5}}, $id);
            push(@{$upd->{"md52f:$md5"}}, $id);
        }
    });
    return $out;
}
=head2 FIGfam Data Methods
=head3 all_figfams
my $ffHash = $sapObject->all_figfams({
-roles => [$role1, $role2, ...],
-functions => [$function1, $function2, ...]
});
Return a list of all the FIGfams along with their functions. Optionally, you
can specify a role or a function, and only FIGfams with that role or function
will be returned.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -roles (optional)
If specified, a reference to a list of roles. Only FIGfams with one of the
specified roles (or one of the functions listed in C<-functions>) will be
returned in the hash.
=item -functions (optional)
If specified, a reference to a list of functions. Only FIGfams with one of the
specified functions (or one of the roles listed in C<-roles>) will be returned
in the hash.
=back
=item RETURN
Returns a reference to a hash mapping each qualifying FIGfam ID to its
function.
$ffHash = { $ff1 => $function1, $ff2 => $function2, ... };
=back
=cut
sub all_figfams {
    # Return a hash of FIGfam IDs to family functions, optionally restricted
    # to families matching a set of roles and/or functions.
    # Get the parameters.
    my ($self, $args) = @_;
    # Declare the return variable.
    my $retVal = {};
    # Get the Sapling database.
    my $sap = $self->{db};
    # Get the roles and functions. Both of these are optional (the trailing 1
    # tells GetIdList the parameter may be absent).
    my $roles = ServerThing::GetIdList(-roles => $args, 1);
    my $functions = ServerThing::GetIdList(-functions => $args, 1);
    # If we have neither, we're officially asking for everything.
    if (@$roles + @$functions == 0) {
        # Ask for all of the FIGfams (family IDs starting with "FIG").
        $retVal = { map { $_->[0] => $_->[1] } $sap->GetAll('Family',
                                                            'Family(id) LIKE ?',
                                                            ['FIG%'],
                                                            ['id', 'family-function']) };
    } else {
        # Here we are searching by role or function. Create hashes for the
        # roles and functions of interest, for fast membership testing below.
        my %roleMap = map { $_ => 1 } @$roles;
        my %functionMap = map { $_ => 1 } @$functions;
        # Now we want a list of all roles of interest. We start with the roles
        # themselves, then add all the roles taken from functions
        # (roles_of_function is an external helper — presumably it splits a
        # function string into its constituent roles; confirm in its module).
        my %roleList = map { $_ => 1 } @$roles;
        for my $function (@$functions) {
            for my $role (roles_of_function($function)) {
                $roleList{$role} = 1;
            }
        }
        # Now loop through all of the roles found.
        for my $role (keys %roleList) {
            # Get all the FIGfams for this role.
            my @ffPairs = $sap->GetAll("Role DeterminesFunctionOf Family",
                                       'Role(id) = ? AND Family(id) LIKE ?', [$role,'FIG%'],
                                       [qw(Family(id) Family(family-function))]);
            # Loop through the FIGfams for this role.
            for my $ffPair (@ffPairs) {
                my ($ff, $function) = @$ffPair;
                # Keep this FIGfam only if the role or the whole function was
                # explicitly requested (roles derived from functions don't
                # qualify on their own).
                if ($roleMap{$role} || $functionMap{$function}) {
                    $retVal->{$ff} = $function;
                }
            }
        }
    }
    # Return the result.
    return $retVal;
}
=head3 discriminating_figfams
my $groupList = $sapObject->discriminating_figfams({
-group1 => [$genome1a, $genome2a, ...],
-group2 => [$genome2a, $genome2b, ...]
});
Determine the FIGfams that discriminate between two groups of genomes.
A FIGfam discriminates between genome groups if it is common in one group and
uncommon in the other. The degree of discrimination is assigned a score based
on statistical significance, with 0 being insignificant and 2 being extremely
significant. FIGfams with a score greater than 1 are returned by this method.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -group1
Reference to a list of genome IDs for the genomes in the first group.
=item -group2
Reference to a list of genome IDs for the genomes in the second group.
=back
=item RETURN
Returns a reference to a 2-tuple, consisting of (0) a hash mapping FIGfam IDs
to scores for FIGfams common in group 1 and (1) a hash mapping FIGfam IDs to
scores for FIGfams common in group 2.
$groupList = [{ $ff1a => $score1a, $ff1b => $score1b, ... },
{ $ff2a => $score2a, $ff2b => $score2b, ... }];
=back
=cut
sub discriminating_figfams {
    # Compute the FIGfams that discriminate between two groups of genomes:
    # families common in one group but uncommon in the other, scored by
    # statistical significance.
    my ($self, $args) = @_;
    # For each input group, extract its genome list and build the hash of
    # FIGfams represented in those genomes.
    my @famHashes;
    for my $groupKey (qw(-group1 -group2)) {
        my $genomeList = ServerThing::GetIdList($groupKey => $args);
        push @famHashes, $self->genome_figfams({ -ids => $genomeList });
    }
    # Load the statistical signature tool on demand.
    require Signatures;
    # Score the families. The first result hash scores FIGfams common in
    # group 1; the second scores FIGfams common in group 2.
    my @signatures = Signatures::ComputeSignatures(@famHashes);
    # Return the two score hashes as a 2-tuple.
    return [@signatures];
}
=head3 figfam_fids
my $fidList = $sapObject->figfam_fids({
-id => $figFam1,
-fasta => 1
});
Return a list of all the protein encoding genes in a FIGfam. The genes
can be returned as IDs or as FASTA strings.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -id
ID of the desired FIGfam.
=item -fasta
If TRUE, then the output will be in the form of FASTA strings; otherwise it will
be in the form of FIG IDs.
=back
=item RETURN
Returns a reference to a list of genes in the form of FIG feature IDs or protein
FASTA strings.
=over 8
=item Normal Output
$fidList = [$fid1, $fid2, ...];
=item Output When -fasta = 1
$fidList = [$fasta1, $fasta2, ...];
=back
=back
=cut
sub figfam_fids {
    # Return the protein-encoding genes belonging to one FIGfam, either as
    # FIG feature IDs or (when -fasta is TRUE) as protein FASTA strings.
    my ($self, $args) = @_;
    # Access the Sapling database.
    my $sap = $self->{db};
    # The FIGfam ID is required.
    my $famID = $args->{-id};
    Confess("No FIGfam ID specified.") if ! defined $famID;
    # Determine the output format: TRUE means FASTA strings.
    my $fastaMode = $args->{-fasta} || 0;
    # The base query walks the membership relationship and returns the
    # member feature ID.
    my $path = "HasMember";
    my @columns = ("HasMember(to-link)");
    # FASTA output also needs the family ID and the protein sequence, so
    # extend the path through to the protein.
    if ($fastaMode) {
        $path .= " Feature Produces ProteinSequence";
        push @columns, "HasMember(from-link)", "ProteinSequence(sequence)";
    }
    # Pull back one row per member gene.
    my @rows = $sap->GetAll($path, "HasMember(from-link) = ?", [$famID],
                            \@columns);
    # Format the rows according to the requested output mode.
    my $retVal = $fastaMode
        ? [ map { create_fasta_record(@$_) } @rows ]
        : [ map { $_->[0] } @rows ];
    # Return the finished list.
    return $retVal;
}
=head3 figfam_fids_batch
my $fidHash = $sapObject->figfam_fids_batch({
-ids => [$ff1, $ff2, ...],
-genomeFilter => $genome1
});
Return a list of all the protein encoding genes in one or more FIGfams. This
method is an alternative to L</figfam_fids> that is faster when you need the
feature IDs but not the protein sequences.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of the IDs of the desired FIGfams.
=item -genomeFilter (optional)
The ID of a genome. If specified, then only feature IDs from the specified
genome will be returned.
=back
=item RETURN
Returns a hash mapping each incoming FIGfam ID to a list of the IDs for the features
in that FIGfam.
$fidHash = { $ff1 => [$fid1a, $fid1b, ...],
$ff2 => [$fid2a, $fid2b, ...],
... };
=back
=cut
sub figfam_fids_batch {
    # Fast batch retrieval of the feature IDs belonging to several FIGfams,
    # optionally restricted to a single genome. Unlike figfam_fids, this
    # method never fetches protein sequences.
    my ($self, $args) = @_;
    # Access the Sapling database.
    my $sap = $self->{db};
    # Fetch the requested FIGfam IDs and seed the output hash so that every
    # incoming family maps to a (possibly empty) list.
    my $ids = ServerThing::GetIdList(-ids => $args);
    my %figFamHash = map { $_ => [] } @$ids;
    Trace("Initiating batch FIGfam retrieval for " . scalar(@$ids) . " FIGfams.") if T(3);
    # The base filter selects members of one family; an optional genome
    # restriction is appended as a LIKE pattern on the member feature ID.
    my @clauses = ("HasMember(from-link) = ?");
    my @extraParms;
    my $genomeFilter = $args->{-genomeFilter};
    if ($genomeFilter) {
        push @clauses, "HasMember(to-link) LIKE ?";
        push @extraParms, "fig|$genomeFilter.%";
    }
    my $filter = join(" AND ", @clauses);
    # Run one membership query per family, in a predictable order.
    for my $famID (sort keys %figFamHash) {
        $figFamHash{$famID} = [ $sap->GetFlat('HasMember', $filter,
                                              [$famID, @extraParms], 'to-link') ];
    }
    Trace("Batch FIGfam retrieval complete.") if T(3);
    # Hand back the completed hash.
    return \%figFamHash;
}
=head3 figfam_function
my $ffHash = $sapObject->figfam_function({
-ids => [$ff1, $ff2, ...]
});
For each incoming FIGfam ID, return its function, that is, the common
functional assignment of all its members.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of FIGfam IDs.
=back
=item RETURN
Returns a hash mapping each incoming FIGfam ID to its function string.
$ffHash => { $ff1 => $function1, $ff2 => $function2, ... };
=back
=cut
sub figfam_function {
    # Map each incoming FIGfam ID to the family's common functional assignment.
    my ($self, $args) = @_;
    # Access the Sapling database.
    my $sap = $self->{db};
    # Fetch the FIGfam IDs to look up.
    my $ffIDs = ServerThing::GetIdList(-ids => $args);
    # Build the result hash one family at a time.
    my %functionOf;
    for my $ffID (@$ffIDs) {
        # Each family record carries a single function string; an unknown
        # family yields undef.
        ($functionOf{$ffID}) = $sap->GetEntityValues(Family => $ffID, ['family-function']);
    }
    # Return the finished map.
    return \%functionOf;
}
=head3 genome_figfams
my $genomeHash = $sapObject->genome_figfams({
-ids => [$genome1, $genome2, ...]
});
Compute the list of FIGfams represented in each specific genome.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of genome identifiers.
=back
=item RETURN
Returns a reference to a hash mapping each incoming genome ID to a list of the
IDs of the FIGfams represented in that genome.
$genomeHash = { $genome1 => [$ff1a, $ff1b, ...],
$genome2 => [$ff2a, $ff2b, ...],
... };
=back
=cut
sub genome_figfams {
    # For each incoming genome ID, list the FIGfams represented in that genome.
    my ($self, $args) = @_;
    # Access the Sapling database.
    my $sap = $self->{db};
    # Get the genome IDs to process.
    my $genomeIDs = ServerThing::GetIdList(-ids => $args);
    # Build the result hash one genome at a time.
    my %famsFor;
    for my $genomeID (@$genomeIDs) {
        # The HasRepresentativeOf relationship connects a genome to every
        # FIGfam with a member in it.
        $famsFor{$genomeID} = [ $sap->GetFlat("HasRepresentativeOf",
                                              'HasRepresentativeOf(from-link) = ?',
                                              [$genomeID], 'to-link') ];
    }
    # Return the finished map.
    return \%famsFor;
}
=head3 ids_to_figfams
my $featureHash = $sapObject->ids_to_figfams({
-ids => [$id1, $id2, ...],
-functions => 1,
-source => 'RefSeq'
});
This method returns a hash mapping each incoming feature to its FIGfam.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of feature identifiers.
=item -functions (optional)
If TRUE, the family function will be returned in addition to the list of
FIGfam IDs. In this case, instead of a list of FIGfam IDs, each feature ID will
point to a list of 2-tuples, each consisting of (0) a FIGfam ID followed by (1)
a function string. The default is FALSE.
=item -source (optional)
Database source of the IDs specified-- C<SEED> for FIG IDs, C<GENE> for standard
gene identifiers, or C<LocusTag> for locus tags. In addition, you may specify
C<RefSeq>, C<CMR>, C<NCBI>, C<Trembl>, or C<UniProt> for IDs from those
databases. Use C<mixed> to allow mixed ID types (though this may cause problems
when the same ID has different meanings in different databases). Use C<prefixed>
to allow IDs with prefixing indicating the ID type (e.g. C<uni|P00934> for a
UniProt ID, C<gi|135813> for an NCBI identifier, and so forth). The default is
C<SEED>.
=item -genome (optional)
ID of a specific genome. If specified, results will only be returned for genes in the
specified genome. The default is to return results for genes in all genomes.
=back
=item RETURN
Returns a reference to a hash mapping each incoming feature ID to a list of the
IDs of the FIGfams that contain it. (In general the list will be a singleton
unless the feature ID corresponds to multiple actual features.) Features not in
FIGfams will be omitted from the hash.
$featureHash = { $id1 => [$ff1a, $ff1b, ...],
$id2 => [$ff2a, $ff2b, ...],
... };
=back
=cut
sub ids_to_figfams {
    # Map each incoming feature ID to the FIGfams containing it, optionally
    # pairing each FIGfam ID with its function.
    # Get the parameters.
    my ($self, $args) = @_;
    # Declare the return variable.
    my $retVal = {};
    # Get the sapling database.
    my $sap = $self->{db};
    # Get the feature ID list.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Find out if we need to see functions in the output.
    my $functionFlag = $args->{-functions} || 0;
    # Compute the ID conversion query: an object path, a filter clause, and
    # any leading bind parameters, based on the caller's -source and -genome.
    my ($idObjects, $idFilter, @idParms) = $sap->ComputeFeatureFilter($args->{-source},
                                                                      $args->{-genome});
    # Loop through the incoming feature IDs, retrieving FIGfams.
    for my $id (@$ids) {
        Trace("Reading families for $id.") if T(3);
        # Get this feature's FIG families and functions. The bind values must
        # keep the order (@idParms, $id, 'FIG%') to line up with the
        # placeholders in $idFilter plus the Family(id) LIKE clause.
        my @fidPairs = $sap->GetAll("$idObjects IsMemberOf Family",
                                    "$idFilter AND Family(id) LIKE ?",
                                    [@idParms, $id, 'FIG%'],
                                    ['Family(id)', 'Family(family-function)']);
        # Only proceed if we found something; features in no FIGfam are
        # omitted from the output hash entirely.
        if (@fidPairs) {
            # Do we want functions or just FIGfams?
            if ($functionFlag) {
                # We want both: list of [familyID, function] 2-tuples.
                $retVal->{$id} = \@fidPairs;
            } else {
                # We want only the FIGfam IDs.
                $retVal->{$id} = [ map { $_->[0] } @fidPairs ];
            }
        }
    }
    # Return the result.
    return $retVal;
}
=head3 related_figfams
my $ffHash = $sapObject->related_figfams({
-ids => [$ff1, $ff2, ...],
-expscore => 1,
-all => 1
});
This method takes a list of FIGfam IDs. For each FIGfam, it returns a
list of FIGfams related to it by functional coupling.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of FIGfam IDs.
=item -expscore (optional)
If TRUE, then the score returned will be the co-expression score. If
FALSE, the score returned will be the co-occurrence score. This option
is ignored if C<-all> is specified. The default is FALSE.
=item -all (optional)
If TRUE, then both scores will be returned. The default is FALSE, meaning
only one score is returned.
=back
=item RETURN
=over 8
=item normal
Returns a reference to a hash mapping each incoming FIGfam ID
to a list of 2-tuples for other FIGfams. The 2-tuples
each consist of (0) a related FIGfam's ID followed by (1) a 2-tuple
containing a coupling score and the related FIGfam's function.
$ffHash = { $ff1 => [[$ff1a, [$score1a, $function1a]],
[$ff1b, [$score1b, $function1b]], ...],
$ff2 => [[$ff2a, [$score2a, $function2a]],
[$ff2b, [$score2b, $function2b]], ...],
... };
=item -exp = all
Returns a reference to a hash mapping each incoming FIGfam ID
to a list of 2-tuples for other FIGfams. The 2-tuples
each consist of (0) a related FIGfam's ID followed by (1) a 3-tuple
containing the co-occurrence coupling score, the co-expression coupling
score, and the related FIGfam's function.
$ffHash = { $ff1 => [[$ff1a, [$score1ax, $score1ay, $function1a]],
[$ff1b, [$score1bx, $score1by, $function1b]], ...],
$ff2 => [[$ff2a, [$score2ax, $score2ay, $function2a]],
[$ff2b, [$score2bx, $score2by, $function2b]], ...],
... };
=back
=back
=cut
sub related_figfams {
    # For each incoming FIGfam, return the FIGfams functionally coupled to it,
    # with coupling score(s) and the related family's function.
    #
    # $args->{-ids}      reference to a list of FIGfam IDs (required)
    # $args->{-expscore} TRUE => return the co-expression score instead of
    #                    the co-occurrence score (ignored when -all is set)
    # $args->{-all}      TRUE => return both scores (co-occurrence first)
    #
    # Returns a hash mapping each FIGfam ID to a list of 2-tuples:
    # [relatedFamilyID, [score(s)..., relatedFamilyFunction]].
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sap = $self->{db};
    # Declare the return variable.
    my $retVal = {};
    # Get the list of FIGfam IDs.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Determine which coupling score field(s) to retrieve.
    my @scoreTypes;
    if ($args->{-all}) {
        @scoreTypes = qw(co-occurrence-evidence co-expression-evidence);
    } elsif ($args->{-expscore}) {
        @scoreTypes = qw(co-expression-evidence);
    } else {
        @scoreTypes = qw(co-occurrence-evidence);
    }
    # Loop through the FIGfams.
    for my $id (@$ids) {
        # The couplings for this family will be put in here.
        my @couples;
        # The coupling relationship is stored in both directions, so we must
        # query each one.
        for my $rel (qw(IsCoupledWith IsCoupledTo)) {
            # Get a query to find the families coupled in this direction.
            my $qh = $sap->Get("$rel Family", "$rel(from-link) = ?", [$id]);
            while (my $resultRow = $qh->Fetch()) {
                # Get this coupled family's ID and function.
                my $familyId = $resultRow->PrimaryValue('Family(id)');
                my $familyFunction = $resultRow->PrimaryValue('Family(family-function)');
                # Pull the requested score(s) off the relationship record.
                my @scores;
                for my $scoreType (@scoreTypes) {
                    push @scores, $resultRow->PrimaryValue("$rel($scoreType)");
                }
                # Store this coupling in the current output list.
                push @couples, [$familyId, [@scores, $familyFunction]];
            }
        }
        # Store the list of results. (A statement terminator was missing here
        # in the original; it compiled only because this was the final
        # statement of the block.)
        $retVal->{$id} = \@couples;
    }
    # Return the result.
    return $retVal;
}
=head3 roles_to_figfams
my $roleHash = $sapObject->roles_to_figfams({
-roles => [$role1, $role2, ...]
});
For each incoming role, return a list of the FIGfams that implement
the role, that is, whose functional assignments include the role.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -roles
Reference to a list of role names.
=back
=item RETURN
Returns a reference to a hash mapping each incoming role to a list of
FIGfam IDs for the FIGfams that implement the role.
$roleHash = { $role1 => [$ff1a, $ff1b, ...],
$role2 => [$ff2a, $ff2b, ...],
... };
=back
=cut
sub roles_to_figfams {
    # Map each incoming role name to the FIGfams whose functional assignment
    # includes that role.
    my ($self, $args) = @_;
    # Access the Sapling database.
    my $sap = $self->{db};
    # Get the incoming role names.
    my $roleList = ServerThing::GetIdList(-roles => $args);
    # Build the result hash one role at a time.
    my %famsOf;
    for my $roleName (@$roleList) {
        # The DeterminesFunctionOf relationship links a role to every family
        # that implements it.
        $famsOf{$roleName} = [ $sap->GetFlat("DeterminesFunctionOf",
                                             'DeterminesFunctionOf(from-link) = ?',
                                             [$roleName], 'to-link') ];
    }
    # Return the finished map.
    return \%famsOf;
}
=head2 Functional Coupling Data Methods
=head3 clusters_containing
my $featureHash = $sapObject->clusters_containing({
-ids => [$fid1, $fid2, ...]
});
This method takes as input a list of FIG feature IDs. For each feature, it
returns the IDs and functions of other features in the same cluster of
functionally-coupled features.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of FIG feature IDs.
=back
For backward compatibility, this method can also take as input a reference to
a list of FIG feature IDs.
=item RETURN
Returns a reference to a hash. The hash maps each incoming feature ID to a
2-tuple containing (0) the feature's functional assignment and (1) a
reference to a hash that maps each clustered feature to its functional assignment.
$featureHash = { $fid1 => [$function1, { $fid1a => $function1a,
$fid1b => $function1b,
...}],
$fid2 => [$function2, { $fid2a => $function2a,
$fid2b => $function2b,
...}],
... };
In backward-compatibility mode, this method returns a reference to a list. For
each incoming feature, there is a list entry containing the feature ID, the
feature's functional assignment, and a sub-list of 2-tuples. Each 2-tuple
contains the ID of another feature in the same cluster and its functional
assignment.
=back
=cut
sub clusters_containing {
    # For each incoming feature, return its functional assignment plus the
    # other features in its co-occurrence cluster (with their assignments).
    # Supports a legacy list-based calling convention.
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sapling = $self->{db};
    # Declare the return variable.
    my $retVal = {};
    # Get access to functional coupling services (loaded on demand).
    require FC;
    # Check for backward-compatibility mode: a bare list of IDs instead of
    # the usual parameter hash.
    my $backwardMode = 0;
    if (ref $args ne 'HASH') {
        $args = { -ids => $args };
        $backwardMode = 1;
    }
    # Get the list of feature IDs.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Loop through the features.
    for my $id (@$ids) {
        # Get this feature's cluster data (a list of clustered feature IDs,
        # or a false value if the feature is not in a cluster).
        my $cluster = &FC::in_co_occurrence_cluster($sapling, $id);
        # Did we find something?
        if ($cluster) {
            # Get this feature's assignment. "scalar" forces the assignment
            # string rather than a list.
            my $func = scalar $sapling->Assignment($id);
            # Create a hash mapping each clustered ID to its assignment.
            my %members = map { $_ => $sapling->Assignment($_) } @$cluster;
            # Store the result: [assignment, { memberID => assignment, ... }].
            $retVal->{$id} = [$func, \%members];
        }
    }
    # In backward-compatibility mode, convert the result to a list.
    if ($backwardMode) {
        # We'll create our result list in here.
        my @outList;
        # Loop through the IDs in the caller's original order.
        for my $id (@$ids) {
            # Do we have something for this feature?
            my $featureData = $retVal->{$id};
            if (defined $featureData) {
                # Get the pieces.
                my ($func, $memberHash) = @$featureData;
                # Convert the member hash to a sorted list of 2-tuples.
                my @memberList = map { [$_, $memberHash->{$_} ] } sort keys %$memberHash;
                # Assemble the result: [featureID, assignment, memberPairs].
                push @outList, [$id, $func, \@memberList];
            }
        }
        # Store the reformatted list.
        $retVal = \@outList;
    }
    # Return the result.
    return $retVal;
}
=head3 co_occurrence_evidence
my $pairHash = $sapObject->co_occurrence_evidence({
-pairs => ["$fid1:$fid2", "$fid3:$fid4", ...]
});
For each specified pair of genes, this method returns the evidence that
the genes are functionally coupled (if any); that is, it returns a list
of the physically close homologs for the pair.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -pairs
Reference to a list of functionally-coupled pairs. Each pair is represented by two
FIG gene IDs, either in the form of a 2-tuple or as a string with the two gene IDs
separated by a colon.
=back
=item RETURN
Returns a hash mapping each incoming gene pair to a list of 2-tuples. Each 2-tuple
contains a pair of physically close genes, the first of which is similar to the first
gene in the input pair, and the second of which is similar to the second gene in the
input pair. The hash keys will consist of the two gene IDs separated by a colon (e.g.
C<fig|273035.4.peg.1016:fig|273035.4.peg.1018>).
$pairHash = { "$fid1:$fid2" => [[$fid1a, $fid2a], [$fid1b, $fid2b], ...],
"$fid3:$fid4" => [[$fid3a, $fid4a], [$fid3b, $fid4b], ...],
... };
=back
=cut
sub co_occurrence_evidence {
    # For each specified gene pair, return the physically close homolog pairs
    # that constitute the evidence that the genes are functionally coupled.
    my ($self, $args) = @_;
    # Access the Sapling database.
    my $sap = $self->{db};
    # The functional-coupling services are loaded on demand.
    require FC;
    # Output hash, keyed by the canonical "fid1:fid2" form.
    my %evidenceFor;
    # Get the incoming pairs.
    my $pairs = ServerThing::GetIdList(-pairs => $args);
    # Process each pair.
    for my $pair (@$pairs) {
        # A pair may arrive either as a 2-tuple or as a colon-delimited string.
        my ($fidA, $fidB) = ref $pair ? @$pair : split(/:/, $pair);
        # Look up the evidence and store it under the canonical key.
        $evidenceFor{"$fidA:$fidB"} = FC::co_occurrence_evidence($sap, $fidA, $fidB);
    }
    # Return the finished map.
    return \%evidenceFor;
}
=head3 conserved_in_neighborhood
my $featureHash = $sapObject->conserved_in_neighborhood({
-ids => [$fid1, $fid2, ...]
});
This method takes a list of feature IDs. For each feature ID, it will
return the set of other features to which it is functionally coupled,
along with the appropriate score.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of FIG feature IDs.
=back
For backward compatibility, this method can also take as input a reference to
a list of FIG feature IDs.
=item RETURN
Returns a reference to a hash mapping each incoming feature ID to a list of
4-tuples, one 4-tuple for each feature coupled to the incoming feature. Each
4-tuple contains (0) the coupling score, (1) the FIG ID of the coupled feature,
(2) the coupled feature's current functional assignment, and (3) the ID of the
pair set to which the coupling belongs.
$featureHash = { $fid1 => [[$score1A, $fid1A, $function1A, $psID1A],
[$score1B, $fid1B, $function1B, $psID1B], ...],
$fid2 => [[$score2A, $fid2A, $function2A, $psID2A],
[$score2B, $fid2B, $function2B, $psID2B], ...],
... };
In backward compatibility mode, returns a list of sub-lists, each sub-list
corresponding to the value that would be found in the hash for the feature in the
specified position of the input list.
=back
=cut
sub conserved_in_neighborhood {
    # For each incoming feature, return the features functionally coupled to
    # it, each as a 4-tuple of [score, coupledFid, assignment, pairSetID].
    # Supports a legacy list-based calling convention.
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sapling = $self->{db};
    # Get the functional coupling methods (loaded on demand).
    require FC;
    # Declare the return variable.
    my $retVal = {};
    # Check for backward compatibility mode: a bare ID list instead of the
    # usual parameter hash.
    my $backwardMode = 0;
    # Convert a list to a hash.
    if (ref $args ne 'HASH') {
        $args = { -ids => $args };
        $backwardMode = 1;
    }
    # Get the list of feature IDs.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Loop through the features.
    for my $id (@$ids) {
        # Create a sub-list for this feature.
        my $group = [];
        # Ask for the functional coupling information: a list of
        # [score, coupledFid, pairSetID] tuples.
        my @co_occurs = &FC::co_occurs($sapling, $id);
        # Loop through the coupling data found.
        for my $tuple (@co_occurs) {
            # Get the coupled feature's data.
            my($sc, $fid, $pairset) = @$tuple;
            # Add it to the group of tuples for this feature's couplings,
            # inserting the coupled feature's current assignment.
            push(@$group, [$sc, $fid, $sapling->Assignment($fid), $pairset]);
        }
        # Add this feature's couplings to the return value. Note that a
        # feature with no couplings still appears, with an empty list.
        $retVal->{$id} = $group;
    }
    # If we're in backward-compatibility mode, convert the output to a list
    # parallel to the caller's input order.
    if ($backwardMode) {
        my @outList = map { $retVal->{$_} } @$ids;
        $retVal = \@outList;
    }
    # Return the result.
    return $retVal;
}
=head3 pairsets
my $psHash = $sapObject->pairsets({
-ids => [$psID1, $psID2, ...]
});
This method takes as input a list of functional-coupling pair set IDs
(such as those returned in the output of L</conserved_in_neighborhood>). For
each pair set, it returns the set's score (number of significant couplings) and
a list of the coupled pairs in the set.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of functional-coupling pair set IDs.
=back
For backward compatibility, you may also specify a reference to a list of pair
set IDs.
=item RETURN
Returns a reference to a hash that maps each incoming pair-set ID to a 2-tuple
that consists of (0) the set's score and (1) a reference to a list of 2-tuples
containing the pairs in the set.
$psHash = { $psID1 => [$score1, [[$fid1A, $fid1B],
[$fid1C, $fid1D], ...]],
$psID2 => [$score2, [[$fid2A, $fid2B],
[$fid2C, $fid2D], ...]],
... };
In backward-compatibility mode, returns a reference to a list of 2-tuples, each
consisting of (0) an incoming pair-set ID, and (1) the 2-tuple that would be its
hash value in the normal output.
=back
=cut
sub pairsets {
    # Retrieve the invocation arguments.
    my ($self, $args) = @_;
    # Extract the sapling database object.
    my $sapling = $self->{db};
    # Load the functional-coupling support module.
    require FC;
    # Detect backward-compatibility mode: a bare list reference is wrapped
    # into the standard parameter hash.
    my $backwardMode = 0;
    if (ref $args ne 'HASH') {
        $args = { -ids => $args };
        $backwardMode = 1;
    }
    # The output hash maps each pair-set ID to [score, pair-list].
    my $retVal = {};
    # Extract the pair-set ID list.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Fetch the score and member pairs for each requested pair set.
    for my $psID (@$ids) {
        $retVal->{$psID} = [ FC::co_occurrence_set($sapling, $psID) ];
    }
    # In backward-compatibility mode the caller expects [id, value]
    # 2-tuples instead of a hash.
    if ($backwardMode) {
        $retVal = [ map { [$_, $retVal->{$_}] } @$ids ];
    }
    # Return the result.
    return $retVal;
}
=head3 related_clusters
my $featureHash = $sapObject->related_clusters({
-ids => [$fid1, $fid2, ...]
});
This method returns the functional-coupling clusters related to the specified
input features. Each cluster contains features on a single genome that are
related by functional coupling.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of FIG feature IDs.
=back
=item RETURN
Returns a reference to a hash that maps each incoming feature ID to a list
of clusters. Each cluster in the list is a 3-tuple consisting of (0) the ID of a
feature similar to the incoming feature, (1) the similarity P-score, and (2) a
reference to a list of 2-tuples containing clustered features and their functional
assignments.
$featureHash = { $fid1 => [[$fid1A, $score1A, [[$fid1Ax, $function1Ax],
[$fid1Ay, $function1Ay],
...]],
[$fid1B, $score1B, [[$fid1Bx, $function1Bx],
[$fid1By, $function1By],
...]],
...],
$fid2 => [[$fid2A, $score2A, [[$fid2Ax, $function2Ax],
[$fid2Ay, $function2Ay],
...]],
[$fid2B, $score2B, [[$fid2Bx, $function2Bx],
[$fid2By, $function2By],
...]],
...],
... };
=back
=cut
sub related_clusters {
    # Retrieve the invocation arguments.
    my ($self, $args) = @_;
    # Extract the sapling database object.
    my $sapling = $self->{db};
    # Load the functional-coupling support module.
    require FC;
    # The output hash maps each feature ID to its list of clusters.
    my $retVal = {};
    # Extract the feature ID list.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Process each requested feature.
    for my $fid (@$ids) {
        # This feature's clusters accumulate in here.
        my @clusters;
        # Each raw cluster is [similar-feature, p-score, member-list].
        for my $clusterData (FC::largest_co_occurrence_clusters($sapling, $fid)) {
            my ($simFid, $pScore, $memberFids) = @$clusterData;
            # Pair every member feature with its functional assignment.
            my @members = map { [$_, $sapling->Assignment($_)] } @$memberFids;
            push @clusters, [$simFid, $pScore, \@members];
        }
        # Store this feature's cluster list in the result hash.
        $retVal->{$fid} = \@clusters;
    }
    # Return the result.
    return $retVal;
}
=head2 Genome Data Methods
=head3 all_features
my $genomeHash = $sapObject->all_features({
-ids => [$genome1, $genome2, ...],
-type => [$type1, $type2, ...],
});
Return a list of the IDs for all features of a specified type in a specified
genome.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of genome IDs.
=item -type (optional)
Type of feature desired (e.g. C<peg>, C<rna>), or a reference to a list of
desired feature types. If omitted, all features regardless of type are returned.
=back
=item RETURN
Returns a reference to a hash that maps each incoming genome ID to a list
of the desired feature IDs for that genome. If a genome does not exist or has
no features of the desired type, its ID will map to an empty list.
$genomeHash = { $genome1 => [$fid1a, $fid1b, ...],
$genome2 => [$fid2a, $fid2b, ...],
... };
=back
=cut
sub all_features {
    # Retrieve the invocation arguments.
    my ($self, $args) = @_;
    # Every query is filtered by the owning genome; a feature-type
    # restriction may be appended below.
    my $filter = 'IsOwnerOf(from-link) = ?';
    # Extra query parameters (the type values) accumulate in here.
    my @parms;
    # Check for a feature-type restriction.
    my $type = $args->{-type};
    if (defined $type) {
        # Normalize a single type into a one-element list.
        $type = [$type] if ref $type ne 'ARRAY';
        # Append an IN-clause with one parameter mark per type.
        my $marks = join(", ", ("?") x scalar @$type);
        $filter .= " AND Feature(feature-type) IN ($marks)";
        push @parms, @$type;
    }
    # The output hash maps each genome ID to its feature ID list.
    my %retVal;
    # Extract the genome ID list.
    my $genomeIDs = ServerThing::GetIdList(-ids => $args);
    # Run the query once per genome.
    for my $genomeID (@$genomeIDs) {
        Trace("Retrieving features for $genomeID.") if T(3);
        $retVal{$genomeID} = [ $self->{db}->GetFlat("IsOwnerOf Feature", $filter,
                                                    [$genomeID, @parms],
                                                    'Feature(id)') ];
    }
    # Return the hash of results.
    return \%retVal;
}
=head3 all_genomes
my $genomeHash = $sapObject->all_genomes({
-complete => 1,
-prokaryotic => 1
});
Return a list of the IDs for all the genomes in the system.
=over 4
=item parameter
Reference to a hash containing the following keys.
=over 8
=item -complete (optional)
If TRUE, only complete genomes will be returned. The default is FALSE (return
all genomes).
=item -prokaryotic (optional)
If TRUE, only prokaryotic genomes will be returned. The default is FALSE (return
all genomes).
=back
=item RETURN
Returns a reference to a hash mapping genome IDs to genome names.
$genomeHash = { $genome1 => $name1, $genome2 => $name2, ... };
=back
=cut
sub all_genomes {
    # Retrieve the invocation arguments.
    my ($self, $args) = @_;
    # Build the filter clauses and parameter values implied by the options.
    my (@filters, @parms);
    if ($args->{-complete}) {
        push @filters, "Genome(complete) = ?";
        push @parms, 1;
    }
    if ($args->{-prokaryotic}) {
        push @filters, "Genome(prokaryotic) = ?";
        push @parms, 1;
    }
    # Query the matching genomes and map each ID to its scientific name.
    my %retVal = map { $_->[0] => $_->[1] }
        $self->{db}->GetAll("Genome", join(" AND ", @filters), \@parms,
                            "Genome(id) Genome(scientific-name)");
    # Return the result.
    return \%retVal;
}
=head3 all_proteins
my $fidHash = $sapObject->all_proteins({
-id => $genome1
});
Return the protein sequences for all protein-encoding genes in the specified
genome.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -id
A single genome ID. All of the protein sequences for genes in the specified
genome will be extracted.
=back
=item RETURN
Returns a reference to a hash that maps the FIG ID of each protein-encoding
gene in the specified genome to its protein sequence.
$fidHash = { $fid1 => $protein1, $fid2 => $protein2, ... };
=back
=cut
sub all_proteins {
    # Retrieve the invocation arguments.
    my ($self, $args) = @_;
    # Extract the sapling database object.
    my $sap = $self->{db};
    # A single genome ID is required.
    my $genome = $args->{-id};
    Confess("No genome ID specified for all_proteins.") if ! defined $genome;
    # Pull every peg-to-protein pairing for the genome and fold the result
    # rows directly into the output hash.
    my %retVal = map { $_->[0] => $_->[1] }
        $sap->GetAll("Produces ProteinSequence",
                     'Produces(from-link) LIKE ?', ["fig|$genome.peg.%"],
                     [qw(Produces(from-link) ProteinSequence(sequence))]);
    # Return the result.
    return \%retVal;
}
=head3 close_genomes
my $genomeHash = $sapObject->close_genomes({
-ids => [$genome1, $genome2, ...],
-count => 10,
});
Find the genomes functionally close to the input genomes.
Functional closeness is determined by the number of FIGfams in common. As a result,
this method will not produce good results for genomes that do not have good FIGfam
coverage.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of genome IDs for the genomes whose close neighbors are
desired.
=item -count (optional)
Maximum number of close genomes to return for each input genome. The default is
C<10>.
=back
=item RETURN
Returns a reference to a hash mapping each incoming genome ID to a list of
2-tuples. Each 2-tuple consists of (0) the ID of a close genome and (1) the
score (from 0 to 1) for the match. The list will be sorted from closest to
furthest.
=back
=cut
sub close_genomes {
    # Retrieve the invocation arguments.
    my ($self, $args) = @_;
    # Extract the sapling database object.
    my $sap = $self->{db};
    # The output hash maps each genome ID to its neighbor list.
    my $retVal = {};
    # Extract the genome ID list.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Determine how many close genomes to return per input genome.
    my $count = $args->{-count} || 10;
    # Process each genome separately.
    for my $id (@$ids) {
        # Counts of shared features, keyed by related genome.
        my %genomes;
        # Set of FIGfams found in the source genome.
        my %figFams;
        # Walk the genome's FIGfam memberships and every feature belonging
        # to those same FIGfams.
        my $qh = $sap->Get("IsOwnerOf IsMemberOf HasMember",
                           'IsOwnerOf(from-link) = ?', [$id]);
        while (my $row = $qh->Fetch()) {
            my $figFam = $row->PrimaryValue('HasMember(from-link)');
            my $fid = $row->PrimaryValue('HasMember(to-link)');
            # Tally the genome that owns this shared-FIGfam feature.
            $genomes{genome_of($fid)}++;
            # Remember that this FIGfam occurs in the source genome.
            $figFams{$figFam} = 1;
        }
        # The distinct FIGfam count scales the scores into the 0..1 range.
        my $figFamCount = scalar keys %figFams;
        # Emit the best matches, strongest first, skipping the source
        # genome itself and stopping at the requested count.
        my @results;
        for my $genome (sort { $genomes{$b} <=> $genomes{$a} } keys %genomes) {
            last if @results >= $count;
            next if $genome eq $id;
            push @results, [$genome, $genomes{$genome} / $figFamCount];
        }
        $retVal->{$id} = \@results;
    }
    # Return the result.
    return $retVal;
}
=head3 contig_sequences
my $contigHash = $sapObject->contig_sequences({
-ids => [$contig1, $contig2, ...]
});
Return the DNA sequences for the specified contigs.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of contig IDs. Note that the contig ID contains the
genome ID as a prefix (e.g. C<100226.1:NC_003888>).
=back
=item RETURN
Returns a reference to a hash that maps each contig ID to its DNA sequence.
$contigHash = { $contig1 => $dna1, $contig2 => $dna2, ... };
=back
=cut
sub contig_sequences {
    # Retrieve the invocation arguments.
    my ($self, $args) = @_;
    # Extract the sapling database object.
    my $sap = $self->{db};
    # The output hash maps each contig ID to its DNA string.
    my $retVal = {};
    # Extract the contig ID list.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # For each contig, fetch its DNA sections in sequence order and
    # concatenate them into a single string.
    for my $contigID (@$ids) {
        my @sections = $sap->GetFlat("Contig HasSection DNASequence",
                                     'Contig(id) = ? ORDER BY DNASequence(id)',
                                     [$contigID], 'DNASequence(sequence)');
        $retVal->{$contigID} = join("", @sections);
    }
    # Return the result.
    return $retVal;
}
=head3 contig_lengths
my $contigHash = $sapObject->contig_lengths({
-ids => [$contig1, $contig2, ...]
});
Return the lengths for the specified contigs.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of contig IDs. Note that the contig ID contains the
genome ID as a prefix (e.g. C<100226.1:NC_003888>).
=back
=item RETURN
Returns a reference to a hash that maps each contig ID to its length in base
pairs.
$contigHash = { $contig1 => $len1, $contig2 => $len2, ... };
=back
=cut
sub contig_lengths {
    # Retrieve the invocation arguments.
    my ($self, $args) = @_;
    # Extract the sapling database object.
    my $sap = $self->{db};
    # The output hash maps each contig ID to its length in base pairs.
    my $retVal = {};
    # Extract the contig ID list.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Look up each contig's length field.
    for my $contigID (@$ids) {
        my ($length) = $sap->GetFlat("Contig", 'Contig(id) = ?', [$contigID],
                                     'length');
        $retVal->{$contigID} = $length;
    }
    # Return the result.
    return $retVal;
}
=head3 gene_correspondence_map
my $geneHash = $sapObject->gene_correspondence_map({
-genome1 => $genome1,
-genome2 => $genome2,
-fullOutput => 1,
-passive => 0
});
Return a map of genes in the specified second genome that correspond to genes in
the specified first genome.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -genome1
ID of the first genome of interest.
=item -genome2
ID of the second genome of interest.
=item -fullOutput (optional)
If C<1>, then instead of a simple hash map, a list of lists will be returned.
If C<2>, then the list will contain unidirectional correspondences from the target
back to the source as well as bidirectional correspondences and unidirectional
correspondences from the source to the target. The default is C<0>, which returns
the hash map.
=item -passive (optional)
If TRUE, then an undefined value will be returned if no correspondence file
exists. If FALSE, a correspondence file will be created and cached on the server
if one does not already exist. This is an expensive operation, so set the flag
to TRUE if you are worried about performance. The default is FALSE.
=back
=item RETURN
This method will return an undefined value if either of the genome IDs is missing,
not found, or incomplete.
=over 8
=item Normal Output
Returns a hash that maps each gene in the first genome to a corresponding gene in
the second genome. The correspondence is determined by examining factors such as
functional role, conserved neighborhood, and similarity.
$geneHash = { $g1gene1 => $g2gene1, $g1gene2 => $g2gene2,
$g1gene3 => $g2gene3, ... };
=item Output with -fullOutput >= 1
Returns a reference to list of sub-lists. Each sub-list contains 18 data items, as
detailed in L<ServerThing/Gene Correspondence List>.
=back
=back
=cut
sub gene_correspondence_map {
    # Get the parameters.
    my ($self, $args) = @_;
    # The result (hash map or correspondence list) goes in here; it stays
    # undefined if either genome is missing, not found, or incomplete.
    my $retVal;
    # Get the two genome IDs.
    my $genome1 = $args->{-genome1};
    my $genome2 = $args->{-genome2};
    if (! defined $genome1) {
        Trace("-genome1 missing in gene_correspondence_map call.") if T(Corr => 1);
    } elsif (! defined $genome2) {
        Trace("-genome2 missing in gene_correspondence_map call.") if T(Corr => 1);
    } else {
        # We have genome IDs. Get the sapling database.
        my $sap = $self->{db};
        # Validate the genome IDs: both must exist and be complete.
        my %completeMap =
            map { $_->[0] => $_->[1] } $sap->GetAll("Genome", "Genome(id) IN (?,?)",
                                                    [$genome1, $genome2], "id complete");
        if (! $completeMap{$genome1}) {
            Trace("Genome $genome1 not found or incomplete.") if T(Corr => 1);
        } elsif (! $completeMap{$genome2}) {
            Trace("Genome $genome2 not found or incomplete.") if T(Corr => 1);
        } else {
            # The genomes are both complete. Determine the output mode:
            # 0 = hash map, 1 = full list, 2 = full list including reverse
            # unidirectional correspondences.
            my $fullOutput = $args->{-fullOutput} || 0;
            # Determine whether or not we're passive (no file generation).
            my $passive = $args->{-passive};
            # Retrieve (and, in active mode, possibly generate) the
            # correspondence data.
            my $corrList = ServerThing::GetCorrespondenceData($genome1, $genome2, $passive,
                                                              $fullOutput == 2);
            # Do we have a result?
            if (defined $corrList) {
                # Check the output mode.
                if ($fullOutput) {
                    # Full output is the correspondence list itself.
                    $retVal = $corrList;
                } else {
                    # Normal output maps each gene in the first genome to
                    # its corresponding gene in the second genome.
                    my %corrHash = map { $_->[0] => $_->[1] } @$corrList;
                    $retVal = \%corrHash;
                }
            } elsif (! $passive) {
                # No file could be found or generated even though the user
                # is NOT in passive mode. That is an error condition.
                # (Spelling of the message fixed: was "corresopndences".)
                Confess("Could not generate correspondences from $genome1 to $genome2.");
            }
        }
    }
    # Return the result.
    return $retVal;
}
=head3 genome_contig_md5s
my $genomeHash = $sapObject->genome_contig_md5s({
-ids => [$genome1, $genome2, ...]
});
For each incoming genome, return a hash mapping its contigs to their MD5 identifiers.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of the genome IDs.
=back
=item RETURN
Returns a hash that maps each incoming genome ID to a sub-hash that maps its contig IDs
to their MD5 identifiers. The MD5 identifiers are computed directly from the contig
DNA sequences.
$genomeHash = { $genome1 => {$contig1a => $md5id1a, $contig1b => $md5id1b, ... },
$genome2 => {$contig2a => $md5id2a, $contig2b => $md5id2b, ... },
... };
=back
=cut
sub genome_contig_md5s {
# Get the parameters.
my ($self, $args) = @_;
# Get the sapling database.
my $sap = $self->{db};
# Declare the return variable.
my $retVal = {};
# Get the list of genome IDs.
my $ids = ServerThing::GetIdList(-ids => $args);
# Loop through the IDs, asking for contigs.
for my $id (@$ids) {
my @contigs = $sap->GetAll("IsMadeUpOf Contig", "IsMadeUpOf(from-link) = ?", [$id],
['to-link', 'Contig(md5-identifier)']);
# If we found contigs for this genome, store them in the return hash. We must
# convert the list of 2-tuples to a hash.
if (@contigs) {
$retVal->{$id} = { map { $_->[0] => $_->[1] } @contigs };
}
}
# Return the result.
return $retVal;
}
=head3 genome_contigs
my $genomeHash = $sapObject->genome_contigs({
-ids => [$genome1, $genome2, ...]
});
For each incoming genome, return a list of its contigs.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of the genome IDs.
=back
=item RETURN
Returns a hash that maps each incoming genome ID to a list of its contig IDs.
$genomeHash = { $genome1 => [$contig1a, $contig1b, ...],
$genome2 => [$contig2a, $contig2b, ...],
... };
=back
=cut
sub genome_contigs {
# Get the parameters.
my ($self, $args) = @_;
# Get the sapling database.
my $sap = $self->{db};
# Declare the return variable.
my $retVal = {};
# Get the list of genome IDs.
my $ids = ServerThing::GetIdList(-ids => $args);
# Loop through the IDs, asking for contigs.
for my $id (@$ids) {
my @contigs = $sap->GetFlat("IsMadeUpOf", "IsMadeUpOf(from-link) = ?", [$id],
'to-link');
# If we found contigs for this genome, store them in the return hash.
if (@contigs) {
$retVal->{$id} = \@contigs;
}
}
# Return the result.
return $retVal;
}
=head3 genome_data
my $genomeHash = $sapObject->genome_data({
-ids => [$genome1, $genome2, ...],
-data => [$fieldA, $fieldB, ...]
});
Return the specified data items for the specified genomes.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of genome IDs.
=item -data
Reference to a list of data field names. The possible data field names are
given below.
=over 12
=item complete
C<1> if the genome is more or less complete, else C<0>.
=item contigs
The number of contigs for the genome
=item dna-size
The number of base pairs in the genome
=item domain
The domain of the genome (Archaea, Bacteria, ...).
=item gc-content
The amount of GC base pairs in the genome, expressed as a percentage of the
genome's DNA.
=item genetic-code
The genetic code used by this genome.
=item pegs
The number of protein encoding genes in the genome.
=item rnas
The number of RNAs in the genome.
=item name
The scientific name of the genome.
=item taxonomy
The genome's full taxonomy as a comma-separated string.
=item md5
The MD5 identifier computed from the genome's DNA sequences.
=back
=back
=item RETURN
Returns a hash mapping each incoming genome ID to an n-tuple. Each tuple
will contain the specified data fields for the computed gene in the specified
order.
$genomeHash = { $id1 => [$data1A, $data1B, ...],
$id2 => [$data2A, $data2B, ...],
... };
=back
=cut
# Map of external field names (as accepted in the -data parameter of
# genome_data) to the corresponding Genome entity field names in the
# sapling database.
use constant GENOME_FIELDS => { complete => 'complete',
                                contigs => 'contigs',
                                'dna-size' => 'dna-size',
                                domain => 'domain',
                                'gc-content' => 'gc-content',
                                'genetic-code' => 'genetic-code',
                                pegs => 'pegs',
                                rnas => 'rnas',
                                name => 'scientific-name',
                                md5 => 'md5-identifier'};
# Field names that are not stored directly on the Genome record and
# require special query logic in genome_data (currently only "taxonomy").
use constant SPECIAL_FIELDS => { taxonomy => 1 };
sub genome_data {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sap = $self->{db};
    # Declare the return variable: maps genome ID => n-tuple of values in
    # the order the caller listed the fields.
    my $retVal = {};
    # Get the list of fields and perform a basic validation so we know we have a
    # list of data items.
    my $fields = $args->{-data};
    Confess("No data fields specified in \"genome_data\".") if ! defined $fields;
    Confess("Invalid data field list in \"genome_data\".") if ref $fields ne 'ARRAY';
    # There are two types of fields: GENOME_FIELDS contains the ones that
    # are actual fields in the genome record; SPECIAL_FIELDS are fields that
    # require special queries. The following hash maps each normal field's
    # database name to its output position.
    my %fieldNames;
    # This one maps each special field to its output position.
    my %otherFields;
    # Analyze the data field list and populate the field name hashes.
    # NOTE(review): if the same field name appears twice in -data, only the
    # last occurrence's output position is filled -- confirm this is
    # acceptable to callers.
    for (my $i = 0; $i < @$fields; $i++) {
        # Get the current field's external name.
        my $field = $fields->[$i];
        # Check to see if this is a normal field.
        my $fieldName = GENOME_FIELDS->{$field};
        if (defined $fieldName) {
            # Yes. Store its database name in the field name map.
            $fieldNames{$fieldName} = $i;
        } else {
            # Check to see if this is a special field.
            my $found = SPECIAL_FIELDS->{$field};
            if ($found) {
                # Yes. Store it in the special name map.
                $otherFields{$field} = $i;
            } else {
                # No. It's a bad field.
                Confess("Invalid data field name \"$field\" in \"genome_data\".");
            }
        }
    }
    # Compute the list of normal field names. The order of this list drives
    # the order of values returned by GetEntityValues below, so the two
    # loops that use it must iterate identically.
    my @fieldNames = keys %fieldNames;
    # Get the list of genome IDs.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Loop through the genomes.
    for my $id (@$ids) {
        # Get the normal data for this genome.
        my @tuple = $sap->GetEntityValues(Genome => $id, \@fieldNames);
        # Fill in the results: each fetched value is placed at the output
        # position recorded for its database field name.
        my @result;
        for (my $i = 0; $i < @fieldNames; $i++) {
            my $fieldName = $fieldNames[$i];
            $result[$fieldNames{$fieldName}] = $tuple[$i];
        }
        # Now run through and process the special fields.
        for my $otherField (keys %otherFields) {
            # Compute this field's value. Taxonomy is rendered as a
            # comma-separated string of taxonomic group names.
            if ($otherField eq 'taxonomy') {
                my @taxonomy = $sap->Taxonomy($id, 'names');
                $result[$otherFields{$otherField}] = join(",", @taxonomy);
            }
        }
        # Store the results.
        $retVal->{$id} = \@result;
    }
    # Return the result.
    return $retVal;
}
=head3 genome_domain
my $genomeHash = $sapObject->genome_domain({
-ids => [$genome1, $genome2, ...]
});
Return the domain for each specified genome (e.g. C<Archaea>, C<Bacteria>, C<Plasmid>).
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of the genome IDs.
=back
=item RETURN
Returns a hash that maps each incoming genome ID to its taxonomic domain.
=back
=cut
sub genome_domain {
    # Retrieve the invocation arguments.
    my ($self, $args) = @_;
    # Extract the sapling database object.
    my $sap = $self->{db};
    # The output hash maps each genome ID to its taxonomic domain.
    my $retVal = {};
    # Extract the genome ID list.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Look up each genome's domain; genomes that are not found are
    # omitted from the output.
    for my $genomeID (@$ids) {
        my ($domain) = $sap->GetFlat('Genome', 'Genome(id) = ?', [$genomeID],
                                     'domain');
        $retVal->{$genomeID} = $domain if defined $domain;
    }
    # Return the result.
    return $retVal;
}
=head3 genome_fid_md5s
my $genomeHash = $sapObject->genome_fid_md5s({
-ids => [$genome1, $genome2, ...]
});
For each incoming genome, return a hash mapping its genes to their MD5 identifiers.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of the genome IDs.
=back
=item RETURN
Returns a hash that maps each incoming genome ID to a sub-hash that maps its FIG feature IDs
to their MD5 identifiers. The MD5 identifiers are computed from the genome's MD5 identifier
and the gene's location in the genome.
$genomeHash = { $genome1 => {$fid1a => $md5id1a, $fid1b => $md5id1b, ... },
$genome2 => {$fid2a => $md5id2a, $fid2b => $md5id2b, ... },
... };
=back
=cut
sub genome_fid_md5s {
    # Retrieve the invocation arguments.
    my ($self, $args) = @_;
    # Extract the sapling database object.
    my $sap = $self->{db};
    # The output hash maps each genome ID to a fid-to-MD5 sub-hash.
    my $retVal = {};
    # Extract the genome ID list.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Process each genome.
    for my $genomeID (@$ids) {
        # Fetch [fid, md5-id] pairs: FIG features of this genome linked to
        # identifiers in the "md5g" namespace.
        my @pairs = $sap->GetAll("IsIdentifiedBy Identifier",
                                 "IsIdentifiedBy(from-link) LIKE ? AND IsIdentifiedBy(to-link) LIKE ?",
                                 ["fig|$genomeID.%", "md5g|%"],
                                 ['from-link', 'Identifier(natural-form)']);
        # Genomes with no identified features are omitted; otherwise the
        # pairs are folded into a sub-hash.
        if (@pairs) {
            my %md5Map = map { $_->[0] => $_->[1] } @pairs;
            $retVal->{$genomeID} = \%md5Map;
        }
    }
    # Return the result.
    return $retVal;
}
=head3 genome_ids
my $genomeHash = $sapObject->genome_ids({
-names => [$name1, $name2, ...],
-taxons => [$tax1, $tax2, ...]
});
Find the specific genome ID for each specified genome name or taxonomic number.
This method helps to find the correct version of a given genome when only the
species and strain are known.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -names (optional)
Reference to a list of genome scientific names, including genus, species, and
strain (e.g. C<Streptomyces coelicolor A3(2)>). A genome ID will be found (if any)
for each specified name.
=item -taxons (optional)
Reference to a list of genome taxonomic numbers. These are essentially genome IDs
without an associated version number (e.g. C<100226>). A specific matching
genome ID will be found; the one chosen will be the one with the highest version
number that is not a plasmid.
=back
=item RETURN
Returns a hash mapping each incoming name or taxonomic number to the corresponding
genome ID.
$genomeHash = { $name1 => $genome1, $name2 => $genome2, ...
$tax1 => $genome3, $tax2 => $genome4, ... };
=back
=cut
sub genome_ids {
    # Retrieve the invocation arguments.
    my ($self, $args) = @_;
    # Extract the sapling database object.
    my $sap = $self->{db};
    # The output hash maps each name or taxon number to a genome ID.
    my $retVal = {};
    # Get the (optional) list of scientific names.
    my $names = ServerThing::GetIdList(-names => $args, 1);
    # Look up each name; names with no match are omitted from the output.
    for my $name (@$names) {
        my ($genome) = $sap->GetFlat('Genome', 'Genome(scientific-name) = ?',
                                     [$name], 'id');
        $retVal->{$name} = $genome if $genome;
    }
    # Get the (optional) list of taxonomic numbers.
    my $taxons = ServerThing::GetIdList(-taxons => $args, 1);
    # Resolve each taxon to its highest-versioned non-plasmid genome.
    for my $taxon (@$taxons) {
        # Fetch all non-plasmid genome IDs with this taxon prefix.
        my @genomes = $sap->GetFlat('Genome',
                                    'Genome(id) LIKE ? AND Genome(domain) <> ?',
                                    ["$taxon.%", 'Plasmid'], 'id');
        # Only proceed if we found something.
        if (@genomes) {
            # Strip each ID down to its version suffix and take the
            # numerically largest one.
            my ($best) = sort { $b <=> $a } map { $_ =~ /\d+\.(.+)/; $1 } @genomes;
            $retVal->{$taxon} = "$taxon.$best";
        }
    }
    # Return all the results found.
    return $retVal;
}
=head3 genome_metrics
my $genomeHash = $sapObject->genome_metrics({
-ids => [$genome1, $genome2, ...]
});
For each incoming genome ID, returns the number of contigs, the total
number of base pairs in the genome's DNA, and the genome's default genetic
code.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of genome IDs.
=back
=item RETURN
Returns a hash mapping each incoming genome ID to a 3-tuple consisting of (0)
the number of contigs, (1) the total DNA size, and (2) the genome's default
genetic code.
$genomeHash = { $genome1 => [$contigCount1, $baseCount1, $geneticCode1],
$genome2 => [$contigCount2, $baseCount2, $geneticCode2],
... };
=back
=cut
sub genome_metrics {
    # Retrieve the invocation arguments.
    my ($self, $args) = @_;
    # Extract the sapling database object.
    my $sap = $self->{db};
    # The output hash maps each genome ID to [contigs, dna-size, code].
    my $retVal = {};
    # Extract the genome ID list.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Fetch the three metric fields for each genome.
    for my $genomeID (@$ids) {
        my ($contigCount, $dnaSize, $geneticCode) =
            $sap->GetEntityValues(Genome => $genomeID,
                                  [qw(contigs dna-size genetic-code)]);
        # Genomes that were not found are omitted from the output.
        if (defined $contigCount) {
            $retVal->{$genomeID} = [$contigCount, $dnaSize, $geneticCode];
        }
    }
    # Return the result.
    return $retVal;
}
=head3 genome_names
my $idHash = $sapObject->genome_names({
-ids => [$id1, $id2, ...],
-numbers => 1
});
Return the name of the genome containing each specified feature or genome.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of identifiers. Each identifier can be a prefixed feature ID
(e.g. C<fig|100226.1.peg.3361>, C<uni|P0AC98>) or a genome ID (C<83333.1>,
C<360108.3>).
=item -numbers (optional)
If TRUE, the genome ID number will be returned instead of the name. Note that
this facility is only useful when the incoming identifiers are feature IDs,
as genome IDs would be mapped to themselves.
=back
=item RETURN
Returns a reference to a hash mapping each incoming feature ID to the scientific
name of its parent genome. If an ID refers to more than one real feature, only
the first feature's genome is returned.
$idHash = { $id1 => $genomeName1, $id2 => $genomeName2, ... };
=back
=cut
sub genome_names {
    # Retrieve the invocation arguments.
    my ($self, $args) = @_;
    # Extract the sapling database object.
    my $sap = $self->{db};
    # The output hash maps each incoming ID to a genome name (or, in
    # number mode, a genome ID).
    my $retVal = {};
    # Decide which Genome field to return: the ID number when -numbers is
    # set, otherwise the scientific name.
    my $outField = ($args->{-numbers} ? 'id' : 'scientific-name');
    # Extract the identifier list.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Process each identifier.
    for my $id (@$ids) {
        my $name;
        if ($id =~ /^\d+\.\d+$/) {
            # Genome ID: read the desired field straight from the genome
            # record. In number mode this simply confirms the genome
            # exists (returning the incoming ID) or yields undef.
            ($name) = $sap->GetFlat("Genome", "Genome(id) = ?", [$id],
                                    "Genome($outField)");
        } else {
            # Feature identifier: follow it to its owning genome. Only the
            # first matching genome is used.
            ($name) = $sap->GetFlat("Identifies IsOwnedBy Genome",
                                    'Identifies(from-link) = ?', [$id],
                                    "Genome($outField)");
        }
        # Identifiers with no result are omitted from the output.
        $retVal->{$id} = $name if defined $name;
    }
    # Return the result.
    return $retVal;
}
=head3 genomes_by_md5
my $md5Hash = $sapObject->genomes_by_md5({
-ids => [$md5id1, $md5id2, ...],
-names => 1
});
Find the genomes associated with each specified MD5 genome identifier. The MD5
genome identifier is computed from the DNA sequences of the genome's contigs; as
a result, two genomes with identical sequences arranged in identical contigs
will have the same MD5 identifier even if they have different genome IDs.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of MD5 genome identifiers.
=item -names (optional)
If TRUE, then both genome IDs and their associated names will be returned;
otherwise, only the genome IDs will be returned. The default is FALSE.
=back
=item RETURN
Returns a reference to a hash keyed by incoming MD5 identifier. Each identifier
maps to a list of genomes. If C<-names> is FALSE, then the list is of genome IDs;
if C<-names> is TRUE, then the list is of 2-tuples, each consisting of (0) a genome
ID and (1) the associated genome's scientific name.
=over 8
=item if C<-names> = TRUE
$md5Hash = { $md5id1 => [[$genome1a, $name1a], [$genome1b, $name1b], ...],
$md5id2 => [[$genome2a, $name2a], [$genome2b, $name2b], ...],
... };
=item if C<-names> = FALSE
$md5Hash = { $md5id1 => [$genome1a, $genome1b, ...],
$md5id2 => [$genome2a, $genome2b, ...],
... };
=back
=back
=cut
sub genomes_by_md5 {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # The results will be accumulated in this hash.
    my $retVal = {};
    # Get the incoming MD5 genome identifiers.
    my $md5List = ServerThing::GetIdList(-ids => $args);
    # Check whether scientific names were requested along with the IDs.
    my $wantNames = $args->{-names} || 0;
    # Build the output field list: always the genome ID, plus the name
    # when names were requested.
    my @fields = ('id');
    push @fields, 'scientific-name' if $wantNames;
    # Process each MD5 identifier.
    for my $md5 (@$md5List) {
        # Find all genomes whose contig DNA hashes to this MD5.
        my @genomeRows = $sap->GetAll("Genome", 'Genome(md5-identifier) = ?',
                                      [$md5], \@fields);
        if ($wantNames) {
            # In name mode each row is already the desired [id, name] 2-tuple.
            $retVal->{$md5} = \@genomeRows;
        } else {
            # In ID-only mode flatten the 1-tuples into plain genome IDs.
            $retVal->{$md5} = [map { $_->[0] } @genomeRows];
        }
    }
    # Return the MD5-to-genome map.
    return $retVal;
}
=head3 intergenic_regions
my $locList = $sapObject->intergenic_regions({
-genome => $genome1,
-type => ['peg', 'rna']
});
Return a list of L</Location Strings> for the regions in the specified genome that are
not occupied by genes of the specified types. All of these will be construed to be on
the forward strand, and sorted by contig ID and start location within contig.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -genome
ID of the genome whose intergenic regions are to be returned.
=item -type (optional)
Reference to a list of gene types. Only genes of the specified type will be considered
to be occupying space on the contigs. Typically, this parameter will either be C<peg>
or a list consisting of C<peg> and C<rna>. The default is to allow all gene types,
but this will not generally produce a good result.
=back
=item RETURN
Returns a reference to a list of location strings, indicating the intergenic region
locations for the genome.
$locList = [$loc1, $loc2, ...]
=back
=cut
sub intergenic_regions {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # The regions found will be stored in here.
    my @retVal;
    # Get the genome ID. This parameter is required.
    my $genome = $args->{-genome};
    Confess("No genome specified for intergenic_regions.") if ! defined $genome;
    # Get the list of gene types. If none are specified, we'll get an empty list,
    # which means no type filtering: every gene counts as occupied space.
    my $types = ServerThing::GetIdList(-type => $args, 1);
    # We need to create the filter for the feature lookup. This list will contain
    # all of the parameters except the contig ID.
    my @filterParms;
    # Each parameter will be a feature ID pattern that only matches genes of the
    # desired type for our genome (e.g. "fig|83333.1.peg.%").
    for my $type (@$types) {
        push @filterParms, "fig|$genome.$type.%";
    }
    # Start the filter string with the contig ID.
    my $filterString = "IsLocusFor(from-link) = ?";
    if (scalar @filterParms) {
        # Here we have additional filtering by feature type: one LIKE clause
        # per requested type, ORed together.
        my @clauses;
        for my $filterParm (@filterParms) {
            push @clauses, "IsLocusFor(to-link) LIKE ?"
        }
        $filterString .= " AND (" . join(" OR ", @clauses) . ")";
    }
    # Finish off with an ordering. Sorting by begin-location within contig is
    # what allows the single left-to-right sweep below to find the gaps.
    $filterString .= " ORDER BY IsLocusFor(from-link), IsLocusFor(begin)";
    # Now we have everything we need to create queries for the occupied regions of
    # a contig. The next step is to get the contigs.
    my @contigs = $sap->GetAll("IsMadeUpOf Contig", "IsMadeUpOf(from-link) = ?",
                                [$genome], 'Contig(id) Contig(length)');
    # Loop through the contigs.
    for my $contigData (@contigs) {
        # Get the contig ID and length.
        my ($contig, $contigLen) = @$contigData;
        # Denote that our current position on the contig is the first base pair.
        # NOTE(review): this assumes IsLocusFor(begin) is a 1-based offset -- confirm.
        my $loc = 1;
        # Create a query to get all the occupied regions of the contig.
        my $query = $sap->Get("IsLocusFor", $filterString, [$contig, @filterParms]);
        # Loop through the results, which arrive sorted by begin location.
        while (my $region = $query->Fetch()) {
            # Get the start and length of this occupied region.
            my $begin = $region->PrimaryValue('begin');
            my $len = $region->PrimaryValue('len');
            # Is there an intergenic region before the start of this new area?
            if ($begin > $loc) {
                # Yes, write it out as a forward-strand location string.
                my $regionLen = $begin - $loc;
                push @retVal, $contig . "_$loc+$regionLen";
            }
            # Record the end of this region as the last occupied position, if it's past
            # the current limit. The max-style check handles overlapping genes.
            my $regionLast = $begin + $len;
            $loc = $regionLast if $loc < $regionLast;
        }
        # Check for a residual at the end of the contig.
        if ($contigLen > $loc) {
            my $regionLen = $contigLen - $loc;
            push @retVal, $contig . "_$loc+$regionLen";
        }
    }
    # Return the list of locations found.
    return \@retVal;
}
=head3 is_prokaryotic
my $genomeHash = $sapObject->is_prokaryotic({
-ids => [$genome1, $genome2, ...]
});
For each incoming genome ID, returns 1 if it is prokaryotic and 0
otherwise.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of the relevant genome IDs.
=back
=item RETURN
Returns a reference to a hash that maps each incoming genome ID to C<1> if it is
a prokaryotic genome and C<0> otherwise.
$genomeHash = { $genome1 => $flag1, $genome2 => $flag2, ... };
=back
=cut
sub is_prokaryotic {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sap = $self->{db};
    # The flags found will be stored in this hash.
    my $retVal = {};
    # Get the list of genome IDs to check.
    my $genomeList = ServerThing::GetIdList(-ids => $args);
    # Look up the prokaryotic flag for each genome.
    for my $genomeID (@$genomeList) {
        my ($prokFlag) = $sap->GetFlat("Genome", "Genome(id) = ?", [$genomeID],
                                       'Genome(prokaryotic)');
        $retVal->{$genomeID} = $prokFlag;
    }
    # Return the flag map.
    return $retVal;
}
=head3 mapped_genomes
my $genomeHash = $sapObject->mapped_genomes({
-ids => [$genome1, $genome2, ...]
});
For each incoming genome, return a list of the genomes that have an existing
gene correspondence map (see L<ServerThing/Gene Correspondence List>). Gene
correspondence maps indicate which genes in the target genome are the best hit
of each gene in the source genome. If a correspondence map does not yet exist,
it will be created when you ask for it, but this is an expensive process and it
is sometimes useful to find an alternate genome that will give you a faster
result.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of the IDs for the genomes of interest. A (possibly
empty) result list will be returned for each one.
=back
=item RETURN
Returns a reference to a hash mapping each incoming genome ID to a list of
the IDs for the genomes which have existing correspondence maps on the
server.
$genomeHash = { $genome1 => [$genome1a, $genome1b, ...],
$genome2 => [$genome2a, $genome2b, ...],
... };
=back
=cut
sub mapped_genomes {
    # Get the parameters.
    my ($self, $args) = @_;
    # Declare the return variable.
    my $retVal = {};
    # Get a hash of complete genomes. This is used to filter old, obsolete
    # genome IDs out of the output and to determine which directories we want
    # to examine.
    my $genomeHash = $self->all_genomes({ -complete => 1 });
    # Get the list of incoming genome IDs.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Loop through the list.
    for my $id (@$ids) {
        # Get all of the genomes in this genome's correspondence map directory.
        # NOTE(review): OpenDir presumably returns the entry names of the
        # directory (flags likely control hidden-file/error handling) -- each
        # entry name is itself a genome ID, since we filter against the
        # complete-genome hash. Confirm against the Tracer/ServerThing source.
        my $orgDir = ServerThing::ComputeCorrespondenceDirectory($id);
        my @files = grep { exists $genomeHash->{$_} } OpenDir($orgDir, 0, 1);
        # Put them in the result list.
        $retVal->{$id} = \@files;
        # The correspondence maps are reversible, so we only keep half of them.
        # Our next task, then, is to find the converse correspondence files in
        # the directories of other genomes. The other genomes will be in directories
        # for genomes that satisfy the "must-flip" criterion when placed next to
        # this one.
        for my $otherID (keys %$genomeHash) {
            if (ServerThing::MustFlipGenomeIDs($id, $otherID)) {
                # Here we have an ID for a corresponding genome that will have us
                # in its directory instead of being in our directory. Only add it
                # if the correspondence file physically exists.
                my ($fileName) = ServerThing::ComputeCorrespondenceFileName($id, $otherID);
                if (-f $fileName) {
                    push @{$retVal->{$id}}, $otherID;
                }
            }
        }
    }
    # Return the result hash.
    return $retVal;
}
=head3 otu_members
my $genomeHash = $sapObject->otu_members({
-ids => [$genome1, $genome2, ...]
});
For each incoming genome, return the name and ID of each other genome in the same
OTU.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of the IDs for the genomes of interest.
=back
=item RETURN
Returns a reference to a hash mapping each incoming genome ID to a sub-hash.
The sub-hash is keyed by genome ID, and maps the ID of each genome in the same
OTU to its name.
$genomeHash = { $genome1 => { $genome1a => $name1a, $genome1b => $name1b, ... },
$genome2 => { $genome2a => $name2a, $genome2b => $name2b, ... },
... };
=back
=cut
sub otu_members {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sap = $self->{db};
    # The membership sub-hashes will be collected in here.
    my $retVal = {};
    # Get the incoming genome IDs.
    my $genomeList = ServerThing::GetIdList(-ids => $args);
    # Process each genome.
    for my $genome (@$genomeList) {
        # Find every other genome in the same OTU, pairing its ID with its name.
        my @pairs = $sap->GetAll("IsCollectedInto IsCollectionOf Genome",
                                 'IsCollectedInto(from-link) = ? AND Genome(id) <> ?',
                                 [$genome, $genome],
                                 [qw(Genome(id) Genome(scientific-name))]);
        # Convert the pairs into an ID-to-name hash.
        my %members;
        for my $pair (@pairs) {
            $members{$pair->[0]} = $pair->[1];
        }
        # Store the membership hash for this genome.
        $retVal->{$genome} = \%members;
    }
    # Return the results.
    return $retVal;
}
=head3 representative
my $genomeHash = $sapObject->representative({
-ids => [$genome1, $genome2, ...]
});
Return the representative genome for each specified incoming genome ID.
Genomes with the same representative are considered closely related, while
genomes with a different representative would be considered different
enough that similarities between them have evolutionary significance.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of the IDs for the genomes of interest.
=back
=item RETURN
Returns a reference to a hash mapping each incoming genome ID to the ID of
its representative genome.
$genomeHash = { $genome1 => $genome1R, $genome2 => $genome2R, ... };
=back
=cut
sub representative {
    # Get the parameters.
    my ($self, $args) = @_;
    # The mapping built will go in here.
    my $retVal = {};
    # Get the sapling database object.
    my $sap = $self->{db};
    # Get the incoming genome IDs.
    my $genomeList = ServerThing::GetIdList(-ids => $args);
    # Look up each genome's OTU representative.
    for my $genome (@$genomeList) {
        # Follow the collection relationship to the representative genome of
        # this genome's OTU.
        my ($repID) = $sap->GetFlat("IsCollectedInto IsCollectionOf",
            'IsCollectedInto(from-link) = ? AND IsCollectionOf(representative) = ?',
            [$genome, '1'], 'IsCollectionOf(to-link)');
        # An invalid genome ID produces no representative; such genomes are
        # simply left out of the output hash.
        $retVal->{$genome} = $repID if $repID;
    }
    # Return the mapping.
    return $retVal;
}
=head3 representative_genomes
my $mappings = $sapObject->representative_genomes();
Compute mappings for the genome sets (OTUs) in the database. This method will
return a mapping from each genome to its genome set ID and from each
genome set ID to a list of the genomes in the set. For the second
mapping, the first genome in the set will be the representative.
This method does not require any parameters.
=over 4
=item RETURN
Returns a reference to a 2-tuple. The first element is a reference to a hash
mapping genome IDs to genome set IDs; the second element is a reference to a hash
mapping each genome set ID to a list of the genomes in the set. The first genome
in each of these lists will be the set's representative.
$mappings = [ { $genome1 => $set1, $genome2 => $set2, ... },
{ $set1 => [$genome1R, $genome1a, $genome1b, ...],
$set2 => [$genome2R, $genome2a, $genome2b, ...],
... }
];
=back
=cut
sub representative_genomes {
    # Get the parameters.
    my ($self, $args) = @_;
    # These two hashes together form the return value.
    my %genomes_to_otus;
    my %otus_to_genomes;
    # Get the sapling database.
    my $sap = $self->{db};
    # Pull in every (set, genome) pair. The ordering guarantees that each
    # set's representative genome appears before the set's other members, so
    # the first genome pushed onto a set's list is always the representative
    # and no special-case checking is needed below.
    my @pairs = $sap->GetAll("IsCollectionOf Genome",
        'ORDER BY IsCollectionOf(from-link), IsCollectionOf(representative) DESC, IsCollectionOf(to-link)',
        [], [qw(from-link to-link)]);
    # Record each pair in both directions.
    for my $pair (@pairs) {
        my ($setID, $genomeID) = @$pair;
        push @{$otus_to_genomes{$setID}}, $genomeID;
        $genomes_to_otus{$genomeID} = $setID;
    }
    # Return the two maps as a 2-tuple.
    return [\%genomes_to_otus, \%otus_to_genomes];
}
=head3 submit_gene_correspondence
my $statusCode = $sapObject->submit_gene_correspondence({
-genome1 => $genome1,
-genome2 => $genome2,
-correspondences => $corrList,
-passive => 1
});
Submit a set of gene correspondences to be stored on the server.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -genome1
ID of the source genome for the correspondence.
=item -genome2
ID of the target genome for the correspondence.
=item -correspondences
Reference to a list of lists containing the correspondence data
(see L<ServerThing/Gene Correspondence List>).
=item -passive (optional)
If TRUE, then the file will not be stored if one already exists. If FALSE, an
existing correspondence file will be overwritten. The default is FALSE.
=back
=item RETURN
Returns TRUE (C<1>) if the correspondences were successfully stored, FALSE
(C<0>) if they were rejected or an error occurred.
=back
=cut
sub submit_gene_correspondence {
    # Get the parameters.
    my ($self, $args) = @_;
    # Declare the return variable. We assume an error unless successful.
    my $retVal = 0;
    # Get the two genome IDs. Both are required.
    my $genome1 = $args->{-genome1};
    if (! defined $genome1) {
        Confess("-genome1 missing in submit_gene_correspondence call.");
    }
    my $genome2 = $args->{-genome2};
    if (! defined $genome2) {
        Confess("-genome2 missing in submit_gene_correspondence call.");
    }
    # Get the correspondence list.
    my $corrList = $args->{-correspondences};
    if (! defined $corrList) {
        Confess("-correspondences missing in submit_gene_correspondence call.");
    }
    # Compute the name for the genome correspondence file and find out if the
    # genome IDs are in the right order. Files are stored in a canonical order
    # ($genomeA first); if the caller passed the genomes the other way around,
    # each row must be flipped before it is written.
    my ($fileName, $genomeA, $genomeB) = ServerThing::ComputeCorrespondenceFileName($genome1, $genome2);
    my $converse = ($genomeA ne $genome1);
    # Determine if we're active or passive. If we're passive and the file already
    # exists, we simply return success and quit.
    my ($existingFileName) = ServerThing::CheckForGeneCorrespondenceFile($genome1, $genome2);
    if ($args->{-passive} && $existingFileName) {
        $retVal = 1;
        Trace("Correspondence for $genome1 to $genome2 already exists. Skipped in passive mode.") if T(Corr => 3);
    } else {
        # Insure the correspondence list is valid.
        if (ref $corrList ne 'ARRAY') {
            Trace("Invalid correspondence list in submit_gene_correspondence for $genome1 to $genome2: not an array.") if T(Corr => 0);
        } else {
            # Loop through the list, checking for errors. Note the inline
            # "last if" at the top of the loop: validation stops at the first
            # row that fails.
            my $errorCount = 0;
            for (my $i = 0; $i < scalar(@$corrList); $i++) { last if $errorCount > 0;
                my $row = $corrList->[$i];
                if (ref $row ne 'ARRAY') {
                    Trace("Invalid correspondence list in submit_gene_correspondence for $genome1 to $genome2: row $i is not an array.") if T(Corr => 0);
                    $errorCount++;
                } else {
                    # Validate the row's fields; a nonzero result is an error count.
                    $errorCount += ServerThing::ValidateGeneCorrespondenceRow($row);
                    if ($errorCount) {
                        Trace("Invalid correspondence list in submit_gene_correspondence for $genome1 to $genome2: row $i has errors.") if T(Corr => 0);
                    } elsif ($converse) {
                        # Here we have to flip the row to get it in the right order.
                        ServerThing::ReverseGeneCorrespondenceRow($row);
                    }
                }
            }
            if (! $errorCount) {
                # Now we need to verify the genome IDs: both genomes must exist
                # and be complete before we store a correspondence.
                # NOTE(review): the field name 'complete' is unqualified here,
                # unlike the 'Genome(...)' form used elsewhere in this file --
                # confirm GetFlat accepts unqualified names for a single object.
                my $sap = $self->{db};
                for my $genome ($genome1, $genome2) {
                    my ($complete) = $sap->GetFlat('Genome', 'Genome(id) = ?', [$genome],
                                                   'complete');
                    if (! $complete) {
                        Trace("$genome missing or incomplete. Cannot store correspondence file.") if T(Corr => 0);
                        $errorCount++;
                    }
                }
                if (! $errorCount) {
                    # Now we know we can store the correspondence data. Try to open a temporary
                    # file to hold the data. The process ID ($$) makes the temp name unique,
                    # and the write-then-rename sequence installs the file atomically.
                    my $tempFileName = "$fileName.$$.tmp";
                    my $oh;
                    if (! open($oh, ">$tempFileName")) {
                        Trace("Could not open correspondence temp file: $!") if T(Corr => 0);
                    } else {
                        # Store the data in the file, one tab-delimited row per line.
                        for my $row (@$corrList) {
                            print $oh join("\t", @$row) . "\n";
                        }
                        # Close the temporary file. Buffered write errors surface here.
                        if (! close $oh) {
                            Trace("Error closing $tempFileName. Correspondence store aborted.") if T(Corr => 0);
                        } else {
                            # Try to rename it into place.
                            if (rename $tempFileName, $fileName) {
                                # It worked! Fix the permissions and denote success.
                                chmod 0664, $fileName;
                                $retVal = 1;
                            } else {
                                Trace("Error renaming $tempFileName to $fileName. Correspondence store aborted.") if T(Corr => 0);
                            }
                        }
                    }
                    # Insure the temporary file is deleted on any failure path.
                    if (-f $tempFileName) {
                        unlink $tempFileName;
                    }
                }
            }
        }
    }
    # Return the success indicator.
    return $retVal;
}
=head3 taxonomy_of
my $genomeHash = $sapObject->taxonomy_of({
-ids => [$genome1, $genome2, ...],
-format => 'numbers'
});
Return the taxonomy of each specified genome. The taxonomy will start at
the domain level and moving down to the node where the genome is
attached.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of genome IDs. A taxonomy will be generated for each
specified genome.
=item -format (optional)
Format for the elements of the taxonomy string. If C<numbers>, then each
taxonomy element will be represented by its number; if C<names>, then each
taxonomy element will be represented by its primary name; if C<both>, then
each taxonomy element will be represented by a number followed by the name.
The default is C<names>.
=back
=item RETURN
Returns a reference to a hash mapping incoming genome IDs to taxonomies.
Each taxonomy will be a list of strings, starting from the domain and
ending with the genome.
=over 8
=item Normal Output
$genomeHash = { $genome1 => [$name1a, $name1b, ...],
$genome2 => [$name2a, $name2b, ...],
... };
=item Output if -format = numbers
$genomeHash = { $genome1 => [$num1a, $num1b, ...],
$genome2 => [$num2a, $num2b, ...],
... };
=item Output if -format = both
$genomeHash = { $genome1 => ["$num1a $name1a", "$num1b $name1b", ...],
$genome2 => ["$num2a $name2a", "$num2b $name2b", ...],
... };
=back
=back
=cut
sub taxonomy_of {
    # Get the parameters.
    my ($self, $args) = @_;
    # The taxonomies found will be stored in here.
    my $retVal = {};
    # Get the sapling database object.
    my $sap = $self->{db};
    # Determine the element format, defaulting to names only.
    my $format = $args->{-format} || 'names';
    # Get the incoming genome IDs.
    my $genomeList = ServerThing::GetIdList(-ids => $args);
    # Compute the taxonomy for each genome.
    for my $genome (@$genomeList) {
        my @taxonomy = $sap->Taxonomy($genome, $format);
        $retVal->{$genome} = \@taxonomy;
    }
    # Return the taxonomy map.
    return $retVal;
}
=head2 Scenario Data Methods
=head3 scenario_names
my $scenarioHash = $sapObject->scenario_names({
-subsystem => $subsys1
});
Return the names of all the scenarios for the specified subsystem. Each scenario
has an internal ID number and a common name. This method returns both.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -subsystem
Name of the subsystem whose scenarios are desired.
=back
=item RETURN
Returns a hash mapping the ID numbers of the subsystem's scenarios to their
common names.
$scenarioHash = { $id1 => $name1, $id2 => $name2, ... };
=back
=cut
sub scenario_names {
    # Get the parameters.
    my ($self, $args) = @_;
    # The ID-to-name map will be built in here.
    my %retVal;
    # Get the sapling database.
    my $sap = $self->{db};
    # Insure a subsystem was specified.
    my $subsystem = $args->{-subsystem};
    if (! defined $subsystem) {
        Confess("No -subsystem specified for scenario_names.");
    } else {
        # Pull the scenario IDs and common names for the subsystem.
        my @scenarios = $sap->GetAll("Subsystem IsSubInstanceOf Scenario",
                                     'Subsystem(id) = ?', [$subsystem],
                                     [qw(Scenario(id) Scenario(common-name))]);
        # Convert the 2-tuples into hash entries.
        for my $scenario (@scenarios) {
            $retVal{$scenario->[0]} = $scenario->[1];
        }
    }
    # Return a reference to the result hash.
    return \%retVal;
}
=head2 Subsystem Data Methods
=head3 all_subsystems
my $subsysHash = $sapObject->all_subsystems({
-usable => 1,
-exclude => [$type1, $type2, ...],
-aux => 1
});
Return a list of all subsystems in the system. For each subsystem, this
method will return the ID, curator, the classifications, and roles.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys, all of
which are optional. Because all of the keys are optional, it is permissible to
pass an empty hash or no parameters at all.
=over 8
=item -usable (optional)
If TRUE, then only results from usable subsystems will be included. If FALSE,
then results from all subsystems will be included. The default is TRUE.
=item -exclude (optional)
Reference to a list of special subsystem types that should be excluded from the
result list. The permissible types are C<cluster-based> and C<experimental>.
Normally cluster-based subsystems are included, but experimental subsystems
are only included if the C<-usable> option is turned off.
=item -aux (optional)
If TRUE, then auxiliary roles will be included in the output. The default is
FALSE, meaning they will be excluded.
=back
=item RETURN
Returns a hash mapping each subsystem ID to a 3-tuple consisting of (0) the name of the
curator, (1) a reference to a list of the subsystem classifications, and (2) a reference
to a list of the subsystem's roles.
$subsysHash = { $sub1 => [$curator1, [$class1a, $class1b, ...], [$role1a, $role1b, ...]],
$sub2 => [$curator2, [$class2a, $class2b, ...], [$role2a, $role2b, ...]],
... };
=back
=cut
sub all_subsystems {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sapling = $self->{db};
    # Declare the return variable.
    my $retVal = {};
    # Compute the filter based on the parameters (-usable and -exclude).
    # NOTE(review): the third argument (1) presumably relates to the -aux
    # option handling -- confirm against ServerThing::AddSubsystemFilter.
    my $filter = "";
    ServerThing::AddSubsystemFilter(\$filter, $args, 1);
    # Create a hash for walking up the subsystem class hierarchy: it maps
    # each class to its parent class. A class with no parent maps to undef,
    # which terminates the walk below.
    my %classMap = map { $_->[0] => $_->[1] } $sapling->GetAll("IsSubclassOf",
                                                               "", [],
                                                               [qw(from-link to-link)]);
    # Read the subsystem role data from the database. Each result row pairs a
    # subsystem's identity and low-level class with one of its roles.
    my @roleData = $sapling->GetAll("Subsystem Includes Role AND Subsystem IsInClass SubsystemClass",
                                    $filter, [],
                                    [qw(Subsystem(id) Subsystem(curator)
                                        SubsystemClass(id) Role(id))]);
    # Loop through the subsystems, building the result hash.
    for my $roleDatum (@roleData) {
        my ($subsystem, $curator, $class, $role) = @$roleDatum;
        # Is this subsystem new?
        if (! exists $retVal->{$subsystem}) {
            # Yes. Get its classification data. We trace the classifications from
            # the bottom up, so new ones are shifted onto the front.
            my @classes;
            while ($class) {
                unshift @classes, $class;
                $class = $classMap{$class};
            }
            # Create its hash entry: [curator, classification list, role list].
            $retVal->{$subsystem} = [$curator, \@classes, []];
        }
        # Now we know an entry exists for this subsystem. Push this role onto it.
        push @{$retVal->{$subsystem}[2]}, $role;
    }
    # Return the result.
    return $retVal;
}
=head3 classification_of
my $subsysHash = $sapObject->classification_of({
-ids => [$sub1, $sub2, ...]
});
Return the classification for each specified subsystem.
=over 4
=item parameter
Reference to a hash of parameters with the following possible keys.
=over 8
=item -ids
Reference to a list of subsystem IDs.
=back
=item RETURN
Returns a hash mapping each incoming subsystem ID to a list reference. Each
list contains the classification names in order from the largest classification to
the most detailed.
$subsysHash = { $sub1 => [$class1a, $class1b, ...],
$sub2 => [$class2a, $class2b, ...],
... };
=back
=cut
sub classification_of {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sap = $self->{db};
    # The classification lists will be stored in here.
    my $retVal = {};
    # Get the incoming subsystem IDs.
    my $subList = ServerThing::GetIdList(-ids => $args);
    # Process each subsystem.
    for my $subsystem (@$subList) {
        # Convert the incoming ID to its normalized database form.
        my $normalizedID = $sap->SubsystemID($subsystem);
        # Find the most detailed (lowest-level) class for this subsystem.
        my ($currentClass) = $sap->GetFlat("Subsystem IsInClass SubsystemClass",
                                           "Subsystem(id) = ?", [$normalizedID],
                                           'SubsystemClass(id)');
        # Walk up the class hierarchy. Each class is prepended, so the final
        # list runs from the broadest classification down to the most detailed.
        my @classList;
        while (defined $currentClass) {
            unshift @classList, $currentClass;
            ($currentClass) = $sap->GetFlat("SubsystemClass IsSubclassOf SubsystemClass2",
                                            "SubsystemClass(id) = ?", [$currentClass],
                                            'SubsystemClass2(id)');
        }
        # Store the classification under the original (un-normalized) ID.
        $retVal->{$subsystem} = \@classList;
    }
    # Return the results.
    return $retVal;
}
=head3 genomes_to_subsystems
my $genomeHash = $sapObject->genomes_to_subsystems({
-ids => [$genome1, $genome2, ...],
-all => 1,
-usable => 0,
-exclude => ['cluster-based', 'experimental', ...]
});
Return a list of the subsystems participated in by each of the specified
genomes.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of the genome IDs.
=item -all (optional)
If TRUE, all subsystems will be returned, including those in which the genome
does not appear to implement the subsystem and those in which the subsystem
implementation is incomplete. The default is FALSE, in which case only subsystems
that are completely implemented by the genome will be returned.
=item -usable (optional)
If TRUE, then only results from usable subsystems will be included. If FALSE,
then results from all subsystems will be included. The default is TRUE.
=item -exclude (optional)
Reference to a list of special subsystem types that should be excluded from the
result list. The permissible types are C<cluster-based> and C<experimental>.
Normally cluster-based subsystems are included, but experimental subsystems
are only included if the C<-usable> option is turned off.
=back
=item RETURN
Returns a hash mapping each genome ID to a list of 2-tuples. Each 2-tuple will
contain a subsystem name followed by a variant code.
$genomeHash = { $genome1 => [[$sub1a, $variantCode1a], [$sub1b, $variantCode1b], ...],
$genome2 => [[$sub2a, $variantCode2a], [$sub2b, $variantCode2b], ...],
... };
=back
=cut
sub genomes_to_subsystems {
    # Get the parameters.
    my ($self, $args) = @_;
    # The results will be accumulated in here.
    my $retVal = {};
    # Get the sapling database.
    my $sap = $self->{db};
    # Get the incoming genome IDs.
    my $genomeList = ServerThing::GetIdList(-ids => $args);
    # Check whether incomplete implementations should be included.
    my $allFlag = $args->{-all} || 0;
    # Build the filter. Unless all subsystems are wanted, restrict the output
    # to normal (fully-implemented) variants.
    my $filter = 'Genome(id) = ?';
    my @extraParms;
    if (! $allFlag) {
        $filter .= ' AND Variant(type) = ?';
        push @extraParms, 'normal';
    }
    # Apply the standard subsystem-type filtering (-usable and -exclude).
    ServerThing::AddSubsystemFilter(\$filter, $args);
    # Process each genome.
    for my $genome (@$genomeList) {
        # Retrieve the (subsystem, variant code) pairs for this genome.
        my @pairs = $sap->GetAll("Genome Uses Implements Variant IsDescribedBy Subsystem",
                                 $filter, [$genome, @extraParms],
                                 [qw(Subsystem(id) Variant(code))]);
        # Only genomes with at least one subsystem appear in the output.
        if (@pairs) {
            $retVal->{$genome} = \@pairs;
        }
    }
    # Return the results.
    return $retVal;
}
=head3 get_subsystems
my $subsysHash = $sapObject->get_subsystems({
-ids => [$sub1, $sub2, ...]
});
Get a complete description of each specified subsystem. This will include
the basic subsystem properties, the list of roles, and the spreadsheet.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of subsystem IDs.
=back
=item RETURN
Returns a reference to a hash mapping each incoming subsystem ID to a sub-hash
that completely describes the subsystem. The keys for the sub-hash are as
follows.
=over 8
=item curator
The name of the subsystem's curator.
=item version
The subsystem's current version number.
=item notes
The text of the subsystem notes.
=item desc
The description of the subsystem.
=item roles
Reference to a list of 3-tuples, one for each role in the subsystem. Each
3-tuple will contain (0) the role abbreviation, (1) C<1> if the role is
auxiliary and C<0> otherwise, and (2) the ID (name) of the role.
=item spreadsheet
Reference to a list of 5-tuples. For each molecular machine implementing the
subsystem, there is a 5-tuple containing (0) the target genome ID, (1) the
relevant region string, (2) C<1> if the molecular machine is curated and C<0>
if it was computer-assigned, (3) the variant code for the implemented variant,
and (4) a reference to a list of sub-lists, one per role (in order), with each
sub-list containing the IDs of all features performing that role.
=back
$subsysHash = { $sub1 =>
{ curator => $curator1,
version => $version1,
notes => $notes1,
desc => $desc1,
roles => [[$abbr1a, $aux1a, $role1a],
[$abbr1b, $aux1b, $role1b], ... ],
spreadsheet => [
[$genome1x, $region1x, $curated1x, $variant1x,
[[$fid1xa1, $fid1xa2, ...], [$fid1xb1, $fid1xb2, ...], ...]],
[$genome1y, $region1y, $curated1y, $variant1y,
[[$fid1ya1, $fid1ya2, ...], [$fid1yb1, $fid1yb2, ...], ...]],
... ]
},
$sub2 =>
{ curator => $curator2,
version => $version2,
notes => $notes2,
desc => $desc2,
roles => [[$abbr2a, $aux2a, $role2a],
[$abbr2b, $aux2b, $role2b], ... ],
spreadsheet => [
[$genome2x, $region2x, $curated2x, $variant2x,
[[$fid2xa1, $fid2xa2, ...], [$fid2xb1, $fid2xb2, ...], ...]],
                      [$genome2y, $region2y, $curated2y, $variant2y,
                          [[$fid2ya1, $fid2ya2, ...], [$fid2yb1, $fid2yb2, ...], ...]],
... ]
},
=back
=cut
sub get_subsystems {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sap = $self->{db};
    # Get the list of subsystems.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Create the return hash.
    my $retVal = {};
    # Loop through the subsystems.
    for my $id (@$ids) {
        # Get the basic subsystem data.
        my ($subData) = $sap->GetAll("Subsystem", 'Subsystem(id) = ?', [$id],
                                     [qw(curator description notes version)]);
        # Only proceed if the subsystem exists; unknown IDs are silently omitted.
        if ($subData) {
            # Create the subsystem's sub-hash from the basic fields.
            my %subHash = ( curator => $subData->[0], desc => $subData->[1],
                            notes => $subData->[2], version => $subData->[3] );
            # Get the role list: [abbreviation, auxiliary-flag, role ID]
            # 3-tuples in spreadsheet column order.
            my @roleList = $sap->GetAll("Includes",
                                        'Includes(from-link) = ? ORDER BY Includes(sequence)',
                                        [$id], [qw(abbreviation auxiliary to-link)]);
            $subHash{roles} = \@roleList;
            # Now we need to get the spreadsheet. We start with a list of the
            # genomes and their variants (one row per molecular machine).
            my @machines = $sap->GetAll("Describes Variant IsImplementedBy MolecularMachine IsUsedBy",
                                        'Describes(from-link) = ?', [$id],
                                        [qw(IsUsedBy(to-link) MolecularMachine(region)
                                            MolecularMachine(curated) Variant(code))]);
            # Get the MD5 of the subsystem ID. Contains(from-link) keys are
            # prefixed with this digest, which is why the LIKE query below works.
            my $subsysMD5 = ERDB::DigestKey($id);
            # Get all of the features in the subsystem, and organize them into
            # a hash keyed on "genome:region", then role abbreviation.
            my %cells;
            my $qh = $sap->Get("Contains", 'Contains(from-link) LIKE ?', [$subsysMD5 . '%']);
            while (my $resultRow = $qh->Fetch()) {
                my $fromLink = $resultRow->PrimaryValue('from-link');
                my $fid = $resultRow->PrimaryValue('to-link');
                # The from-link splits into (digest, genome, region, role).
                my (undef, $genome, $region, $role) = split /:/, $fromLink;
                push @{$cells{"$genome:$region"}{$role}}, $fid;
            }
            # Get the list of role abbreviations, in column order.
            my @roles = map { $_->[0] } @roleList;
            # Loop through the machines. For each machine, we must create the
            # list of spreadsheet cells and add it to the end.
            for my $machine (@machines) {
                # Get the sub-hash for this machine's genome:region key.
                my $machineH = $cells{"$machine->[0]:$machine->[1]"};
                # Map the cells into a list; empty cells become empty lists.
                my @row = map { $machineH->{$_} || [] } @roles;
                # Add the list to the machine, making it a 5-tuple.
                push @$machine, \@row;
            }
            # Store the machines in the sub-hash.
            $subHash{spreadsheet} = \@machines;
            # Store the subhash in the result hash.
            $retVal->{$id} = \%subHash;
        }
    }
    # Return the result.
    return $retVal;
}
=head3 ids_in_subsystems
my $subsysHash = $sapObject->ids_in_subsystems({
-subsystems => [$sub1, $sub2, ...],
-genome => $genome1,
-grouped => 1,
-roleForm => 1,
-source => 'UniProt'
});
Return the features of each specified subsystems in the specified genome, or
alternatively, return all features of each specified subsystem.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -subsystems
Reference to a list of the IDs for the desired subsystems.
=item -genome (optional)
ID of the relevant genome, or C<all> to return the genes in all genomes for
the subsystem. The default is C<all>.
=item -grouped (optional)
If specified, then instead of being represented in a list, the feature IDs will be
represented in a comma-delimited string.
=item -roleForm (optional)
If C<abbr>, then roles will be represented by the role abbreviation; if C<full>, then
the role will be represented by its full name; if C<none>, then roles will not be
included and there will only be a single level of hashing-- by subsystem ID. The
default is C<abbr>.
=item -source (optional)
Database source for the output IDs-- C<SEED> for FIG IDs, C<GENE> for standard
gene identifiers, or C<LocusTag> for locus tags. In addition, you may specify
C<RefSeq>, C<CMR>, C<NCBI>, C<Trembl>, or C<UniProt> for IDs from those databases.
The default is C<SEED>.
=back
=item RETURN
Returns a hash mapping each subsystem ID to a sub-hash. Each sub-hash
maps the roles of the subsystem to lists of feature IDs. The roles are recorded
as role abbreviations.
=over 8
=item Normal Output
$subsysHash = { $sub1 => { $roleAbbr1A => [$fid1Ax, $fid1Ay, ...],
$roleAbbr1B => [$fid1Bx, $fid1By, ...],
... },
$sub2 => { $roleAbbr2A => [$fid2Ax, $fid2Ay, ...],
$roleAbbr2B => [$fid2Bx, $fid2By, ...],
... },
... };
=item Output if -roleForm = full
$subsysHash = { $sub1 => { $role1A => [$fid1Ax, $fid1Ay, ...],
$role1B => [$fid1Bx, $fid1By, ...],
... },
$sub2 => { $role2A => [$fid2Ax, $fid2Ay, ...],
$role2B => [$fid2Bx, $fid2By, ...],
... },
... };
=item Output if -roleForm = none
$subsysHash = { $sub1 => [$fid1a, $fid1b, ...],
$sub2 => [$fid2a, $fid2b, ...],
... };
=back
=back
=cut
sub ids_in_subsystems {
    # Get the parameters.
    my ($self, $args) = @_;
    # Declare the return variable: maps subsystem ID to role/feature data.
    my $retVal = {};
    # Get the sapling database.
    my $sap = $self->{db};
    # Get the subsystem ID list.
    my $subsystems = ServerThing::GetIdList(-subsystems => $args);
    # Get the genome ID, defaulting to all genomes.
    my $genome = $args->{-genome} || 'all';
    # The following list forms the tail of the machine role ID pattern. If a
    # specific genome is specified, then the pattern ends with the genome ID and
    # a wild card; otherwise, it's just a wild card.
    my @patternTail;
    push @patternTail, $genome if ($genome ne 'all');
    push @patternTail, "%";
    # Process the flags and options (defaults: ungrouped output, abbreviated
    # role names, SEED feature IDs).
    my $grouped = $args->{-grouped} || 0;
    my $roleForm = $args->{-roleForm} || 'abbr';
    my $source = $args->{-source} || 'SEED';
    # The exact form of the object name list and field list depend on whether
    # we're doing full roles: full role names require traversing HasRole.
    my ($objectNameList, $fieldList);
    if ($roleForm eq 'full') {
        $objectNameList = "Contains HasRole";
        $fieldList = [qw(from-link to-link HasRole(to-link))];
    } else {
        $objectNameList = "Contains";
        $fieldList = [qw(from-link to-link)];
    }
    # Loop through the subsystems. Note we ask Sapling to normalize the
    # subsystem names before we use them.
    for my $subsys (map { $sap->SubsystemID($_) } @$subsystems) {
        # The data for this subsystem will be put in here. It could be
        # a hash (role => features) or a list (features only), depending on
        # the -roleForm option.
        my $subsysData;
        # Compute the molecular machine role pattern for this subsystem and
        # genome ID. Machine role IDs begin with the subsystem's MD5 digest.
        my $pattern = join(":", ERDB::DigestKey($subsys), @patternTail);
        Trace("Machine role pattern is \"$pattern\".") if T(3);
        # Get all the features for the genome in this subsystem. Each fidTuple
        # returned consists of the machine role ID, a feature ID, and a possible
        # role ID. The last piece of the machine role ID is the role abbreviation.
        my @fidTuples = $sap->GetAll($objectNameList, "Contains(from-link) LIKE ?",
                                     [$pattern], $fieldList);
        Trace(scalar(@fidTuples) . " features found.") if T(3);
        # Translate the feature IDs to the desired form. Each feature ID is the
        # second element (index 1) in the tuple.
        for my $fidTuple (@fidTuples) {
            $fidTuple->[1] = $sap->Alias($fidTuple->[1], $source);
        }
        # If we're not sorting by role, then simply store the list.
        if ($roleForm eq 'none') {
            $subsysData = [ map { $_->[1] } @fidTuples ];
            # Convert to a string if we're grouped.
            if ($grouped) {
                $subsysData = join(", ", @$subsysData);
            }
        } else {
            # Here we're going to create a hash of lists, keyed by role. The
            # role ID is determined by the role format.
            $subsysData = {};
            # Loop through the fid tuples.
            for my $fidTuple (@fidTuples) {
                # Get the pieces of data we need from this tuple. The role is
                # only present when -roleForm is 'full'.
                my ($machineRole, $fid, $role) = @$fidTuple;
                if (! defined $role) {
                    # Here we need to get the role abbreviation from the
                    # machine role ID (its fourth colon-delimited piece).
                    (undef, undef, undef, $role) = split /:/, $machineRole;
                }
                # Put this feature in the role hash.
                push @{$subsysData->{$role}}, $fid;
            }
            # Convert the sublists to strings if we're grouped.
            if ($grouped) {
                for my $role (keys %$subsysData) {
                    my $subList = $subsysData->{$role};
                    $subsysData->{$role} = join(", ", @$subList);
                }
            }
        }
        # Store this subsystem's data in the return value. Note the key is the
        # normalized subsystem name.
        $retVal->{$subsys} = $subsysData;
    }
    # Return the result.
    return $retVal;
}
=head3 ids_to_publications
my $featureHash = $sapObject->ids_to_publications({
-ids => [$id1, $id2, ...],
-source => 'UniProt'
});
Return the PUBMED ID and title of each publication relevant to the specified
feature IDs.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of feature IDs. Normally, these are FIG feature IDs
(e.g. C<fig|100226.1.peg.3361>, C<fig|360108.3.peg.1041>), but other
ID types are permissible if the C<source> parameter is overridden.
=item -source (optional)
Database source of the IDs specified-- C<SEED> for FIG IDs, C<GENE> for standard
gene identifiers, or C<LocusTag> for locus tags. In addition, you may specify
C<RefSeq>, C<CMR>, C<NCBI>, C<Trembl>, or C<UniProt> for IDs from those databases.
Use C<mixed> to allow mixed ID types (though this may cause problems when the same
ID has different meanings in different databases). Use C<prefixed> to allow IDs with
prefixing indicating the ID type (e.g. C<uni|P00934> for a UniProt ID, C<gi|135813> for
an NCBI identifier, and so forth). The default is C<SEED>.
=item -genome (optional)
ID of a specific genome. If specified, results will only be returned for genes in the
specified genome. The default is to return results for genes in all genomes.
=back
=item RETURN
Returns a reference to a hash mapping feature IDs to lists of 2-tuples. Each
2-tuple consists of a PUBMED ID followed by a publication title.
$featureHash = { $id1 => [[$pub1a, $title1a], [$pub1b, $title1b], ...],
$id2 => [[$pub2a, $title2a], [$pub2b, $title2b], ...],
... };
=back
=cut
sub ids_to_publications {
    my ($self, $args) = @_;
    # Connect to the underlying data stores: the Sapling database plus the
    # custom attribute store that tracks literature evidence codes.
    my $sap = $self->{db};
    require CustomAttributes;
    my $attrDB = CustomAttributes->new();
    # Unpack the incoming feature ID list.
    my $idList = ServerThing::GetIdList(-ids => $args);
    # Compute the query fragments needed to resolve IDs of the caller's
    # chosen type, honoring the optional genome restriction.
    my ($pathStart, $queryFilter, @filterParms) =
        $sap->ComputeFeatureFilter($args->{-source}, $args->{-genome});
    # This hash maps each incoming ID to its [pubmed, title] pairs.
    my %pubsFound;
    for my $featureID (@$idList) {
        # Find the proteins produced by this feature.
        my @proteins = $sap->GetFlat("$pathStart Produces", $queryFilter,
                                     [@filterParms, $featureID], 'Produces(to-link)');
        # Pull the literature evidence attributes for those proteins.
        my @evidence = $attrDB->GetAttributes([map { "Protein:$_" } @proteins],
                                              'evidence_code', 'dlit%');
        my @pubData;
        for my $evidenceRow (@evidence) {
            # A dlit evidence code embeds the PUBMED number in parentheses;
            # rows without one are skipped.
            next unless $evidenceRow->[2] =~ /dlit\((\d+)/;
            my $pubmedID = $1;
            # The publication's citation is a hyperlink whose text is the title.
            my ($citation) = $sap->GetEntityValues(Publication => $pubmedID, ['citation']);
            push @pubData, [$pubmedID, (defined $citation ? $citation->text : "<unknown>")];
        }
        # Store this ID's results (possibly an empty list).
        $pubsFound{$featureID} = \@pubData;
    }
    return \%pubsFound;
}
=head3 ids_to_subsystems
my $featureHash = $sapObject->ids_to_subsystems({
-ids => [$id1, $id2, ...],
-usable => 0,
-exclude => ['cluster-based', 'private', ...],
-source => 'RefSeq',
-subsOnly => 1
});
Return the subsystem and role for each feature in the incoming list. A
feature may have multiple roles in a subsystem and may belong to multiple
subsystems, so the role/subsystem information is returned in the form of
a list of ordered pairs for each feature.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of feature IDs. Normally, these are FIG feature IDs
(e.g. C<fig|100226.1.peg.3361>, C<fig|360108.3.peg.1041>), but other
ID types are permissible if the C<source> parameter is overridden.
=item -usable (optional)
If TRUE, then only results from usable subsystems will be included. If FALSE,
then results from all subsystems will be included. The default is TRUE.
=item -exclude (optional)
Reference to a list of special subsystem types that should be excluded from the
result list. The permissible types are C<cluster-based> and C<experimental>.
Normally cluster-based subsystems are included, but experimental subsystems
are only included if the C<-usable> option is turned off.
=item -source (optional)
Database source of the IDs specified-- C<SEED> for FIG IDs, C<GENE> for standard
gene identifiers, or C<LocusTag> for locus tags. In addition, you may specify
C<RefSeq>, C<CMR>, C<NCBI>, C<Trembl>, or C<UniProt> for IDs from those databases.
Use C<mixed> to allow mixed ID types (though this may cause problems when the same
ID has different meanings in different databases). Use C<prefixed> to allow IDs with
prefixing indicating the ID type (e.g. C<uni|P00934> for a UniProt ID, C<gi|135813> for
an NCBI identifier, and so forth). The default is C<SEED>.
=item -genome (optional)
ID of a specific genome. If specified, results will only be returned for genes in the
specified genome. The default is to return results for genes in all genomes.
=item -subsOnly (optional)
If TRUE, instead of a list of (role, subsystem) 2-tuples, each feature ID will be
mapped to a simple list of subsystem names. The default is FALSE.
=back
=item RETURN
Returns a reference to a hash mapping feature IDs to lists of 2-tuples. Each
2-tuple consists of a role name followed by a subsystem name. If a feature is
not in a subsystem, it will not be present in the return hash.
=over 8
=item Normal Output
$featureHash = { $id1 => [[$role1a, $sub1a], [$role1b, $sub1b], ...],
$id2 => [[$role2a, $sub2a], [$role2b, $sub2b], ...],
... };
=item Output if -subsOnly = 1
$featureHash = { $id1 => [$sub1a, $sub1b, ...],
$id2 => [$sub2a, $sub2b, ...],
... };
=back
=back
=cut
sub ids_to_subsystems {
    my ($self, $args) = @_;
    my $sap = $self->{db};
    # Unpack the incoming feature ID list.
    my $idList = ServerThing::GetIdList(-ids => $args);
    # Build the query path, filter clause, and leading parameter values for
    # the requested ID type and (optional) genome restriction.
    my ($path, $filterClause, @baseParms) =
        $sap->ComputeFeatureFilter($args->{-source}, $args->{-genome});
    # Fold in the subsystem usability/exclusion conditions.
    ServerThing::AddSubsystemFilter(\$filterClause, $args);
    # This hash receives the per-feature results.
    my %results;
    for my $featureID (@$idList) {
        Trace("Looking for subsystems of feature $featureID.") if T(3);
        # Retrieve the (role, subsystem) pairs for this feature.
        my @pairs = $sap->GetAll("$path IsContainedIn MachineRole IsRoleFor Implements Variant IsDescribedBy Subsystem AND MachineRole HasRole",
                                 $filterClause, [@baseParms, $featureID],
                                 [qw(HasRole(to-link)
                                     IsDescribedBy(to-link))]);
        # Features with no subsystem data are left out of the result hash.
        next unless @pairs;
        if ($args->{-subsOnly}) {
            # The caller only wants subsystem names: dedupe and sort them.
            my %names = map { $_->[1] => 1 } @pairs;
            $results{$featureID} = [ sort keys %names ];
        } else {
            # Otherwise return the full (role, subsystem) pairs.
            $results{$featureID} = \@pairs;
        }
    }
    return \%results;
}
=head3 is_in_subsystem
my $featureHash = $sapObject->is_in_subsystem({
-ids => [$fid1, $fid2, ...],
-usable => 0,
-exclude => [$type1, $type2, ...]
});
Return the subsystem and role for each specified feature.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of the FIG feature IDs for the features of interest.
=item -usable (optional)
If TRUE, then only results from usable subsystems will be included. If FALSE,
then results from all subsystems will be included. The default is TRUE.
=item -exclude (optional)
Reference to a list of special subsystem types that should be excluded from the
result list. The permissible types are C<cluster-based> and C<experimental>.
Normally cluster-based subsystems are included, but experimental subsystems
are only included if the C<-usable> option is turned off.
=back
For backward compatibility, the parameter may also be a reference to a list
of FIG feature IDs.
=item RETURN
Returns a reference to a hash that maps each incoming feature ID to a list of
2-tuples, each 2-tuple consisting of (0) the ID of a subsystem containing the
feature and (1) the feature's role in that subsystem. If an incoming feature is
not in any subsystem, its ID will be mapped to an empty list.
$featureHash = { $fid1 => [[$sub1a, $role1a], [$sub1b, $role1b], ...],
                     $fid2 => [[$sub2a, $role2a], [$sub2b, $role2b], ...],
... };
In backward-compatible mode, returns a reference to a list of 3-tuples, each
3-tuple consisting of (0) a subsystem ID, (1) a role ID, and (2) the ID of a
feature from the input list.
=back
=cut
sub is_in_subsystem {
    my ($self, $args) = @_;
    my $sap = $self->{db};
    # Detect the old-style call signature (a bare list of feature IDs) and
    # normalize it to the hash form, remembering that the output must then
    # be a flat list rather than a hash.
    my $listMode = 0;
    unless (ref $args eq 'HASH') {
        $args = { -ids => $args };
        $listMode = 1;
    }
    # Start the filter with the feature-ID condition, then append any
    # subsystem usability/exclusion conditions requested.
    my $filterClause = 'Feature(id) = ?';
    ServerThing::AddSubsystemFilter(\$filterClause, $args);
    # Unpack the feature ID list.
    my $idList = ServerThing::GetIdList(-ids => $args);
    # Map each feature to its (subsystem, role) pairs. Features in no
    # subsystem map to an empty list.
    my %pairsFor;
    for my $fid (@$idList) {
        $pairsFor{$fid} = [ $sap->GetAll("Feature IsContainedIn MachineRole HasRole Role AND MachineRole IsRoleFor MolecularMachine Implements Variant IsDescribedBy Subsystem",
                                         $filterClause, [$fid], [qw(Subsystem(id) Role(id))]) ];
    }
    # In list mode, flatten the hash into (subsystem, role, fid) 3-tuples,
    # keeping the features in input order.
    if ($listMode) {
        return [ map { my $f = $_; map { [@$_, $f] } @{$pairsFor{$f}} } @$idList ];
    }
    return \%pairsFor;
}
=head3 is_in_subsystem_with
my $featureHash = $sapObject->is_in_subsystem_with({
-ids => [$fid1, $fid2, ...],
-usable => 0,
-exclude => [$type1, $type2, ...]
});
For each incoming feature, returns a list of the features in the same genome that
are part of the same subsystem. For each other feature returned, its role,
functional assignment, subsystem variant, and subsystem ID will be returned as
well.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of the FIG feature IDs for the features of interest.
=item -usable (optional)
If TRUE, then only results from usable subsystems will be included. If FALSE,
then results from all subsystems will be included. The default is TRUE.
=item -exclude (optional)
Reference to a list of special subsystem types that should be excluded from the
result list. The permissible types are C<cluster-based> and C<experimental>.
Normally cluster-based subsystems are included, but experimental subsystems
are only included if the C<-usable> option is turned off.
=back
For backward compatibility, the parameter may also be a reference to a list
of FIG feature IDs.
=item RETURN
Returns a reference to a hash that maps each incoming feature ID to a list
of 5-tuples relating to features in the same subsystem. Each 5-tuple contains
(0) a subsystem ID, (1) a variant ID, (2) the related feature ID, (3) the
related feature's functional assignment, and (4) the related feature's role
in the subsystem.
$featureHash = { $fid1 => [[$sub1a, $variant1a, $fid1a, $function1a, $role1a],
[$sub1b, $variant1b, $fid1b, $function1b, $role1b], ...],
$fid2 => [[$sub2a, $variant2a, $fid2a, $function2a, $role2a],
[$sub2b, $variant2b, $fid2b, $function2b, $role2b], ...],
... };
In backward-compatibility mode, returns a reference to a list of lists. Each
sub-list contains 6-tuples relating to a single incoming feature ID. Each
6-tuple consists of a subsystem ID, a variant ID, the incoming feature ID, the
other feature ID, the other feature's functional assignment, and the other
feature's role in the subsystem.
=back
=cut
sub is_in_subsystem_with {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sapling = $self->{db};
    # Declare the return variable. It is initialized to an empty hash so the
    # method always returns a hash reference (as documented) in hash mode;
    # previously an empty incoming ID list yielded an undef return.
    my $retVal = {};
    # This will be set to TRUE if we are in backward-compatible mode.
    my $backwardMode = 0;
    # Convert a list to a hash.
    if (ref $args ne 'HASH') {
        $args = { -ids => $args };
        $backwardMode = 1;
    }
    # Create the filter clause. It contains at least a feature filter.
    my $filter = 'Feature(id) = ?';
    # Add any required subsystem filtering (usability/exclusion options).
    ServerThing::AddSubsystemFilter(\$filter, $args);
    # Get the fig IDs from the parameters.
    my $ids = ServerThing::GetIdList(-ids => $args);
    foreach my $fid (@$ids) {
        # For each incoming feature, find every feature sharing one of its
        # molecular machines, along with the subsystem, variant, function,
        # and role data for the related feature.
        my @resultRows = $sapling->GetAll("Feature IsContainedIn MachineRole IsRoleFor MolecularMachine Implements Variant IsDescribedBy Subsystem AND MolecularMachine IsMachineOf MachineRole2 Contains Feature2 AND MachineRole2 HasRole Role",
                                          $filter, [$fid],
                                          [qw(Subsystem(id) Variant(code)
                                              Feature2(id) Feature2(function)
                                              Role(id))]);
        $retVal->{$fid} = \@resultRows;
    }
    # If this is backward-compatibility mode, convert the result to a list.
    if ($backwardMode) {
        my @outList;
        for my $fid (@$ids) {
            my $fidList = $retVal->{$fid};
            if (! defined $fidList) {
                push @outList, [];
            } else {
                # Because the incoming feature ID is no longer available as the
                # hash key, we need to put it back into the output tuples. It goes
                # in the third position (offset 2).
                for my $fidTuple (@$fidList) {
                    splice @$fidTuple, 2, 0, $fid;
                }
                push @outList, $fidList;
            }
        }
        $retVal = \@outList;
    }
    # Return the result.
    return $retVal;
}
=head3 pegs_implementing_roles
my $roleHash = $sapObject->pegs_implementing_roles({
-subsystem => $subsysID,
-roles => [$role1, $role2, ...]
});
Given a subsystem and a list of roles, return a list of the subsystem's
features for each role.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 4
=item -subsystem
ID of a subsystem.
=item -roles
Reference to a list of roles.
=back
For backward compatibility, the parameter can also be a reference to a 2-tuple
consisting of (0) a subsystem ID and (1) a reference to a list of roles.
=item RETURN
Returns a hash that maps each role ID to a list of the IDs for the features that
perform the role in that subsystem.
$roleHash = { $role1 => [$fid1a, $fid1b, ...],
$role2 => [$fid2a, $fid2b, ...],
... };
In backward-compatibility mode, returns a list of 2-tuples. Each tuple consists
of a role and a reference to a list of the features in that role.
=back
=cut
sub pegs_implementing_roles {
    my ($self, $args) = @_;
    my $sap = $self->{db};
    # The subsystem spreadsheet logic lives in the SaplingSubsys helper.
    require SaplingSubsys;
    # Recognize the legacy parameter form: a 2-tuple of [subsystem, roles].
    my $listMode = 0;
    unless (ref $args eq 'HASH') {
        $args = { -subsystem => $args->[0], -roles => $args->[1] };
        $listMode = 1;
    }
    # Unpack the subsystem name and the role list.
    my $subName = $args->{-subsystem};
    my $roleList = ServerThing::GetIdList(-roles => $args);
    my $retVal = {};
    if (! defined $subName) {
        # A subsystem is mandatory.
        Confess("Subsystem ID not specified.");
    } else {
        # Normalize the name and load the subsystem spreadsheet.
        my $spreadsheet = SaplingSubsys->new($sap->SubsystemID($subName), $sap);
        # A missing subsystem produces an empty result.
        if (defined $spreadsheet) {
            # Map each requested role to the features performing it.
            $retVal->{$_} = [ $spreadsheet->pegs_for_role($_) ] for @$roleList;
        }
    }
    if ($listMode) {
        # Legacy output: [role, featureList] pairs in input order.
        $retVal = [ map { [$_, $retVal->{$_}] } @$roleList ];
    }
    return $retVal;
}
=head3 pegs_in_subsystems
my $subsysHash = $sapObject->pegs_in_subsystems({
-genomes => [$genome1, $genome2, ...],
-subsystems => [$sub1, $sub2, ...]
});
This method takes a list of genomes and a list of subsystems and returns
a list of the roles represented in each genome/subsystem pair.
=over 4
=item parameter
Reference to a hash of parameter values with the following possible keys.
=over 8
=item -genomes
Reference to a list of genome IDs.
=item -subsystems
Reference to a list of subsystem IDs.
=back
For backward compatibility, the parameter may also be a reference to a 2-tuple,
the first element of which is a list of genome IDs and the second of which is a
list of subsystem IDs.
=item RETURN
Returns a reference to a hash of hashes. The main hash is keyed by subsystem ID.
Each subsystem's hash is keyed by role ID and maps the role to a list of
the feature IDs for that role in the subsystem that belong to the specified
genomes.
$subsysHash = { $sub1 => { $role1A => [$fid1Ax, $fid1Ay, ...],
$role1B => [$fid1Bx, $fid1By, ...],
... },
$sub2 => { $role2A => [$fid2Ax, $fid2Ay, ...],
$role2B => [$fid2Bx, $fid2By, ...],
... },
... };
In backward-compatibility mode, returns a list of 2-tuples. Each tuple consists
of a subsystem ID and a second 2-tuple that contains a role ID and a reference
to a list of the feature IDs for that role that belong to the specified genomes.
=back
=cut
sub pegs_in_subsystems {
    my ($self, $args) = @_;
    my $sap = $self->{db};
    # The subsystem spreadsheet logic lives in the SaplingSubsys helper.
    require SaplingSubsys;
    # Recognize the legacy parameter form: a 2-tuple of [genomes, subsystems].
    my $listMode = 0;
    unless (ref $args eq 'HASH') {
        $args = { -genomes => $args->[0], -subsystems => $args->[1] };
        $listMode = 1;
    }
    # Unpack the genome and subsystem ID lists.
    my $genomeList = ServerThing::GetIdList(-genomes => $args);
    my $subList = ServerThing::GetIdList(-subsystems => $args);
    my $retVal = {};
    for my $subName (@$subList) {
        # Normalize the subsystem name and load its spreadsheet into memory.
        my $spreadsheet = SaplingSubsys->new($sap->SubsystemID($subName), $sap);
        # Subsystems that can't be found are omitted from the output.
        next unless defined $spreadsheet;
        # Accumulate features by role across all of the requested genomes.
        my %roleMap;
        for my $genomeID (@$genomeList) {
            for my $pair ($spreadsheet->get_roles_for_genome($genomeID, 1)) {
                my ($roleID, $fidList) = @$pair;
                push @{$roleMap{$roleID}}, @$fidList;
            }
        }
        $retVal->{$subName} = \%roleMap;
    }
    if ($listMode) {
        # Legacy output: flatten into [sub, [role, featureList]] pairs, with
        # the subsystems in input order.
        my @flat;
        for my $subName (@$subList) {
            my $roleMap = $retVal->{$subName};
            next unless defined $roleMap;
            push @flat, map { [$subName, [$_, $roleMap->{$_}]] } keys %$roleMap;
        }
        $retVal = \@flat;
    }
    return $retVal;
}
# Synonym for "pegs_in_subsystems" provided for backward compatibility.
sub pegs_in_subsystem {
    # Delegate directly; @_ still contains the invocant, so the target method
    # sees the identical argument list.
    return pegs_in_subsystems(@_);
}
=head3 pegs_in_variants
my $subsysHash = $sapObject->pegs_in_variants({
-genomes => [$genomeA, $genomeB, ...],
-subsystems => [$sub1, $sub2, ...]
});
This method takes a list of genomes and a list of subsystems and returns
a list of the pegs represented in each genome/subsystem pair.
The main difference between this method and L</pegs_in_subsystems> is in
the organization of the output, which is more like a subsystem spreadsheet.
=over 4
=item parameter
Reference to a hash of parameter values with the following possible keys.
=over 8
=item -genomes (optional)
Reference to a list of genome IDs. If the list is omitted, all genomes will be
included in the output (which will be rather large in most cases).
=item -subsystems
Reference to a list of subsystem IDs.
=back
=item RETURN
Returns a reference to a hash mapping subsystem IDs to sub-hashes. Each sub-hash
is keyed by genome ID and maps the genome ID to a list containing the variant code
and one or more n-tuples, each n-tuple containing a role ID followed by a list of
the genes in the genome having that role in the subsystem.
$subsysHash = { $sub1 => { $genomeA => [$vc1A,
[$role1Ax, $fid1Ax1, $fid1Ax2, ...],
[$role1Ay, $fid1Ay1, $fid1Ay2, ...],
...],
$genomeB => [$vc1B,
[$role1Bx, $fid1Bx1, $fid1Bx2, ...],
[$role1By, $fid1By1, $fid1By2, ...],
...],
... },
$sub2 => { $genomeA => [$vc2A,
[$role2Ax, $fid2Ax1, $fid2Ax2, ...],
[$role2Ay, $fid2Ay1, $fid2Ay2, ...],
...],
$genomeB => [$vc2B,
[$role2Bx, $fid2Bx1, $fid2Bx2, ...],
[$role2By, $fid2By1, $fid2By2, ...],
...],
... },
... };
Note that in some cases the genome ID will include a region string. This happens
when the subsystem has multiple occurrences in the genome.
=back
=cut
sub pegs_in_variants {
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sap = $self->{db};
    # Declare the return variable.
    my $retVal = {};
    # Get the genome IDs. The extra argument presumably marks the list as
    # optional, consistent with the POD's description of -genomes -- confirm
    # against ServerThing::GetIdList.
    my $genomes = ServerThing::GetIdList(-genomes => $args, 1);
    # Form them into a hash for fast membership testing.
    my %genomeH = map { $_ => 1 } @$genomes;
    # If no genomes were specified, we allow all of them.
    my $allGenomes = (! @$genomes);
    # Get the subsystem IDs.
    my $subs = ServerThing::GetIdList(-subsystems => $args);
    # Loop through the subsystems.
    for my $sub (@$subs) {
        # This will be the hash for this subsystem, keyed by genome ID
        # (possibly suffixed with a region string).
        my %subHash;
        # Get this subsystem's molecular machines: one per implementation of
        # the subsystem in a genome.
        my @machines = $sap->GetAll("Describes Variant IsImplementedBy MolecularMachine IsUsedBy",
                                    'Describes(from-link) = ?', [$sub], [qw(Variant(code)
                                    MolecularMachine(id) MolecularMachine(region)
                                    IsUsedBy(to-link))]);
        # Loop through the machines, pausing on the ones related to our genomes.
        for my $machine (@machines) {
            # Get this machine's data.
            my ($variant, $machineID, $region, $genome) = @$machine;
            # Only proceed if it's a genome of interest.
            if ($allGenomes || $genomeH{$genome}) {
                # Get the features and roles for this machine. Each tuple is
                # (role ID, feature ID).
                my @ssData = $sap->GetAll("IsMachineOf MachineRole HasRole AND MachineRole Contains",
                                          'IsMachineOf(from-link) = ?', [$machineID],
                                          [qw(HasRole(to-link) Contains(to-link))]);
                # Only proceed if we found some.
                if (@ssData) {
                    # Create the full genome ID using the region. A region is
                    # present when the subsystem occurs multiple times in the
                    # same genome.
                    my $genomeID = $genome;
                    if ($region) {
                        $genomeID .= ":$region";
                    }
                    # The cells will be accumulated in here; the variant code
                    # is always the row's first element.
                    my @row = $variant;
                    # Get the first role.
                    my $role = $ssData[0][0];
                    # This will accumulate the current cell (the features for
                    # the current role).
                    my @cell;
                    # NOTE(review): the run-length grouping below assumes the
                    # query returns all tuples for a given role contiguously --
                    # confirm that the result ordering guarantees this.
                    for my $ssTuple (@ssData) {
                        my ($newRole, $fid) = @$ssTuple;
                        # If this is a new role, output the old cell.
                        if ($newRole ne $role) {
                            push @row, [$role, @cell];
                            $role = $newRole;
                            @cell = $fid;
                        } else {
                            # It's the old role, so add the gene to the cell.
                            push @cell, $fid;
                        }
                    }
                    # Store this genome's data in the subsystem's hash, flushing
                    # the final (role, features) cell onto the row.
                    $subHash{$genomeID} = [@row, [$role, @cell]];
                }
            }
        }
        # Store this subsystem's data in the return hash.
        $retVal->{$sub} = \%subHash;
    }
    # Return the result.
    return $retVal;
}
=head3 roles_exist_in_subsystem
my $rolesHash = $sapObject->roles_exist_in_subsystem({
-subsystem => $sub1,
-roles => [$role1, $role2, ...]
});
Indicate which roles in a given list belong to a specified subsystem.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -subsystem
The name of the subsystem of interest.
=item -roles
A reference to a list of role IDs.
=back
=item RETURN
Returns a reference to a hash mapping each incoming role ID to C<1> if it
exists in the specified subsystem and C<0> otherwise.
$roleHash = { $role1 => $flag1, $role2 => $flag2, ... };
=back
=cut
sub roles_exist_in_subsystem {
    # Determine which of the incoming roles belong to the specified
    # subsystem. Returns a hash ref mapping each role ID to 1 (present)
    # or 0 (absent).
    my ($self, $args) = @_;
    # The Sapling database gives us access to the subsystem data.
    my $sap = $self->{db};
    # Pull the role list out of the parameters.
    my $roles = ServerThing::GetIdList(-roles => $args);
    # The subsystem name is required; fail loudly if it is missing.
    my $subsystem = $args->{-subsystem};
    Confess("No -subsystem specified.") if ! defined $subsystem;
    # Convert the name to its normalized database form.
    $subsystem = $sap->SubsystemID($subsystem);
    # Build a membership set of every role in the subsystem.
    my %found = map { $_ => 1 } $sap->GetFlat("Includes",
                                              "Includes(from-link) = ?",
                                              [$subsystem],
                                              'Includes(to-link)');
    # Map each incoming role to its membership flag.
    my $retVal = { map { $_ => ($found{$_} ? 1 : 0) } @$roles };
    # Return the membership hash.
    return $retVal;
}
=head3 roles_to_subsystems
my $roleHash = $sapObject->roles_to_subsystems({
-roles => [$role1, $role2, ...],
-usable => 0
});
Return the subsystems containing each specified role.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -roles
Reference to a list of role names.
=item -usable (optional)
If TRUE, only usable subsystems will be returned. If FALSE, all subsystems
will be returned. The default is TRUE.
=back
=item RETURN
Returns a reference to a hash mapping each incoming role to a list of
the names of subsystems containing that role.
$roleHash = { $role1 => [$sub1a, $sub1b, ...],
$role2 => [$sub2a, $sub2b, ...],
... };
=back
=cut
sub roles_to_subsystems {
    # Return the subsystems containing each specified role.
    # Parameters: hash ref with -roles (list of role names) and an optional
    # -usable flag; per the POD, -usable defaults to TRUE (restrict to
    # usable subsystems).
    # Returns a hash ref mapping each role to a list ref of subsystem names.
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # Declare the return hash.
    my $retVal = {};
    # Get the incoming roles.
    my $roles = ServerThing::GetIdList(-roles => $args);
    # Get the usability flag. FIX: default to TRUE when the caller omits
    # -usable, matching the documented contract; the previous "|| 0"
    # default silently included unusable subsystems.
    my $usable = defined $args->{-usable} ? $args->{-usable} : 1;
    # Create the filter clause.
    my $filter = "IsIncludedIn(from-link) = ?";
    if ($usable) {
        $filter .= " AND Subsystem(usable) = 1";
    }
    # Loop through the roles.
    for my $role (@$roles) {
        # Get the subsystems for this role.
        my (@subs) = $sap->GetFlat("IsIncludedIn Subsystem", $filter,
                                   [$role], 'Subsystem(id)');
        # Store them in the return hash.
        $retVal->{$role} = \@subs;
    }
    # Return the result hash.
    return $retVal;
}
=head3 rows_of_subsystems
my $subHash = $sapObject->rows_of_subsystems({
-subs => [$sub1, $sub2, ...],
-genomes => [$genomeA, $genomeB, ...],
...
});
Return the subsystem row for each subsystem/genome pair. A row in this
case consists of a reference to a hash mapping role names to a list of
the FIG feature IDs for the features in the genome performing that
role.
In the Sapling database, a subsystem row is represented by the
B<MolecularMachine> entity. The strategy of this method is therefore
to find the molecular machine for each subsystem/genome pair, and
then use its ID to get the roles and features.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -subs
Reference to a list of subsystem IDs.
=item -genomes
Reference to a list of genome IDs.
=back
=item RETURN
Returns a reference to a hash mapping each incoming subsystem ID to
a sub-hash keyed by genome ID. In the sub-hash, each genome ID will
map to a sub-sub-hash that maps role names to lists of feature IDs.
$subHash = { $sub1 => { $genomeA => { $role1Aa => [$fid1Aax, $fid1Aay, ... ],
$role1Ab => [$fid1Abx, $fid1Aby, ... ],
... },
$genomeB => { $role1Ba => [$fid1Bax, $fid1Bay, ... ],
$role1Bb => [$fid1Bbx, $fid1Bby, ... ],
... },
... },
$sub2 => { $genomeA => { $role2Aa => [$fid2Aax, $fid2Aay, ... ],
$role2Ab => [$fid2Abx, $fid2Aby, ... ],
... },
$genomeB => { $role2Ba => [$fid2Bax, $fid2Bay, ... ],
$role2Bb => [$fid2Bbx, $fid2Bby, ... ],
... },
... },
... };
=back
=cut
sub rows_of_subsystems {
    # Return the spreadsheet row for each subsystem/genome pair: a hash of
    # subsystem ID => { genome ID => { role => [fids] } }. A row is located
    # via the MolecularMachine linking the subsystem to the genome.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sap = $self->{db};
    # Extract the subsystem and genome ID lists from the parameters.
    my $subList = ServerThing::GetIdList(-subs => $args);
    my $genomeList = ServerThing::GetIdList(-genomes => $args);
    # The results accumulate in here.
    my $retVal = {};
    for my $subID (@$subList) {
        # Convert the subsystem ID to its normalized database form.
        my $normalized = $sap->SubsystemID($subID);
        # Maps each genome to its row for this subsystem.
        my %genomeRows;
        for my $genomeID (@$genomeList) {
            # Maps each role in this row to its feature list.
            my %roleFids;
            # Locate the (at most one) molecular machine for this pair.
            my ($machineID) = $sap->GetFlat("Describes IsImplementedBy MolecularMachine IsUsedBy",
                                            'Describes(from-link) = ? AND IsUsedBy(to-link) = ?',
                                            [$normalized, $genomeID], 'MolecularMachine(id)');
            if ($machineID) {
                # Pull the role/feature pairs and bucket features by role.
                my @pairs = $sap->GetAll("IsMachineOf MachineRole HasRole AND MachineRole Contains",
                                         'IsMachineOf(from-link) = ?', [$machineID],
                                         [qw(HasRole(to-link) Contains(to-link))]);
                push @{$roleFids{$_->[0]}}, $_->[1] for @pairs;
            }
            $genomeRows{$genomeID} = \%roleFids;
        }
        $retVal->{$subID} = \%genomeRows;
    }
    # Return the accumulated rows.
    return $retVal;
}
=head3 subsystem_data
my $subsysHash = $sapObject->subsystem_data({
-ids => [$sub1, $sub2, ...],
-field => 'version'
});
For each incoming subsystem ID, return the specified data field. This
method can be used to find the curator, description, or version of the
specified subsystems.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of subsystem IDs.
=item -field (optional)
Name of the desired data field-- C<curator> to retrieve the name of each
subsystem's curator, C<version> to get the subsystem's version number,
or C<description> to get the subsystem's description, or C<notes> to
get the subsystem's notes. The default is C<description>.
=back
=item RETURN
Returns a hash mapping each incoming subsystem ID to the associated data
value.
$subsysHash = { $sub1 => $value1, $sub2 => $value2, ... };
=back
=cut
# Set of Subsystem entity field names that subsystem_data is allowed to
# retrieve; anything else is rejected with a Confess.
use constant VALID_SUB_DATA_FIELDS => { curator => 1,
                                        description => 1,
                                        version => 1,
                                        notes => 1
                                    };
sub subsystem_data {
    # For each incoming subsystem ID, return the requested data field
    # (curator, description, version, or notes; default "description").
    # IDs with no value for the field are omitted from the result hash.
    my ($self, $args) = @_;
    # The results are collected in here.
    my $retVal = {};
    # Get the sapling database.
    my $sap = $self->{db};
    # Determine which field the caller wants; reject invalid names.
    my $field = $args->{-field} || 'description';
    if (! VALID_SUB_DATA_FIELDS->{$field}) {
        Confess("Invalid subsystem field \"$field\" specified.");
    } else {
        # Extract the subsystem ID list from the parameters.
        my $idList = ServerThing::GetIdList(-ids => $args);
        for my $subID (@$idList) {
            # Normalize the ID before the lookup.
            my $normalized = $sap->SubsystemID($subID);
            # Retrieve the field value for this subsystem.
            my ($value) = $sap->GetEntityValues(Subsystem => $normalized, [$field]);
            # Only record subsystems that actually have a value.
            $retVal->{$subID} = $value if defined $value;
        }
    }
    # Return the field-value map.
    return $retVal;
}
=head3 subsystem_genomes
my $subHash = $sapObject->subsystem_genomes({
-ids => [$sub1, $sub2, ...],
-all => 1
});
For each subsystem, return the genomes that participate in it and their
associated variant codes.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of the names of the subsystems whose genome information is
desired.
=item -all (optional)
If TRUE, then all genomes associated with the subsystem will be listed. The
default is FALSE, meaning that only genomes that completely implement the
subsystem will be listed.
=back
=item RETURN
Returns a reference to a hash that maps each subsystem ID to a sub-hash. Each
sub-hash in turn maps the ID of each genome that participates in the subsystem
to its variant code.
$subHash = { $sub1 => { $genome1a => $code1a, $genome1b => $code1b, ...},
$sub2 => { $genome2a => $code2a, $genome2b => $code2b, ...},
... };
=back
=cut
sub subsystem_genomes {
    # For each subsystem, return the genomes that participate in it mapped
    # to their variant codes. Unless -all is TRUE, only variants of type
    # 'normal' (complete implementations) are included.
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sap = $self->{db};
    # Get the list of subsystem IDs.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Compute the filter clause for the variants.
    my $filter = 'Describes(from-link) = ?';
    if (! $args->{-all}) {
        # FIX: the appended clause needs a leading space; without it the
        # filter became "...= ?AND Variant(type)..." and was malformed.
        $filter .= " AND Variant(type) = 'normal'";
    }
    # Declare the return variable.
    my $retVal = {};
    # Loop through the subsystem IDs. NOTE(review): unlike sibling methods,
    # this one does not normalize IDs via SubsystemID -- confirm whether
    # callers always pass normalized names.
    for my $id (@$ids) {
        # Get the genome and variant information.
        my %genomes = map { $_->[0] => $_->[1] }
            $sap->GetAll("Describes Variant IsImplementedBy IsUsedBy",
                         $filter, [$id], [qw(IsUsedBy(to-link) Variant(code))]);
        # Store it in the result hash.
        $retVal->{$id} = \%genomes;
    }
    # Return the result.
    return $retVal;
}
=head3 subsystem_names
my $nameList = $sapObject->subsystem_names({
-usable => 0,
-exclude => ['cluster-based', ...]
});
Return a list of all subsystems in the database.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -usable (optional)
If TRUE, then only results from usable subsystems will be included. If FALSE,
then results from all subsystems will be included. The default is TRUE.
=item -exclude (optional)
Reference to a list of special subsystem types that should be excluded from the
result list. The permissible types are C<cluster-based> and C<experimental>.
Normally cluster-based subsystems are included, but experimental subsystems
are only included if the C<-usable> option is turned off.
=back
=item RETURN
Returns a reference to a list of subsystem names.
$nameList = [$sub1, $sub2, ...];
=back
=cut
sub subsystem_names {
    # Return a list ref of all subsystem names, filtered according to the
    # -usable and -exclude options (applied by AddSubsystemFilter).
    my ($self, $args) = @_;
    # Build the filter string from the caller's options.
    my $filter = "";
    ServerThing::AddSubsystemFilter(\$filter, $args);
    # Query the database and hand back the IDs directly.
    return [ $self->{db}->GetFlat("Subsystem", $filter, [], 'id') ];
}
=head3 subsystem_roles
my $subHash = $sapObject->subsystem_roles({
-ids => [$sub1, $sub2, ...],
-aux => 1
});
Return the list of roles for each subsystem, in order.
=over 4
=item parameter
Reference to a hash of parameters with the following possible keys.
=over 8
=item -ids
Reference to a list of subsystem IDs.
=item -aux (optional)
If TRUE, auxiliary roles will be included. The default is FALSE,
which excludes auxiliary roles.
=item -abbr (optional)
If TRUE, then the role abbreviations will be included in the results. In
this case, each subsystem name will be mapped to a list of 2-tuples, with
each 2-tuple consisting of (0) the role name and (1) the role abbreviation.
The default is FALSE (normal output).
=back
=item RETURN
Return a hash mapping each subsystem ID to a list of roles (normal) or
a list of role/abbreviation pairs (extended output).
=over 8
=item Output if -abbr is FALSE
$subHash = { $sub1 => [$role1a, $role1b, ...],
$sub2 => [$role2a, $role2b, ...],
... };
=item Output if -abbr is TRUE
$subHash = { $sub1 => [[$role1a, $abbr1a],
[$role1b, $abbr1b], ...],
$sub2 => [[$role2a, $abbr2a],
[$role2b, $abbr2b], ...],
... };
=back
=back
=cut
sub subsystem_roles {
    # Return the ordered role list for each subsystem. Auxiliary roles are
    # excluded unless -aux is TRUE; with -abbr each entry becomes a
    # [role, abbreviation] pair instead of a bare role name.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sap = $self->{db};
    # Extract the subsystem ID list from the parameters.
    my $idList = ServerThing::GetIdList(-ids => $args);
    # Build the filter: optionally drop auxiliary roles, then impose the
    # subsystem's column ordering.
    my $filter = 'Subsystem(id) = ?';
    $filter .= ' AND Includes(auxiliary) = 0' unless $args->{-aux};
    $filter .= ' ORDER BY Includes(sequence)';
    # Results accumulate in here.
    my $retVal = {};
    # Remember whether abbreviations were requested.
    my $wantAbbr = $args->{-abbr};
    for my $subID (@$idList) {
        # Normalize the subsystem ID.
        my $normalized = $sap->SubsystemID($subID);
        # Retrieve the roles in the requested output form.
        my @roleData;
        if ($wantAbbr) {
            @roleData = $sap->GetAll("Subsystem Includes Role", $filter,
                                     [$normalized],
                                     'Role(id) Includes(abbreviation)');
        } else {
            @roleData = $sap->GetFlat("Subsystem Includes Role", $filter,
                                      [$normalized], 'Role(id)');
        }
        $retVal->{$subID} = \@roleData;
    }
    # Return the role map.
    return $retVal;
}
=head3 subsystem_spreadsheet
my $subsysHash = $sapObject->subsystem_spreadsheet({
-ids => [$sub1, $sub2, ...]
});
This method takes a list of subsystem IDs, and for each one returns a
list of the features in the subsystem. For each feature, it will include
the feature's functional assignment, the subsystem name and variant
(spreadsheet row), and its role (spreadsheet column).
=over 4
=item parameter
Reference to a hash of parameters with the following possible keys.
=over 8
=item -ids
Reference to a list of subsystem IDs.
=back
For backward compatibility, this method can also accept a reference to a list of
subsystem IDs.
=item RETURN
Returns a hash mapping each incoming subsystem ID to a list of 4-tuples. Each
tuple contains (0) a variant ID, (1) a feature ID, (2) the feature's functional
assignment, and (3) the feature's role in the subsystem.
$subsysHash = { $sub1 => [[$variant1a, $fid1a, $function1a, $role1a],
[$variant1b, $fid1b, $function1b, $role1b], ...],
$sub2 => [[$variant2a, $fid2a, $function2a, $role2a],
[$variant2b, $fid2b, $function2b, $role2b], ...],
... };
In backward-compatibility mode, returns a list of 5-tuples. Each tuple contains
(0) a subsystem ID, (1) a variant ID, (2) a feature ID, (3) the feature's
functional assignment, and (4) the feature's role in the subsystem.
=back
=cut
sub subsystem_spreadsheet {
    # Return the spreadsheet data for each subsystem: a hash of subsystem
    # ID => list of [variant, fid, function, role] 4-tuples. If the caller
    # passes a bare list ref instead of a parameter hash, the method runs
    # in backward-compatibility mode and returns a flat list of 5-tuples
    # with the subsystem ID prepended to each row.
    # Get the parameters.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sapling = $self->{db};
    # Declare the return variable. FIX: initialize to an empty hash so an
    # empty -ids list yields {} rather than undef (consistent with the
    # other methods in this module).
    my $retVal = {};
    # Check for the backward-compatible mode.
    my $backwardMode = 0;
    if (ref $args ne 'HASH') {
        $args = { -ids => $args };
        $backwardMode = 1;
    }
    # Get the list of subsystem IDs.
    my $ids = ServerThing::GetIdList(-ids => $args);
    # Loop through the subsystem IDs.
    foreach my $subsysName (@$ids) {
        # Normalize the subsystem ID.
        my $subsysID = $sapling->SubsystemID($subsysName);
        # Get the subsystem's spreadsheet data, ordered by variant and
        # spreadsheet column.
        my @resultRows = $sapling->GetAll("Subsystem Describes Variant IsImplementedBy MolecularMachine IsMachineOf MachineRole Contains Feature AND MachineRole HasRole Role Includes Subsystem",
                                          'Subsystem(id) = ? ORDER BY Variant(id), Includes(sequence)',
                                          [$subsysID], [qw(Variant(id)
                                                          Feature(id)
                                                          Feature(function)
                                                          Role(id))]);
        $retVal->{$subsysName} = \@resultRows;
    }
    # In backward-compatible mode, convert the hash to a list.
    if ($backwardMode) {
        # We'll build the list in here.
        my @listForm;
        for my $subsysName (@$ids) {
            # Get this subsystem's spreadsheet and paste in the subsystem ID.
            my $spreadsheet = $retVal->{$subsysName};
            for my $row (@$spreadsheet) {
                unshift @$row, $subsysName;
            }
            # Put it into the output.
            push @listForm, @$spreadsheet;
        }
        # Return the list.
        $retVal = \@listForm;
    }
    # Return the result.
    return $retVal;
}
=head3 subsystem_type
my $subsysHash = $sapObject->subsystem_type({
-ids => [$sub1, $sub2, ...],
-type => 'cluster-based'
});
For each incoming subsystem, return TRUE if it has the specified
characteristic, else FALSE.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of subsystem names.
=item -type
Name of the subsystem characteristic of interest. The default is C<usable>. The
possible characteristics are
=over 8
=item cluster-based
A I<cluster-based> subsystem is one in which there is functional-coupling
evidence that genes belong together, but we do not yet know what they do.
=item experimental
An I<experimental> subsystem is designed for investigation and is not yet ready to
be used in comparative analysis and annotation.
=item private
A I<private> subsystem has valid data, but is not considered ready for general
distribution.
=item usable
An unusable subsystem is one that is experimental or is of such low quality that
it can negatively affect analysis. A I<usable> subsystem is one that is not
unusable.
=back
=back
=item RETURN
Returns a hash mapping the incoming subsystem names to TRUE/FALSE flags indicating
the value of the specified characteristic.
$subsysHash = { $sub1 => $flag1, $sub2 => $flag2, ... };
=back
=cut
# Set of Subsystem characteristic flags that subsystem_type is allowed to
# query; anything else is rejected with a Confess.
use constant VALID_SUBSYSTEM_TYPES => { 'cluster-based' => 1,
                                        experimental => 1,
                                        private => 1,
                                        usable => 1
                                    };
sub subsystem_type {
    # For each incoming subsystem, return the value of the requested
    # characteristic flag (cluster-based, experimental, private, or
    # usable; default "usable").
    my ($self, $args) = @_;
    # Get the Sapling database.
    my $sap = $self->{db};
    # The flag values are collected in here.
    my $retVal = {};
    # Extract the subsystem ID list from the parameters.
    my $idList = ServerThing::GetIdList(-ids => $args);
    # Determine which characteristic the caller wants; reject bad names.
    my $type = $args->{-type} || 'usable';
    if (! VALID_SUBSYSTEM_TYPES->{$type}) {
        Confess("Invalid subsystem type \"$type\" specified.");
    } else {
        for my $subID (@$idList) {
            # Normalize the subsystem ID.
            my $normalized = $sap->SubsystemID($subID);
            # Fetch the flag value and record it.
            my ($flag) = $sap->GetEntityValues(Subsystem => $normalized, [$type]);
            $retVal->{$subID} = $flag;
        }
    }
    # Return the flag map.
    return $retVal;
}
=head3 subsystems_for_role
my $roleHash = $sapObject->subsystems_for_role({
-ids => [$role1, $role2, ...],
-usable => 1,
-exclude => ['cluster-based', ...]
});
For each role, return a list of the subsystems containing that role. The
results can be filtered to include unusable subsystems or exclude subsystems
of certain exotic types.
=over 4
=item parameter
The parameter should be a reference to a hash with the following keys.
=over 8
=item -ids
Reference to a list of the IDs of the roles of interest.
=item -aux (optional)
If TRUE, then subsystems in which the role is auxiliary will be included.
The default is not to include such subsystems.
=item -usable (optional)
If TRUE, then only results from usable subsystems will be included. If FALSE,
then results from all subsystems will be included. The default is TRUE.
=item -exclude (optional)
Reference to a list of special subsystem types that should be excluded from the
result list. The permissible types are C<cluster-based> and C<experimental>.
Normally cluster-based subsystems are included, but experimental subsystems
are only included if the C<-usable> option is turned off.
=back
=item RETURN
Returns a reference to a hash that maps each incoming role ID to a list of
subsystem names.
$roleHash = { $role1 => [$ss1a, $ss1b, ...],
$role2 => [$ss2a, $ss2b, ...],
... };
=back
=cut
sub subsystems_for_role {
    # For each role, return the names of the subsystems containing it.
    # Auxiliary placements are excluded unless -aux is TRUE; the -usable
    # and -exclude options further restrict which subsystems count.
    my ($self, $args) = @_;
    # Get the sapling database.
    my $sapling = $self->{db};
    # Build the filter: start with the role match, optionally exclude
    # auxiliary placements, then apply the standard subsystem filtering.
    my $filter = 'Role(id) = ?';
    $filter .= " AND IsIncludedIn(auxiliary) = 0" unless $args->{-aux};
    ServerThing::AddSubsystemFilter(\$filter, $args);
    # Extract the role IDs from the parameters.
    my $roleIDs = ServerThing::GetIdList(-ids => $args);
    # Collect the subsystem list for each role.
    my %results;
    for my $roleID (@$roleIDs) {
        $results{$roleID} = [ $sapling->GetFlat("Role IsIncludedIn Subsystem",
                                                $filter, [$roleID], 'Subsystem(id)') ];
    }
    # Return the role-to-subsystems map.
    return \%results;
}
1;
| kbase/kb_seed | lib/SAP.pm | Perl | mit | 388,298 |
#! /usr/bin/perl
#
# MITObim - mitochondrial baiting and iterative mapping
# wrapper script version 1.6
# Author: Christoph Hahn, 2012-2013
# christoph.hahn@nhm.uio.no
#
use strict;
use warnings;
use Getopt::Long;
use Cwd qw(abs_path);
use File::Copy;
use List::Util qw< min max >;
use POSIX qw(strftime);
use POSIX qw(ceil);
use File::Path 'rmtree';
# Iteration window: by default MITObim runs exactly one iteration.
my $startiteration = 1;
my $enditeration = 1;
# Command-line flags and tunables with their defaults. Note the two
# non-zero defaults buried in the list: $k_bait (baiting kmer) = 31 and
# $clean_interval = 2; everything else starts off/0.
my ($quick, $noshow, $help, $strainname, $paired, $mode, $refname, $readpool, $maf, $proofreading, $readlength, $insertsize, $MM, $trim, $k_bait, $clean, $clean_interval) = (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 31, 0, 2);
# Working variables filled in during option handling and the main loop.
my ($miramode, $key, $val, $exit, $current_number_of_contigs, $current_number_of_reads, $iontor, $Roche454);
# Sequencing platform defaults to illumina ("solexa"/"SOLEXA"); switched
# below when --iontor or --454 is given.
my $platform = "solexa";
my $platform_settings = "SOLEXA";
# Extra MIRA switch used in proofreading mode (allowed mismatches).
my $shme = "";
# MIRA executable names; replaced with absolute paths if --mirapath is set.
my ($mirapath, $mira, $convert_project, $mirabait) = ("", "mira", "convert_project", "mirabait");
my $trim_off = "";
my (@reads, @output, @number_of_contigs, @current_contig_stats, @path, @contiglengths, @number_of_reads);
# Maps read-pair base IDs to their /1,/2 suffixes during pair recovery.
my %hash;
# Citation blurb (stored here; printed elsewhere in the script).
my $cite = "\nif you found MITObim useful, please cite:
Hahn C, Bachmann L and Chevreux B. (2013) Reconstructing mitochondrial genomes directly from genomic next-generation sequencing reads -
a baiting and iterative mapping approach. Nucl. Acids Res. 41(13):e129. doi: 10.1093/nar/gkt371\n\n";
# Help text shown on --help or on argument errors.
my $USAGE = "\nusage: ./MITObim.pl <parameters>\n
\nparameters:\n
-start <int> iteration to start with, default=1
-end <int> iteration to end with, default=1
-strain <string> strainname as used in initial MIRA assembly
-ref <string> referencename as used in initial MIRA assembly
-readpool <FILE> readpool in fastq format
-maf <FILE> maf file from previous MIRA assembly\n
\noptional:\n
--quick <FILE> starts process with initial baiting using provided fasta reference
--kbait <int> set kmer for baiting stringency (default: 31)
--denovo runs MIRA in denovo mode, default: mapping
--pair finds pairs after baiting (relies on /1 and /2 ID convention for read pairs), default: no
--noshow do not show output of MIRA modules
--help shows this helpful information
--clean retain only the last 2 iteration directories
--trim trim data (we recommend to trim beforehand and feed MITObim with pre trimmed data)
--iontor use data produced by iontorrent (experimental - default is illumina data)
--454 use 454 data (experimental - default is illumina data)
--mirapath <string> full path to MIRA binaries (only needed if MIRA is not in PATH)
--proofread applies proofreading (atm only to be used if starting the process from a single short seed reference)
--readlength <int> read length of illumina library, default=150, needed for proofreading
--insert <int> insert size of illumina library, default=300, needed for proofreading
\nexamples:\n
./MITObim.pl -start 1 -end 5 -strain StrainX -ref reference-mt -readpool illumina_readpool.fastq -maf initial_assembly.maf
./MITObim.pl -end 10 --quick reference.fasta -strain StrainY -ref reference-mt -readpool illumina_readpool.fastq\n";
# Banner strings printed at startup.
my $PROGRAM = "\nMITObim - mitochondrial baiting and iterative mapping\n";
my $VERSION = "version 1.6\n";
my $AUTHOR = "author: Christoph Hahn, (c) 2012-2013\n\n";
# Reconstruct the full command line (for the run log) before Getopt::Long
# consumes @ARGV.
my $command = $0;
for (@ARGV){
    $command .= " $_";
}
# Parse options. Getopt::Long accepts unambiguous abbreviations, which is
# how the documented -strain/-proofread/-insert forms match the longer
# spec names below ("strainname", "proofreading", "insertsize").
GetOptions ( "start=i" => \$startiteration,
            "end=i" => \$enditeration,
            "quick=s" => \$quick,
            "noshow!" => \$noshow,
            "kbait=i" => \$k_bait,
            "strainname=s" => \$strainname,
            "paired" => \$paired,
            "denovo" => \$mode,
            "ref=s" => \$refname,
            "readpool=s" => \$readpool,
            "clean!" => \$clean,
            "help!" => \$help,
            "maf=s" => \$maf,
            "mirapath=s" => \$mirapath,
            "proofreading!" => \$proofreading,
            "trim!" => \$trim,
            "iontor!" => \$iontor,
            "454!" => \$Roche454,
            "readlength=i" => \$readlength,
            "insertsize=i" => \$insertsize) or die "Incorrect usage!\n$USAGE";
# Print the banner, then bail out early on --help or on missing/
# inconsistent required arguments.
print $PROGRAM;
print $VERSION;
print $AUTHOR;
print $USAGE and exit if $help;
print $USAGE and exit if ($startiteration > $enditeration);
print $USAGE and exit if !$readpool;
unless ($quick){
    print $USAGE and exit if !$maf;
}
print $USAGE and exit if !$refname;
$readpool=abs_path($readpool);
# NOTE(review): this check only warns without exiting; the combined
# existence check further below is what actually aborts the run.
unless (-e $readpool){
    print "Cant find the readpool. Is the path correct?\n";
}
if ($maf){
    $maf=abs_path($maf);
    unless (-e $maf){
        print "Cant find *.maf file. Is the path correct?\n";
    }
}
if ($quick){
    $quick=abs_path($quick);
    unless (-e $quick){
        print "quick option selected but is the path to the file correct?\n";
        exit;
    }
    print "quick option selected! -maf option will be ignored (if given)\n";
    # --quick supersedes any -maf and starts the pipeline at iteration 0,
    # where the provided fasta reference seeds the first baiting round.
    $maf = 0;
    $startiteration = 0;
}
# Hard stop unless we have a readpool AND either a maf file or --quick.
unless (((-e $maf)||($quick)) && (-e $readpool)){
    print "\nAre readpool AND maf files there?\n";
    exit;
}
# If --mirapath was given, verify it and switch to absolute executable paths.
if ($mirapath){
    if (-e "$mirapath/mira"){
        print "found executables in the path specified by the user - good!\n";
        $mira = "$mirapath/mira";
        $convert_project = "$mirapath/convert_project";
        $mirabait = "$mirapath/mirabait";
    }else{
        print "somethings wrong with the path to mira.\n";
        exit 1;
    }
}
##if not given otherwise, readlength and insertsize are set to default. automatic readlength and insertsize detection will be implemented in time.
if (!$readlength){
    $readlength = 150;
}
if (!$insertsize){
    $insertsize = 300;
}
# Translate the --denovo flag into the MIRA job mode string.
if (!$mode){
    $miramode = "mapping";
}else {
    $miramode = "denovo";
}
if (!$trim){
    # Without --trim, clipping is explicitly switched off in the MIRA call.
    $trim_off = "\"--noclipping -CL:pec=no\"";
}
# Platform overrides for iontorrent and 454 data.
if ($iontor){
    $platform = "iontor";
    $platform_settings = "IONTOR";
}
if ($Roche454){
    $platform = "454";
    $platform_settings = "454";
}
# Echo the effective parameter set so a run is reproducible from its log.
print "\nFull command run: $command\n";
print "\nAll paramters seem to make sense:\n";
print "startiteration: $startiteration\n";
print "enditeration: $enditeration\n";
print "strainname: $strainname\n";
print "refname: $refname\n";
print "readpool: $readpool\n";
print "maf: $maf\n";
print "quick: $quick\n";
print "paired: $paired\n";
print "denovo: $mode (mapping=0, denovo=1)\n";
print "noshow: $noshow\n";
print "read trimming: $trim (off=0, on=1)\n";
print "kmer baiting: $k_bait\n";
print "platform: $platform_settings\n";
print "clean: $clean (off=0, on=1)\n";
print "proofread: $proofreading\n";
if ($proofreading){
    print "readlength: $readlength\n";
    print "insertsize: $insertsize\n";
    print "number of allowed missmatches in proofreading assembly: $MM\n";
}
print "\nStarting MITObim \n";
# The main loop (below) iterates from -start to -end inclusive.
my @iteration = ($startiteration .. $enditeration);
foreach (@iteration){
chomp;
my $currentiteration = $_;
mkdir "iteration$currentiteration" or die "MITObim will not overwrite an existing directory: iteration$currentiteration\nExit\n";
chdir "iteration$currentiteration" or die $!;
print "\n==============\n";
print " ITERATION $currentiteration\n";
print "==============\n";
print strftime("%b %e %H:%M:%S", localtime) . "\n\n";
# if (($proofreading) && ($currentiteration != 0)){
if ($proofreading){
$shme = "-AL:shme=$MM";
}
if ($maf){
print "\nrecover backbone by running convert_project on maf file\n";
@output= (`$convert_project -f maf -t fasta -A "$platform_settings\_SETTINGS -CO:fnicpst=yes" $maf tmp 2>&1`);
$exit = $? >> 8;
unless ($noshow){
print "@output\n";
}
unless ($exit == 0){
print "\nconvert_project seems to have failed - see detailed output above\n";
exit;
}
if ( ((($mode) && ($currentiteration > 1)) && (!$quick)) || ((($mode) && ($currentiteration >= 1)) && ($quick)) ){
open(FH1,"<tmp_default.unpadded.fasta") or die "$!";
}else{
open(FH1,"<tmp_$strainname.unpadded.fasta") or die "$!";
}
open(FH2,">$strainname-$refname\_backbone_in.fasta") or die "$!";
while (<FH1>) {
$_ =~ s/@/N/g;
print FH2 $_;
}
close(FH1);
close(FH2);
unlink glob ("tmp*");
}
MIRABAIT:
unless ($maf){
print "\nquick option baits reads from provided reference in iteration 0\n";
copy("$quick", "$strainname-$refname\_backbone_in.fasta") or die "copy failed: $!";
}
&check_ref_length("$strainname-$refname\_backbone_in.fasta","temp_baitfile.fasta",29800);
print "\nfishing readpool using mirabait (k = $k_bait)\n\n";
@output = (`$mirabait -k $k_bait -n 1 temp_baitfile.fasta $readpool $strainname-$refname\_in.$platform 2>&1`);
$exit = $? >> 8;
unless ($noshow){
print "@output\n";
}
unless ($exit == 0){
print "\nmirabait seems to have failed - see detailed output above\n";
exit;
}
FINDPAIRS:
unless (!$paired){
print "\nfind pairs to baited reads\n";
open(FH1,"<$strainname-$refname\_in.$platform.fastq") or die $!;
open(FH2,">list");
my $index=1;
while (<FH1>) {
if ($index % 8 ==1 || $index % 8 ==5) {
chomp;
$_ =~ s/@//g;
if ($_ =~ /\/[0-9]$/){
($key, $val) = split /\//;
}else {
print "\nYou have chosen to use the paired option in MITObim.\nTo successfully identify read pairing, MITObim 1.6 expects the read headers in the following format:\ne.g.\n \@HWUSI-EAS100R:6:73:941:1973#0/1\n \@HWUSI-EAS100R:6:73:941:1973#0/2\n\n";
exit;
}
$hash{$key} .= exists $hash{$key} ? ",$val" : $val;
}
$index++;
}
for (keys %hash){
$_ =~ s/$/\/1/g;
print FH2 "$_\n";
$_ =~ s/1$/2/g;
print FH2 "$_\n";
}
close(FH1);
close(FH2);
@output = (`$convert_project -f fastq -t fastq -n list $readpool $strainname-$refname\_in.$platform 2>&1`);
$exit = $? >> 8;
unless ($noshow){
print "@output\n";
}
unless ($exit == 0){
print "\nconvert_project seems to have failed - see detailed output above\n";
exit;
}
}
unlink("list");
MIRA:
print "\nrunning assembly using MIRA v3.4\n\n";
@output = (`$mira --project=$strainname-$refname --job=$miramode,genome,accurate,$platform $trim_off -notraceinfo -MI:somrnl=0 -AS:nop=1 -SB:bsn=$refname:bft=fasta:bbq=30 $platform_settings\_SETTINGS -CO:msr=no -GE:uti=$paired $shme -SB:dsn=$strainname 2>&1`);
$exit = $? >> 8;
unless ($noshow){
print "@output\n";
}
unless ($exit == 0){
print "\nMIRA seems to have failed - see detailed output above\n";
exit;
}
@path = abs_path;
push (@path, "/$strainname-$refname\_assembly/$strainname-$refname\_d_results/$strainname-$refname\_out.maf");
$maf = join("",@path);
unless (-e $maf){
print "maf file is not there \n";
exit;
}
@current_contig_stats = &get_contig_stats("$strainname-$refname\_assembly/$strainname-$refname\_d_info/$strainname-$refname\_info_contigstats.txt");
if (((scalar @current_contig_stats > 3) || ($current_contig_stats[0] > 1)) && ($proofreading)) {
print "assembly consists of more than one contigs - this is atm not permitted in proofreading mode. Sorry!\n\n";
exit 1;
}
PROOFREAD:
# if (($proofreading) && ($currentiteration >= 1)){
if ($proofreading){
print "\n Proofreading\n\n";
my $contigreadlist = "$strainname-$refname\_assembly/$strainname-$refname\_d_info/$strainname-$refname\_info_contigreadlist.txt";
my $readtaglist = "$strainname-$refname\_assembly/$strainname-$refname\_d_info/$strainname-$refname\_info_readtaglist.txt";
print "assessing coverage between positions 0 and ".(2*$insertsize)." in current contig\n";
my @coverage_limits_lower = &assess_coverage($readtaglist, 0, (2*$insertsize), "lower");
print "assessing coverage between positions ".($current_contig_stats[2] - (2*$insertsize))." and ".($current_contig_stats[2])." in current contig\n";
my @coverage_limits_upper = &assess_coverage($readtaglist, ($current_contig_stats[2] - (2*$insertsize)), ($current_contig_stats[2]), "upper");
print "\nScreening orphan reads and discarding potentially dubious reads\n";
open(OUT,">list");
# print OUT &proofread($contigreadlist, $contigreadlist_1MM);
print OUT &proofread($contigreadlist, $readtaglist, $current_contig_stats[2], $coverage_limits_lower[0], $coverage_limits_lower[1], $coverage_limits_upper[0], $coverage_limits_upper[1], (1.3*$readlength), (2*$insertsize), $noshow);
close(OUT);
print "\ngenerating proofread readpool\n";
@output = (`$convert_project -f fastq -t fastq -n list $strainname-$refname\_in.$platform.fastq $strainname-$refname-proofread\_in.$platform 2>&1`);
$exit = $? >> 8;
unless ($noshow){
print "@output\n";
}
unless ($exit == 0){
print "\nconvert_project seems to have failed - see detailed output above\n";
exit;
}
copy("$strainname-$refname\_backbone_in.fasta", "$strainname-$refname-proofread\_backbone_in.fasta") or die "copy failed: $!";
print "\nrunning proofread assembly using MIRA v3.4\n\n";
@output = (`$mira --project=$strainname-$refname-proofread --job=$miramode,genome,accurate,$platform $trim_off -notraceinfo -MI:somrnl=0 -AS:nop=1 -SB:bsn=$refname:bft=fasta:bbq=30 $platform_settings\_SETTINGS -CO:msr=no -GE:uti=yes:tismin=100:tismax=600 $shme -SB:dsn=$strainname 2>&1`);
$exit = $? >> 8;
unless ($noshow){
print "@output\n";
}
unless ($exit == 0){
print "\nMIRA seems to have failed - see detailed output above\n";
exit;
}
@path = abs_path;
push (@path, "/$strainname-$refname-proofread\_assembly/$strainname-$refname-proofread\_d_results/$strainname-$refname-proofread\_out.maf");
$maf = join("",@path);
unless (-e $maf){
print "maf file is not there \n";
exit;
}
@current_contig_stats = &get_contig_stats("$strainname-$refname-proofread_assembly/$strainname-$refname-proofread_d_info/$strainname-$refname-proofread_info_contigstats.txt");
if ((scalar @current_contig_stats > 3) || ($current_contig_stats[0] > 1)){
print "assembly consists of more than one contigs - this is atm not permitted in proofreading mode. Sorry!\n\n";
exit 1;
}
}
$current_number_of_contigs = shift @current_contig_stats;
$current_number_of_reads = shift @current_contig_stats;
if (!$mode){ #in mapping assemblies the reference is counted as one read
$current_number_of_reads -= $current_number_of_contigs;
}
push (@number_of_contigs, $current_number_of_contigs);
push (@number_of_reads, $current_number_of_reads);
print "readpool contains $current_number_of_reads reads\n";
print "assembly contains $current_number_of_contigs contig(s)\n";
if (scalar @current_contig_stats == 1){
print "contig length: $current_contig_stats[0]\n";
}elsif (scalar @current_contig_stats == 3){
print "min contig length: ".$current_contig_stats[0]." bp\nmax contig length: ".$current_contig_stats[1]." bp\navg contig length: ".sprintf("%.0f", $current_contig_stats[2])." bp\n";
print "find details on individual contigs in: ". abs_path . "/$strainname-$refname\_assembly/$strainname-$refname\_d_info/$strainname-$refname\_info_contigstats.txt\n";
}else {
print "somethings wrong with your contig stats. Sorry!\n";
exit 1;
}
if ($clean){
&clean($clean_interval, $currentiteration);
}
if ($number_of_reads[-2]){
if ($number_of_reads[-2] >= $number_of_reads[-1]){
print "\nMITObim has reached a stationary read number after $currentiteration iterations!!\n$cite";
print strftime("%b %e %H:%M:%S", localtime) . "\n\n";
exit;
}
}
chdir ".." or die "Failed to go to parent directory: $!";
}
# final summary once all requested iterations ran to completion
print "\nsuccessfully completed $enditeration iterations with MITObim! " . strftime("%b %e %H:%M:%S", localtime) . "\n$cite";
#
#
###SUBROUTINES
#
#
#
# Parse a MIRA *_info_contigstats.txt file and summarize the assembly.
#
# Arg: path to the contigstats file (tab-separated; lines containing
# '#' are treated as headers and skipped; column 1 is the contig
# length, column 3 the number of reads).
#
# Returns, for a single contig: (contig count, read count, length).
# For multiple contigs: (contig count, total reads, min length,
# max length, mean length).  Returns an empty list when no contig
# rows were found.
sub get_contig_stats{
my $statsfile = $_[0];
my (@lengths, @reads, @summary);
open (CONTIGSTATS,"<$statsfile") or die $!;
while (my $row = <CONTIGSTATS>){
next if $row =~ /#/;    # skip comment/header rows
my @col = split /\t/, $row;
push @lengths, $col[1];
push @reads, $col[3];
}
close (CONTIGSTATS);
if (scalar @reads == 1){
# single contig: (number of contigs, reads, contig length)
@summary = (scalar @reads, $reads[0], $lengths[0]);
}
elsif (scalar @reads > 1){
# multiple contigs: (count, total reads, min, max, mean length)
my $total_reads = 0;
$total_reads += $_ for @reads;
my @mean_sd = &standard_deviation(@lengths);
@summary = (scalar @reads, $total_reads, (min @lengths), (max @lengths), $mean_sd[0]);
}
return @summary;
}
# Proofread a single-contig assembly: build the list of reads to keep
# for the next (proofread) readpool.  Read pairs are kept unless both
# mates map close to the same contig end; orphan reads (only one mate
# mapped) are rescued only when their mapping interval does not fall
# into a suspicious region (a contig end, the contig middle, or a
# detected elevated-coverage window).
#
# Positional args:
#   0: contigreadlist file path (currently unused in the body)
#   1: readtaglist file path (MIRA info_readtaglist)
#   2: contig length
#   3,4: start/end of the elevated-coverage window near the contig
#        start (0/0 when assess_coverage found none)
#   5,6: start/end of the elevated-coverage window near the contig end
#   7: "lower limit" distance from the contig ends (caller passes
#      1.3 * read length)
#   8: "lower main limit" (caller passes 2 * insert size)
#   9: verbosity flag; truthy suppresses the per-read log output
#
# Returns the read names to keep, one per line, each template expanded
# into its /1 and /2 mate names.
sub proofread {
my $zero_MM = $_[0];	# NOTE(review): never referenced below
my $readtaglist_FH = $_[1];
my $contiglength = $_[2];
my $elevated_cov_lower_start = $_[3];
my $elevated_cov_lower_end = $_[4];
my $elevated_cov_upper_start = $_[5];
my $elevated_cov_upper_end = $_[6];
my $lower_limit = $_[7];
# my $lower_limit = 200;
my $lower_main_limit = $_[8];
# my $lower_main_limit = 500;
my $verb = $_[9];
# mirror the lower-end windows onto the upper contig end
my $upper_limit = $contiglength - $lower_limit;
my $upper_main_limit = $contiglength - $lower_main_limit;
my @readtaglist;
my $ref;
my $junk;
my $current_id;
my %count;
my @readid_good;
my @readid_bad;
my @reads;
my @readlist;
my @readlist_good;
my @readlist_proofread;
my @total_readlist;
my @singleton;
my @singletons;
my @taglist;
my @taglist_line;
my @readtaglist_lower;
my @readtaglist_upper;
my @read_ids_lower;
my @read_ids_all;
my @read_ids_upper;
my %ids =();
my @unsorted;
my $min;
my $max;
my $tag;
unless ($verb){
print "\nlower limit: $lower_limit\n";
print "upper limit: $upper_limit\n";
print "lower main limit: $lower_main_limit\n";
print "upper main limit: $upper_main_limit\n\n";
}
# slurp the whole readtaglist file
open (TAGLIST,"<$readtaglist_FH") or die $!;
while (<TAGLIST>){
push (@readtaglist, "$_");
}
close (TAGLIST);
# bin every tag ("contig\tread" pairs) by its mapping coordinates:
# near the contig start (lower), near the contig end (upper), and all
for (@readtaglist){
@taglist_line = split /\t/;
unless ($taglist_line[0] =~ /#/){
$ref = join ("\t", $taglist_line[0], $taglist_line[6]);
push (@read_ids_all, $ref);
if (($taglist_line[1] <= $lower_limit) || ($taglist_line[2] <= $lower_limit)){
# if ((($taglist_line[1] <= $lower_limit) || (($taglist_line[1] >= $coverage_limits_lower[0])&&($taglist_line[1] <= $coverage_limits_lower[1]))) || ($taglist_line[2] <= $lower_limit)){
$ref = join ("\t", $taglist_line[0], $taglist_line[6]);
push (@read_ids_lower, $ref);
}elsif (($taglist_line[1] >= $upper_limit) || ($taglist_line[2] >= $upper_limit)){
$ref = join ("\t", $taglist_line[0], $taglist_line[6]);
push (@read_ids_upper, $ref);
}
}
}
# de-duplicate each bin
%ids = map { $_ => 1 } @read_ids_lower;
my @unique_lower = keys %ids;
%ids = map { $_ => 1 } @read_ids_upper;
my @unique_upper = keys %ids;
%ids = map { $_ => 1 } @read_ids_all;
my @unique_all = keys %ids;
# strip the /1 or /2 mate suffix so mates of one template collapse
# onto the same key in %count
for (@unique_all) {
my @junk = split /\//;
push (@reads, $junk[0]);
@junk = split /\t/;
push (@total_readlist, $junk[1]);
}
map { $count{$_}++ } @reads;
# count==2: both mates mapped (candidate pair); count==1: orphan
map {if ($count{$_} == 2){ @readid_good = split /\t/; push(@readlist, "$readid_good[1]");} elsif ($count{$_} == 1) { push(@readid_bad, "$_");}} keys (%count);
@reads = {};	# NOTE(review): assigns a single hashref; "@reads = ();" was probably intended -- harmless here since @reads is not used again
undef %count;
# keep a pair unless both of its tags fall onto the same contig end
for (@readlist){
chomp;
$current_id = $_;
my @pairs_lower = grep { $_ =~ /$current_id/} @unique_lower;
my @pairs_upper = grep{ $_ =~ /$current_id/} @unique_upper;
# print "good id: $current_id\n";
my $count_lower = scalar @pairs_lower;
my $count_upper = scalar @pairs_upper;
# print "count lower: $count_lower\n";
# print "count upper: $count_upper\n";
unless ((scalar @pairs_lower == 2) || (scalar @pairs_upper == 2)){
push (@readlist_good, "$current_id");
}
}
# decide the fate of every orphan based on its mapping interval
for (@readid_bad){
chomp;
@unsorted = ();
($junk, $current_id) = split (/\t/);
@singleton = grep { $_ =~ /$current_id/} @total_readlist;
for (@singleton){
chomp;
$tag = $_;
@taglist = grep { $_ =~ /$tag/} @readtaglist;
# print "taglist: @taglist\n";
}
# min/max mapping coordinate over all of the orphan's tags
for (@taglist) {
@taglist_line = split /\t/;
push(@unsorted, $taglist_line[1], $taglist_line[2]);
$max = max @unsorted;
$min = min @unsorted;
}
# print "unsorted: @unsorted\n";
# NOTE(review): the whole rescue decision (including the push onto
# @singletons) only runs when $verb is false, i.e. orphans are never
# resurrected in no-show mode -- confirm whether that is intended.
unless ($verb){
print "current ID: $current_id\n";
print "read mapping from $min to $max\n";
# print "min: $min\n";
# print "max: $max\n";
if ($min <= $lower_limit){
print "orphan discarded! min<lowerlimit\n----------------------------------------------\n";
}elsif ($max >= $upper_limit){
print "orphan discarded! max>upperlimit\n----------------------------------------------\n";
}elsif (($min >= $lower_main_limit) && ($max <= $upper_main_limit)){
print "orphan discarded! lower_main_limit<min-max<upper_main_limit\n----------------------------------------------\n";
}elsif (($min >= $elevated_cov_lower_start) && ($min <= $elevated_cov_lower_end - ($lower_limit / 2))){
print "orphan discarded! increased_coverage_lower_start<min<increased_coverage_lower_end\n----------------------------------------------\n";
}elsif (($max >= ($elevated_cov_upper_start + ($lower_limit / 2))) && ($max <= $elevated_cov_upper_end)){
print "orphan discarded! increased_coverage_upper_start<max<increased_coverage_upper_end\n----------------------------------------------\n";
}else {
push(@singletons, "@singleton\n");
print "orphan resurrected! \n----------------------------------------------\n";
}
}
# print "contiglength: $contiglength\n";
}
# surviving orphans rejoin the keep list (template name only)
for (@singletons){
my @resurrection = split /\//;
push (@readlist_good, $resurrection[0]);
}
# expand every kept template into its /1 and /2 read names
for (@readlist_good){
$_ =~ s/$/\/1\n/g;
push(@readlist_proofread, $_);
$_ =~ s/1$/2/g;
push(@readlist_proofread, $_);
}
return @readlist_proofread;
}
# Compute the arithmetic mean and the (population, i.e. divide-by-N)
# standard deviation of a list of numbers.  False entries (undef, 0,
# '') are treated as 0 but still count toward the sample size, matching
# the original behaviour.
#
# Returns (mean, standard_deviation).  For an empty input list a bare
# "return;" yields the empty list: all callers call this in list
# context, where the previous "return undef" produced a one-element
# list containing undef instead of an empty result.
sub standard_deviation {
my(@numbers) = @_;
# prevent division by 0 on junk/empty input
return unless(scalar(@numbers));
# step 1: the mean
my $total = 0;
foreach my $num (@numbers) {
$total += ($num || 0);
}
my $mean = $total / (scalar @numbers);
# step 2: mean of the squared deviations from the mean
my $sq_total = 0;
foreach my $num (@numbers) {
$sq_total += ($mean - ($num || 0))**2;
}
my $variance = $sq_total / (scalar @numbers);
# step 3: standard deviation is the square root of the variance
return ("$mean", sqrt($variance));
}
# Scan the readtaglist for the contig window [$from, $to] and try to
# detect a region of irregular coverage (e.g. a putative conserved
# region attracting reads from elsewhere).
#
# Args: readtaglist file path, window start, window end, and a label
# ("lower"/"upper") used to name the out-<label>.csv debug file.
#
# Returns (start, end) of the suspicious region, or (0, 0) when no
# irregularity was detected or the coverage peak did not confirm it.
sub assess_coverage{
my $readtaglist_FH = $_[0];
my @readtaglist;
my $from =$_[1];
my $to = $_[2];
my $where = $_[3];
my @taglist_line;
my @coverage_array_lower;
my @coverage_array_upper;
my @read_ids_lower;
my @read_ids_upper;
my %ids;
my @taglist;
my @unsorted;
my $min;
my $max;
my %coverage;
my @allnums;
my @coverage_change_position;
my @coverage_limits;
# print "assessing coverage from position $from to position $to\n";
# slurp the readtaglist
open (TAGLIST,"<$readtaglist_FH") or die $!;
while (<TAGLIST>){
push (@readtaglist, "$_");
}
close (TAGLIST);
# collect all tags whose start or end coordinate lies in [$from, $to]
for (@readtaglist){
@taglist_line = split /\t/;
unless ($taglist_line[0] =~ /#/){
if ((($taglist_line[1] >= $from) && ($taglist_line[1] <= $to)) || (($taglist_line[2] >= $from) && ($taglist_line[2] <= $to))){
push (@coverage_array_lower, "$_");
push (@read_ids_lower, $taglist_line[6]);
}
}
}
%ids = map { $_ => 1 } @read_ids_lower;
my @unique_lower = keys %ids;
# per-base coverage: every read contributes one count for each position
# between its min and max mapping coordinate
for (@unique_lower){
my @current_id = $_;
chomp;
@unsorted = ();
for (@current_id){
my $current_id = $_;
@taglist = grep { $_ =~ /$current_id/} @coverage_array_lower;
}
for (@taglist) {
@taglist_line = split /\t/;
push(@unsorted, $taglist_line[1], $taglist_line[2]);
$max = max @unsorted;
$min = min @unsorted;
}
my @nums = ($min .. $max);
for (@nums){
push (@allnums, "$_");
}
}
%coverage = map { $_ => 0 } @allnums;
map { $coverage{$_}++ } @allnums;
open (OUT,">out-$where.csv");
########## detecting coverage peak
my $max_cov = 0;
my $max_cov_position;
my @cumulative_coverage;
# dump per-position coverage to the CSV and remember peak height/position
map { unless (!$coverage{$_}){print OUT "$_,$coverage{$_}\n"; push (@cumulative_coverage, "$coverage{$_}"); if ($coverage{$_} > $max_cov){$max_cov = $coverage{$_}; $max_cov_position = $_; }}} ($from..$to);
my @average_coverage = &standard_deviation(@cumulative_coverage);
my $coverage_factor = $max_cov / $average_coverage[0];
# re-opening OUT implicitly closes (and flushes) the first handle
open (OUT,">>out-$where.csv");
print OUT "\nmaximum coverage is $max_cov at position $max_cov_position\naverge coverage is: $average_coverage[0], sd: $average_coverage[1]\nfactor $coverage_factor\n";
close (OUT);
######### detecting rapid changes in coverage
# slide an 11-position window; high standard deviation inside the
# window marks a rapid coverage change
for ($from..($to - 10)){
my $position = $_;
my $cov = $coverage{$position};
unless (!$cov){
my @positions = ();
push (@positions, "$cov");
for (1 .. 10){
my $pos_plus = $position + $_;
if ($coverage{$pos_plus}){
push (@positions, "$coverage{$pos_plus}");
}
}
my @stdev = &standard_deviation(@positions);
if ($stdev[1] > 6.0){
print "positions ($position): @positions -> stdev: $stdev[1]\n";
push (@coverage_change_position, $position);
}elsif ($stdev[1] >= 4.5){
print "positions ($position): @positions -> stdev: $stdev[1]\n";
}
}
}
if (@coverage_change_position){
print "positions with rapidly changing coverage detected: @coverage_change_position\n";
my $start = min @coverage_change_position;
my $end = max @coverage_change_position;
push (@coverage_limits, "$start", "$end");
print "set limits from $coverage_limits[0] to $coverage_limits[1]\n";
}else{
print "no irregularities in coverage detected\n";
push (@coverage_limits, "0", "0");
return @coverage_limits;
}
###### assessing whether coverage peak lies within putative conserved region, if yes accept prediction; if no, reject conserved region
if (($coverage_factor >= 1.6) && (($coverage_limits[0] < $max_cov_position) && ( $max_cov_position < $coverage_limits[1]))){
print "suspicious coverage peak detected within the predicted limits\n";
}else {
print "no coverage peak detected within predicted limits - rejecting limits\n";
@coverage_limits = ();
push (@coverage_limits, "0", "0");
}
return @coverage_limits;
}
# Read a (multi-)FASTA reference file, pass each record through
# &finalize_sequence (which splits sequences longer than $critical into
# overlapping sub-sequences for mirabait), and write the result to
# $output_filename.
#
# Args:
#   $ref             - path to the input FASTA file
#   $output_filename - path the (possibly split) FASTA is written to
#   $critical        - maximum sequence length; passed straight
#                      through to &finalize_sequence
sub check_ref_length{
my ($ref, $output_filename, $critical) = @_;
my @headers;
my @seq_lines;
my @final_output;
# three-arg open with lexical handles; the old two-arg form would
# interpret mode characters embedded in the filename
open(my $ref_fh, '<', $ref) or die $!;
while(<$ref_fh>){
chomp;
if ($_ =~ /^>/){
push(@headers, $_);
# a new header finalizes the record collected so far, which
# belongs to the previous header ($headers[-2])
if (@seq_lines){
push @final_output, &finalize_sequence($critical, $headers[-2], @seq_lines);
}
undef @seq_lines;
}elsif ($_ =~ /[a-zA-Z]/){
push(@seq_lines, $_);
}
}
# finalize the last record in the file
push @final_output, &finalize_sequence($critical, $headers[-1], @seq_lines);
open(my $out_fh, '>', $output_filename) or die $!;
for (@final_output){
print {$out_fh} "$_\n";
}
close $ref_fh;
close $out_fh or die $!;
}
# Prepare one FASTA record for mirabait.
#
# Args (positional): $critical (maximum sequence length mirabait can
# handle; false disables splitting), $header (FASTA header line,
# including '>'), followed by the sequence lines.
#
# If the joined sequence fits within $critical (or no limit was given),
# the record is returned unchanged as (header, sequence).  Otherwise it
# is split into $critical-sized sub-sequences with a 31 bp overlap;
# chunks whose length minus 31 would be negative are dropped.
sub finalize_sequence{
my $critical=shift(@_);
my $header=shift(@_);
my $full_sequence=join("",@_);
my $factor;
my @output;
if (!$critical){
# no length limit configured -> never split.  (Previously this set
# $factor to 0, which fell into the splitting branch below and
# returned an empty list, silently dropping the record.)
$factor=1;
}else{
$factor=ceil(length($full_sequence)/$critical);
}
if ($factor == 1){
push(@output,$header);
push(@output,$full_sequence);
}else{ #too long
print "\nreference is too long for mirabait to be handled in one go -> will be split into sub-sequences\n";
$header=substr $header, 1;	# drop the leading '>'
for (my $i=0; $i<$factor; $i++){
my $chunk = substr $full_sequence, $i*$critical, $critical+31;
# skip trailing fragments shorter than the 31 bp overlap
unless ((length($chunk)-31)<0){
push(@output,">sub$i\_" .$header);
push(@output,$chunk);
}
}
}
return @output;
}
# Remove the iteration directory from $interval rounds ago, located in
# the parent of the current working directory, if it still exists.
sub clean {
my ($interval, $current) = @_;
my $stale = $current - $interval;
my $here = abs_path;
my $stale_dir = "$here/../iteration$stale";
if (-d $stale_dir){
print "\nnow removing directory iteration$stale\n";
rmtree ($stale_dir) or die $!;
}
}
| chrishah/MITObim | old/MITObim_1.6.pl | Perl | mit | 32,002 |
#!/usr/bin/perl -w
use strict;
# Rename the "adopt" option-parsing library into a project-local copy:
# rewrites identifiers, header guards and file names, and optionally
# emits renamed tests as well.
my $tests = 0;	# set by --with-tests
my $out = '.';	# output directory, set by --out
my($prefix, $filename, $out_tests, $inline, $include, $tests_name, $header_guard, $lowercase_status, $no_usage);
my $usage = "usage: $0 [--out=<dir>] [--filename=<name>] [--out-tests=<dir>] [--with-tests=<testname>] [--without-usage] [--include=<include>] [--inline=<inline_func>] [--header-guard=<headerguard>] [--lowercase-status] <prefix>\n";
# print the usage string and abort
sub die_usage() { die $usage; }
# Walk the command line by hand; flags may appear in any order and the
# single non-flag argument is taken as the prefix.
foreach my $raw (@ARGV) {
    my $arg = $raw;
    if ($arg =~ /^--with-tests/) {
        $tests = 1;
        # "--with-tests=<name>" also supplies an explicit test name
        $tests_name = $arg if $arg =~ s/^--with-tests=//;
    }
    elsif ($arg =~ s/^--out=//)          { $out = $arg; }
    elsif ($arg =~ s/^--filename=//)     { $filename = $arg; }
    elsif ($arg =~ s/^--out-tests=//)    { $out_tests = $arg; }
    elsif ($arg =~ s/^--include=//)      { $include = $arg; }
    elsif ($arg =~ s/^--inline=//)       { $inline = $arg; }
    elsif ($arg =~ s/^--header-guard=//) { $header_guard = $arg; }
    elsif ($arg !~ /^--/ && ! $prefix)   { $prefix = $arg; }
    elsif ($arg eq '--lowercase-status') { $lowercase_status = 1; }
    elsif ($arg eq '--without-usage')    { $no_usage = 1; }
    elsif ($arg eq '--help') {
        print STDOUT $usage;
        exit;
    }
    else {
        print STDERR "$0: unknown argument: $arg\n";
        die_usage();
    }
}
die_usage() unless $prefix;
# the emitted file basename defaults to the prefix
$filename = $prefix unless($filename);
my $filename_upper = $filename;
$filename_upper =~ tr/a-z/A-Z/;
my $prefix_upper = $prefix;
$prefix_upper =~ tr/a-z/A-Z/;
$header_guard = "${filename_upper}_H" unless($header_guard);
# NOTE(review): the destination basenames below were garbled in this
# copy of the file; "${filename}" matches how $filename is derived and
# documented above -- confirm against upstream adopt's rename.pl.
translate("adopt.c", "${out}/${filename}.c");
translate("adopt.h", "${out}/${filename}.h");
if ($tests)
{
	$out_tests = $out unless($out_tests);
	if ($tests_name) {
		# "::"-qualified test names become underscore-separated
		$tests_name =~ s/::/_/g;
	} else {
		$tests_name = $prefix;
	}
	# the test file is named after the last underscore component
	my $tests_filename = $tests_name;
	$tests_filename =~ s/.*_//;
	translate("tests/adopt.c", "${out_tests}/${tests_filename}.c");
}
# Copy $in to $out, rewriting every "adopt" identifier, file name and
# header guard for the configured prefix/filename, and stamping the
# output with a "generated file" banner.
sub translate {
	my($in, $out) = @_;
	# three-arg open with lexical handles; the old two-arg bareword
	# form would misparse filenames containing mode characters
	open(my $in_fh, '<', $in) || die "$0: could not open ${in}: $!\n";
	my $contents = join('', <$in_fh>);
	close($in_fh);
	# strip the adopt_usage_fprint declaration and definition when
	# --without-usage was given
	$contents =~ s/\n\/\*\*\n( \*[^\n]*\n)* \*\/\nint adopt_usage_fprint\(.*?\);\n//s
		if ($no_usage);
	$contents =~ s/\nint adopt_usage_fprint.*}\n//s
		if ($no_usage);
	# NOTE(review): the replacement target below was garbled in this
	# copy of the file; "${prefix}" is the most consistent
	# reconstruction, but note the $tests branch at the end expects
	# "test_adopt__" to still be present -- confirm against upstream.
	$contents =~ s/test_adopt__/test_${prefix}__/g;
	# if a prefix becomes foo_opt, we want to rewrite adopt_opt specially
	# to avoid it becoming foo_opt_opt
	$contents =~ s/adopt_opt/${prefix}/g if ($prefix =~ /_opt$/);
	$contents =~ s/ifndef ADOPT_H/ifndef ${header_guard}/g;
	$contents =~ s/define ADOPT_H/define ${header_guard}/g;
	$contents =~ s/endif \/\* ADOPT_H/endif \/* ${header_guard}/g;
	$contents =~ s/adopt\.h/${filename}\.h/g;
	$contents =~ s/adopt_/${prefix}_/g;
	$contents =~ s/ADOPT_/${prefix_upper}_/g;
	# lowercase the first letter of status strings when requested
	$contents =~ s/fprintf\(file, "([A-Z])/fprintf\(file, "\l$1/g if($lowercase_status);
	if ($include) {
		$contents =~ s/^(#include "opt.h")$/#include "${include}"\n$1/mg;
	}
	if ($inline) {
		$contents =~ s/^INLINE/${inline}/mg;
		$contents =~ s/\n#ifdef _MSC_VER\n.*\n#endif\n//sg;
	}
	if ($tests) {
		$contents =~ s/test_adopt__/test_${tests_name}__/g;
	}
	# extend the license banner with a "generated file" warning
	$contents =~ s/\n \*\/\n/\n *\n * THIS FILE IS AUTOMATICALLY GENERATED; DO NOT EDIT.\n *\n * This file was produced by using the `rename.pl` script included with\n * adopt. The command-line specified was:\n *\n * $0 @ARGV\n *\/\n/s;
	open(my $out_fh, '>', $out) || die "$0: could not open ${out}: $!\n";
	print {$out_fh} $contents;
	close($out_fh);
}
| ethomson/adopt | rename.pl | Perl | mit | 3,673 |
#!/usr/bin/env perl
use strict;
use warnings;
use v5.20;
# Tiny interactive yes/no prompt demo.
say "do you respect the amanals";
my $answered = 0;
until ($answered) {
    print "huh do you? (type 'yes' or 'no'):";
    chomp(my $reply = <>);
    if ($reply eq 'yes') {
        say "ok hippy bro";
        $answered = 1;
    } elsif ($reply eq 'no') {
        say 'im going to hunt you down with my hippy gun';
        $answered = 1;
    } else {
        say "dude, you didn't type 'yes' or 'no'! Try again";
    }
}
| neybar/seth_programming_meritbadge | perl_seth/example.pl | Perl | mit | 445 |
package ORDER::CSV;
use strict;
use Text::CSV_XS;
use Data::Dumper;
## Serialize an order object into the multi-section CSV interchange
## format: an ORDER row, ITEM rows, then optional EVENT and TRACK rows.
## Each section is preceded by its own header row whose first column
## names the section (HORDER/HITEM/HEVENT/HTRACK).
##
## Arg: $O2 - the order object (provides oid/pr_get/stuff2/history/
##            tracking/is_supplier_order/supplier_orderid).
## Returns: the CSV document as a single CRLF-terminated string.
sub as_csv {
	my ($O2) = @_;
	my $OUT = '';
	my $csv = Text::CSV_XS->new({});
	## helper: combine a field list into one CRLF-terminated CSV line.
	## (combine() failures are silently ignored, as before.)
	my $emit = sub {
		$csv->combine(@_);
		return $csv->string()."\r\n";
	};
	# supplier_order_id is usually the same as the source order id,
	# but it COULD be something different; it's not order_id.
	my $order_id = $O2->oid();
	if ($O2->is_supplier_order()) {
		$order_id = $O2->supplier_orderid();
	}
	##
	## ORDER section: one header row + one data row
	##
	my @header = ('HORDER', 'ORDERID',
		'bill/firstname', 'bill/lastname', 'bill/address1', 'bill/address2',
		'bill/city', 'bill/region', 'bill/postal', 'bill/email', 'bill/countrycode',
		'ship/firstname', 'ship/lastname', 'ship/address1', 'ship/address2',
		'ship/city', 'ship/region', 'ship/postal', 'ship/country', 'ship/phone',
		'flow/pool', 'flow/payment_status', 'sum/order_total',
		'want/order_notes', 'want/referred_by');
	$OUT .= $emit->(@header);
	$OUT .= $emit->(map {
		($_ eq 'HORDER')  ? 'ORDER' :
		($_ eq 'ORDERID') ? $order_id :
		$O2->pr_get($_)
	} @header);
	##
	## ITEM section: header row + one row per order item
	## ('%'-prefixed columns are looked up in the item's %attribs hash)
	##
	@header = ('HITEM', 'ORDERID', 'stid', 'sku', 'mfgid', 'description',
		'qty', 'price', 'cost', 'mkt', 'mktid', 'asm_master',
		'%zoovy:prod_mfg', '%zoovy:prod_mfgid');
	$OUT .= $emit->(@header);
	foreach my $item (@{$O2->stuff2()->items()}) {
		$OUT .= $emit->(map {
			($_ eq 'HITEM')         ? 'ITEM' :
			($_ eq 'ORDERID')       ? $order_id :
			(substr($_,0,1) eq '%') ? $item->{'%attribs'}->{substr($_,1)} :
			$item->{$_}
		} @header);
	}
	##
	## EVENT section (only when the order has history)
	##
	if (scalar(@{$O2->history()})>0) {
		@header = ('HEVENT', 'ORDERID', 'uuid', 'ts', 'etype', 'luser', 'content');
		$OUT .= $emit->(@header);
		foreach my $e (@{$O2->history()}) {
			$OUT .= $emit->(map {
				($_ eq 'HEVENT')  ? 'EVENT' :
				($_ eq 'ORDERID') ? $order_id :
				$e->{$_}
			} @header);
		}
	}
	##
	## TRACK section (only when the order has tracking records)
	##
	if (scalar(@{$O2->tracking()})>0) {
		@header = ('HTRACK', 'ORDERID', 'carrier', 'created', 'cost',
			'actualwt', 'track', 'content', 'void', 'ins', 'dv', 'notes');
		$OUT .= $emit->(@header);
		foreach my $trk (@{$O2->tracking()}) {
			$OUT .= $emit->(map {
				($_ eq 'HTRACK')  ? 'TRACK' :
				($_ eq 'ORDERID') ? $order_id :
				$trk->{$_}
			} @header);
		}
	}
	return($OUT);
}
1; | CommerceRack/backend | lib/ORDER/CSV.pm | Perl | mit | 3,940 |
package SGN::Test::WWW::Mechanize;
=head1 NAME
SGN::Test::WWW::Mechanize - subclass of
L<Test::WWW::Mechanize::Catalyst> with some SGN-specific convenience
=head1 SYNOPSIS
use 't/lib';
# optional skip_cgi import argument, skips loading CGIs if you
# don't need them, speeding up your test.
use SGN::Test::WWW::Mechanize skip_cgi => 1;
my $mech = SGN::Test::WWW::Mechanize->new;
# look at some pages
$mech->get_ok( '/organism/sol100/view' );
$mech->content_contains('SOL100 Organisms');
$mech->content_contains('presents a summary');
$mech->content_contains('click on an organism name');
$mech->content_lacks('Add to Tree','not logged in, does not have a form for adding an organism');
# You can use XPath selectors on $mech to find things
# see WWW::Mechanize::TreeBuilder and HTML::TreeBuilder::XPath for more info
my $value = $mech->findvalue( '/html/body//span[@class="sequence"]');
# do some tests while logged in as a temporary user
$mech->while_logged_in( { user_type => 'curator' }, sub {
$mech->get_ok( '/organism/sol100/view' );
$mech->content_contains( 'Authorized user', 'now says authorized user' );
$mech->content_contains( 'Add a SOL100 organism', 'now has an adding form' );
$mech->submit_form_ok({
form_name => 'sol100_add_form',
fields => { species => $test_organism->species },
}, 'submitted add organism form');
});
# run tests that require a certain level of access to the
# application (see "TEST LEVELS" below)
$mech->with_test_level( local => sub {
my $c = $mech->context;
my $dbh = $c->dbc->dbh;
my $check_data = $dbh->selectall_arrayref('....');
$mech->get_ok( '/organism/sol100/view' );
});
=head1 IMPORT ARGUMENTS
=head2 skip_cgi
use SGN::Test::WWW::Mechanize skip_cgi => 1;
If passed and set to true, skips loading legacy CGI scripts. Use this
to speed up your test's running if your test does not use the old
CGIs. Note that currently login-based tests still need the CGIs.
=head1 TEST LEVELS
This module introduces the concept of B<test levels>, which correspond
to how much access to the app's underlying files, databases, and
program state the test code is expected to have.
The following test levels are defined:
=head2 remote
The app and the tests are running on different hosts. The only
means of interaction is via remote requests.
This level is in effect if both SGN_TEST_SERVER and SGN_TEST_REMOTE are
set to true values.
=head2 local
The app and the tests are running on the same host, and with the same
configuration data, as this test code. Facilities under C<remote> are
available, plus files and databases can be accessed via the context
object, given by $mech->context.
This level is in effect if SGN_TEST_SERVER is set, but SGN_TEST_REMOTE
is not set, or false.
=head2 process
The app and the tests are running in the same process. Facilities under
C<local> are available, plus the app's in-memory state and
configuration can be accessed directly from the context object.
This level is in effect if no SGN_TEST_SERVER environment variable is
set.
=head1 SEE ALSO
This class inherits from all of these:
L<Test::WWW::Mechanize::Catalyst>, L<Test::WWW::Mechanize>,
L<WWW::Mechanize>
It also does the L<WWW::Mechanize::TreeBuilder> role, with a tree_class of L<HTML::TreeBuilder::XPath>.
=head1 ATTRIBUTES
=cut
use Moose;
use namespace::autoclean;
# Must run before Test::WWW::Mechanize::Catalyst is loaded: flag test
# mode and point the Catalyst test client at an external server when
# SGN_TEST_SERVER is set in the environment.
BEGIN {
$ENV{SGN_TEST_MODE} = 1;
$ENV{CATALYST_SERVER} = $ENV{SGN_TEST_SERVER};
}
# Class import hook.  Recognizes only "skip_cgi => 1" (sets
# SGN_SKIP_CGI so legacy CGI scripts are not loaded); any other
# argument is fatal.
sub import {
    my ( $class, %args ) = @_;
    my %known = ( skip_cgi => 1 );
    for my $opt ( keys %args ) {
        die "invalid arg $opt" unless $known{$opt};
    }
    $ENV{SGN_SKIP_CGI} = 1 if $args{skip_cgi};
}
use Carp;
use Test::More;
use Data::UUID ();
my $host_uuid = Data::UUID->new->create_str;
use HTML::Lint;
use Try::Tiny;
use CXGN::People::Person;
use CXGN::People::Login;
use SGN::Devel::MyDevLibs;
extends 'Test::WWW::Mechanize::Catalyst';
with 'WWW::Mechanize::TreeBuilder' => {
tree_class => 'HTML::TreeBuilder::XPath'
};
sub BUILD {
    my ($self) = @_;
    # increase the LWP::UserAgent-inherited timeout to 6 minutes;
    # this mech is actually a distant child of LWP::UserAgent
    $self->timeout(360);
}
=head2 catalyst_app
The name of the app under test. Defaults to 'SGN'.
=cut
# override the inherited attribute's default: test the SGN app
has '+catalyst_app' => ( default => 'SGN' );
=head2 context
A context object for the app under test. Only available under
C<local> or C<process> testing levels.
Under the C<process> test level, this will be the Catalyst app class
(same as C<catalyst_app> above). Under C<local> testing, this will be
an L<SGN::Context>. Under C<remote> testing, this will throw an
exception.
=cut
# Application context, built lazily because it is only available at
# 'local' or 'process' test levels (confesses under 'remote').
has 'context' => (
is => 'ro',
lazy_build => 1,
); sub _build_context {
my $self = shift;
# in-process testing: the loaded Catalyst app class itself serves
# as the context
if( $self->can_test_level('process') ) {
Class::MOP::load_class($self->catalyst_app );
return $self->catalyst_app;
} elsif($self->can_test_level('local') ) {
# same host but separate process: build a standalone SGN::Context
require SGN::Context;
return SGN::Context->new;
} else {
confess 'context() should not ever be called at remote test level';
}
}
# private: the temporary DB user created by create_test_user(), stored
# as a plain hashref (first_name, last_name, user_name, password,
# user_type, id); cleared again by delete_test_user()
has 'test_user' => (
is => 'rw',
isa => 'HashRef',
predicate => 'has_test_user',
clearer => 'clear_test_user',
);
=head2 test_level
Read-only attribute to give the current testing level, one of 'remote',
'local', or 'process'.
=cut
# Current testing level, derived purely from the environment:
# no SGN_TEST_SERVER -> 'process'; SGN_TEST_SERVER plus
# SGN_TEST_REMOTE -> 'remote'; SGN_TEST_SERVER alone -> 'local'.
sub test_level {
    return 'process' unless $ENV{SGN_TEST_SERVER};
    return $ENV{SGN_TEST_REMOTE} ? 'remote' : 'local';
}
=head1 METHODS
=head2 can_test_level
Takes single test level name, returns true if the current testing
level is at least the given level.
Example:
if( $mech->can_test_level('local') ) {
test_local_stuff();
}
=cut
# True if the current testing level grants at least as much access as
# the requested one (remote < local < process).  Confesses on an
# unknown level name.
sub can_test_level {
    my ( $self, $wanted ) = @_;
    my %rank = ( remote => 0, local => 1, process => 2 );
    confess "invalid test level '$wanted'" unless exists $rank{$wanted};
    return $rank{ $self->test_level } >= $rank{$wanted};
}
=head2 with_test_level
Run the subroutine if the test level is at least the given level, or
output a skip if not. Takes an optional test count after the sub.
Example:
$mech->with_test_level( local => sub {
}, $optional_test_count );
=cut
# Run $sub (passing $self) if the current test level satisfies
# $need_level; otherwise emit a Test::More skip for $count tests
# (default 1).
sub with_test_level {
my ( $self, $need_level, $sub, $count ) = @_;
SKIP: {
# Test::More's skip() exits the SKIP block via "last SKIP"
skip( "tests that require $need_level-level access, current level is ".$self->test_level, ( $count || 1 ) )
unless $self->can_test_level( $need_level );
$sub->( $self );
}
}
=head2 dbh_leak_ok
Call immediately after a get_ok() to re-fetch the same URL, checking
the database connection count before and after the GET.
If the connection count after the second fetch is greater than before
the fetch, the test fails.
Skips if the current test level does not support a leak check.
=cut
# Re-fetch the current page ($self->base) and fail if the number of
# open database connections grew across the request.  Intended to be
# called right after a get_ok().  Skipped under parallel testing
# (connection counts would race between hosts) and below 'local' test
# level (no DB access available).
sub dbh_leak_ok {
my $self = shift;
my $test_name = shift || '';
$test_name .= ' ' if $test_name;
SKIP: {
$ENV{SGN_PARALLEL_TESTING}
and skip 'parallel testing, dbh leak check skipped', 1;
$self->with_test_level( local => sub {
# count connections, re-request the same URL, count again
my $before = $self->_db_connection_count;
my $url = $self->base;
$self->get( $url );
my $after = $self->_db_connection_count;
cmp_ok( $after, '<=', $before, "did not leak any database connections: $test_name($url)");
}, 1 );
}
}
# Count the server's current pg_stat_activity rows, minus 1 to exclude
# the connection this check itself opens.  Connection parameters come
# from the context's dbc_profile.
sub _db_connection_count {
my ($mech) = @_;
my $dbh = DBI->connect( @{ $mech->context->dbc_profile }{qw{ dsn user password attributes }} );
return $dbh->selectcol_arrayref(<<'')->[0] - 1;
select count(*) from pg_stat_activity
}
# Create a fresh test person + login in the database and remember it in
# the test_user attribute so delete_test_user()/DEMOLISH can remove it.
# Accepts user_type => '...' (defaults to 'user').  Under parallel
# testing the credentials are uniquified per host uuid and pid.
sub create_test_user {
my $self = shift;
my %props = @_;
# make any death in here report with a full stack trace
local $SIG{__DIE__} = \&Carp::confess;
my %u = (
first_name => 'testfirstname',
last_name => 'testlastname',
user_name => 'testusername',
password => 'testpassword',
user_type => $props{user_type} || 'user',
);
if( $ENV{SGN_PARALLEL_TESTING} ) {
# uniquify all credentials so concurrent test hosts don't collide
$_ .= "-$host_uuid-$$" for @u{qw{ first_name last_name user_name password }};
#use Data::Dump;
#warn "creating user ".Data::Dump::dump( \%u );
}
# remove any leftover user with the same name from a previous run
$self->_delete_user( \%u );
# generate a new user for testing purposes
# (to be deleted right afterwards)
$self->context->dbc->txn( ping => sub {
my $dbh = $_;
my $p = CXGN::People::Person->new( $dbh );
$p->set_first_name( $u{first_name} );
$p->set_last_name( $u{last_name} );
my $p_id = $p->store();
$u{ 'id' } = $p_id
or die "could not create person $u{first_name} $u{last_name}";
my $login = CXGN::People::Login->new( $dbh, $p_id );
$login->set_username( $u{user_name} );
$login->set_password( $u{password} );
$login->set_user_type( $u{user_type} );
$login->store();
});
$self->test_user(\%u);
}
# Moose destructor hook: make sure the temp test user is removed from
# the database when the mech object goes away.
sub DEMOLISH {
    my ($self) = @_;
    $self->delete_test_user;
}
# Change the user_type (e.g. 'user' -> 'curator') of the current test
# user directly in the database.
sub set_test_user_type {
my $self = shift;
CXGN::People::Login
->new( $self->context->dbc->dbh, $self->test_user->{id} )
->set_user_type(shift);
}
# Delete our temp user from the database if one has been created, and
# forget it either way.
sub delete_test_user {
    my ($self) = @_;
    $self->_delete_user( $self->test_user ) if $self->has_test_user;
    $self->clear_test_user;
}
# Remove the given user (hashref with at least user_name) from the
# database: first a best-effort cleanup of dependent metadata/phenome
# rows (silently tolerated if we lack permission), then a hard delete
# of the person record itself.
sub _delete_user {
my ( $self, $u ) = @_;
my $dbc = $self->context->dbc;
# attempt to delete the user's metadata also, but we may not have
# permission, so be silent if it fails
$dbc->txn( fixup => sub {
#local $_->{RaiseError} = 0;
#local $_->{PrintError} = 0;
my $u_id = CXGN::People::Person->get_person_by_username( $_, $u->{user_name} );
$_->do( <<'', undef, $u_id, $u_id );
DELETE FROM metadata.md_metadata WHERE create_person_id = ? OR modified_person_id = ?
$_->do( <<'', undef, $u_id);
DELETE FROM metadata.md_image WHERE sp_person_id=?
$_->do( <<'', undef, $u_id);
DELETE FROM phenome.locusgroup_member WHERE sp_person_id=?
$_->do( <<'', undef, $u_id);
DELETE FROM phenome.locusgroup WHERE sp_person_id=?
});
$dbc->txn( ping => sub {
my $dbh = $_;
if ( my $u_id = CXGN::People::Person->get_person_by_username( $dbh, $u->{user_name} ) ) {
CXGN::People::Person->new( $dbh, $u_id )->hard_delete;
}
});
}
=head2 while_logged_in
Execute the given code while logged in. Takes a hash ref of
properties to set on the temp user that is created.
Args: hash ref of props for the temp user to create,
followed by a subroutine ref to execute while logged in
current supported user properties:
user_type => 'curator', 'sequencer', etc. default 'user'
Ret: nothing meaningful
In addition, the called subroutine is passed a hashref of user
information of the form:
{
first_name => 'testfirstname',
last_name => 'testlastname',
user_name => 'testusername',
password => 'testpassword',
user_type => $props{user_type} || 'user',
id => 34,
}
Example:
$mech->while_logged_in({ user_type => 'curator' }, sub {
my $user_info_hashref = shift;
diag "logged in as user id $user_info_hashref->{id}";
$mech->get_ok( '/organism/sol100/view' );
$mech->content_contains( 'Authorized user', 'now says authorized user' );
});
=cut
# Run $sub while logged in as a freshly created temp user (see POD
# above).  The user props hashref is required; croaks otherwise.
sub while_logged_in {
    my ($self,$props,$sub) = @_;
    croak 'must provide hashref of user props to while_logged_in'
        unless ref $props && ref $props eq 'HASH';
    $self->with_test_level( local => sub {
        $self->create_test_user( %$props );
        $self->log_in_ok;
        # Try::Tiny: rethrow any error from the callback, but always
        # log out afterwards so later tests start from a clean session
        try {
            $sub->( $self->test_user );
        } catch {
            die $_;
        } finally {
            $self->log_out;
        };
    });
}
=head2 while_logged_in_all
Execute the given code while logged in for each user_type.
Args: a subroutine ref to execute while logged in
Ret: nothing meaningful
Example:
$mech->while_logged_in_all(sub {
$mech->get_ok( '/organism/sol100/view' );
$mech->content_contains( 'Authorized user', 'now says authorized user' );
});
=cut
# Run the given callback once for each supported user_type, logged in
# as a temp user of that type each time.
sub while_logged_in_all {
    my ( $self, $sub ) = @_;
    my @user_types = qw/ user curator submitter sequencer genefamily_editor /;
    foreach my $user_type (@user_types) {
        diag "Running tests as $user_type user_type";
        $self->while_logged_in( { user_type => $user_type }, $sub );
    }
}
# Log in through the web UI as the current test_user, asserting each
# step with Test::More-style checks.
sub log_in_ok {
    my ($self) = @_;
    $self->get_ok("/solpeople/top-level.pl");
    $self->content_contains("Login");
    my %form = (
        form_name => 'login',
        fields => {
            username => $self->test_user->{user_name},
            # 'pd' is the password field name used by the login form
            pd => $self->test_user->{password},
        },
    );
    $self->submit_form_ok( \%form, "submitted login form" );
    # on failure, dump the page body to help diagnose the login problem
    $self->content_lacks('Incorrect username', 'did not get "Incorrect username"')
        or Test::More::diag $self->content;
    $self->content_lacks('Incorrect password','did not get "Incorrect password"')
        or Test::More::diag $self->content;
}
# Log out of the web UI via the logout URL and assert the request
# succeeded.
sub log_out {
    my $self = shift;
    $self->get_ok( "/solpeople/login.pl?logout=yes", 'logged out' );
}
# Run HTML::Lint (structural errors only) over the current page content
# and report a single pass/fail test with per-error diagnostics.
sub _lint_content_ok {
    my ( $self, $desc ) = @_;
    local $Test::Builder::Level = $Test::Builder::Level + 1;

    my $lint = HTML::Lint->new;
    $lint->only_types( HTML::Lint::Error::STRUCTURE );
    $lint->parse( $self->content );

    my @errors = $lint->errors;
    my $Test   = Test::Builder->new;

    # clean page: one passing test, no diagnostics
    return $Test->ok( 1, $desc ) unless @errors;

    # lint problems: fail the test and emit each error as a diagnostic
    my $ok = $Test->ok( 0, $desc );
    $Test->diag( 'HTML::Lint errors for ' . $self->uri );
    $Test->diag( $_->as_string ) for @errors;
    my $nerrors = @errors;
    my $plural  = $nerrors == 1 ? '' : 's';
    $Test->diag( "$nerrors error$plural on the page" );
    return $ok;
}
# Speed up Moose method dispatch; keep the hand-written constructor
# rather than letting Moose inline one.
__PACKAGE__->meta->make_immutable( inline_constructor => 0 );
1;
| solgenomics/sgn | t/lib/SGN/Test/WWW/Mechanize.pm | Perl | mit | 14,275 |
use lib '/';
use Handlers;
use Plex::Daemon;
use Plex::Log;
use YAML::Tiny;
use POSIX;
# --- server startup ------------------------------------------------------
# Read the YAML config (path from argv or the default), enumerate the
# vhost directories under the document root, open the listening socket,
# then daemonize and fork the worker pool.
my $cfg = $ARGV[0] || '/etc/plex/plex.cfg';
my (%children, $children);
my $yaml = YAML::Tiny->new();
$yaml = YAML::Tiny->read( $cfg );
$yaml = $yaml->[0];
# each directory under the document root is a virtual host name
opendir(VHOSTS, $yaml->{common}->{document_root});
my @vhosts = readdir(VHOSTS);
# fix: the original called close() on a dirhandle opened with opendir(),
# which fails silently; closedir() is the matching call
closedir(VHOSTS);
my $log = Plex::Log->new();
my $http = Plex::Daemon->new( LocalPort => $yaml->{port}->{http} ) or $log->log(0,"Couldn't setup sock: $!");
if ($http) {
    $log->log(2,"Server Sock $http is there.");
    print "Plex Webserver is booted.\n";
}
&daemonize;
&spawn_children;
&keep_ticking;
# Fork one worker child.  The parent registers the pid and returns; the
# child serves up to $yaml->{child}{lifetime} HTTP requests from the
# shared listening socket, then exits.
sub spawn {
    my $pid;
    # block SIGINT around fork so the bookkeeping below can't be interrupted
    my $sigset = POSIX::SigSet->new(SIGINT);
    sigprocmask(SIG_BLOCK, $sigset) or die "Can't block SIGINT for fork: $!";
    die "Cannot fork child: $!\n" unless defined ($pid = fork);
    if ($pid) {
        # parent: record the child and return to the caller
        $children{$pid} = 1;
        $children++;
        warn "forked new child, we now habe $children children";
        return;
    }
    # child: accept-loop until the per-child request budget is spent
    my $i = 0;
    while ($i < $yaml->{child}->{lifetime}) {
        $i++;
        my $client = $http->accept or last;
        $client->autoflush(1);
        log_message("[CLIENT] ".$client->peerhost."\n");
        my $request = $client->get_request(1) or last;
        my $url = $request->url->path;
        my $host = $request->header('Host');
        # only honour the Host header if it matches a known vhost directory;
        # otherwise fall back to the document root itself
        my $found = 0;
        foreach my $vhost (@vhosts) {
            if ($host eq $vhost) {
                $found = 1;
            }
        }
        if ($found == 0) {
            $host = "";
        }
        # NOTE(review): $url is used directly in the filesystem path with no
        # sanitization, so "../" traversal looks possible — confirm upstream.
        my $yep = $yaml->{common}->{document_root}."/$host/".$url;
        if (-d $yep) {
            # directory request: look for an index.* file and dispatch it
            # through the handler configured for its suffix
            opendir(DIR, $yep);
            my @files_in_dir = readdir(DIR);
            closedir(DIR);
            foreach my $file_in_dir (@files_in_dir) {
                if ($file_in_dir =~ /index\.(.*)/i) {
                    my $is_suffix = $1;
                    foreach my $obj (keys %{$yaml->{filetype}}) {
                        foreach my $suffix (@{$yaml->{filetype}->{$obj}->{suffix}}) {
                            if ($is_suffix eq $suffix) {
                                my $handler = $yaml->{filetype}->{$obj}->{handler};
                                my $retr = Handlers->$handler($yaml->{common}->{document_root}."/$host/index.".$is_suffix, $client, $request);
                                my $req = HTTP::Response->new(200);
                                $req->header("Content-Type",$yaml->{filetype}->{$obj}->{filetype}, "Server", "Plex2");
                                $req->content( $retr);
                                $client->send_response($req);
                                # child exits after a dispatched index page
                                exit;
                            }
                        }
                    }
                }
            }
        } elsif (-e $yep) {
            # plain file request: dispatch by suffix, same as above
            if ($yep =~ /\/(.*?)\.(.*)/) {
                my $is_suffix = $2;
                foreach my $obj (keys %{$yaml->{filetype}}) {
                    foreach my $suffix (@{$yaml->{filetype}->{$obj}->{suffix}}) {
                        if ($is_suffix eq $suffix) {
                            my $handler = $yaml->{filetype}->{$obj}->{handler};
                            my $retr = Handlers->$handler($yep, $client, $request);
                            my $req = HTTP::Response->new(200);
                            $req->header("Content-Type",$yaml->{filetype}->{$obj}->{filetype}, "Server", "Plex2");
                            $req->content( $retr );
                            $client->send_response( $req );
                            $log->log(1, "[200] Delivered File ".$request->url->path);
                            exit;
                        }
                    }
                }
            }
        } else {
            # nothing at that path: 404
            my $req = HTTP::Response->new(404);
            $req->header("Content-Type","text/html","Server","Plex2");
            $req->content("No such file or directory.");
            $client->send_response( $req );
            $log->log(1, "[404] Couldn't find ".$request->url->path);
        }
        $client->close;
    }
    warn "child terminated after $i requests";
    exit;
}
# Parent main loop: keep the worker pool topped up to the configured
# size.  NOTE(review): this is a busy-wait — the while(1) spins at full
# CPU when the pool is already full; a short sleep would fix that.
sub keep_ticking {
    while (1) {
        for (my $i = $children; $i < $yaml->{child}->{total}; $i++) {
            &spawn;
        }
    };
}
# Fork the initial pool of worker children at startup.
sub spawn_children {
    my $pool_size = $yaml->{child}->{total};
    foreach my $n ( 1 .. $pool_size ) {
        &spawn;
    }
}
# SIGCHLD handler: reap every exited child without blocking, keep the
# %children / $children bookkeeping in sync, and re-install the handler
# (pre-sigaction style re-arm).
sub reaper {
    my $stiff;
    # fix: the original called the non-existent sub &WHOHANG, which dies
    # with "Undefined subroutine" on the first SIGCHLD; the intended flag
    # is POSIX's WNOHANG (exported by the `use POSIX` above), which makes
    # waitpid return immediately when no child has exited.
    while (($stiff = waitpid(-1, WNOHANG)) > 0) {
        warn("child $stiff terminated -- status $?");
        $children--;
        delete $children{$stiff};
    }
    $SIG{CHLD} = \&reaper;
}
# Detach from the controlling terminal: fork (parent exits), start a new
# session, close the standard streams, and install signal handlers that
# route warnings/fatals to the logfile and tear down the children on
# HUP/INT/TERM.
sub daemonize {
    my $pid = fork;
    defined ($pid) or die "Cannot start daemon: $!";
    print "Parent Daemon running.\n" if $pid;
    exit if $pid;
    # child becomes session leader with no controlling tty
    POSIX::setsid();
    close (STDOUT);
    close (STDIN);
    close (STDERR);
    # funnel perl warnings and fatal errors into the logfile
    $SIG{__WARN__} = sub {
        &log_message("NOTE! " . join(" ", @_));
    };
    $SIG{__DIE__} = sub {
        &log_message("FATAL! " . join(" ", @_));
        exit;
    };
    # graceful shutdown: forward INT to all children, then die
    $SIG{HUP} = $SIG{INT} = $SIG{TERM} = sub {
        my $sig = shift;
        $SIG{$sig} = 'IGNORE';
        kill 'INT' => keys %children;
        die "killed by $sig\n";
        exit;    # unreachable: die above terminates via the __DIE__ handler
    };
    $SIG{CHLD} = \&reaper;
}
# Append one line to the configured logfile.
# Fixes two defects in the original: it read the logfile path from the
# undefined global $y (the parsed config lives in $yaml), so it always
# opened the file ">>" with an undef path; and it used an unchecked
# two-arg open on a bareword handle.
sub log_message {
    my $text = shift;
    my $logfile = $yaml->{common}->{logfile};
    # logging is best-effort: silently give up if the file can't be opened
    open( my $log_fh, '>>', $logfile ) or return;
    print {$log_fh} "$text\n";
    close($log_fh);
}
| zulla/plex | src/httpd.pl | Perl | mit | 5,395 |
# =========================================================================
# Formatting: Expand tabs, 4 spaces per indent level.
# =========================================================================
=pod
=head1 NAME
Adobe::Codex2::Types::role - Define Codex type data.
=head1 DESCRIPTION
See POD from Adobe::Codex2.pm for details on using the Codex modules.
=head2 XML
<xs:element name="role">
<xs:complexType>
<xs:attribute name="name" type="xs:string" use="required"/>
<xs:attribute name="description" type="xs:string" use="required"/>
<xs:attribute name="id" type="xs:integer" use="required"/>
</xs:complexType>
</xs:element>
=head1 SUBROUTINES/METHODS
=cut
# -------------------------------------------------------------------------
# Module setup.
# -------------------------------------------------------------------------
package Adobe::Codex2::Types::role;
require 5.8.0;
use strict;
use warnings;
use base qw(Exporter);
our $VERSION = "0.2.0";
# fix: the original also ran `our @ISA = qw();`, which wiped the
# Exporter inheritance that `use base qw(Exporter)` had just installed,
# silently disabling `use ...Types::role qw(&Data)` style imports.
our @EXPORT = qw();
our @EXPORT_OK = qw(&Data);
our %EXPORT_TAGS = ();
# Get Perl modules.
# None.
# Get Codex modules.
# None.
# -------------------------------------------------------------------------
# Data
# -------------------------------------------------------------------------
# Convert a raw Codex <role> element (a hash ref carrying the id, name
# and description attributes) into a plain hash of those three fields.
sub Data
{
    # Get arguments.
    my $Data = shift;

    # Check arguments.
    # TODO
    # Check data.
    # TODO

    # Copy the three documented attributes into a fresh hash.
    my %role = map { $_ => $Data->{$_} } qw(id name description);

    # Return results.
    return %role;
}
1;
__END__
=pod
=head1 AUTHOR
Dave Foglesong (fogleson@adobe.com)
=head1 COPYRIGHT
Copyright 2008 Adobe Systems Incorporated. All rights reserved.
=cut
| brycelelbach/asl | tools/adobe/Codex2/Types/role.pm | Perl | mit | 1,876 |
package Statistics::Basic::StdDev;
use strict;
use warnings;
use Carp;
use base 'Statistics::Basic::_OneVectorBase';
# Construct a StdDev computer on top of a Variance computer.  If the
# underlying vector already has a cached 'stddev' computer, return that
# singleton instead of the new object (per-vector computer cache).
sub new {
    my $class = shift;
    warn "[new $class]\n" if $Statistics::Basic::DEBUG >= 2;
    my $this = bless {}, $class;
    # build (or fetch) the variance over the same arguments; croak with
    # the underlying error if construction fails
    my $variance = $this->{V} = eval { Statistics::Basic::Variance->new(@_) } or croak $@;
    my $vector = $this->{v} = $variance->query_vector;
    # reuse an existing stddev computer for this vector if one is cached
    my $c = $vector->_get_computer( 'stddev' ); return $c if defined $c;
    $vector->_set_computer( stddev => $this );
    return $this;
}
# Recompute the standard deviation as the square root of the underlying
# variance.  Leaves {_value} untouched when the variance is undefined.
sub _recalc {
    my ( $this, $first ) = @_;

    delete $this->{recalc_needed};

    my $variance = $this->{V}->query;
    return unless defined $variance;

    # no need to query filled here, variance does it for us
    warn "[recalc " . ref($this) . "] sqrt( $variance )\n" if $Statistics::Basic::DEBUG;

    $this->{_value} = sqrt $variance;
    return;
}
# Delegate to the underlying Variance object's mean.
sub query_mean {
    my ($this) = @_;
    return $this->{V}->query_mean;
}
1;
| ppapasaikas/SANJUAN | perllib/Statistics/Basic/StdDev.pm | Perl | mit | 993 |
#!/usr/bin/perl
# --- request setup and authentication ------------------------------------
# Parse user/planet/authcode from the query string, refuse dead/locked
# nations, and verify the auth code against the access-code DBM file.
# fix: the original `require 'quickies.pl'` had no terminating semicolon,
# which is a syntax error with the following list assignment.
require 'quickies.pl';
($User,$Planet,$AuthCode)=split(/&/,$ENV{QUERY_STRING});
$PlanetDir = $MasterPath . "/se/Planets/$Planet";
$UserDir = "$PlanetDir/users/$User";
if (-e "$UserDir/Dead.txt") {
    # dead nations get bounced to the death page
    print "Location: http://www.bluewand.com/cgi-bin/classic/Dead.pl?$User&$Planet&$AuthCode\n\n";
    die;
}
if (-e "$UserDir/dupe.txt") {
    print "<SCRIPT>alert(\"Your nation has been locked down for security reasons. Please contact the Bluewand Entertainment team at shattered.empires\@canada.com for details.\");history.back();</SCRIPT>";
    die;
}
if (-e "$UserDir/notallowed.txt") {
    print "<SCRIPT>alert(\"Your nation has been taken off-line temporarily. Please contact the GSD team for details.\");history.back();</SCRIPT>";
    $flags = 1;
    die;
}
print "Content-type: text/html\n\n";
$user_information = $MasterPath . "/User Information";
dbmopen(%authCode, "$user_information/accesscode", 0777);
if(($AuthCode ne $authCode{$User}) || ($AuthCode eq "")){
    print "<SCRIPT>alert(\"Security Failure. Please notify the GSD team immediately.\");history.back();</SCRIPT>";
    die;
}
dbmclose(%authCode);
&parse_form;
$SF = qq!<font face=verdana size=-1>!;
$Target = $data{'target'};
$Target =~ tr/_/ /;
if (-e "$TargetDir/alliance.txt") {
open (IN, "$TargetDir/alliance.txt");
$AllianceName = <IN>;
close (IN);
chop ($AllianceName);
$AllianceName =~ tr/_/ /;
} else {
$AllianceName = "None";
}
open (IN, "$TargetDir/country.txt");
flock (IN, 1);
@countrydata2 = <IN>;
close (IN);
&chopper (@countrydata2);
$DefTotalSize = @countrydata2[8];
open (IN, "$UserDir/country.txt") or print $!;
flock (IN, 1);
@countrydata = <IN>;
close (IN);
&chopper (@countrydata);
$AttTotalSize = @countrydata[8];
open (IN, "$TargetDir/userinfo.txt");
@Values = <IN>;
close (IN);
&chopper (@Values);
open (IN, "$TargetDir/City.txt");
@City = <IN>;
close (IN);
&chopper (@City);
$Count = scalar(@City);
open (IN, "$PlayerDir/Retal.txt");
flock (IN, 1);
@DataIn = <IN>;
close (IN);
&chopper (@DataIn);
foreach $Line (@DataIn) {
($AgrCountry, $TurnsToAttack) = split (/,/, $Line);
$AttackHash{$AgrCountry} = $TurnsToAttack;
}
open (IN, "$TargetDir/military.txt");
$Def = <IN>;
close (IN);
chop ($Def);
if ($Def == 2) {$Def = "Red"}
if ($Def == 1) {$Def = "Yellow"}
if ($Def == 0 or $Def eq "") {$Def = "Green"}
open (IN, "$TargetDir/continent.txt");
$Continent = <IN>;
close (IN);
if (@Values[5] eq "CA") {$GT = "Capitalist "}
if (@Values[5] eq "FA") {$GT = "Facist "}
if (@Values[5] eq "CO") {$GT = "Socialist "}
if (@Values[5] eq "ME") {$GT = "Mercantalist "}
if (@Values[4] eq "DE") {$GT .= "Democracy"}
if (@Values[4] eq "MO") {$GT .= "Monarchy"}
if (@Values[4] eq "DI") {$GT .= "Dictatorship"}
if (@Values[4] eq "TH") {$GT .= "Theocracy"}
if (@Values[4] eq "RE") {$GT .= "Republic"}
if ((($AttTotalSize * .75) > $DefTotalSize) || (($DefTotalSize * .75) > $AttTotalSize)) {
$WarnMessage = qq!Attacking this nation <B>will</b> incur the wrath of the UWG.!;
}
# --- render target info and city selection form --------------------------
# NOTE: these print statements use qq with the non-ASCII delimiter 'Þ'
# so that ! and other common delimiters can appear in the HTML; do not
# add anything inside the quoted blocks.
print qqÞ
<body bgcolor=000000 text=white>$SF
<SCRIPT>parent.frames[1].location.reload()</SCRIPT>
<table width=100% border=1 cellspacing=0><TR><TD bgcolor=333333>$SF<B><Center>Attack Selection</TD></TR></table>
<BR><Center><font color=red>$WarnMessage</font><BR><BR>Please Select A Target City<BR></center><BR>
<form method=POST action="http://www.bluewand.com/cgi-bin/classic/war3.pl?$User&$Planet&$AuthCode&$data{'target'}"><Center>
<table width=100% border=1 cellspacing=0 bgcolor=666666>
<TR><TD bgcolor="#333333" colspan=4>$SF<center>Target Information</TD></TR>
<TR><TD bgcolor="#333333" width=25%>$SF Target Name:</TD><TD bgcolor="#666666" width=25%>$SF$Target</TD><TD bgcolor="#333333" width=25%>$SF Alliance:</TD><TD width=25% bgcolor=#666666>$SF $AllianceName</TD></TR>
<TR><TD bgcolor="#333333">$SF Government:</TD><TD bgcolor="#666666">$SF$GT</TD><TD bgcolor="#333333">$SF Defense Condition:</TD><TD bgcolor="#666666">$SF$Def</TD></TR>
<TR><TD bgcolor="#333333">$SF Cities:</TD><TD bgcolor="#666666">$SF$Count</TD><TD bgcolor="#333333">$SF Continent:</TD><TD bgcolor="#666666">$SF$Continent</TD></TR>
</table>
<BR><BR>
</center>
<Table width=100% border=1 cellspacing=0>
<TR bgcolor="#333333"><TD>$SF City Name</TD><TD>$SF Continent</TD><TD>$SF Size</TD><TD>$SF Population</TD><TD>$SF Worth</TD><TD>$SF Target</TD></TR>
Þ;
# first pass: find the highest TurnsLeft across cities; only cities at
# that level are offered as targets below
foreach $Item (@City) {
    ($Name,$Population,$Status,$Contint,$Acceptance,$Feature,$Hospitals,$Barracks,$Agriculture,$Ag,$Commercial,$Co,$Industrial,$In,$Residential,$Re,$LandSize,$FormerOwner,$CityPlanet,$Worth,$Modern,$CityType,$Schools,$PercentLeft,$TurnsLeft) = split(/\|/, $Item);
    if ($TurnsLeft > $AttackLevel) {$AttackLevel = $TurnsLeft;}
}
if ($AttackLevel < 1) {$AttackLevel = 0;}
# second pass: emit one table row (with a radio button) per eligible city
foreach $Item (@City) {
    ($Name,$Population,$Status,$Contint,$Acceptance,$Feature,$Hospitals,$Barracks,$Agriculture,$Ag,$Commercial,$Co,$Industrial,$In,$Residential,$Re,$LandSize,$FormerOwner,$CityPlanet,$Worth,$Modern,$CityType,$Schools,$PercentLeft,$TurnsLeft) = split (/\|/, $Item);
    $Size = &Space($Re + $Ag + $Co + $In);
    $People = &Space($Population);
    $Worth = &Space($Worth);
    # form value uses underscores in place of spaces, like the directory names
    $Name2 = $Name;
    $Name2 =~ tr/ /_/;
    if ($TurnsLeft == $AttackLevel) {
        print qqÞ<TR bgcolor="#666666"><TD>$SF$Name</TD><TD>$SF$Contint</TD><TD>$SF $Size</TD><TD>$SF$People</TD><TD>$SF \$$Worth</TD><TD>$SF Attack <input type=radio name="City" value="$Name2"></TD></TR>Þ;
    }
}
print qq!
</table><BR><BR>
$Levels
<center><input type=submit value="Select City" name=city></form>
!;
# --- allied armies section (header only; rows were never implemented) ----
if (-e "$PlayerDir/alliance.txt") {
    open (IN, "$PlayerDir/alliance.txt");
    $AllianceName = <IN>;
    close (IN);
    chop ($AllianceName);
    print qqÞ
</table><BR><BR>
<Table width=80% border=1 cellspacing=0>
<TR bgcolor="#999999"><TD>$SF Allied Army Name</TD><TD>$SF Personnel</TD><TD>$SF Cost</TD><TD>$SF Use</TD></TR>Þ;
    # NOTE(review): no rows are printed between header and close — the
    # allied-armies table is emitted empty; presumably unfinished.
    print qqÞ
</table>
Þ;
}
# Read the POST body from STDIN and decode the url-encoded name/value
# pairs into the package-global %data.  The two trailing substitutions
# strip HTML comments and tags from each value (crude XSS defense).
sub parse_form {
    # Get the input
    read(STDIN, $buffer, $ENV{'CONTENT_LENGTH'});
    # Split the name-value pairs
    @pairs = split(/&/, $buffer);
    foreach $pair (@pairs) {
        ($name, $value) = split(/=/, $pair);
        # Un-Webify plus signs and %-encoding
        $value =~ tr/+/ /;
        $value =~ s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg;
        $value =~ s/<!--(.|\n)*-->//g;
        $value =~ s/<([^>]|\n)*>//g;
        $data{$name} = $value;
    }
}
#sub chopper{
# foreach $k(@_){
# chop($k);
# }
#}
#
#sub Space {
# local($_) = @_;
# 1 while s/^(-?\d+)(\d{3})/$1 $2/;
# return $_;
#}
| cpraught/shattered-empires | war2.pl | Perl | mit | 6,574 |
package T2t::UserPreferencesCache;
use strict;
use warnings;
use T2t::Utilities;
our $instance = undef;
# Private constructor: build the preferences object with every option at
# its documented default.  Callers must go through getInstance() — this
# class is a singleton.
sub _new
{
    my $package = shift;
    my $self = {};
    # 'zz' is a never-matching placeholder pattern for skipFields
    $self->{skipFields} = 'zz';
    $self->{header} = 0;
    $self->{debug} = 0;
    $self->{emptyRowCellAtts} = "";
    $self->{quiet} = 0;
    $self->{verbose} = 0;
    $self->{overwrite} = 1;
    $self->{addNewLineToCell} = 0;
    $self->{nbsp} = 0;
    $self->{xhtml} = 0;
    $self->{addSpace} = 0;
    $self->{limit} = -1; # no limit by default
    $self->{'highlightColumn.color'} = '';
    $self->{'highlightColumn.number'} = '';
    $self->{removeEmptyRows} = 0;
    $self->{templateFile} = undef;
    # list- and hash-valued options start out empty
    $self->{ledgerColors} = [];
    $self->{cellAlignments} = [];
    $self->{cellWidths} = [];
    $self->{cellAtts} = {};
    $self->{tableAtts} = {};
    $self->{rowAtts} = {};
    $self->{bodyAtts} = {};
    bless( $self, $package );
}
# Return the process-wide singleton, creating it on first use.
sub getInstance
{
    # Direct class-method call replaces the original's indirect-object
    # syntax (`_new T2t::UserPreferencesCache()`), which is deprecated
    # and ambiguous to the parser.
    $instance = T2t::UserPreferencesCache->_new() if ! defined $instance;
    return $instance;
}
# Apply a hash ref of dotted user preferences ("general.*", "cell.*",
# "table.*") to this object.  cell/table keys are stored raw via
# saveOption; the recognised general/table options below are routed to
# their typed setters, and only when a value is actually present.
sub parseAttributes
{
    my $self = shift;
    my $prefs = shift;
    # stash every cell.* / table.* attribute verbatim first
    foreach my $key ( sort keys %{$prefs} )
    {
        $self->saveOption($prefs, $key );
    }
    # for all of these options, only set them if a value is being passed in
    $self->setTemplateFile( $prefs->{'general.template'} ) if defined $prefs->{'general.template'};
    $self->setDelimiter( $prefs->{'general.delim'} ) if defined $prefs->{'general.delim'};
    $self->setDebug( $prefs->{'general.debug'} ) if defined $prefs->{'general.debug'};
    $self->setQuiet( $prefs->{'general.quiet'} ) if defined $prefs->{'general.quiet'};
    $self->setVerbose( $prefs->{'general.verbose'} ) if defined $prefs->{'general.verbose'};
    $self->setOverwrite( $prefs->{'general.overwrite'} ) if defined $prefs->{'general.overwrite'};
    $self->setSkipFields( $prefs->{'general.skipFields'} ) if defined $prefs->{'general.skipFields'};
    $self->setAddNewLineToCell( $prefs->{'general.addNewLine'} ) if defined $prefs->{'general.addNewLine'};
    $self->setNbsp( $prefs->{'general.nbsp'} ) if defined $prefs->{'general.nbsp'};
    $self->setHeader( $prefs->{'general.header'} ) if defined $prefs->{'general.header'};
    $self->setEmptyRowCellAtts( $prefs->{'general.emptyRowCellAtt'} ) if defined $prefs->{'general.emptyRowCellAtt'};
    $self->setAddSpace( $prefs->{'general.addSpace'} ) if defined $prefs->{'general.addSpace'};
    $self->setLimit( $prefs->{'general.limit'} ) if defined $prefs->{'general.limit'};
    $self->setTablesOnly( $prefs->{'general.tablesOnly'} ) if defined $prefs->{'general.tablesOnly'};
    $self->setDashes( $prefs->{'general.dashes'} ) if defined $prefs->{'general.dashes'};
    $self->setSqueeze( $prefs->{'general.squeeze'} ) if defined $prefs->{'general.squeeze'};
    $self->setOneTable( $prefs->{'general.oneTable'} ) if defined $prefs->{'general.oneTable'};
    $self->setTitle( $prefs->{'general.title'} ) if defined $prefs->{'general.title'};
    $self->setCreateH1( $prefs->{'general.createH1'} ) if defined $prefs->{'general.createH1'};
    $self->setEqualColumns( $prefs->{'general.equalColumns'} ) if defined $prefs->{'general.equalColumns'};
    # fix: the guard used to test 'general.removeEmptyRow' (no "s") while
    # the value was read from 'general.removeEmptyRows', so the option
    # was silently ignored unless both keys happened to be present
    $self->setRemoveEmptyRows( $prefs->{'general.removeEmptyRows'} ) if defined $prefs->{'general.removeEmptyRows'};
    $self->setLedgerColors( $prefs->{'table.ledgerColors'} ) if defined $prefs->{'table.ledgerColors'};
    $self->setCellWidths( $prefs->{'table.cellWidths'} ) if defined $prefs->{'table.cellWidths'};
    $self->setCellAlignments( $prefs->{'table.cellAlignments'} ) if defined $prefs->{'table.cellAlignments'};
    $self->setHighlightColumnColor( $prefs->{'table.highlightColumn.color'} ) if( $prefs->{'table.highlightColumn.color'} );
    $self->setHighlightColumnNumber( $prefs->{'table.highlightColumn.number'} ) if( $prefs->{'table.highlightColumn.number'} );
}
# Store one dotted preference into the matching attribute hash
# (cell.* -> cellAtts, table.* -> tableAtts), keyed by the name with
# its "prefix." stripped.  Other prefixes are ignored here.
sub saveOption
{
    my ( $self, $attributes, $key ) = @_;

    ( my $shortKey = $key ) =~ s!(.*?)\.!!;

    if ( $key =~ /^cell/ )
    {
        $self->{cellAtts}->{$shortKey} = $attributes->{$key};
    }
    elsif ( $key =~ /^table/ )
    {
        $self->{tableAtts}->{$shortKey} = $attributes->{$key};
    }
}
# Return the configured highlight-column number, or -1 when no (truthy)
# value has been set.
sub getHighlightColumnNumber()
{
    my $number = $_[0]->{'highlightColumn.number'};
    return $number ? $number : -1;
}
# Set the input field delimiter.  A false/empty value or the literal
# two-character string '\t' both mean a real tab character.
sub setDelimiter
{
    my ( $self, $delim ) = @_;
    $delim = "\t" if !$delim || $delim eq '\t';
    $self->{delimiter} = $delim;
}
# --- simple get/set accessors for the general options --------------------
sub getDelimiter { $_[0]->{delimiter}; }
sub getTemplateFile { $_[0]->{templateFile} }
sub setTemplateFile { $_[0]->{templateFile} = $_[1]; }
sub isEqualColumns { $_[0]->{equalColumns}; }
sub setEqualColumns { $_[0]->{equalColumns} = $_[1]; }
sub setTablesOnly { $_[0]->{tablesOnly} = $_[1]; }
sub isTablesOnly { $_[0]->{tablesOnly}; }
sub setDashes { $_[0]->{dashes} = $_[1]; }
sub isDashes { $_[0]->{dashes}; }
sub setSqueeze { $_[0]->{squeeze} = $_[1]; }
sub isSqueeze { $_[0]->{squeeze}; }
sub setOneTable { $_[0]->{oneTable} = $_[1]; }
sub isOneTable { $_[0]->{oneTable}; }
sub setTitle { $_[0]->{title} = $_[1]; }
sub getTitle { $_[0]->{title}; }
sub setCreateH1 { $_[0]->{h1} = $_[1]; }
sub isCreateH1 { $_[0]->{h1}; }
# highlight-column accessors (see also getHighlightColumnNumber above)
sub setHighlightColumnNumber() { $_[0]->{'highlightColumn.number'} = $_[1]; }
sub getHighlightColumnColor() { $_[0]->{'highlightColumn.color'}; }
sub setHighlightColumnColor() { $_[0]->{'highlightColumn.color'} = $_[1]; }
sub setAddSpace { $_[0]->{addSpace} = $_[1]; }
sub isAddSpace { $_[0]->{addSpace}; }
sub setDebug { $_[0]->{debug} = $_[1]; }
sub isDebug { $_[0]->{debug}; }
sub setQuiet { $_[0]->{quiet} = $_[1]; }
sub isQuiet { $_[0]->{quiet}; }
sub setVerbose { $_[0]->{verbose} = $_[1]; }
sub isVerbose { $_[0]->{verbose}; }
sub setOverwrite { $_[0]->{overwrite} = $_[1]; }
sub isOverwrite { $_[0]->{overwrite}; }
sub isHeader { $_[0]->{header}; }
sub setHeader { $_[0]->{header} = $_[1]; }
# a limit <= 0 means "no limit" (see isLimitSet)
sub setLimit { $_[0]->{limit} = $_[1]; }
sub getLimit { $_[0]->{limit}; }
sub isLimitSet
{
    my $self = shift;
    return $self->getLimit() > 0;
}
sub setSkipFields { $_[0]->{skipFields} = $_[1]; }
sub getSkipFields { $_[0]->{skipFields}; }
sub setEmptyRowCellAtts { $_[0]->{emptyRowCellAtts} = $_[1]; }
# Return the cell attribute string used for empty rows, or '' when the
# option is unset/false.
sub getEmptyRowCellAtts
{
    my $self = shift;
    return $self->{emptyRowCellAtts} || '';
}
# --- ledger (alternating row) colors -------------------------------------
sub getAllLedgerColors { $_[0]->{ledgerColors}; }
# accept a comma-separated string and append each trimmed color
sub setLedgerColors
{
    my $self = shift;
    my $text = shift;
    return if ! $text;
    $self->addLedgerColors( split(',',$text) );
}
sub addLedgerColors
{
    my $self = shift;
    my @colors = @_;
    foreach ( @colors )
    {
        # trim() comes from T2t::Utilities
        push( @{$self->{ledgerColors}}, trim($_) );
    }
}
# empty the list in place (splice keeps the same array ref alive)
sub removeLedgerColors
{
    my $self = shift;
    splice @{$self->{ledgerColors}};
}
# --- per-column cell widths ----------------------------------------------
sub getAllCellWidths { $_[0]->{cellWidths}; }
# width for a single zero-based column position (undef if not set)
sub getCellWidth
{
    my $self = shift;
    my $pos = shift;
    $self->{cellWidths}->[$pos];
}
# accept a comma-separated string and append each trimmed width
sub setCellWidths
{
    my $self = shift;
    my $text = shift;
    return if ! $text;
    $self->addCellWidths( split(',',$text) );
}
sub addCellWidths
{
    my $self = shift;
    my @widths = @_;
    foreach ( @widths )
    {
        push(@{$self->{cellWidths}}, trim($_) );
    }
}
# empty the list in place (splice keeps the same array ref alive)
sub removeCellWidths
{
    my $self = shift;
    splice @{$self->{cellWidths}};
}
# --- per-column cell alignments (same shape as the cell-width API) -------
sub getAllCellAlignments { $_[0]->{cellAlignments}; }
# alignment for a single zero-based column position (undef if not set)
sub getCellAlignment
{
    my $self = shift;
    my $pos = shift;
    $self->{cellAlignments}->[$pos];
}
# accept a comma-separated string and append each trimmed alignment
sub setCellAlignments
{
    my $self = shift;
    my $text = shift;
    return if ! $text;
    $self->addCellAlignments( split(',',$text) );
}
sub addCellAlignments
{
    my $self = shift;
    my @widths = @_;
    foreach ( @widths )
    {
        push(@{$self->{cellAlignments}}, trim($_) );
    }
}
# empty the list in place (splice keeps the same array ref alive)
sub removeCellAlignments
{
    my $self = shift;
    splice @{$self->{cellAlignments}};
}
sub setTableAttributes { $_[0]->{tableAtts} = $_[1]; }

# Return a fresh hash ref of the stored table attributes containing only
# keys that are legal HTML <table> attributes; the t2t-specific
# pseudo-attributes (cellWidths, cellAlignments, ledgerColors,
# highlightColumn*) are filtered out.
sub getTableAttributes
{
    my $self = shift;
    my %htmlAtts;
    while ( my ( $name, $value ) = each %{ $self->{tableAtts} } )
    {
        # skip the pseudo-attributes that are not HTML compliant
        next if $name =~ /cellWidths|cellAlignments|ledgerColors|highlightColumn/;
        $htmlAtts{$name} = $value;
    }
    return \%htmlAtts;
}
# --- remaining simple accessors ------------------------------------------
sub setNbsp { $_[0]->{nbsp} = $_[1]; }
sub isNbsp { $_[0]->{nbsp}; }
sub setCellAttributes { $_[0]->{cellAtts} = $_[1]; }
sub getCellAttributes { $_[0]->{cellAtts}; }
sub setRowAttributes { $_[0]->{rowAtts} = $_[1]; }
sub getRowAttributes { $_[0]->{rowAtts}; }
sub setBodyAttributes { $_[0]->{bodyAtts} = $_[1]; }
sub getBodyAttributes { $_[0]->{bodyAtts}; }
sub setAddNewLineToCell { $_[0]->{addNewLineToCell} = $_[1]; }
sub getAddNewLineToCell { $_[0]->{addNewLineToCell}; }
sub setRemoveEmptyRows { $_[0]->{removeEmptyRows} = $_[1]; }
sub getRemoveEmptyRows { $_[0]->{removeEmptyRows}; }
1;
__END__
=head1 AUTHOR INFORMATION
Copyright 2000-, Steven Scholnick <scholnicks@gmail.com>
t2t is published under MIT. See license.txt for details
| scholnicks/t2t | T2t/UserPreferencesCache.pm | Perl | mit | 9,435 |
# See bottom of file for license and copyright information
package VisDoc::XMLOutputFormatterAllMethods;
use base 'VisDoc::XMLOutputFormatterListingBase';

use strict;
use warnings;    # fix: the original loaded warnings twice

use XML::Writer();

our $URI = 'all-methods';
=pod
=cut
# Return the fixed URI slug for the "all methods" listing page.
sub _uri {
    my $this = shift;
    return $URI;
}
# Return the localized page title for the "all methods" listing.
sub _title {
    my $this = shift;
    return $this->_docTerm('all_methods_title');
}
=pod
_writeList( $xmlWriter ) -> $bool
Create a list of classes.
=cut
# Collect every (visible) method and package-level function across all
# parsed files, sort them case-insensitively by id, mark duplicate
# member names so their class name can be shown, and write them out as
# a <tocList>.  Returns 0 when there is nothing to write, 1 otherwise.
sub _writeList {
    my ( $this, $inWriter ) = @_;
    my $methods;
    foreach my $fileData ( @{ $this->{data} } ) {
        foreach my $package ( @{ $fileData->{packages} } ) {
            # unless listPrivate is on, skip non-public containers/members
            next
                if !$this->{preferences}->{listPrivate} && !$package->isPublic();
            # package-level (free) functions
            foreach my $function ( @{ $package->{functions} } ) {
                next
                    if !$this->{preferences}->{listPrivate}
                        && !$function->isPublic();
                # id is globally unique: memberId + owning package name
                my $methodId = $function->getId() . ".$package->{name}";
                push @$methods,
                    {
                    id       => $methodId,
                    method   => $function,
                    language => $fileData->{language},
                    fileData => $fileData,
                    package  => $package
                    };
            }
            # methods of each class in the package
            foreach my $class ( @{ $package->{classes} } ) {
                next
                    if !$this->{preferences}->{listPrivate}
                        && !$class->isPublic();
                foreach my $method ( @{ $class->{methods} } ) {
                    next
                        if !$this->{preferences}->{listPrivate}
                            && !$method->isPublic();
                    my $methodId =
                        $method->getId() . ".$class->{name}.$package->{name}";
                    push @$methods,
                        {
                        id       => $methodId,
                        method   => $method,
                        language => $fileData->{language},
                        fileData => $fileData,
                        class    => $class
                        };
                }
            }
        }
    }
    return 0 if ( !$methods || !scalar @{$methods} );
    # sort methods
    @{$methods} =
        sort { lc( $a->{id} ) cmp lc( $b->{id} ) } @{$methods};
    # find duplicate member names and add class names
    my $refCounts;
    foreach my $methodHash ( @{$methods} ) {
        my $name = $methodHash->{method}->{name};
        $refCounts->{$name}->{count}++;
        push @{ $refCounts->{$name}->{methods} }, $methodHash;
    }
    while ( my ( $key, $value ) = each(%$refCounts) ) {
        next if $refCounts->{$key}->{count} < 2;
        foreach my $methodHash ( @{ $refCounts->{$key}->{methods} } ) {
            $methodHash->{isDuplicateName} = 1;
        }
    }
    # emit one <listGroup> item per collected member
    $inWriter->startTag('tocList');
    $inWriter->startTag('listGroup');
    foreach my $methodHash ( @{$methods} ) {
        my $method = $methodHash->{method};
        my $summary =
            $this->getSummaryLine( $method->{javadoc}, $methodHash->{fileData} );
        # className is only shown to disambiguate duplicate member names
        my $className;
        $className = $methodHash->{class}->{name}
            if $methodHash->{isDuplicateName};
        my $attributes = {
            isMethod   => 1,
            memberName => $method->{nameId},
            isPublic   => $method->isPublic(),
            language   => $methodHash->{language},
            access     => $method->{access},
            type       => $method->{type},
            summary    => $summary,
            className  => $className,
        };
        # link target: the owning package's page for free functions, the
        # owning class's page for methods
        my $uri = '';
        if ( $methodHash->{package} ) {
            $uri = $methodHash->{package}->getUri();
        }
        elsif ( $methodHash->{class} ) {
            $uri = $methodHash->{class}->getUri();
        }
        $this->_writeClassItem( $inWriter, $method->{name}, $uri, $attributes );
    }
    $inWriter->endTag('listGroup');
    $inWriter->endTag('tocList');
    return 1;
}
1;
# VisDoc - Code documentation generator, http://visdoc.org
# This software is licensed under the MIT License
#
# The MIT License
#
# Copyright (c) 2010-2011 Arthur Clemens, VisDoc contributors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
| ArthurClemens/VisDoc | code/perl/lib/VisDoc/XMLOutputFormatterAllMethods.pm | Perl | mit | 5,293 |
package GAL::Parser::bed;
use strict;
use vars qw($VERSION);
$VERSION = 0.2.0;
use base qw(GAL::Parser);
use GAL::Reader::DelimitedLine;
=head1 NAME
GAL::Parser::bed - Parse BED files
=head1 VERSION
This document describes GAL::Parser::bed version 0.2.0
=head1 SYNOPSIS
my $parser = GAL::Parser::bed->new(file => 'bed.txt');
while (my $feature_hash = $parser->next_feature_hash) {
print $parser->to_gff3($feature_hash) . "\n";
}
=head1 DESCRIPTION
L<GAL::Parser::bed> provides a parser for BED data.
=head1 Constructor
New L<GAL::Parser::bed> objects are created by the class method
new. Arguments should be passed to the constructor as a list (or
reference) of key value pairs. All attributes of the
L<GAL::Parser::bed> object can be set in the call to new. An
simple example of object creation would look like this:
my $parser = GAL::Parser::bed->new(file => 'bed.txt');
The constructor recognizes the following parameters which will set the
appropriate attributes:
=over
=item * C<< file => feature_file.txt >>
This optional parameter provides the filename for the file containing
the data to be parsed. While this parameter is optional either it, or
the following fh parameter must be set.
=item * C<< fh => feature_file.txt >>
This optional parameter provides a filehandle to read data from. While
this parameter is optional either it, or the following fh parameter
must be set.
=back
=cut
#-----------------------------------------------------------------------------
=head2 new
Title : new
Usage : GAL::Parser::bed->new();
Function: Creates a GAL::Parser::bed object;
Returns : A GAL::Parser::bed object
Args : See the attributes described above.
=cut
# Constructor: all real work happens in the base-class constructor and
# _initialize_args below.
sub new {
    my ( $class, @args ) = @_;
    return $class->SUPER::new(@args);
}
#-----------------------------------------------------------------------------
# Validate and apply constructor arguments.  This parser declares no
# attributes of its own, so the valid-attribute list is empty.
sub _initialize_args {
    my ($self, @args) = @_;
    ######################################################################
    # This block of code handels class attributes. Use the
    # @valid_attributes below to define the valid attributes for
    # this class. You must have identically named get/set methods
    # for each attribute. Leave the rest of this block alone!
    ######################################################################
    my $args = $self->SUPER::_initialize_args(@args);
    my @valid_attributes = qw(); # Set valid class attributes here
    $self->set_attributes($args, @valid_attributes);
    ######################################################################
}
#-----------------------------------------------------------------------------
=head2 parse_record
Title : parse_record
Usage : $a = $self->parse_record();
Function: Parse the data from a record.
Returns : A hash ref needed by Feature.pm to create a Feature object
Args : A hash ref of fields that this sub can understand (In this case GFF3).
=cut
sub parse_record {
    my ($self, $record) = @_;
    # $record is a hash reference that contains the keys assigned
    # in @field_names array in the reader method below.
    # Fill in the first 8 columns for GFF3
    # See http://www.sequenceontology.org/resources/gff3.html for details.
    my $id         = $self->counter('ID');
    my $seqid      = $record->{seqid};
    my $source     = '.';
    my $type       = 'region';
    # BED starts are 0-based half-open; GFF3 is 1-based, hence the +1
    my $start      = $record->{start} + 1;
    my $end        = $record->{end};
    my $score      = $record->{score}  || '.';
    my $strand     = $record->{strand} || '.';
    my $phase      = '.';
    # Create the attribute hash reference.  Note that all values
    # are array references - even those that could only ever have
    # one value.  This is for consistency in the interface.
    # Suggested keys include (from the GFF3 spec), but are not
    # limited to: ID, Name, Alias, Parent, Target, Gap,
    # Derives_from, Note, Dbxref and Ontology_term. Note that
    # attribute names are case sensitive. "Parent" is not the same
    # as "parent". All attributes that begin with an uppercase
    # letter are reserved for later use. Attributes that begin
    # with a lowercase letter can be used freely by applications.
    my $attributes = {ID => [$id],
                     };
    # NOTE(review): contrary to the comment above, these optional BED
    # fields are stored as plain scalars, not array refs — confirm which
    # form downstream consumers expect.  Also, 'blockSized' appears to
    # be a typo for the BED spec's 'blockSizes'; it matches reader()'s
    # field names, so fixing it requires changing both subs together.
    for my $key (qw(thickStart thickEnd itemRgb blockCount
                    blockSized blockStarts)) {
        next unless defined $record->{$key};
        $attributes->{$key} = $record->{$key};
    }
    my $feature_data = {feature_id => $id,
                        seqid      => $seqid,
                        source     => $source,
                        type       => $type,
                        start      => $start,
                        end        => $end,
                        score      => $score,
                        strand     => $strand,
                        phase      => $phase,
                        attributes => $attributes,
                       };
    return $feature_data;
}
#-----------------------------------------------------------------------------
=head2 reader
Title : reader
Usage : $a = $self->reader
Function: Return the reader object.
Returns : A L<GAL::Reader::DelimitedLine> singleton.
Args : None
=cut
sub reader {

    # Lazily construct and cache the GAL::Reader::DelimitedLine
    # singleton that splits BED lines into named columns.
    # Returns: the cached reader object.
    my $self = shift;

    unless ($self->{reader}) {
	my @columns = qw(seqid start end name score strand
			 thickStart thickEnd itemRgb blockCount
			 blockSized blockStarts);
	$self->{reader} =
	    GAL::Reader::DelimitedLine->new(field_names => \@columns);
    }
    return $self->{reader};
}
#-----------------------------------------------------------------------------
=head1 DIAGNOSTICS
L<GAL::Parser::bed> does not throw any warnings or errors.
=head1 CONFIGURATION AND ENVIRONMENT
L<GAL::Parser::bed> requires no configuration files or environment variables.
=head1 DEPENDENCIES
L<GAL::Parser>
L<GAL::Reader::DelimitedLine>
=head1 INCOMPATIBILITIES
None reported.
=head1 BUGS AND LIMITATIONS
No bugs have been reported.
Please report any bugs or feature requests to:
barry.moore@genetics.utah.edu
=head1 AUTHOR
Barry Moore <barry.moore@genetics.utah.edu>
=head1 LICENCE AND COPYRIGHT
Copyright (c) 2010-2014, Barry Moore <barry.moore@genetics.utah.edu>. All
rights reserved.
This module is free software; you can redistribute it and/or
modify it under the same terms as Perl itself (See LICENSE).
=head1 DISCLAIMER OF WARRANTY
BECAUSE THIS SOFTWARE IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
FOR THE SOFTWARE, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT
WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER
PARTIES PROVIDE THE SOFTWARE "AS IS" WITHOUT WARRANTY OF ANY KIND,
EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
SOFTWARE IS WITH YOU. SHOULD THE SOFTWARE PROVE DEFECTIVE, YOU ASSUME
THE COST OF ALL NECESSARY SERVICING, REPAIR, OR CORRECTION.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
REDISTRIBUTE THE SOFTWARE AS PERMITTED BY THE ABOVE LICENCE, BE LIABLE
TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL, OR
CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
SOFTWARE (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
FAILURE OF THE SOFTWARE TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
DAMAGES.
=cut
1;
| 4ureliek/TEanalysis | Lib/GAL/Parser/bed.pm | Perl | mit | 7,473 |
#!/usr/bin/perl
=head1 NAME
experimental_design.pl
Controller for experimental design mason components
=cut
=head1 DESCRIPTION
This is the script to show the web_page using MASON
=cut
=head1 AUTHORS
Aureliano Bombarely Gomez
(ab782@cornell.edu)
=cut
use strict;
use warnings;
use HTML::Mason;
use CXGN::Page;
use CXGN::MasonFactory;
use CXGN::DB::Connection;
use CXGN::DB::DBICFactory;
use CXGN::GEM::Schema;
use CXGN::GEM::ExperimentalDesign;
my $mason = CXGN::MasonFactory->new();

## Pull the URL arguments (id or name) via CXGN::Page.
my %args = CXGN::Page->new()->get_all_encoded_arguments();

## Build the schema used for all the gem searches; postgres 8.1
## installs additionally need the tsearch2 schema on the search path.
my $psql_version = `psql --version`;
chomp($psql_version);

my @schema_list = ('gem', 'biosource', 'metadata', 'public');
push @schema_list, 'tsearch2' if $psql_version =~ /8\.1/;

my $schema = CXGN::DB::DBICFactory->open_schema( 'CXGN::GEM::Schema', search_path => \@schema_list, );

## Select the experimental design either by numeric id or by name;
## otherwise keep the empty object so the error page is shown below.
my $expdesign = CXGN::GEM::ExperimentalDesign->new($schema);
if (exists $args{'id'} && $args{'id'} =~ m/^\d+$/) {
    $expdesign = CXGN::GEM::ExperimentalDesign->new($schema, $args{'id'});
}
elsif (exists $args{'name'}) {
    $expdesign = CXGN::GEM::ExperimentalDesign->new_by_name($schema, $args{'name'});
}

## Collect the associated experiments and publications once we know the
## design was actually found.
my @exp_list;
my @pubs = ();
if (defined $expdesign->get_experimental_design_id()) {
    @exp_list = $expdesign->get_experiment_list();
    @pubs     = $expdesign->get_publication_list();
}

## There are two ways to access the page, using id=int or name=something.
## Any other combination renders the error page.
if (defined $expdesign->get_experimental_design_id or defined $expdesign->get_experimental_design_name) {
    $mason->exec('/gem/experimental_design_detail.mas',
		 schema    => $schema,
		 expdesign => $expdesign,
		 pub_list  => \@pubs,
		 exp_list  => \@exp_list );
}
else {
    $mason->exec('/gem/gem_page_error.mas',
		 schema => $schema,
		 object => $expdesign );
}
| solgenomics/sgn | cgi-bin/gem/experimental_design.pl | Perl | mit | 2,043 |
#!/usr/bin/perl -w
##############################################################################
#
# File: disk_usage.pl
#
# Function: Traverse a Directory and get the disk usage of
# items in the folder(s)
# additional functionality :
# threshold (only show items > threshold val)
# recurse from initial directory to show grand-total
# (TODO)
#
# the syntax is :
# ./du.pl [directory] -t=[threshold value] -r
#
# Author(s): Michael Jelks
#
# Copyright: Copyright (c) 2003 Michael Jelks
# All Rights Reserved.
#
# Source: Started anew.
#
# Notes:
#
# Change History:
# 10/15/03 Started source
#
#
##############################################################################
use strict;

# Unbuffers output - good for nph-scripts
$| = 1;

# Script-wide state shared with the subs below.
my $output;        # scratch buffer for external command output
my @total_list;    # lines handed to process_list()
my $dir = $ARGV[0];     # directory to report on (required)
my $threshold = 0;      # only show items >= this many MB
my $subtotal = 0;       # accumulated size of displayed items
my $total = 0;          # grand total for the whole directory

# clear the screen for readability
system "clear";

# exit program if they don't enter a [valid] directory
if (!$dir) {
    print "You need to supply a directory before we can process the folders\n";
    exit; }
if (!-d $dir) {
    print "\"$dir\" is not a valid directory - check to make sure it exists\n";
    exit; }

# this adds a trailing slash - needed for du -ks later on...
if ($dir !~ /\/$/) {
    $dir =~ s/(.+)/$1\//; }

# add threshold values if -t argument exists -
# otherwise use initialized value up top
# NOTE(review): assumes ARGV[1] is "-t=N"; passing the documented "-r"
# flag here would leave a non-numeric threshold - TODO validate.
if ($ARGV[1]) {
    $threshold = $ARGV[1];
    $threshold =~ s/-t=(.+)/$1/;
    print "Checking for values >= " .$threshold."MB\n"; }

&process_header;
# NOTE(review): process_directory receives \$subtotal, but as written it
# never writes back through the reference, so $subtotal is still 0
# below - verify the intended accumulation.
&process_directory($dir, \$subtotal);

# init the process by priming the @total_list array
# with the $dir input via ARGV
# NOTE(review): $dir is a bare path, not "size<TAB>name" text, so this
# process_list call matches nothing; presumably it was meant to display
# the subtotal row - confirm.
$total_list[0] = $dir;
&process_list(\@total_list,$subtotal,"subtotal");

# then get the grand total for the entire directory
@total_list = ();
$output = &command_line("du", "-ks" , "\"" . $dir . "\"");
push(@total_list,$output);
$total = &process_list(\@total_list,$total,"total");

print "\n\n\n";
exit;
##############################################################################
#
# SUBS
#
##############################################################################
sub command_line {

    # Run an external command through the shell and return its entire
    # stdout as one string.
    # Args   : $cmd (program), $flags (options), $target (operand).
    # Returns: captured stdout (may contain multiple lines).
    my ($cmd, $flags, $target) = @_;

    my $captured = `$cmd $flags $target`;
    return $captured;
}
sub process_directory {

    # Walk one directory, printing every entry whose disk usage meets
    # the threshold and accumulating the running subtotal.
    #
    # Args:
    #   $dir          - directory path (with trailing slash)
    #   $subtotal_ref - SCALAR ref; updated in place with the
    #                   accumulated size in MB
    #
    # BUGFIX: the original assigned process_list()'s result to its own
    # lexical copy of the reference, so the caller's $subtotal was never
    # updated (and the raw reference was even used in numeric context).
    # We now read and write through the reference.
    my ($dir, $subtotal_ref) = @_;

    # first get a list of all files for the specified directory
    my $output = &command_line("ls", "-1F", $dir);
    my $list = &parse_list($output);
    my $list_tmp;

    # then fold the size of each entry exceeding the threshold into the
    # caller's subtotal
    foreach my $item (@$list) {
	$output = &command_line("du", "-ks" , "\"" . $dir . $item . "\"");
	$list_tmp = &parse_list($output);
	$$subtotal_ref = &process_list($list_tmp, $$subtotal_ref, "");
    }
}
sub parse_list {

    # Turn raw `ls -1F` (or `du -ks`) output into a sorted list of
    # entries: symlinks (trailing '@') are dropped, any other trailing
    # type indicator (*, /, etc.) is stripped, and the result is sorted
    # case-insensitively for a human-friendly alphabetical order.
    # Args   : one multi-line string.
    # Returns: array ref of cleaned entry names.
    my ($raw) = @_;

    my @entries;
    for my $entry (split /\n/, $raw) {
	# skip symlinks entirely
	next if $entry =~ /.+\@$/;
	# remove any funny stuff at the end - * / etc.
	$entry =~ s/(.+)\W$/$1/;
	push @entries, $entry;
    }

    my @sorted = sort { lc($a) cmp lc($b) } @entries;
    return \@sorted;
}
# process_list - render and/or total one list of "size<TAB>name" lines.
#
# Args:
#   $list  - array ref of `du -ks` style output lines
#   $total - running total (MB) to add matching entries to
#   $type  - "" for normal rows, or "total" to print the footer plus a
#            "Total for directory" row (any other value acts like "")
# Returns: the updated running total in MB.
# Relies on the file-scoped $threshold global for filtering.
sub process_list {
    my ($list, $total, $type) = @_;
    my ($item, $name, $size);
    foreach $item (@$list)
    {
        # extract the byte count and file/dir name from the list;
        # lines that do not look like "digits whitespace name" are skipped
        if ($item =~ /^(\d+)\s+(.+)/)
        {
            $size = $1;
            $name = $2;
            # format the output to 2 decimals - print only if threshold exceeded
            # (du -ks reports KB, so /1024 yields MB)
            $size = sprintf("%.2f", ($size/1024));
            if ($type eq "total")
            {
                &process_footer;
                $name = "Total for directory -> " . $name;
                &process_items($name, $size);
            }
            elsif ($size >= $threshold)
            {
                &process_items($name, $size);
                $total += $size;
            }
        }
    }
    return $total;
}
## FORMATTING PERL CONSTRUCTS ##
# process_header - print the report column header via a Perl format.
# The lines between "format HEADER =" and the lone "." are a literal
# picture; do not add comments inside them.
sub process_header {
format HEADER =
                                                                           SIZE
Path/Filename                                                      Size (in MB)
------------------------------------------------------------------------------
.
    # point STDOUT's active format at HEADER, then emit it
    &setHandle("HEADER");
    write;
}
# process_footer - print the separator line shown before the grand total.
sub process_footer {
format FOOTER =
-------------------------------------------------------------------------------
.
    # point STDOUT's active format at FOOTER, then emit it
    &setHandle("FOOTER");
    write;
}
# process_items - print one report row: left-justified name, right-
# justified size.  Args: $name (path or label), $size (MB, preformatted).
# The picture line fixes the column widths; do not comment inside it.
sub process_items {
    my ($name, $size) = @_;
format ITEMS =
@<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<@>>>>>>>>>>>>>>
$name, $size
.
    # point STDOUT's active format at ITEMS, then emit the row
    &setHandle("ITEMS");
    write;
}
## PERL FORMATTING MAGIC ##
sub setHandle {

    # Install $format_name as STDOUT's active format ($~) without
    # disturbing whichever filehandle is currently selected.  Uses the
    # classic select-within-select idiom: select STDOUT, set $~ on it,
    # and restore the previously selected handle in one expression.
    my ($format_name) = @_;

    select((select(STDOUT), $~ = $format_name)[0]);
}
##**************************************************************
##
## Copyright (C) 1990-2011, Condor Team, Computer Sciences Department,
## University of Wisconsin-Madison, WI.
##
## Licensed under the Apache License, Version 2.0 (the "License"); you
## may not use this file except in compliance with the License. You may
## obtain a copy of the License at
##
## http://www.apache.org/licenses/LICENSE-2.0
##
## Unless required by applicable law or agreed to in writing, software
## distributed under the License is distributed on an "AS IS" BASIS,
## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
## See the License for the specific language governing permissions and
## limitations under the License.
##
##**************************************************************
# CondorPersonal.pm - a Perl API to Condor for Personal Condors
#
# Designed to allow a flexible way to have tests and other jobs
# run in conjunction with other Condor perl modules and control
# the environment in which they run
#
# 1-6-05 Bill Taylor
#
#################################################################
#
# A Personal Condor will be constructed in a subdirectory
# based on current PID and the version string passed in
# such that bin, sbin, log, condor_config and the rest
# live in:
#
# PID/PIDversion/sbin
# PID/PIDversion/bin
# PID/PIDversion/log
# PID/PIDversion/execute
# PID/PIDversion/condor_config
# ...
package CondorPersonal;
use strict;
use warnings;
use Carp;
use Cwd;
use POSIX qw/sys_wait_h strftime/;
use Socket;
use Sys::Hostname;
use CondorUtils;
#################################################################
#
# Parameters used within parameter config file......
#
# Parameter Use Default Variable stored in
# ----------------------------------------------------------------------------------------------
# condortemplate Core config file condor_config_template $personal_template
# condorlocalsrc Name for condor local config src $personal_local_src
# daemonwait Wait for startd/schedd to be seen true $personal_startup_wait
# localpostsrc New end of local config file $personal_local_post_src
# append_condor_config Text to append to end of local config file
# secprepostsrc New security settings $personal_sec_prepost_src
# condordaemon daemon list to start contents of config template $personal_daemons
# condorconfig Name for condor config file condor_config $personal_config
# condordomain Name for domain local $condordomain
# condorlocal Name for condor local config condor_config.local $personal_local
# condor "install" or path to tarball nightlies $condordistribution
# collector Used to define COLLECTOR_HOST $collectorhost
# nameschedd Used to define SCHEDD_NAME cat(name and collector) $scheddname
# condorhost Used to define CONDOR_HOST $condorhost
# ports Select dynamic or normal ports dynamic $portchanges
# slots sets NUM_CPUS NUM_SLOTS none
# universe parallel configuration of schedd none $personal_universe
#
# Notes added 12/14/05 bt
#
# The current uses of this module only support condor = [ nightlies | install ]
# The difference is that we "look" for the binaries using condor_config_val -config
# if "install" is specified and we assume the "testing" environment for binary locations
# if "nightlies" is called out. In either case we utilize the current condor configuration
# file and the current local config file to ensure working with an up to date
# base configuration file. These we copy into the new directory we are building
# for the personal condor. We then "tune" them up....
#
# currently "universe" is only used to add a bit to the config file for the parallel
# universe.
#
# So a base parameter file could be as little as: (I think)
# condor = install
# ports = dynamic
# condordaemon = master, startd, schedd
#
# Current uses to look at are *stork*.run tests, *_par.run tests and *condorc*.run tests.
# This module is in a very early stage supporting current testing.
#
# Notes from efforts 10/10/2007
# This module continues to grow and complicated tests combining multiple
# pools continue to be added for testing of anything to flocking, had, security modes
# and a run through of all 16 protocol negotiations. We have just finished being much
# more strict about when we decide the pool we are creating is actually up. This has created
# more consistent results. The daemons(main 5 -Col, Neg, Mas, Strt, Schd) all are configured
# with address files and we wait until those files exist if that daemon is configured.
# We will adjust for daemons controlled by HAD. We also wait for the collector(if we have one)
# to see the schedd or the startd if those daemons are configured. HOWEVER, I am adding a
# bypass for this for the 16 variations of the authentication protocol negotiations.
# If "daemonwait" is set to false, we will only wait for the address files to exist and not
# require inter-daemon communication.
# Notes from efforts 11/24/2008
# The recent adding of tracking of core files and ERROR statements in daemon
# logs shows (since the negotiation tests never shut down generating an ongoing
# stream of ERROR statements making other good tests fail) that we need a better
# and surer KILL for personal condors. if condor_off -master fails then we
# leave loose personal condors around which could cause other tests to fail.
# We are going to collect the PIDs of the daemons for a personal condor just
# after they start and before they have a chance to rotate and write a file PIDS
# in the log directory we will kill later.
# Map each daemon name (both upper- and lower-case spellings) to the
# base name of its log file; used when scanning logs and collecting
# daemon PIDs.
my %daemon_logs =
(
	"COLLECTOR" => "CollectorLog",
	"NEGOTIATOR" => "NegotiatorLog",
	"MASTER" => "MasterLog",
	"STARTD" => "StartLog",
	"SCHEDD" => "SchedLog",
	"collector" => "CollectorLog",
	"negotiator" => "NegotiatorLog",
	"master" => "MasterLog",
	"startd" => "StartLog",
	"schedd" => "SchedLog",
);

# Module-wide state for the personal condor being constructed.
my $topleveldir = getcwd();   # sandbox root; reassigned per personal condor
my $home = $topleveldir;      # original working directory at load time
my $localdir;                 # RELEASE_DIR/LOCAL_DIR root for the generated config
my $condorlocaldir;           # windows (cygpath -m) form of $localdir
my $pid = $$;
my $version = ""; # remote, middle, ....... for naming schedd "schedd . pid . version"
my $mastername = ""; # master_$verison
my $DEBUG = 1;
my $DEBUGLEVEL = 2; # nothing higher shows up
my $debuglevel = 3; # take all the ones we don't want to see
                    # and allowed easy changing and remove hard
                    # coded value
my $isnightly = IsThisNightly($topleveldir);
my $wrap_test;                # set from $ENV{WRAP_TESTS} in StartCondorWithParams
#################################################################
#
# Debug messages get time stamped. These will start showing up
# at DEBUGLEVEL = 3 with some rather verbose at 4.
#
# For a single test which uses this module simply
# CondorPersonal::DebugOn();
# CondorPersonal::DebugLevel(3);
# .... some time later .....
# CondorPersonal::DebugLevel(2);
#
# There is no reason not to have debug always on the the level
# pretty completely controls it. All DebugOff calls
# have been removed.
#
# This is a similar debug setup as the rest of the test
# suite but I did not want to require the other condor
# modules for this one.
#
#################################################################
# Defaults for the personal condor being built; most of these can be
# overridden by entries in the parameter file handled by
# ParsePersonalCondorParams() and are restored by Reset().
my %personal_condor_params;     # named parameters for the current request
my %personal_config_changes;    # config lines we rewrote, for later checks
my $personal_config = "condor_config";
my $personal_template = "condor_config_template";
my $personal_daemons = "";      # daemon list override, if any
my $personal_local = "condor_config.local";
my $personal_local_src = "";
my $personal_local_post_src = "";
my $personal_sec_prepost_src = "";
my $personal_universe = "";
my $personal_startup_wait = "true";
my $personalmaster;             # path to this sandbox's condor_master
my $portchanges = "dynamic";    # "dynamic" vs. fixed-port configuration
my $collector_port = "0";
my $personal_config_file = "";
my $condordomain = "";
my $procdaddress = "";
#################################################################
#
# Main interface StartCondor
#
# Calls functions to parse parameters, install binaries, tune the config file
# and start the personal condor. Passes back config file location and port
# number<config_file_location:collector_port>.
#
# StartCondor - legacy positional-argument entry point.
# Args (positional): $testname, $paramfile, $version, [$mpid].
#   When exactly three arguments are given, the current process id is
#   used as the owner pid.
# Returns: "<config_file_path>+<collector_port>" via
#   StartCondorWithParams(); dies on missing arguments or param file.
sub StartCondor
{
	my $mpid = "";
	my $arraysz = scalar(@_);
	my $testname = shift || die "Missing test name\n";
	my $paramfile = shift || die "Missing parameter file!\n";
	$version = shift || die "Missing parameter version!\n";
	my $config_and_port = "";
	my $winpath = "";

	# the parameter file must exist before we try to parse it
	if(!(-f $paramfile)) {
		die "StartCondor: param file $paramfile does not exist!!\n";
	}

	if($arraysz == 3) {
		$mpid = $pid;		# assign process id
	} else {
		$mpid = shift;		# assign process id
	}

	# load NAME = VALUE pairs into %personal_condor_params
	CondorPersonal::ParsePersonalCondorParams($paramfile);

	# Insert the positional arguments into the new-style named-argument
	# hash and call the version of this function which handles it.
	$personal_condor_params{"test_name"} = $testname;
	$personal_condor_params{"condor_name"} = $version;
	$personal_condor_params{"owner_pid"} = $mpid;

	return StartCondorWithParams(%personal_condor_params);
}
############################################
## StartCondorWithParams
##
## Starts up a personal condor that is configured as specified in
## the named arguments to this function. If you are using the
## CondorTest framework, do not call this function directly.
## Call CondorTest::StartCondorWithParams().
##
## Required Arguments:
## condor_name - a descriptive name, used when generating directory names
##
## Optional Arguments:
## test_name - name of the test that is using this personal condor
## append_condor_config - lines to be added to the (local) configuration file
## daemon_list - list of condor daemons to run
##
##
############################################
# StartCondorWithParams - build, configure and launch a personal condor
# from the named arguments (see the block comment above for the list).
# Returns "<config_file_path>+<collector_port>" on success, or an error
# string when the install step fails.
sub StartCondorWithParams
{
	%personal_condor_params = @_;
	my $testname = $personal_condor_params{"test_name"} || die "Missing test_name\n";
	$version = $personal_condor_params{"condor_name"} || die "Missing condor_name!\n";
	my $mpid = $personal_condor_params{"owner_pid"} || $pid;
	my $config_and_port = "";
	my $winpath = "";

	# Root all of this personal condor's state in a per-test, per-pid
	# directory.
	$topleveldir = "$topleveldir/$testname.saveme/$mpid/$mpid$version";
	# BUGFIX: the original re-appended "$testname.saveme/$mpid/$mpid$version"
	# to the already-updated $topleveldir here, creating a spurious
	# doubly-nested directory tree (mkdir -p masked the mistake by also
	# creating the intended parent).  Create exactly the sandbox root.
	runcmd("mkdir -p $topleveldir");
	$procdaddress = $mpid . $version;

	# an explicit personaldir overrides the computed sandbox location
	if(exists $personal_condor_params{"personaldir"}) {
		$topleveldir = $personal_condor_params{"personaldir"};
		debug( "SETTING $topleveldir as topleveldir\n",$debuglevel);
		runcmd("mkdir -p $topleveldir");
	}

	# if we are wrapping tests, publish log location
	$wrap_test = $ENV{WRAP_TESTS};
	if(defined $wrap_test) {
		my $logdir = $topleveldir . "/log";
		#CondorPubLogdirs::PublishLogDir($testname,$logdir);
	}

	$personal_config_file = $topleveldir ."/condor_config";

	# locate (or unpack) the condor binaries; empty result means failure
	$localdir = CondorPersonal::InstallPersonalCondor();
	if($localdir eq "")
	{
		return("Failed to do needed Condor Install\n");
	}

	# on windows, config paths must be in cygpath -m (mixed) form
	if( CondorUtils::is_windows() == 1 ){
		$winpath = `cygpath -m $localdir`;
		CondorUtils::fullchomp($winpath);
		$condorlocaldir = $winpath;
		CondorPersonal::TunePersonalCondor($condorlocaldir, $mpid);
	} else {
		CondorPersonal::TunePersonalCondor($localdir, $mpid);
	}

	$collector_port = CondorPersonal::StartPersonalCondor();
	debug( "collector port is $collector_port\n",$debuglevel);

	if( CondorUtils::is_windows() == 1 ){
		$winpath = `cygpath -m $personal_config_file`;
		CondorUtils::fullchomp($winpath);
		print "Windows conversion of personal config file to $winpath!!\n";
		$config_and_port = $winpath . "+" . $collector_port ;
	} else {
		$config_and_port = $personal_config_file . "+" . $collector_port ;
	}

	debug( "StartCondor config_and_port is --$config_and_port--\n",$debuglevel);
	# clear module state so the next personal condor starts clean
	CondorPersonal::Reset();
	debug( "StartCondor config_and_port is --$config_and_port--\n",$debuglevel);
	debug( "Personal Condor Started\n",$debuglevel);
	print scalar localtime() . "\n";
	return( $config_and_port );
}
sub debug {

    # Print a "CP:"-tagged, timestamped message when debugging is on.
    # With no level argument the message always prints (if $DEBUG is
    # set); otherwise it prints only when $level <= $DEBUGLEVEL.
    my ($message, $level) = @_;
    my $tagged = "CP:" . $message;

    return unless $DEBUG;
    if ( (!defined $level) || ($level <= $DEBUGLEVEL) ) {
	print( "", timestamp(), ": $tagged" );
    }
}
# DebugLevel - set the verbosity ceiling; debug() suppresses messages
# whose level is above $DEBUGLEVEL.
sub DebugLevel
{
	my $newlevel = shift;
	$DEBUGLEVEL = $newlevel;
}

# DebugOn - enable debug() output.
sub DebugOn
{
	$DEBUG = 1;
}

# DebugOff - disable all debug() output.
sub DebugOff
{
	$DEBUG = 0;
}
sub timestamp {
    # Current local time as "YYYY/MM/DD HH:MM:SS", used to prefix
    # debug output.
    my $stamp = strftime("%Y/%m/%d %H:%M:%S", localtime);
    return $stamp;
}
# Reset - restore all module-level state to its load-time defaults so
# the next personal condor request starts from a clean slate.
# NOTE(review): $topleveldir is reset to getcwd(), which assumes the
# caller has already returned to the original working directory - confirm.
sub Reset
{
	debug( "CondorPersonal RESET\n",$debuglevel);
	%personal_condor_params = ();
	%personal_config_changes = ();
	$personal_config = "condor_config";
	$personal_template = "condor_config_template";
	$personal_daemons = "";
	$personal_local = "condor_config.local";
	$personal_local_src = "";
	$personal_local_post_src = "";
	$personal_sec_prepost_src = "";
	$personal_universe = "";
	$personal_startup_wait = "true";
	$topleveldir = getcwd();
	$home = $topleveldir;
	$portchanges = "dynamic";
	$collector_port = "0";
	$personal_config_file = "";
	$condordomain = "";
	$procdaddress = "";
}
#################################################################
#
# ParsePersonalCondorParams
#
# Parses parameter file in typical condor form of NAME = VALUE
# and stores results into a hash for lookup later.
#
# ParsePersonalCondorParams - read a parameter file of NAME = VALUE
# lines (condor config style) into the module-level
# %personal_condor_params hash for later lookup.
# Supports '#' comments, blank lines, backslash line continuations and
# one $ENV(NAME) substitution per value.  Keys are lower-cased.
# Args   : path to the parameter file.
# Returns: 1 on success; dies if the file cannot be opened.
sub ParsePersonalCondorParams
{
	my $submit_file = shift || croak "missing submit file argument";
	my $line = 0;

	# three-arg open with a lexical handle (the original bareword
	# 2-arg open also carried an unreachable "return 0" after die)
	open( my $fh, '<', $submit_file )
		or die "error opening \"$submit_file\": $!\n";

	debug( "reading submit file...\n" ,4);
	my $variable;
	my $value;

	while( my $current = <$fh> )
	{
		CondorUtils::fullchomp($current);
		$line++;

		# skip comments & blank lines
		next if $current =~ /^#/ || $current =~ /^\s*$/;

		# if this line is a variable assignment...
		if( $current =~ /^(\w+)\s*\=\s*(.*)$/ ) {
			$variable = lc $1;
			$value = $2;

			# if line ends with a continuation ('\')...
			while( $value =~ /\\\s*$/ ) {
				# remove the continuation
				$value =~ s/\\\s*$//;

				# BUGFIX: the original did "<SUBMIT_FILE> || last;"
				# which read and DISCARDED the next line, then
				# appended $_ (the current line) again.  Append the
				# actual continuation line instead.
				my $continuation = <$fh>;
				last unless defined $continuation;
				$value .= $continuation;
			}

			# compress whitespace and remove trailing newline for readability
			$value =~ s/\s+/ /g;
			CondorUtils::fullchomp($value);

			# Do proper environment substitution
			if( $value =~ /(.*)\$ENV\((.*)\)(.*)/ ) {
				my $envlookup = $ENV{$2};
				debug( "Found $envlookup in environment \n",4);
				$value = $1.$envlookup.$3;
			}

			debug( "(CondorPersonal.pm) $variable = $value\n" ,$debuglevel);

			# save the variable/value pair
			$personal_condor_params{$variable} = $value;
		}
	}
	close($fh);
	return 1;
}
#################################################################
#
# WhichCondorConfig
#
# Analysis to decide if we are currently in the environment of this same personal
# Condor. Not very likely anymore as the config files are all dependent on
# the pid of the requesting shell so every request for a personal condor
# should be unique.
#
sub WhichCondorConfig
{
    # Ask condor_config_val which config file the running master uses
    # and compare it against $pathtoconfig.
    # Returns one of: "matched running", "matched not running", "lost".
    my $pathtoconfig = shift @_;
    my $matchedconfig = "";
    my $badness = "";

    open(my $cv, "condor_config_val -config -master 2>&1 |") || die "condor_config_val: $!\n";
    while (my $output_line = <$cv>)
    {
	CondorUtils::fullchomp($output_line);
	debug ("--$output_line--\n",$debuglevel);

	# does this output line name exactly the config we care about?
	if( $output_line =~ /^\s*($pathtoconfig)\s*$/ )
	{
	    $matchedconfig = $1;
	    debug ("Matched! $1\n",$debuglevel);
	}
	# no master means the pool is configured but not running
	if( $output_line =~ /(Can't find address for this master)/ )
	{
	    $badness = $1;
	    debug( "Not currently running! $1\n",$debuglevel);
	}
    }
    close($cv);

    # we want matched running or matched not running or lost returned
    return "lost" if $matchedconfig eq "";
    return ($badness eq "") ? "matched running" : "matched not running";
}
##################################################################
#
# Run condor_config_val using the specified configuration file.
#
# CondorConfigVal - run condor_config_val for one parameter using the
# specified configuration file.
# Args   : $config_file (path used as CONDOR_CONFIG), $param_name.
# Returns: the chomped condor_config_val output.
sub CondorConfigVal
{
	my $config_file = shift;
	my $param_name = shift;

	# local() confines the CONDOR_CONFIG override to this sub and
	# restores the previous value even if the backticks die.  The
	# original saved/reassigned the old value by hand, which re-set
	# the variable to undef (with a warning) when it was never set.
	local $ENV{CONDOR_CONFIG} = $config_file;

	# NOTE: $param_name is interpolated into a shell command; callers
	# pass literal parameter names - do not feed untrusted input here.
	my $result = `condor_config_val $param_name`;
	chomp $result;

	return $result;
}
#################################################################
#
# InstallPersonalCondor
#
# We either find binaries in the environment or we install
# a particular tar ball.
#
# InstallPersonalCondor - locate or unpack the condor binaries for this
# sandbox, based on the "condor" parameter: "install" (use the condor
# found in PATH), "nightlies" (use ../../condor relative binaries), or a
# path to a tarball to extract.  Also records the hosting condor_config
# template/local-src paths and creates execute/spool/log directories.
# Returns the release root ($sbinloc) used as LOCAL_DIR, or dies.
sub InstallPersonalCondor
{
    my %control = %personal_condor_params;
    my $schedd;
    my $master;
    my $collector;
    my $submit;
    my $startd;
    my $negotiator;
    my $condorq = Which("condor_q");
    my $sbinloc;
    my $configline = "";
    my @configfiles;
    my $condordistribution;
    my $binloc;

    $condordistribution = $control{"condor"} || "nightlies";
    debug( "Install this condor --$condordistribution--\n",$debuglevel);
    if( $condordistribution eq "nightlies" ) {
        # test if this is really the environment we are in or
        # switch it to install mode.
        if(! -f "../../condor/sbin/condor_master") {
            $condordistribution = "install";
        }
    }
    if( $condordistribution eq "install" )
    {
        # where is the hosting condor_config file? The one assumed to be based
        # on a setup with condor_configure.
        open(CONFIG,"condor_config_val -config | ") || die "Can not find config file: $!\n";
        while(<CONFIG>)
        {
            CondorUtils::fullchomp($_);
            $configline = $_;
            push @configfiles, $configline;
        }
        close(CONFIG);
        # first line is the main config, second the local config source
        $personal_condor_params{"condortemplate"} = shift @configfiles;
        $personal_condor_params{"condorlocalsrc"} = shift @configfiles;
        debug("condor_q: $condorq\n",$debuglevel);
        debug("topleveldir: $topleveldir",$debuglevel);
        if( $condorq =~ /^(\/.*\/)(\w+)\s*$/ ) {
            debug( "Root path $1 and base $2\n",$debuglevel);
            $binloc = $1; # we'll get our binaries here.
        } elsif(-f "../release_dir/bin/condor_status") {
            print "Bummer which condor_q failed\n";
            print "Using ../release_dir/bin(s)\n";
            $binloc = "../release_dir/bin"; # we'll get our binaries here.
        }
        else
        {
            print "which condor_q responded <<<$condorq>>>! CondorPersonal Failing now\n";
            die "Can not seem to find a Condor install!\n";
        }
        # NOTE(review): "s*bin" matches zero-or-more 's' before "bin";
        # this looks like it was meant to be an optional single 's'
        # ([s]?bin) to cover both bin/ and sbin/ - confirm.
        if( $binloc =~ /^(\/.*\/)s*bin\/\s*$/ )
        {
            debug( "Root path to sbin is $1\n",$debuglevel);
            $sbinloc = $1; # we'll get our binaries here. # local_dir is here
        }
        else
        {
            die "Can not seem to locate Condor release binaries\n";
        }
        $schedd = $sbinloc . "sbin/". "condor_schedd";
        $master = $sbinloc . "sbin/". "condor_master";
        $collector = $sbinloc . "sbin/". "condor_collector";
        $submit = $binloc . "condor_submit";
        $startd = $sbinloc . "sbin/". "condor_startd";
        $negotiator = $sbinloc . "sbin/". "condor_negotiator";
        debug( "$schedd $master $collector $submit $startd $negotiator\n",$debuglevel);
        debug( "Sandbox started rooted here: $topleveldir\n",$debuglevel);
        # create the working directories for this personal condor
        runcmd("cd $topleveldir && mkdir -p execute spool log log/tmp");
    }
    elsif( $condordistribution eq "nightlies" )
    {
        # we want a mechanism by which to find the condor binaries
        # we are testing. But we know where they are relative to us
        # ../../condor/bin etc
        # That is simply the nightly test setup.... for now at least
        # where is the hosting condor_config file? The one assumed to be based
        # on a setup with condor_configure.
        debug(" Nightlies - find environment config files\n",$debuglevel);
        open(CONFIG,"condor_config_val -config | ") || die "Can not find config file: $!\n";
        while(<CONFIG>)
        {
            CondorUtils::fullchomp($_);
            $configline = $_;
            debug( "$_\n" ,$debuglevel);
            push @configfiles, $configline;
        }
        close(CONFIG);
        # yes this assumes we don't have multiple config files!
        $personal_condor_params{"condortemplate"} = shift @configfiles;
        $personal_condor_params{"condorlocalsrc"} = shift @configfiles;
        debug( "My path to condor_q is $condorq and topleveldir is $topleveldir\n",$debuglevel);
        if( $condorq =~ /^(\/.*\/)(\w+)\s*$/ )
        {
            debug( "Root path $1 and base $2\n",$debuglevel);
            $binloc = $1; # we'll get our binaries here.
        }
        else
        {
            print "which condor_q responded <<<$condorq>>>! CondorPersonal Failing now\n";
            die "Can not seem to find a Condor install!\n";
        }
        if( $binloc =~ /^(\/.*\/)bin\/\s*$/ )
        {
            debug( "Root path to sbin is $1\n",$debuglevel);
            $sbinloc = $1; # we'll get our binaries here. # local_dir is here
        }
        else
        {
            die "Can not seem to locate Condor release binaries\n";
        }
        #$binloc = "../../condor/bin/";
        #$sbinloc = "../../condor/";
        debug( "My path to condor_q is $binloc and topleveldir is $topleveldir\n",$debuglevel);
        $schedd = $sbinloc . "sbin/" . "condor_schedd";
        $master = $sbinloc . "sbin/" . "condor_master";
        $collector = $sbinloc . "sbin/" . "condor_collector";
        $submit = $binloc . "condor_submit";
        $startd = $sbinloc . "sbin/" . "condor_startd";
        $negotiator = $sbinloc . "sbin/" . "condor_negotiator";
        debug( "$schedd $master $collector $submit $startd $negotiator\n",$debuglevel);
        debug( "Sandbox started rooted here: $topleveldir\n",$debuglevel);
        runcmd("cd $topleveldir && mkdir -p execute spool log log/tmp");
    }
    elsif( -e $condordistribution )
    {
        # in this option we ought to run condor_configure
        # to get a current config files but we'll do this
        # after getting the current condor_config from
        # the environment we are in as it is supposed to
        # have been generated this way in the nightly tests
        # run in the NWO.
        my $res = chdir "$topleveldir";
        if(!$res) {
            die "chdir $topleveldir failed: $!\n";
            exit(1);
        }
        runcmd("cd $topleveldir && mkdir -p execute spool log");
        # unpack the given tarball into the sandbox
        runcmd("tar -xf $home/$condordistribution");
        $sbinloc = $topleveldir; # local_dir is here
        chdir "$home";
    }
    else
    {
        die "Undiscernable install directive! (condor = $condordistribution)\n";
    }
    debug( "InstallPersonalCondor returning $sbinloc for LOCAL_DIR setting\n",$debuglevel);
    return($sbinloc);
}
#################################################################
#
# TunePersonalCondor
#
# Most changes go into the condor_config.local file but
# some changes are done to the condor_config template.
#
# RELEASE_DIR, LOCAL_DIR and LOCAL_CONFIG_FILE are
# adjusted from the main template file and other
# changes are in the condor_config.local file.
#
sub TunePersonalCondor
{
my %control = %personal_condor_params;
my $myhost = CondorTest::getFqdnHost();
my @domainparts = split /\./, $myhost;
my $condorhost = "";
my $collectorhost = "";
my $mpid = "";
my $localdir = shift;
my $scheddname;
my $startdname;
if(scalar( @_ ) == 1) {
$mpid = $pid; # assign process id
} else {
$mpid = shift; # assign process id
}
debug( "TunePersonalCondor setting LOCAL_DIR to $localdir\n",$debuglevel);
#print "domain parts follow:";
#foreach my $part (@domainparts)
#{
#print " $part";
#}
#print "\n";
#$myhost = @domainparts[0];
debug( "My basic name is $myhost\n",$debuglevel);
# was a special condor host called out?
if( exists $control{"condorhost"} )
{
$condorhost = $control{"condorhost"};
}
# was a special condor collector called out?
if( exists $control{"collector"} )
{
$collectorhost = $control{"collector"};
}
# was a special domain called out?
if( exists $control{"condordomain"} )
{
$condordomain = $control{"condordomain"};
}
if( $condordomain ne "" ) {
$condorhost = $myhost . "." . $condordomain;
} else {
$condorhost = $myhost;
}
debug( "Fully qualified domain name is ************************ $condorhost ********************\n",$debuglevel);
# was a special template called out?
if( exists $control{"condortemplate"} )
{
$personal_template = $control{"condortemplate"};
}
# was a special config file called out?
if( exists $control{"condorconfig"} )
{
$personal_config = $control{"condorconfig"};
} else {
$personal_config = "condor_config";
# store this default in the personal condor params so
# other parts of the code can rely on it.
$personal_condor_params{"condorconfig"} = $personal_config;
}
# was a special daemon list called out?
if( exists $control{"daemon_list"} )
{
$personal_daemons = $control{"daemon_list"};
}
# was a special local config file name called out?
if( exists $control{"condorlocal"} )
{
$personal_local = $control{"condorlocal"};
} else {
$personal_local = "condor_config.local";
}
# was a special local config file src called out?
if( exists $control{"condorlocalsrc"} )
{
$personal_local_src = $control{"condorlocalsrc"};
}
# was a special local config file post src called out?
if( exists $control{"secprepostsrc"} )
{
$personal_sec_prepost_src = $control{"secprepostsrc"};
}
# was a special local config file post src called out?
if( exists $control{"localpostsrc"} )
{
$personal_local_post_src = $control{"localpostsrc"};
}
# is this for a specific universe like parallel?
if( exists $control{"universe"} )
{
$personal_universe = $control{"universe"};
debug( "HMMMMMMMMMMM universe request is $personal_universe\n",$debuglevel);
}
debug( "Proto file is --$personal_template--\n",4);
$personalmaster = "$topleveldir/sbin/condor_master";
#filter fig file storing entries we set so we can test
#for completeness when we are done
my $mytoppath = "";
if( CondorUtils::is_windows() == 1 ){
$mytoppath = `cygpath -m $topleveldir`;
CondorUtils::fullchomp($mytoppath);
} else {
$mytoppath = $topleveldir;
}
my $line;
#system("ls;pwd");
#print "***************** opening $personal_template as config file template *****************\n";
open(TEMPLATE,"<$personal_template") || die "Can not open template<<$personal_template>>: $!\n";
debug( "want to open new config file as $topleveldir/$personal_config\n",$debuglevel);
open(NEW,">$topleveldir/$personal_config") || die "Can not open new config file<$topleveldir/$personal_config>: $!\n";
print NEW "# Editing requested config<$personal_template>\n";
while(<TEMPLATE>)
{
CondorUtils::fullchomp($_);
$line = $_;
if( $line =~ /^RELEASE_DIR\s*=.*/ )
{
debug( "-----------$line-----------\n",4);
$personal_config_changes{"RELEASE_DIR"} = "RELEASE_DIR = $localdir\n";
print NEW "RELEASE_DIR = $localdir\n";
}
elsif( $line =~ /^LOCAL_DIR\s*=.*/ )
{
debug( "-----------$line-----------\n",4);
$personal_config_changes{"LOCAL_DIR"} = "LOCAL_DIR = $mytoppath\n";
print NEW "LOCAL_DIR = $mytoppath\n";
}
elsif( $line =~ /^LOCAL_CONFIG_FILE\s*=.*/ )
{
debug( "-----------$line-----------\n",4);
$personal_config_changes{"LOCAL_DIR"} = "LOCAL_DIR = $mytoppath\n";
print NEW "LOCAL_CONFIG_FILE = $mytoppath/$personal_local\n";
}
else
{
print NEW "$line\n";
}
}
close(TEMPLATE);
if( ! exists $personal_config_changes{"CONDOR_HOST"} )
{
$personal_config_changes{"CONDOR_HOST"} = "CONDOR_HOST = $condorhost\n";
}
close(NEW);
if( exists $control{"ports"} )
{
debug( "Port Changes being Processed!!!!!!!!!!!!!!!!!!!!\n",$debuglevel);
$portchanges = $control{"ports"};
debug( "portchanges set to $portchanges\n",$debuglevel);
}
open(NEW,">$topleveldir/$personal_local") || die "Can not open template: $!\n";
if($personal_local_src ne "")
{
print NEW "# Requested local config<$personal_local_src>\n";
#print "******************** Must seed condor_config.local <<$personal_local_src>> ************************\n";
open(LOCSRC,"<$personal_local_src") || die "Can not open local config template: $!\n";
while(<LOCSRC>)
{
CondorUtils::fullchomp($_);
$line = $_;
print NEW "$line\n";
}
# now make sure we have the local dir we want after the generic .local file is seeded in
$line = $personal_config_changes{"LOCAL_DIR"};
print NEW "$line\n";
# and a lock directory we like
print NEW "LOCK = \$(LOG)\n";
close(LOCSRC);
}
# Dan: Jan 30, '08 added D_NETWORK in order to debug condor_rm timeout
print NEW "ALL_DEBUG = D_FULLDEBUG\n";
# bill: 8/13/09 speed up dagman
print NEW "DAGMAN_USER_LOG_SCAN_INTERVAL = 1\n";
if($personal_daemons ne "")
{
# Allow the collector to run on the default and expected port as the main
# condor install on this system.
print NEW "# Adding requested daemons\n";
print NEW "DAEMON_LIST = $personal_daemons\n";
}
if($personal_universe eq "parallel")
{
# set up dedicated scheduler
print NEW "# Adding Dedicated Scheduler $personal_universe Universe\n";
print NEW "DedicatedScheduler = \"DedicatedScheduler\@schedd$mpid$version\@$condorhost\"\n";
print NEW "STARTD_EXPRS = \$(STARTD_EXPRS), DedicatedScheduler\n";
print NEW "SCHEDD_DEBUG = D_FULLDEBUG\n";
}
if( $portchanges eq "dynamic")
{
# this variation requests a dynamic port for collector and negotiator
# and the location where we can look up the adresses.
print NEW "# Adding for portchanges equal dynamic\n";
if( $collectorhost )
{
print NEW "COLLECTOR_HOST = $collectorhost\n";
debug("COLLECTOR_HOST = $collectorhost\n",$debuglevel);
}
else
{
print NEW "COLLECTOR_HOST = \$(CONDOR_HOST):0\n";
debug("COLLECTOR_HOST = \$(CONDOR_HOST):0\n",$debuglevel);
}
# For simulated pools, we need schedds and master to have unique names
if(exists $control{"nameschedd"}) {
$mastername = "master" . "_" . $version;
$scheddname = $mastername . "_schd";
$startdname = $mastername . "_strtd";
debug("MASTERNAME now master + $version($mastername)\n",$debuglevel);
print NEW "MASTER_NAME = $mastername\n";
print NEW "SCHEDD_NAME = $scheddname\n";
print NEW "STARTD_NAME = $startdname\n";
} else {
print NEW "SCHEDD_NAME = schedd$mpid$version\n";
}
print NEW "MASTER_ADDRESS_FILE = \$(LOG)/.master_address\n";
print NEW "COLLECTOR_ADDRESS_FILE = \$(LOG)/.collector_address\n";
print NEW "NEGOTIATOR_ADDRESS_FILE = \$(LOG)/.negotiator_address\n";
print NEW "CONDOR_HOST = $condorhost\n";
print NEW "START = TRUE\n";
print NEW "RUNBENCHMARKS = FALSE\n";
print NEW "JAVA_BENCHMARK_TIME = 0\n";
print NEW "SCHEDD_INTERVAL = 5\n";
print NEW "UPDATE_INTERVAL = 5\n";
print NEW "NEGOTIATOR_INTERVAL = 5\n";
print NEW "CONDOR_JOB_POLL_INTERVAL = 5\n";
print NEW "PERIODIC_EXPR_TIMESLICE = .99\n";
print NEW "JOB_START_DELAY = 0\n";
print NEW "# Done Adding for portchanges equal dynamic\n";
}
elsif( $portchanges eq "standard" )
{
# Allow the collector to run on the default and expected port as the main
# condor install on this system.
print NEW "# Adding for portchanges equal standard\n";
if( $collectorhost )
{
print NEW "COLLECTOR_HOST = $collectorhost\n";
debug("COLLECTOR_HOST is $collectorhost\n",$debuglevel);
}
else
{
print NEW "COLLECTOR_HOST = \$(CONDOR_HOST)\n";
debug("COLLECTOR_HOST is \$(CONDOR_HOST)\n",$debuglevel);
}
print NEW "CONDOR_HOST = $condorhost\n";
print NEW "START = TRUE\n";
print NEW "SCHEDD_INTERVAL = 5\n";
print NEW "UPDATE_INTERVAL = 5\n";
print NEW "NEGOTIATOR_INTERVAL = 5\n";
print NEW "CONDOR_JOB_POLL_INTERVAL = 5\n";
print NEW "RUNBENCHMARKS = false\n";
print NEW "JAVA_BENCHMARK_TIME = 0\n";
print NEW "# Done Adding for portchanges equal standard\n";
}
else
{
die "Misdirected request for ports\n";
exit(1);
}
#print NEW "PROCD_LOG = \$(LOG)/ProcLog\n";
if( CondorUtils::is_windows() == 1 ){
print NEW "# Adding procd pipe for windows\n";
print NEW "PROCD_ADDRESS = \\\\.\\pipe\\$procdaddress\n";
}
# now we consider configuration requests
if( exists $control{"slots"} )
{
my $myslots = $control{"slots"};
debug( "Slots wanted! Number = $myslots\n",$debuglevel);
print NEW "# Adding slot request from param file\n";
print NEW "NUM_CPUS = $myslots\n";
print NEW "SLOTS = $myslots\n";
print NEW "# Done Adding slot request from param file\n";
}
if($personal_sec_prepost_src ne "")
{
debug( "Adding to local config file from $personal_sec_prepost_src\n",$debuglevel);
open(SECURITY,"<$personal_sec_prepost_src") || die "Can not do local config additions: $! <<$personal_sec_prepost_src>>\n";
print NEW "# Adding changes requested from $personal_sec_prepost_src\n";
while(<SECURITY>)
{
print NEW "$_";
}
close(SECURITY);
print NEW "# Done Adding changes requested from $personal_sec_prepost_src\n";
}
if($personal_local_post_src ne "")
{
debug("Adding to local config file from $personal_local_post_src\n",$debuglevel);
open(POST,"<$personal_local_post_src") || die "Can not do local config additions: $! <<$personal_local_post_src>>\n";
print NEW "# Adding changes requested from $personal_local_post_src\n";
while(<POST>)
{
print NEW "$_";
}
close(POST);
print NEW "# Done Adding changes requested from $personal_local_post_src\n";
}
if( exists $control{append_condor_config} ) {
print NEW "# Appending from 'append_condor_config'\n";
print NEW "$control{append_condor_config}\n";
print NEW "# Done appending from 'append_condor_config'\n";
}
close(NEW);
PostTunePersonalCondor($personal_config_file);
}
#################################################################
#
# PostTunePersonalCondor() is called after TunePersonalCondor.
# It assumes that the configuration file is all set up and
# ready to use.
sub PostTunePersonalCondor
{
# Post-configuration fixup hook: runs after TunePersonalCondor and
# assumes the generated configuration file is complete and usable.
# Takes the path to that config file as its only argument.
my $config_file = shift;
# If this is a quill test, then quill is within
# $personal_daemons AND $topleveldir/../pgpass wants to be
# $topleveldir/spool/.pgpass
my $configured_daemon_list = CondorConfigVal($config_file,"daemon_list");
# NOTE(review): if CondorConfigVal fails this may be undef and the match
# below would warn -- TODO confirm CondorConfigVal always returns a string.
if($configured_daemon_list =~ m/quill/i ) {
debug( "This is a quill test (because DAEMON_LIST=$configured_daemon_list)\n", $debuglevel );
# Copy the postgres password file to where quill expects to find it.
my $cmd = "cp $topleveldir/../pgpass $topleveldir/spool/.pgpass";
runcmd("$cmd");
}
}
#################################################################
#
# StartPersonalCondor will start a personal condor which has
# been set up. If the ports are dynamic, it will look up the
# address and return the port number.
#
sub StartPersonalCondor
{
# Start a personal condor that has already been configured by
# TunePersonalCondor. Prepends the personal condor's sbin/bin to PATH,
# points CONDOR_CONFIG at the generated config, launches condor_master,
# and waits (IsRunningYet) for the pool to come up.
# Returns the collector port for "dynamic" port setups, "0" otherwise.
# Dies if a personal condor with this config already appears to be
# running, or if the pool never comes up.
my %control = %personal_condor_params;
my $personalmaster = "";
my $configfile = $control{"condorconfig"};
my $fullconfig = "$topleveldir/$configfile";
my $oldpath = $ENV{PATH};
# NOTE(review): assumes $localdir ends with a path separator -- TODO confirm.
my $newpath = $localdir . "sbin:" . $localdir . "bin:" . "$oldpath";
my $figpath = "";
$ENV{PATH} = $newpath;
debug( "Using this path: --$newpath--\n",$debuglevel);
debug( "Want $configfile for config file\n",$debuglevel);
if( CondorUtils::is_windows() == 1 ){
# Windows needs a mixed-mode (forward slash) path for the config.
$figpath = `cygpath -m $fullconfig`;
CondorUtils::fullchomp($figpath);
$fullconfig = $figpath;
# note: on windows all binaaries in bin!
$personalmaster = $localdir . "bin/condor_master -f &";
} else {
$personalmaster = $localdir . "sbin/condor_master -f &";
}
# We may not want to wait for certain daemons to talk
# to each other on startup.
if( exists $control{"daemonwait"} ) {
my $waitparam = $control{"daemonwait"};
if($waitparam eq "false") {
$personal_startup_wait = "false";
}
}
# set up to use the existing generated configfile
$ENV{CONDOR_CONFIG} = $fullconfig;
debug( "Is personal condor running config<$fullconfig>\n",$debuglevel);
my $condorstate = IsPersonalRunning($fullconfig);
debug( "Condor state is $condorstate\n",$debuglevel);
my $fig = $ENV{CONDOR_CONFIG};
debug( "Condor_config from environment is --$fig--\n",$debuglevel);
# At the momment we only restart/start a personal we just configured
# or reconfigured
if( $condorstate == 0 ) {
# not running with this config so treat it like a start case
debug("Condor state is off\n",$debuglevel);
debug( "start up the personal condor!--$personalmaster--\n",$debuglevel);
# when open3 is used it sits and waits forever
runcmd($personalmaster,{use_system=>1});
#system("condor_config_val -v log");
} else {
die "Bad state for a new personal condor configuration!<<running :-(>>\n";
}
# Block until all configured daemons have come up (or give up).
my $res = IsRunningYet();
if($res == 0) {
die "Can not continue because condor is not running!!!!\n";
}
# if this was a dynamic port startup, return the port which
# the collector is listening on...
if( $portchanges eq "dynamic" )
{
debug("Looking for collector port!\n",$debuglevel);
return( FindCollectorPort() );
}
else
{
debug("NOT Looking for collector port!\n",$debuglevel);
return("0");
}
}
#################################################################
#
# IsPersonalRunning( configpath )
#
# the one above "WhichConfig" is simply bogus and relies
# on a side effect of the wrong request condor_config_val -config -master
# without adding a parameter to look up. What we really want to know
# is if this personal is running(probably not...)
################################################################
sub IsPersonalRunning
{
# Determine whether a personal condor using the configuration at
# $pathtoconfig is currently running, by checking whether the
# master address file exists on disk.
# Returns 1 when the master appears to be running, 0 otherwise
# (including the "personaldir" case, where a stale address file from a
# badly-killed prior instance is deliberately ignored).
# Dies when condor_config_val does not report the expected config file.
my $pathtoconfig = shift @_;
my $line = "";
my $matchedconfig = "";

CondorUtils::fullchomp($pathtoconfig);

debug("call - condor_config_val -config -master log \n",$debuglevel);
# Lexical handle with a list-free pipe open; the original used a
# bareword handle and its die message was broken across source lines.
open(my $config_fh, '-|', "condor_config_val -config -master log 2>&1")
	|| die "Can not run condor_config_val: $!\n";
debug("parse - condor_config_val -config -master log \n",$debuglevel);
while(<$config_fh>) {
	CondorUtils::fullchomp($_);
	$line = $_;
	debug ("--$line--\n",$debuglevel);
	debug("Looking to match \"$pathtoconfig\"\n",$debuglevel);
	# index() does a plain substring search, so path metacharacters
	# are never treated as a regex pattern.
	if( index($line,$pathtoconfig) > -1 ) {
		$matchedconfig = $pathtoconfig;
		debug ("Matched! $pathtoconfig\n",$debuglevel);
		last;
	} else {
		debug("hmmmm looking for <<$pathtoconfig>> got <<$line>> \n",$debuglevel);
	}
}
# NOTE(review): this condition warns when close SUCCEEDS and $? != 13,
# which looks inverted (one would expect !close). Preserved as-is to
# avoid changing established behavior -- TODO confirm intent.
if ( close($config_fh) && ($? != 13) ) { # Ignore SIGPIPE
	warn "Error executing condor_config_val: '$?' '$!'";
}
if( $matchedconfig eq "" ) {
	die "lost: config does not match expected config setting......\n";
}

# Find the master address file to see if it exists and therefore the
# master is running.
open(my $maddr_fh, '-|', "condor_config_val MASTER_ADDRESS_FILE 2>&1")
	|| die "Can not run condor_config_val: $!\n";
while(<$maddr_fh>) {
	CondorUtils::fullchomp($_);
	$line = $_;
	if($line =~ /^(.*master_address)$/) {
		if(-f $1) {
			if(exists $personal_condor_params{"personaldir"}) {
				# ignore if we want to fall to same place
				# and the previous might have been killed badly
				close($maddr_fh);
				return(0);
			} else {
				debug("Master running\n",$debuglevel);
				close($maddr_fh);
				return(1);
			}
		} else {
			debug("Master not running\n",$debuglevel);
			close($maddr_fh);
			return(0);
		}
	}
}
close($maddr_fh);
# No master_address line was seen at all: treat as not running. The
# original fell off the end here and implicitly returned close()'s true
# value, which callers misread as "already running".
return(0);
}
#################################################################
#
# IsRunningYet
#
# We want to do out best to be sure the personal is fully running
# before going on to start a test against it. And this is also
# a great time to harvest the PIDS of the daemons to allow a more
# sure kill then condor_off can do in circumstances like
# screwed up authentication tests
#
#################################################################
sub IsRunningYet
{
# Wait for the personal condor to be fully up before tests run against
# it. For every daemon in DAEMON_LIST (minus daemons managed by a
# MASTER_<DAEMON>_CONTROLLER), wait for its address file to appear; for
# startd/schedd/negotiator additionally wait until the collector reports
# them. Finally harvest daemon PIDs via CollectDaemonPids() so a later
# hard kill can target every process.
# Returns 1 on success, 0 if any wait times out.
my $daemonlist = `condor_config_val daemon_list`;
CondorUtils::fullchomp($daemonlist);
my $collector = 0;
my $schedd = 0;
my $startd = 0;
my $first = 1;
my @status;
# Retry policy: up to $runlimit attempts with a linearly growing
# sleep of ($loopcount * $backoff) seconds between attempts.
my $runlimit = 8;
my $backoff = 2;
my $loopcount;
# first failure was had test where we looked for
# a negotiator but MASTER_NEGOTIATOR_CONTROLLER
# was set. So we will check for bypasses to normal
# operation and rewrite the daemon list
#my $old_debuglevel = $debuglevel;
#$debuglevel = $DEBUGLEVEL;
debug("In IsRunningYet DAEMON_LIST=$daemonlist\n",$debuglevel);
$daemonlist =~ s/\s*//g;
my @daemons = split /,/, $daemonlist;
$daemonlist = "";
my $line = "";
# Rebuild $daemonlist keeping only daemons that have NO
# MASTER_<DAEMON>_CONTROLLER defined; controlled daemons are started
# on demand and must not be waited for here.
foreach my $daemon (@daemons) {
$line = "";
debug("Looking for MASTER_XXXXXX_CONTROLLER for $daemon\n",$debuglevel);
my $definedcontrollstr = "condor_config_val MASTER_" . $daemon . "_CONTROLLER";
open(CCV, "$definedcontrollstr 2>&1 |") || die "condor_config_val: $!\n";
while(<CCV>) {
$line = $_;
# condor_config_val prints "Not defined" when no controller exists.
if( $line =~ /^.*Not defined.*$/) {
debug("Add $daemon to daemon list\n",$debuglevel);
if($first == 1) {
$first = 0;
$daemonlist = $daemon;
} else {
$daemonlist = $daemonlist . "," . $daemon;
}
}
debug("looking: $daemonlist\n",$debuglevel);
}
close(CCV);
}
# Phase 1: wait for each daemon's address file to appear on disk.
if($daemonlist =~ /MASTER/i) {
print "Has master dropped an address file yet - ";
# now wait for the master to start running... get address file loc
# and wait for file to exist
# Give the master time to start before jobs are submitted.
my $masteradr = `condor_config_val MASTER_ADDRESS_FILE`;
$masteradr =~ s/\012+$//;
$masteradr =~ s/\015+$//;
debug( "MASTER_ADDRESS_FILE is <<<<<$masteradr>>>>>\n",$debuglevel);
debug( "We are waiting for the file to exist\n",$debuglevel);
# Where is the master address file? wait for it to exist
my $havemasteraddr = "no";
$loopcount = 0;
while($havemasteraddr ne "yes") {
$loopcount++;
debug( "Looking for $masteradr\n",$debuglevel);
if( -f $masteradr ) {
debug( "Found it!!!! master address file\n",$debuglevel);
$havemasteraddr = "yes";
} elsif ( $loopcount == $runlimit ) {
debug( "Gave up waiting for master address file\n",$debuglevel);
return 0;
} else {
sleep ($loopcount * $backoff);
}
}
print "ok\n";
}
if($daemonlist =~ /COLLECTOR/i){
print "Has collector dropped an address file yet - ";
# now wait for the collector to start running... get address file loc
# and wait for file to exist
# Give the master time to start before jobs are submitted.
my $collectoradr = `condor_config_val COLLECTOR_ADDRESS_FILE`;
$collectoradr =~ s/\012+$//;
$collectoradr =~ s/\015+$//;
debug( "COLLECTOR_ADDRESS_FILE is <<<<<$collectoradr>>>>>\n",$debuglevel);
debug( "We are waiting for the file to exist\n",$debuglevel);
# Where is the collector address file? wait for it to exist
my $havecollectoraddr = "no";
$loopcount = 0;
while($havecollectoraddr ne "yes") {
$loopcount++;
debug( "Looking for $collectoradr\n",$debuglevel);
if( -f $collectoradr ) {
debug( "Found it!!!! collector address file\n",$debuglevel);
$havecollectoraddr = "yes";
} elsif ( $loopcount == $runlimit ) {
debug( "Gave up waiting for collector address file\n",$debuglevel);
return 0;
} else {
sleep ($loopcount * $backoff);
}
}
print "ok\n";
}
if($daemonlist =~ /NEGOTIATOR/i) {
print "Has negotiator dropped an address file yet - ";
# now wait for the negotiator to start running... get address file loc
# and wait for file to exist
# Give the master time to start before jobs are submitted.
my $negotiatoradr = `condor_config_val NEGOTIATOR_ADDRESS_FILE`;
$negotiatoradr =~ s/\012+$//;
$negotiatoradr =~ s/\015+$//;
debug( "NEGOTIATOR_ADDRESS_FILE is <<<<<$negotiatoradr>>>>>\n",$debuglevel);
debug( "We are waiting for the file to exist\n",$debuglevel);
# Where is the negotiator address file? wait for it to exist
my $havenegotiatoraddr = "no";
$loopcount = 0;
while($havenegotiatoraddr ne "yes") {
$loopcount++;
debug( "Looking for $negotiatoradr\n",$debuglevel);
if( -f $negotiatoradr ) {
debug( "Found it!!!! negotiator address file\n",$debuglevel);
$havenegotiatoraddr = "yes";
} elsif ( $loopcount == $runlimit ) {
debug( "Gave up waiting for negotiator address file\n",$debuglevel);
return 0;
} else {
sleep ($loopcount * $backoff);
}
}
print "ok\n";
}
if($daemonlist =~ /STARTD/i) {
print "Has startd dropped an address file yet - ";
# now wait for the startd to start running... get address file loc
# and wait for file to exist
# Give the master time to start before jobs are submitted.
my $startdadr = `condor_config_val STARTD_ADDRESS_FILE`;
$startdadr =~ s/\012+$//;
$startdadr =~ s/\015+$//;
debug( "STARTD_ADDRESS_FILE is <<<<<$startdadr>>>>>\n",$debuglevel);
debug( "We are waiting for the file to exist\n",$debuglevel);
# Where is the startd address file? wait for it to exist
my $havestartdaddr = "no";
$loopcount = 0;
while($havestartdaddr ne "yes") {
$loopcount++;
debug( "Looking for $startdadr\n",$debuglevel);
if( -f $startdadr ) {
debug( "Found it!!!! startd address file\n",$debuglevel);
$havestartdaddr = "yes";
} elsif ( $loopcount == $runlimit ) {
debug( "Gave up waiting for startd address file\n",$debuglevel);
return 0;
} else {
sleep ($loopcount * $backoff);
}
}
print "ok\n";
}
####################################################################
if($daemonlist =~ /SCHEDD/i) {
print "Has schedd dropped an address file yet - ";
# now wait for the schedd to start running... get address file loc
# and wait for file to exist
# Give the master time to start before jobs are submitted.
my $scheddadr = `condor_config_val SCHEDD_ADDRESS_FILE`;
$scheddadr =~ s/\012+$//;
$scheddadr =~ s/\015+$//;
debug( "SCHEDD_ADDRESS_FILE is <<<<<$scheddadr>>>>>\n",$debuglevel);
debug( "We are waiting for the file to exist\n",$debuglevel);
# Where is the schedd address file? wait for it to exist
my $havescheddaddr = "no";
$loopcount = 0;
while($havescheddaddr ne "yes") {
$loopcount++;
debug( "Looking for $scheddadr\n",$debuglevel);
if( -f $scheddadr ) {
debug( "Found it!!!! schedd address file\n",$debuglevel);
$havescheddaddr = "yes";
} elsif ( $loopcount == $runlimit ) {
debug( "Gave up waiting for schedd address file\n",$debuglevel);
return 0;
} else {
# NOTE(review): this block sleeps a flat 1 second, unlike every
# other wait which uses ($loopcount * $backoff) -- TODO confirm
# whether this asymmetry is intentional.
sleep 1;
}
}
print "ok\n";
}
# Phase 2: if a collector is present (and waiting was not disabled via
# "daemonwait"), wait for it to actually advertise each daemon.
if($daemonlist =~ /STARTD/i) {
# lets wait for the collector to know about it if we have a collector
my $currenthost = CondorTest::getFqdnHost();
if(($daemonlist =~ /COLLECTOR/i) && ($personal_startup_wait eq "true")) {
print "Waiting for collector to see startd - ";
$loopcount = 0;
while(1) {
$loopcount += 1;
my $output = `condor_status -startd -format \"%s\\n\" name`;
my $res = $?;
if ($res != 0) {
print "\ncondor_status returned error code $res\n";
print timestamp(), " The collector probably is not running after all, giving up\n";
print timestamp(), " Output from condor_status:\n";
print $output;
return 0;
}
if($output =~ /$currenthost/) {
print "ok\n";
last;
}
if($loopcount == $runlimit) {
print "bad\n";
print timestamp(), " Timed out waiting for collector to see startd\n";
last;
}
sleep ($loopcount * $backoff);
}
}
}
if($daemonlist =~ /SCHEDD/i) {
# lets wait for the collector to know about it
# if we have a collector
my $haveschedd = "";
my $done = "no";
my $currenthost = CondorTest::getFqdnHost();
if(($daemonlist =~ /COLLECTOR/i) && ($personal_startup_wait eq "true")) {
print "Waiting for collector to see schedd - ";
$loopcount = 0;
TRY: while( $done eq "no") {
$loopcount += 1;
my @cmd = `condor_status -schedd -format \"%s\\n\" name`;
foreach my $line (@cmd)
{
if( $line =~ /^.*$currenthost.*/)
{
print "ok\n";
$done = "yes";
last TRY;
}
}
if($loopcount == $runlimit) {
# NOTE(review): unlike the startd wait above, a timeout here
# does not return 0; startup continues anyway.
print "bad\n";
last;
}
sleep ($loopcount * $backoff);
}
}
}
if($daemonlist =~ /NEGOTIATOR/i) {
# lets wait for the collector to know about it
# if we have a collector
my $havenegotiator = "";
my $done = "no";
my $currenthost = CondorTest::getFqdnHost();
if(($daemonlist =~ /COLLECTOR/i) && ($personal_startup_wait eq "true")) {
print "Waiting for collector to see negotiator - ";
$loopcount = 0;
TRY: while( $done eq "no") {
$loopcount += 1;
my @cmd = `condor_status -negotiator -format \"%s\\n\" name`;
foreach my $line (@cmd)
{
if( $line =~ /^.*$currenthost.*/)
{
print "ok\n";
$done = "yes";
last TRY;
}
}
if($loopcount == $runlimit) {
print "bad\n";
last;
}
sleep ($loopcount * $backoff);
}
}
}
debug("In IsRunningYet calling CollectDaemonPids\n",$debuglevel);
CollectDaemonPids();
debug("Leaving IsRunningYet\n",$debuglevel);
#$debuglevel = $old_debuglevel;
return(1);
}
#################################################################
#
# CollectDaemonPids
#
# Open each known daemon's log and extract its PID
# and collect them all in a file called PIDS in the
# log directory.
#
#################################################################
sub CollectDaemonPids {
# Harvest the PIDs of the condor_master and every daemon it started by
# parsing the MasterLog, and write them to $(LOG)/PIDS (master line
# first, tagged "MASTER") so KillDaemonPids() can later target every
# process directly even when condor_off would fail.
my $logdir = `condor_config_val log`;
CondorUtils::fullchomp($logdir);

my $logfile = "$logdir/MasterLog";
debug("In CollectDaemonPids(), examining log $logfile\n", $debuglevel);
open(my $master_log, '<', $logfile) or die "Can not read '$logfile': $!\n";

my $master;
my %pids = ();
while(<$master_log>) {
	chomp;
	if(/PID\s+=\s+(\d+)/) {
		# Capture the master pid. At kill time we will suggest with signal 3
		# that the master and friends go away before we get blunt.
		$master = $1;
		# Every time we find a new master pid it means that all the previous
		# pids we had recorded were for a different instance of Condor.
		# So reset the list.
		%pids = ();
	}
	elsif(/Started DaemonCore process\s\"(.*)\",\s+pid\s+and\s+pgroup\s+=\s+(\d+)/) {
		# We store these in a hash because if the daemon crashes and restarts
		# we only want the latest value for its pid.
		$pids{$1} = $2;
	}
}
close($master_log);

my $pidfile = "$logdir/PIDS";
open(my $pids_fh, '>', $pidfile) or die "Can not create file '$pidfile': $!\n";
if(defined $master) {
	# Original passed no level to debug() here and would warn on an
	# undefined $master; both are fixed.
	debug("Master pid: $master\n", $debuglevel);
	print {$pids_fh} "$master MASTER\n";
} else {
	warn "CollectDaemonPids: no master PID found in '$logfile'\n";
}
foreach my $daemon (keys %pids) {
	debug("\t$daemon pid: $pids{$daemon}\n", $debuglevel);
	print {$pids_fh} "$pids{$daemon}\n";
}
close($pids_fh) or die "Error writing '$pidfile': $!\n";
}
#################################################################
#
# KillDaemonPids
#
# Find the log directory via the config file passed in. Then
# open the PIDS fill and kill every pid in it for a sure kill.
#
#################################################################
sub KillDaemonPids
{
# Hard-kill the personal condor described by the given config file.
# Finds the log directory via condor_config_val, reads the PIDS file
# written by CollectDaemonPids(), first sends the master a gentle
# signal 3 (fast shutdown), waits, and -- if the master survives --
# sends signal 15 to every recorded pid. CONDOR_CONFIG is temporarily
# switched to the target config and restored before returning.
my $desiredconfig = shift;
my $oldconfig = $ENV{CONDOR_CONFIG};
$ENV{CONDOR_CONFIG} = $desiredconfig;
my $logdir = `condor_config_val log`;
# Strip trailing LF/CR (portable chomp across platforms).
$logdir =~ s/\012+$//;
$logdir =~ s/\015+$//;
my $masterpid = 0;
my $cnt = 0;
my $cmd;
my $saveddebuglevel = $debuglevel;
$debuglevel = 1;
if($isnightly) {
DisplayPartialLocalConfig($desiredconfig);
}
#print "logs are here:$logdir\n";
my $pidfile = $logdir . "/PIDS";
debug("Asked to kill <$oldconfig>\n",$debuglevel);
my $thispid = 0;
# first find the master and use a kill 3(fast kill)
open(PD,"<$pidfile") or die "Can not open<$pidfile>:$!\n";
while(<PD>) {
chomp();
$thispid = $_;
# The master line is written as "<pid> MASTER" by CollectDaemonPids().
if($thispid =~ /^(\d+)\s+MASTER.*$/) {
$masterpid = $1;
if(CondorUtils::is_windows() == 1) {
# Cygwin kill: -f forces, -s selects the signal number.
$cmd = "/usr/bin/kill -f -s 3 $masterpid";
runcmd($cmd);
} else {
$cnt = kill 3, $masterpid;
}
debug("Gentle kill for master <$masterpid><$thispid($cnt)>\n",$debuglevel);
last;
}
}
close(PD);
# give it a little time for a shutdown
sleep(10);
# did it work.... is process still around?
# kill 0 only probes for existence; $cnt > 0 means still alive.
$cnt = kill 0, $masterpid;
# try a kill again on master and see if no such process
if(CondorUtils::is_windows() == 1) {
$cnt = 1;
# On Windows "kill 0" probing is unreliable; attempt a soft kill and
# infer death from the "couldn't open pid" error text.
open(KL,"/usr/bin/kill -f -s 15 $masterpid 2>&1 |")
or die "can not grab kill output\n";
while(<KL>) {
#print "Testing soft kill<$_>\n";
if( $_ =~ /^.*couldn\'t\s+open\s+pid\s+.*/ ) {
debug("Windows soft kill worked\n",$debuglevel);
$cnt = 0;
}
}
}
if($cnt == 0) {
debug("Gentle kill for master <$thispid> worked!\n",$debuglevel);
} else {
# hmm bullets are placed in heads here.
# Master survived the gentle kill: re-read PIDS and signal 15
# every recorded process individually.
open(PD,"<$pidfile") or die "Can not open<$pidfile>:$!\n";
while(<PD>) {
chomp();
$thispid = $_;
if($thispid =~ /^(\d+)\s+MASTER.*$/) {
$thispid = $1;
debug("Kill MASTER PID <$thispid:$1>\n",$debuglevel);
if(CondorUtils::is_windows() == 1) {
$cmd = "/usr/bin/kill -f -s 15 $thispid";
runcmd($cmd);
} else {
$cnt = kill 15, $thispid;
}
} else {
debug("Kill non-MASTER PID <$thispid>\n",$debuglevel);
if(CondorUtils::is_windows() == 1) {
$cmd = "kill -f -s 15 $thispid";
# The process may already be gone; accept any exit status.
runcmd($cmd,{expect_result=>\&ANY});
} else {
$cnt = kill 15, $thispid;
}
}
if($cnt == 0) {
debug("Failed to kill PID <$thispid>\n",$debuglevel);
} else {
debug("Killed PID <$thispid>\n",$debuglevel);
}
}
close(PD);
}
# reset config to whatever it was.
$ENV{CONDOR_CONFIG} = $oldconfig;
$debuglevel = $saveddebuglevel;
}
#################################################################
#
# FindCollectorPort
#
# Looks for collector_address_file via condor_config_val and tries
# to parse port number out of the file.
#
sub FindCollectorAddress
{
# Locate the collector's address by reading the file named by
# COLLECTOR_ADDRESS_FILE. Returns the address without its surrounding
# angle brackets, "0" when the knob is unset or the file is missing,
# and "" when the file exists but contains no <...> address line.
my $collector_address_file = `condor_config_val collector_address_file`;
my $line;
CondorUtils::fullchomp($collector_address_file);

debug( "Looking for collector port in file ---$collector_address_file---\n",$debuglevel);

if($collector_address_file eq "") {
	debug( "No collector address file defined! Can not find port\n",$debuglevel);
	return("0");
}

if( ! -e "$collector_address_file") {
	debug( "No collector address file exists! Can not find port\n",$debuglevel);
	return("0");
}

# 3-arg open with a lexical handle; the original 2-arg bareword open
# leaked its handle on the early return inside the loop.
open(my $addr_fh, '<', $collector_address_file)
	|| die "Can not open collector address file: $!\n";
while(<$addr_fh>) {
	CondorUtils::fullchomp($_);
	$line = $_;
	# The address file holds the sinful string in angle brackets,
	# e.g. <127.0.0.1:12345>; capture the interior.
	if( $line =~ /^\s*<([^>]+)>\s*$/ ) {
		debug( "Collector address is $1\n",$debuglevel);
		close($addr_fh);
		return($1);
	} else {
		debug( "$line\n",$debuglevel);
	}
}
close($addr_fh);

debug( "No collector address found in collector address file!\n",$debuglevel);
return("");
}
sub FindCollectorPort
{
# Return the port portion of the collector's address as reported by
# FindCollectorAddress(), or "0" when no port can be extracted.
my $addr = FindCollectorAddress();
# NOTE(review): only a bare IPv4 "a.b.c.d:port" form is recognized;
# addresses with extra parameters or non-IPv4 hosts fall through to
# the "0" return -- TODO confirm all supported address formats.
if( $addr =~ /^(\d+\.\d+\.\d+\.\d+):(\d+)$/ ) {
debug( "Collector ip $1 and port $2\n",$debuglevel);
return($2);
} else {
debug( "Failed to extract port from collector address: $addr\n",$debuglevel);
}
return("0");
}
#################################################################
#
# SaveMeSetup
#
# Make the saveme directory for a test, Create the pid based
# location for the current test within this saveme directory
# and then create a symbolic link to this pid directory. By doing this
# when the personal condor setup go to make a pid directory to
# run in, it ends up running within the saveme directory.
# This saveme directory allows more data to be returned during the
# nightly testing.
#
# If all is good the current pid is returned but if there
# is an error 0 is returned.
#
#################################################################
sub SaveMeSetup
{
# Prepare the "saveme" area for a test: create <testname>.saveme and a
# fresh per-pid subdirectory inside it (blowing away any directory left
# over from pid recycling). The personal condor then runs inside this
# directory, so its logs are preserved for nightly-test harvesting.
# Returns the current pid on success, 0 on any failure.
my $testname = shift;
my $pid = $$;
my $savedir = $testname . ".saveme";

if( verbose_system("mkdir -p $savedir") != 0 ) {
	print "SaveMeSetup: Could not create \"saveme\" directory for test\n";
	return(0);
}

my $piddir = "$savedir/$pid";
# Pid recycling could leave a stale directory here; start fresh.
if( verbose_system("rm -rf $piddir") != 0 ) {
	print "SaveMeSetup: Could not remove prior pid directory in savemedir \n";
	return(0);
}

if( verbose_system("mkdir $piddir") != 0 ) {
	print "SaveMeSetup: Could not create pid directory in \"saveme\" directory\n";
	return(0);
}

return($pid);
}
sub PersonalSystem
{
# Run a command through runcmd() and return its exit code. When the
# DUMP_CONDOR_LOGS environment variable is set, additionally dump all
# personal condor logs found under the current pid directory after the
# command completes (regardless of its exit status).
my $args = shift @_;
my $dumpLogs = $ENV{DUMP_CONDOR_LOGS};
my $mypid = $$;

if(defined $dumpLogs) {
print "Dump Condor Logs if things go south\n";
print "Pid dir is $mypid\n";
runcmd("pwd");
}

my $hashref = runcmd($args);
# runcmd returns a hashref; exit status lives under "exitcode".
my $rc = ${$hashref}{exitcode};

if(defined $dumpLogs) {
print "Dumping Condor Logs\n";
my $savedir = getcwd();
chdir("$mypid");
runcmd("ls");
PersonalDumpLogs($mypid);
# Restore the working directory we started from.
chdir("$savedir");
runcmd("pwd");
}

return $rc;
}
sub PersonalDumpLogs
{
# Scan the current working directory; for every subdirectory found,
# dump the contents of its log/ directory via PersonalDumpCondorLogs().
# $piddir is informational only -- the scan always targets the cwd.
my $piddir = shift;

# The original "opendir PD, "." || die ..." never fired: || bound to
# the string "." rather than to opendir's return value, so failures
# were silently ignored. "or" fixes the precedence.
opendir(my $dh, ".") or die "failed to open . : $!\n";
foreach my $file (readdir $dh)
{
	next if $file =~ /^\.\.?$/; # skip . and ..
	if( -d $file ) {
		my $logdir = $file . "/log";
		PersonalDumpCondorLogs($logdir);
	}
}
# closedir, not close: this is a directory handle.
closedir($dh);
}
sub PersonalDumpCondorLogs
{
# Print every regular file in $logdir to stdout, each bracketed by a
# banner line, then return to the original working directory.
my $logdir = shift;

my $savedir = getcwd();
chdir("$logdir");
print "\n\n******************* DUMP $logdir ******************\n\n";
# The original "opendir LD, "." || die ..." had the same || precedence
# bug as PersonalDumpLogs (die unreachable); and used close() on a
# directory handle where closedir() is required.
opendir(my $dh, ".") or die "failed to open . : $!\n";
foreach my $file (readdir $dh)
{
	next if $file =~ /^\.\.?$/; # skip . and ..
	if(-f $file ) {
		print "\n\n******************* DUMP $file ******************\n\n";
		# 3-arg open with a lexical handle (was 2-arg bareword).
		open(my $log_fh, '<', $file) || die "Can not open logfile<$file>: $!\n";
		while(<$log_fh>){
			print "$_";
		}
		close($log_fh);
	}
}
closedir($dh);
chdir("$savedir");
}
sub DisplayPartialLocalConfig
{
# Dump the generated-adjustments portion of the personal condor's local
# config file to stdout (used by nightly runs before a kill, to record
# what this instance was configured with). Lines are printed starting
# from the first "# Requested"/"# Adding" marker comment onward.
# $configloc is currently unused but kept for interface compatibility.
my $configloc = shift;
my $logdir = `condor_config_val log`;
my $fullpathtolocalconfig = "";
my $line = "";
chomp($logdir);
# LOG conventionally lives at <config dir>/log; peel off the trailing
# "log" component to locate the directory holding the local config.
if($logdir =~ /(.*\/)log/) {
	$fullpathtolocalconfig = $1 . $personal_local;
	print "\nlocal config file <$fullpathtolocalconfig>\n";
	if( -f $fullpathtolocalconfig) {
		print "\nDumping Adjustments to <$personal_local>\n\n";
		my $startdumping = 0;
		# 3-arg open with a lexical handle (was a 2-arg bareword open).
		open(my $local_cfg, '<', $fullpathtolocalconfig)
			or die "Can not open $fullpathtolocalconfig: $!\n";
		while(<$local_cfg>) {
			chomp($_);
			$line = $_;
			if($line =~ /# Requested.*/) {
				print "$line\n";
			} elsif($line =~ /# Adding.*/) {
				# First "# Adding" marker switches on dumping of all
				# subsequent non-marker lines.
				$startdumping = 1;
				print "$line\n";
			} else {
				print "$line\n" if $startdumping == 1;
			}
		}
		close($local_cfg);
		print "\nDONE Dumping Adjustments to <$personal_local>\n\n";
	}
}
}
sub IsThisNightly
{
# Return 1 when the given path looks like a nightly-build location
# (contains an "/execute/" component), 0 otherwise.
my $path = shift;

debug("IsThisNightly passed <$path>\n",$debuglevel);
return( ($path =~ m{/execute/}) ? 1 : 0 );
}
1;
| bbockelm/condor-network-accounting | src/condor_scripts/CondorPersonal.pm | Perl | apache-2.0 | 60,287 |
package Model::R::ToPay::ContractorApiAccess;
# Rose::DB::Object model for the contractor_api_access table: one row
# grants a contractor access to a specific external API environment,
# optionally limited by a validity window, an IP mask, and an active
# flag, and authenticated by a generated pass key.
use strict;
use base qw(Model::R::ToPay);
__PACKAGE__->meta->setup(
table => 'contractor_api_access',
columns => [
id => { type => 'integer', not_null => 1, sequence => 'gate_sequence' },
name => { type => 'varchar', length => 256, not_null => 1, remarks => 'A name of access table' },
date_created => { type => 'timestamp', default => 'now()' },
valid_from => { type => 'timestamp', remarks => 'This access only valid after certain date-time (if specified)' },
valid_until => { type => 'timestamp', remarks => 'This access is valid until certain date-time (if specified)' },
active => { type => 'boolean', default => 'true', remarks => 'An access is granted' },
contractor_id => { type => 'integer', not_null => 1, remarks => 'ID of partner to grant access to' },
external_api_environment_id => { type => 'integer', not_null => 1, remarks => 'An access to extenal API environment' },
# Default is computed by the database: an md5 of a random value.
pass_key => { type => 'varchar', default => 'md5((random())::text)', length => 256, remarks => 'A token for partner as a symbol that access is granted to this system' },
allow_ips => { type => 'varchar', default => '*', remarks => 'An IP mask from which requests for given access is allowed' },
],
primary_key_columns => [ 'id' ],
# Needed so SQL-expression defaults (e.g. pass_key above) are passed
# through to the database rather than quoted as literals.
allow_inline_column_values => 1,
foreign_keys => [
contractor => {
class => 'Model::R::ToPay::Contractor',
key_columns => { contractor_id => 'id' },
},
external_api_environment => {
class => 'Model::R::ToPay::ExternalApiEnvironment',
key_columns => { external_api_environment_id => 'id' },
},
],
relationships => [
# Many-to-many: which individual API functions this access row may
# call, via the contractor_api_function_access join table.
external_api_functions => {
map_class => 'Model::R::ToPay::ContractorApiFunctionAccess',
map_from => 'contractor_api_access',
map_to => 'external_api_function',
type => 'many to many',
},
],
);
# Generates Model::R::ToPay::ContractorApiAccess::Manager query methods.
__PACKAGE__->meta->make_manager_class('contractor_api_access');
1;
| ant-i/db-crud | dbs/Model/R/ToPay/ContractorApiAccess.pm | Perl | apache-2.0 | 2,299 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2020] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
Bio::EnsEMBL::TopLevelAssemblyMapper -
Handles mapping between a given coordinate system and the toplevel
pseudo coordinate system.
=head1 SYNOPSIS
$db = Bio::EnsEMBL::DBSQL::DBAdaptor->new(...);
$asma = $db->get_AssemblyMapperAdaptor();
$csa = $db->get_CoordSystemAdaptor();
my $toplevel = $cs_adaptor->fetch_by_name('toplevel');
my $ctg_cs = $cs_adaptor->fetch_by_name('contig');
$asm_mapper = $map_adaptor->fetch_by_CoordSystems( $toplevel, $ctg_cs );
# map to toplevel coord system for this region
@chr_coords =
$asm_mapper->map( 'AL30421.1.200.92341', 100, 10000, -1, $ctg_cs );
# list toplevel seq_region_ids for this region
@chr_ids =
$asm_mapper->list_ids( 'AL30421.1.200.92341', 1, 1000, -1,
$ctg_cs );
=head1 DESCRIPTION
The TopLevelAssemblyMapper performs mapping between a provided
coordinate system and the toplevel pseudo coordinate system. The
toplevel coordinate system is not a real coordinate system, but
represents the highest coordinate system that can be mapped to in a
given region. It is only possible to perform unidirectional mapping
using this mapper, because it does not make sense to map from the
toplevel coordinate system to another coordinate system.
=head1 METHODS
=cut
use strict;
use warnings;
package Bio::EnsEMBL::TopLevelAssemblyMapper;
use Bio::EnsEMBL::Utils::Exception qw(throw);
use Bio::EnsEMBL::Mapper;
use Bio::EnsEMBL::CoordSystem;
use Scalar::Util qw(weaken);
=head2 new
Arg [1] : Bio::EnsEMBL::DBAdaptor $dbadaptor the adaptor for
the database this mapper is using.
Arg [2] : Toplevel CoordSystem
Arg [3] : Other CoordSystem
Description: Creates a new TopLevelAssemblyMapper object
Returntype : Bio::EnsEMBL::DBSQL::TopLevelAssemblyMapper
Exceptions : throws if any of the 3 arguments are missing/ not
: of the correct type.
Caller : Bio::EnsEMBL::DBSQL::AssemblyMapperAdaptor
Status : Stable
=cut
# Construct a TopLevelAssemblyMapper after validating that the first
# CoordSystem really is the toplevel pseudo coordinate system and the
# second is a real (non-toplevel) coordinate system.
sub new {
  my ($caller, $adaptor, $toplevel_cs, $other_cs) = @_;

  throw('Toplevel CoordSystem argument expected.') unless ref($toplevel_cs);
  throw('Other CoordSystem argument expected.')    unless ref($other_cs);

  throw($toplevel_cs->name() . " is not the toplevel CoordSystem.")
    unless $toplevel_cs->is_top_level();
  throw("Other CoordSystem argument should not be toplevel CoordSystem.")
    if $other_cs->is_top_level();

  # Cache the full coordinate-system list; map()/_list() walk it in rank
  # order to find the highest mappable coordinate system for a region.
  my $all_coord_systems =
    $adaptor->db()->get_CoordSystemAdaptor()->fetch_all();

  my $self = bless({
    'coord_systems' => $all_coord_systems,
    'toplevel_cs'   => $toplevel_cs,
    'other_cs'      => $other_cs,
  }, ref($caller) || $caller);

  $self->adaptor($adaptor);

  return $self;
}
# Getter/setter for the AssemblyMapperAdaptor.  The stored reference is
# weakened to avoid a circular adaptor<->mapper reference cycle.
sub adaptor {
  my ($self, @args) = @_;

  if (@args) {
    $self->{'adaptor'} = $args[0];
    weaken($self->{'adaptor'});
  }

  return $self->{'adaptor'};
}
=head2 map
Arg [1] : string $frm_seq_region
The name of the sequence region to transform FROM
Arg [2] : int $frm_start
The start of the region to transform FROM
Arg [3] : int $frm_end
The end of the region to transform FROM
Arg [4] : int $strand
The strand of the region to transform FROM
Arg [5] : Bio::EnsEMBL::CoordSystem
The coordinate system to transform FROM
Arg [6] : if set will do a fastmap
Arg [7] : (optional) dummy placeholder to keep the interface
consistent across different mappers
Arg [8] : (optional) boolean
Whether or not to include the original coordinates
Example : @coords = $mapper->map('X', 1_000_000, 2_000_000,
1, $chr_cs);
Description: Transforms coordinates from one coordinate system
to another.
Returntype : List of Bio::EnsEMBL::Mapper::Coordinate and/or
Bio::EnsEMBL::Mapper:Gap objects
  Exceptions : thrown if the specified TO coordinate system is not one
of the coordinate systems associated with this mapper
Caller : general
Status : Stable
=cut
sub map {
  throw('Incorrect number of arguments.') if @_ < 6;

  my($self, $frm_seq_region_name, $frm_start, $frm_end, $frm_strand, $frm_cs,
     $fastmap, $dummy, $include_org_coord) = @_;

  # Mapping is one-way: toplevel is a pseudo coordinate system that can
  # only ever be the target.
  if($frm_cs->is_top_level()) {
    throw("The toplevel CoordSystem can only be mapped TO, not FROM.");
  }

  # Resolve the source region name to its internal seq_region_id up
  # front; it is needed for the fall-through case at the bottom.
  my @tmp;
  push @tmp, $frm_seq_region_name;
  my $seq_region_id = @{$self->adaptor()->seq_regions_to_ids($frm_cs, \@tmp)}[0];

  my $mapper = $self->{'mapper'};            # NOTE(review): new() never stores a 'mapper' key, so this is undef and unused here
  my $toplevel_cs = $self->{'toplevel_cs'};  # NOTE(review): unused in this sub
  my $other_cs = $self->{'other_cs'};
  my $adaptor = $self->adaptor;

  if($frm_cs != $other_cs && !$frm_cs->equals($other_cs)) {
    throw("Coordinate system " . $frm_cs->name . " " . $frm_cs->version .
          " is neither the assembled nor the component coordinate system " .
          " of this AssemblyMapper");
  }

  my $coord_systems = $self->{'coord_systems'};
  my $csa = $self->adaptor()->db()->get_CoordSystemAdaptor();

  #
  # TBD try to make this more efficient
  #
  # Walk coordinate systems from highest rank down to just above the
  # source coordinate system; the first one with a mapping path that
  # yields real (non-gap) coordinates is the "toplevel" for this region.
  my $from_rank = $other_cs->rank();
  foreach my $cs (@$coord_systems) {
    last if($cs->rank >= $from_rank);

    #check if a mapping path even exists to this coordinate system
    my @mapping_path = @{ $csa->get_mapping_path( $cs, $other_cs ) };

    if(@mapping_path) {

      # Try to map to this coord system. If we get back any coordinates then
      # it is our 'toplevel' that we were looking for
      my $mapper = $adaptor->fetch_by_CoordSystems($other_cs, $cs);

      if($fastmap) {
        my @result = $mapper->fastmap($frm_seq_region_name, $frm_start, $frm_end,
                                      $frm_strand, $frm_cs);
        return @result if(@result);
      } else {
        my @coords = $mapper->map($frm_seq_region_name, $frm_start, $frm_end,
                                  $frm_strand, $frm_cs, undef, undef, $include_org_coord);
        # A single pure-gap result means this candidate coordinate system
        # does not cover the region; keep descending the rank list.
        if(@coords > 1) {
          return @coords;
        } elsif ($include_org_coord) {
          return @coords unless $coords[0]{mapped}->isa('Bio::EnsEMBL::Mapper::Gap');
        } else {
          return @coords unless $coords[0]->isa('Bio::EnsEMBL::Mapper::Gap');
        }
      }
    }
  }

  #
  # the toplevel coordinate system for the region requested *is* the requested region.
  #
  if ($fastmap) {
    return ($seq_region_id, $frm_start, $frm_end, $frm_strand, $other_cs);
  }

  my $coord = Bio::EnsEMBL::Mapper::Coordinate->new($seq_region_id, $frm_start,$frm_end, $frm_strand, $other_cs);

  if ($include_org_coord) {
    return { 'original' => $coord, 'mapped' => $coord };
  } else {
    return $coord;
  }
}
=head2 flush
Args : none
Example : none
Description: polymorphism with AssemblyMapper, does nothing
Returntype : none
Exceptions : none
Status : Stable
=cut
# Deliberate no-op: kept for interface parity with AssemblyMapper, which
# caches mapping data that needs flushing; this mapper caches none.
sub flush {}
=head2 fastmap
Arg [1] : string $frm_seq_region
The name of the sequence region to transform FROM
Arg [2] : int $frm_start
The start of the region to transform FROM
Arg [3] : int $frm_end
The end of the region to transform FROM
Arg [4] : int $strand
The strand of the region to transform FROM
Arg [5] : Bio::EnsEMBL::CoordSystem
The coordinate system to transform FROM
Example : @coords = $mapper->fastmap('X', 1_000_000, 2_000_000,
1, $chr_cs);
Description: Transforms coordinates from one coordinate system
to another.
Returntype : List of Bio::EnsEMBL::Mapper::Coordinate and/or
Bio::EnsEMBL::Mapper:Gap objects
  Exceptions : thrown if the specified TO coordinate system is not one
of the coordinate systems associated with this mapper
Caller : general
Status : Stable
=cut
# Delegate to map() with the fastmap flag set, returning the single best
# mapping as a flat list instead of coordinate objects.
sub fastmap {
  my ($self, @args) = @_;
  return $self->map(@args, 1);
}
=head2 assembled_CoordSystem
Arg [1] : none
Example : $cs = $mapper->assembled_CoordSystem
Description: Retrieves the assembled CoordSystem from this mapper
Returntype : Bio::EnsEMBL::CoordSystem
Exceptions : none
Caller : internal, AssemblyMapperAdaptor
Status : Stable
=cut
# Accessor for the toplevel (assembled) CoordSystem of this mapper.
sub assembled_CoordSystem {
  my ($self) = @_;
  return $self->{'toplevel_cs'};
}
=head2 component_CoordSystem
Arg [1] : none
Example : $cs = $mapper->component_CoordSystem
Description: Retrieves the component CoordSystem from this mapper
Returntype : Bio::EnsEMBL::CoordSystem
Exceptions : none
Caller : internal, AssemblyMapperAdaptor
Status : Stable
=cut
# Accessor for the component (non-toplevel) CoordSystem of this mapper.
sub component_CoordSystem {
  my ($self) = @_;
  return $self->{'other_cs'};
}
# Shared implementation for list_seq_regions() / list_ids().  Walks the
# coordinate systems in rank order and returns the first non-empty
# overlap list; $seq_regions selects names (true) vs internal ids (false).
sub _list {
  my($self, $frm_seq_region_name, $frm_start, $frm_end, $frm_cs, $seq_regions) = @_;

  my $mapper = $self->{'mapper'};            # NOTE(review): new() never stores a 'mapper' key; unused
  my $toplevel_cs = $self->{'toplevel_cs'};  # NOTE(review): unused in this sub
  my $other_cs = $self->{'other_cs'};
  my $adaptor = $self->adaptor;

  if($frm_cs->is_top_level()) {
    throw("The toplevel CoordSystem can only be mapped TO, not FROM.");
  }

  if($frm_cs != $other_cs && !$frm_cs->equals($other_cs)) {
    throw("Coordinate system " . $frm_cs->name . " " . $frm_cs->version .
          " is neither the assembled nor the component coordinate system " .
          " of this AssemblyMapper");
  }

  my $coord_systems = $self->{'coord_systems'};
  my $csa = $self->adaptor()->db()->get_CoordSystemAdaptor();

  #
  # TBD try to make this more efficient
  #
  my $from_rank = $other_cs->rank();
  foreach my $cs (@$coord_systems) {
    last if($cs->rank >= $from_rank);

    #check if a mapping path even exists to this coordinate system
    my @mapping_path = @{ $csa->get_mapping_path( $cs, $other_cs ) };

    if(@mapping_path) {

      # Try to map to this coord system. If we get back any coordinates then
      # it is our 'toplevel' that we were looking for
      my $mapper = $adaptor->fetch_by_CoordSystems($other_cs, $cs);

      my @result;
      my @tmp;
      push @tmp, $frm_seq_region_name;
      my $seq_region_id = @{$self->adaptor()->seq_regions_to_ids($frm_cs, \@tmp)}[0];  # NOTE(review): computed but never used
      if($seq_regions) {
        @result = $mapper->list_seq_regions($frm_seq_region_name, $frm_start,
                                            $frm_end, $frm_cs);
      } else {
        @result = $mapper->list_ids($frm_seq_region_name, $frm_start,
                                    $frm_end, $frm_cs);
      }

      return @result if(@result);
    }
  }

  # the toplevel coordinate system for the region requested *is* the
  return ($frm_seq_region_name);
  # requested region.

  # NOTE(review): everything below is unreachable because of the
  # unconditional return above; as written, list_ids() callers receive
  # the seq_region *name* rather than its internal id.  Confirm whether
  # the early return is intentional before removing the dead code.
  if($seq_regions) {
    return ($frm_seq_region_name);
  }

  #this seems a bit silly and inefficient, but it is probably never
  #called anyway.
  my $slice_adaptor = $adaptor->db()->get_SliceAdaptor();
  my $slice = $slice_adaptor->fetch_by_region($other_cs->name(),
                                              $frm_seq_region_name,
                                              undef,undef,undef,$other_cs);
  return ($slice_adaptor->get_seq_region_id($slice));
}
=head2 list_seq_regions
Arg [1] : string $frm_seq_region
The name of the sequence region of interest
Arg [2] : int $frm_start
The start of the region of interest
Arg [3] : int $frm_end
The end of the region to transform of interest
Arg [5] : Bio::EnsEMBL::CoordSystem $frm_cs
The coordinate system to obtain overlapping ids of
Example : foreach $id ($asm_mapper->list_ids('X',1,1000,$ctg_cs)) {...}
Description: Retrieves a list of overlapping seq_region names
of another coordinate system. This is the same as the
list_ids method but uses seq_region names rather internal ids
Returntype : List of strings
Exceptions : none
Caller : general
Status : Stable
=cut
# Validate arity, then delegate to _list() asking for seq_region names.
sub list_seq_regions {
  my @args = @_;
  throw('Incorrect number of arguments.') if scalar(@args) != 5;
  return _list(@args, 1);
}
=head2 list_ids
Arg [1] : string $frm_seq_region
The name of the sequence region of interest.
Arg [2] : int $frm_start
The start of the region of interest
Arg [3] : int $frm_end
The end of the region to transform of interest
Arg [5] : Bio::EnsEMBL::CoordSystem $frm_cs
The coordinate system to obtain overlapping ids of
Example : foreach $id ($asm_mapper->list_ids('X',1,1000,$chr_cs)) {...}
Description: Retrieves a list of overlapping seq_region internal identifiers
of another coordinate system. This is the same as the
               list_seq_regions method but uses internal identifiers rather
than seq_region strings
Returntype : List of ints
Exceptions : thrown if the from CoordSystem is the toplevel coord system
thrown if the from CoordSystem is not the one used in the mapper
Caller : general
Status : Stable
=cut
# Validate arity, then delegate to _list() asking for internal ids.
sub list_ids {
  my @args = @_;
  throw('Incorrect number of arguments.') if scalar(@args) != 5;
  return _list(@args, 0);
}
1;
| james-monkeyshines/ensembl | modules/Bio/EnsEMBL/TopLevelAssemblyMapper.pm | Perl | apache-2.0 | 14,050 |
use 5.10.0;
use strict;
use warnings;

# Getter for any score track.  Scores are stored in the database as
# scaled integers; get() divides by scalingFactor to recover the value.
package Seq::Tracks::Score;

our $VERSION = '0.001';

# ABSTRACT: The getter for any score track
# VERSION

use Mouse 2;
use namespace::autoclean;

extends 'Seq::Tracks::Get';

# Divisor applied to the raw stored integer in get().
has scalingFactor => (is => 'ro', isa => 'Int', default => 100);

sub BUILD {
  my $self = shift;

  # purely to save accessor time
  $self->{_s} = $self->scalingFactor;

  #Provided by Seq::Tracks::Get
  #$self->{_dbName} = $self->dbName;
}

# Hot path: written with direct @_ access (no named variables) to avoid
# per-call copying; see the positional legend below.
sub get {
  #my ($self, $href, $chr, $refBase, $allele, $outAccum, $alleleNumber) = @_
  # $_[0] == $self
  # $_[1] == <ArrayRef> $href : the database data, with each top-level index corresponding to a track
  # $_[2] == <String> $chr  : the chromosome
  # $_[3] == <String> $refBase : ACTG
  # $_[4] == <String> $allele  : the allele (ACTG or -N / +ACTG)
  # $_[5] == <Int> $positionIdx : the position in the indel, if any
  # $_[6] == <ArrayRef> $outAccum : a reference to the output, which we mutate

  # A missing database value propagates as undef in the output slot;
  # otherwise unscale the stored integer.
  $_[6][$_[5]] = defined $_[1]->[ $_[0]->{_dbName} ] ? $_[1]->[ $_[0]->{_dbName} ] / $_[0]->{_s} : undef;

  return $_[6];
}

__PACKAGE__->meta->make_immutable;

1;
| akotlar/bystro | lib/Seq/Tracks/Score.pm | Perl | apache-2.0 | 1,129 |
#!/usr/bin/perl
# Print the type of a GlusterFS volume, for Zabbix monitoring.
#
# Ex.
# ./glubix_voltype.pl --volume_name vol0
#
# options:
#       --volume_name vol1
# return value type: String
# return value list: Unknown ... Can't parse result of gluster command
#                    Blank   ... --volume_name is not set
#                    Distribute
#                    Stripe
#                    Replicate
#                    Striped-Replicate
#                    Distributed-Stripe
#                    Distributed-Replicate
#                    Distributed-Striped-Replicate

use strict;
use warnings;
use Getopt::Long qw(:config posix_default no_ignore_case gnu_compat);

my $gluster_volume_name = '';
GetOptions('volume_name=s' => \$gluster_volume_name);

# BUG FIX: the original exited without printing anything in this branch,
# even though "Blank" is a documented return value.
if ($gluster_volume_name eq '') {
    print "Blank\n";
    exit;
}

# NOTE(review): the volume name is interpolated into a shell command;
# consider validating it (e.g. /\A[\w.-]+\z/) before use.
my $gluster_cmd = "/usr/sbin/gluster";
my $exec_cmd    = "$gluster_cmd volume info $gluster_volume_name 2> /dev/null";
my $result      = `$exec_cmd`;

# Parse the "Type: <value>" line out of "gluster volume info" output.
my $voltype = ($result =~ m/Type: (\S+)/) ? $1 : "Unknown";

# print (not printf): a stray "%" in the parsed type must not be
# interpreted as a format directive.
print "$voltype\n";
exit
| fuel-infra/puppet-manifests | modules/fuel_project/files/glusterfs/zabbix/glubix/glubix_voltype.pl | Perl | apache-2.0 | 1,072 |
#
# Copyright 2018 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package storage::netapp::snmp::mode::components::communication;

use base qw(centreon::plugins::mode);

use strict;
use warnings;

# enclContactState integer values -> human-readable states
# (NetApp enterprise MIB subtree .1.3.6.1.4.1.789).
my %map_com_states = (
    1 => 'initializing',
    2 => 'transitioning',
    3 => 'active',
    4 => 'inactive',
    5 => 'reconfiguring',
    6 => 'nonexistent',
);

# Shelf address table and per-shelf contact-state table used by
# load()/check(); both are indexed by the same shelf number.
my $oid_enclChannelShelfAddr = '.1.3.6.1.4.1.789.1.21.1.2.1.3';
my $oid_enclContactState = '.1.3.6.1.4.1.789.1.21.1.2.1.2';
# Queue the SNMP request for enclosure contact states ahead of check().
sub load {
    my $self = shift;

    my $request = { oid => $oid_enclContactState };
    push @{ $self->{request} }, $request;
}
# Report the communication state of every shelf and raise the severity
# configured for that state when it is not OK.
sub check {
    my ($self) = @_;

    $self->{output}->output_add(long_msg => "Checking communications");
    $self->{components}->{communication} = {name => 'communications', total => 0, skip => 0};
    return if ($self->check_filter(section => 'communication'));

    for (my $shelf_idx = 1; $shelf_idx <= $self->{number_shelf}; $shelf_idx++) {
        my $shelf_addr = $self->{shelf_addr}->{$oid_enclChannelShelfAddr . '.' . $shelf_idx};
        my $com_state = $map_com_states{$self->{results}->{$oid_enclContactState}->{$oid_enclContactState . '.' . $shelf_idx}};

        next if ($self->check_filter(section => 'communication', instance => $shelf_addr));
        $self->{components}->{communication}->{total}++;

        my $message = sprintf("Shelve '%s' communication state is '%s'", $shelf_addr, $com_state);
        $self->{output}->output_add(long_msg => $message);

        my $exit = $self->get_severity(section => 'communication', value => $com_state);
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity => $exit, short_msg => $message);
        }
    }
}
1;
| wilfriedcomte/centreon-plugins | storage/netapp/snmp/mode/components/communication.pm | Perl | apache-2.0 | 2,614 |
=head1 LICENSE
Copyright [1999-2014] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=pod
=head1 NAME
Bio::EnsEMBL::Compara::Production::Projection::FakeXrefHolder
=head1 DESCRIPTION
This class is used as a way of getting database entries from a core
database quickly by not having to go through core objects and being able
to do the join using stable IDs alone. At the moment it will return XRefs
linked to the peptide if given a gene or peptide member.
=head1 AUTHOR
Andy Yates (ayatesatebiacuk)
=head1 CONTACT
This module is part of the EnsEMBL project (http://www.ensembl.org)
Questions can be posted to the dev mailing list: http://lists.ensembl.org/mailman/listinfo/dev
=cut
package Bio::EnsEMBL::Compara::Production::Projection::FakeXrefHolder;
use strict;
use warnings;

use Carp qw(confess);

use Bio::EnsEMBL::Utils::Argument qw(rearrange);
use Bio::EnsEMBL::Utils::Exception qw(throw);
use Bio::EnsEMBL::Utils::Scalar qw(assert_ref wrap_array);
use Bio::EnsEMBL::Utils::SqlHelper;
use Bio::EnsEMBL::DBEntry;
use Bio::EnsEMBL::OntologyXref;
=head2 new()
Arg[-dbentries] : required; ArrayRef of entries of type DBEntry
Description : New method used for a new instance of the given object.
Required fields are indicated accordingly. Fields are specified
using the Arguments syntax (case insensitive).
=cut
# Constructor.  -DBENTRIES is mandatory and must be an ARRAY ref of
# Bio::EnsEMBL::DBEntry objects.  Relies on the Carp import at the top
# of this file (confess() was previously called without being imported).
sub new {
  my ( $class, @args ) = @_;
  my $self = bless( {}, ref($class) || $class );
  my ( $dbentries, ) = rearrange( [qw(dbentries )], @args );

  # Check definedness before assert_ref() so a missing argument reports
  # this explicit message rather than a generic assertion failure.
  confess('The attribute dbentries must be specified during construction or provide a builder subroutine') if !defined $dbentries;
  assert_ref( $dbentries, 'ARRAY' );
  $self->{dbentries} = $dbentries;

  return $self;
}
=head2 dbentries()
Description : Getter. ArrayRef of entries of type DBEntry
=cut
# Read-only accessor: the ArrayRef of DBEntry objects supplied at
# construction time.
sub dbentries {
  my $self = shift;
  return $self->{dbentries};
}
=head2 get_all_DBEntries()
Arg[1] : String; entry type where the value given is a dbname
Description : Returns all DBEntries or just the DBEntries matching the given
db name
Returntype : ArrayRef of the entries
=cut
# Return all DBEntries, or only those whose dbname() equals $entry_type
# when a (true) type string is supplied.
sub get_all_DBEntries {
  my ($self, $entry_type) = @_;

  return $self->dbentries() unless $entry_type;

  my @matching = grep { $_->dbname() eq $entry_type } @{ $self->dbentries() };
  return \@matching;
}
### Factory
=head2 build_display_xref_from_Member()
Arg[1] : Bio::EnsEMBL::Compara::Member; The member to search by
Description : Returns the display DBEntry from the given Member. If the
member was an ENSEMBLGENE we consult the Gene otherwise
for ENSEMBLPEP we consult the Translation (both from core). If
the DBEntry was empty we will return an object
containing no elements
Returntype : Instance of FakeXrefHolder
=cut
# Wrap a Member's display xref in a FakeXrefHolder.  For ENSEMBLGENE
# members the core Gene is consulted; for ENSEMBLPEP the Translation.
# The returned holder is empty when no display xref is set.
sub build_display_xref_from_Member {
  my ($class, $member) = @_;

  my $source_name = $member->source_name();
  my $display_xref;

  if ($source_name eq 'ENSEMBLGENE') {
    $display_xref = $member->get_Gene()->display_xref();
  }
  elsif ($source_name eq 'ENSEMBLPEP') {
    $display_xref = $member->get_Translation()->display_xref();
  }
  else {
    throw('I do not understand how to process the source_name '.$source_name);
  }

  my @entries;
  push(@entries, $display_xref) if defined $display_xref;

  return $class->new(-DBENTRIES => \@entries);
}
=head2 build_peptide_dbentries_from_member()
Arg[1] : Bio::EnsEMBL::Compara::Member; The member to search by
Arg[2] : String; The dbname to look for. Supports like
Description : Searches for entries linked to the given Member. If given
a gene member it will look for the cannonical links and if
given a peptide member it assumes this is the correct
identifier to use.
Returntype : Instance of FakeXrefHolder
=cut
# Build a FakeXrefHolder with the xrefs attached to a Member's canonical
# gene / transcript / translation.  The lookup runs raw SQL against the
# member's core database keyed on stable_id, avoiding the cost of full
# core-object retrieval.  $db_names, if given, is a scalar or array ref
# of external_db name patterns (SQL LIKE syntax).
sub build_peptide_dbentries_from_Member {
  my ($class, $member, $db_names) = @_;

  # Normalise the optional filter into an array ref (possibly empty).
  if(defined $db_names) {
    $db_names = wrap_array($db_names);
  }
  else {
    $db_names = [];
  }

  my $canonical_member = $member->get_canonical_SeqMember;
  my $dbc = $canonical_member->genome_db()->db_adaptor()->dbc();
  my $t = Bio::EnsEMBL::Utils::SqlHelper->new(-DB_CONNECTION => $dbc);

  my $columns = 'x.xref_id, x.external_db_id, x.dbprimary_acc, x.display_label, x.version, x.description, x.info_type, x.info_text, oxr.linkage_type, ed.db_name, ed.type, ed.db_release, oxr.object_xref_id';

  my $xref_join = <<'SQL';
JOIN xref x USING (xref_id)
JOIN external_db ed on (x.external_db_id = ed.external_db_id)
LEFT JOIN ontology_xref oxr ON (ox.object_xref_id = oxr.object_xref_id)
SQL

  my $where = 'WHERE si.stable_id =?';

  my $translation_sql = <<SQL;
SELECT $columns
FROM translation si
JOIN object_xref ox ON (si.translation_id = ox.ensembl_id AND ox.ensembl_object_type =?)
$xref_join
$where
SQL

  my $transcript_sql = <<SQL;
SELECT $columns
FROM transcript si
JOIN object_xref ox ON (si.transcript_id = ox.ensembl_id AND ox.ensembl_object_type =?)
$xref_join
$where
SQL

  my $gene_sql = <<SQL;
SELECT $columns
FROM gene si
JOIN object_xref ox ON (si.gene_id = ox.ensembl_id AND ox.ensembl_object_type =?)
$xref_join
$where
SQL

  # Pick the query, the object_xref.ensembl_object_type, and the stable
  # id appropriate to the member's source.
  my $sql = {
    ENSEMBLGENE => $gene_sql,
    ENSEMBLTRANS => $transcript_sql,
    ENSEMBLPEP => $translation_sql
  }->{$member->source_name()};
  my $type = {
    ENSEMBLGENE => 'Gene',
    ENSEMBLTRANS => 'Transcript',
    ENSEMBLPEP => 'Translation'
  }->{$member->source_name()};
  my $stable_id = {
    ENSEMBLGENE => $member->stable_id,
    ENSEMBLTRANS => $member->get_canonical_SeqMember->stable_id,
    ENSEMBLPEP => $member->get_canonical_SeqMember->stable_id,
  }->{$member->source_name()};

  my $params = [$type, $stable_id];

  # BUG FIX: this previously tested "if($db_names)", which is true even
  # for the empty array ref assigned above, so a call with no filter
  # appended a malformed " AND ()" clause to the SQL.  Only add the
  # clause when at least one name pattern was supplied.
  if(@{$db_names}) {
    my @conditions;
    foreach my $dbname (@{$db_names}) {
      push(@conditions, 'ed.db_name like ?');
      push(@{$params}, $dbname);
    }
    my $joined_condition = join(' OR ', @conditions);
    $sql .= " AND ($joined_condition)";
  }

  # Inflate each row into a DBEntry, or an OntologyXref when the row
  # joined through ontology_xref.
  my $callback = sub {
    my ($row) = @_;
    my ($xref_id, $external_db_id, $primary_ac, $display_label,
        $version, $description, $info_type, $info_text,
        $linkage, $dbname, $type, $db_release,
        $ontology_xref_ox_id) = @{$row};
    my $hash_to_bless = {
      dbID        => $xref_id,
      primary_id  => $primary_ac,
      display_id  => $display_label,
      version     => $version,
      info_type   => $info_type,
      info_text   => $info_text,
      type        => $type,
      dbname      => $dbname,
      description => $description,
      release     => $db_release
    };
    my $xref;
    # It was an OntologyXref if we had linked into ontology_xref
    if($ontology_xref_ox_id) {
      # only add linkage types if we had some
      $hash_to_bless->{linkage_types} = [[$linkage]] if $linkage;
      $xref = Bio::EnsEMBL::OntologyXref->new_fast($hash_to_bless);
    } else {
      $xref = Bio::EnsEMBL::DBEntry->new_fast($hash_to_bless);
    }
    return $xref;
  };

  my $entries = $t->execute(-SQL => $sql, -CALLBACK => $callback, -PARAMS => $params);
  return $class->new(-DBENTRIES => $entries);
}
1;
| dbolser-ebi/ensembl-compara | modules/Bio/EnsEMBL/Compara/Production/Projection/FakeXrefHolder.pm | Perl | apache-2.0 | 7,856 |
% Constrained Horn clauses (CLP over integers) encoding a program-safety
% check: the predicates new1..new7 are machine-generated program-point
% summaries and "false" is derivable iff an error state is reachable.
% NOTE(review): clause semantics come from the generating tool (RAHFT
% benchmark); predicate and argument names are not meaningful on their own.
new7(A,B,C,D,E) :- A=0.
new7(A,B,C,D,E) :- F= -1+D, G=1+E, A=< -1, new5(B,C,F,G).
new7(A,B,C,D,E) :- F= -1+D, G=1+E, A>=1, new5(B,C,F,G).
new6(A,B,C,D) :- E=1, C>=1, new7(E,A,B,C,D).
new6(A,B,C,D) :- E=0, C=<0, new7(E,A,B,C,D).
new5(A,B,C,D) :- A-D>=1, new6(A,B,C,D).
new3(A,B,C,D) :- E=1+B, F=1+C, A-B>=1, new3(A,E,F,D).
new3(A,B,C,D) :- E=0, A-B=<0, new5(A,B,C,E).
new2 :- A=0, B=0, new3(C,A,B,D).
new1 :- new2.
false :- new1.
| bishoksan/RAHFT | benchmarks_scp/misc/programs-clp/INVGEN-up.map.c.map.pl | Perl | apache-2.0 | 429 |
#
# Copyright 2015 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package storage::hp::p2000::xmlapi::mode::components::fru;

use strict;
use warnings;

# FRU status (as reported by "show frus") -> plugin severity; any status
# not matched here is treated as OK by check().
my @conditions = (
    ['^absent$' => 'WARNING'],
    ['^fault$' => 'CRITICAL'],
    ['^not available$' => 'UNKNOWN'],
);
# Report the status of every FRU and raise the severity mapped in
# @conditions when the status matches a non-OK pattern.
sub check {
    my ($self) = @_;

    $self->{output}->output_add(long_msg => "Checking frus");
    $self->{components}->{fru} = {name => 'frus', total => 0, skip => 0};
    return if ($self->check_exclude(section => 'fru'));

    my $results = $self->{p2000}->get_infos(cmd => 'show frus',
                                            base_type => 'enclosure-fru',
                                            key => 'part-number',
                                            properties_name => '^(fru-status|fru-location)$');
    foreach my $part_number (keys %$results) {
        my $instance = $results->{$part_number}->{'fru-location'};

        next if ($self->check_exclude(section => 'fru', instance => $instance));
        $self->{components}->{fru}->{total}++;

        my $state = $results->{$part_number}->{'fru-status'};
        $self->{output}->output_add(long_msg => sprintf("fru '%s' status is %s.",
                                                        $instance, $state)
                                    );
        foreach my $condition (@conditions) {
            my ($state_pattern, $severity) = @{$condition};
            next unless $state =~ /$state_pattern/i;
            $self->{output}->output_add(severity => $severity,
                                        short_msg => sprintf("fru '%s' status is %s",
                                                             $instance, $state));
            last;
        }
    }
}
1; | s-duret/centreon-plugins | storage/hp/p2000/xmlapi/mode/components/fru.pm | Perl | apache-2.0 | 2,385 |
#
# Copyright 2018 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package cloud::aws::cloudfront::mode::requests;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
my $instance_mode;
# Build the per-instance prefix printed before each metric line,
# e.g. "Instance 'E8T734E1AF1L4' sum ".
sub prefix_metric_output {
    my ($self, %options) = @_;

    my $inst = $options{instance_value};
    return sprintf "Instance '%s' %s ", $inst->{display}, $inst->{stat};
}
# Collect the values for one metric/statistic pair out of new_datas
# into result_values; the per-second rate is derived from the
# timeframe stored by manage_selection.
sub custom_metric_calc {
    my ($self, %options) = @_;

    my $prefix = $self->{instance};
    my $metric = $options{extra_options}->{metric};
    my $stat   = $options{extra_options}->{stat};

    $self->{result_values}->{metric}    = $metric;
    $self->{result_values}->{stat}      = $stat;
    $self->{result_values}->{display}   = $options{new_datas}->{$prefix . '_display'};
    $self->{result_values}->{timeframe} = $options{new_datas}->{$prefix . '_timeframe'};
    $self->{result_values}->{value}     = $options{new_datas}->{$prefix . '_' . $metric . '_' . $stat};
    $self->{result_values}->{value_per_sec} =
        $self->{result_values}->{value} / $self->{result_values}->{timeframe};
    return 0;
}
# Compare either the raw value or the per-second rate (depending on
# --per-sec) against the warning/critical thresholds registered for
# this metric/statistic pair.
sub custom_metric_threshold {
    my ($self, %options) = @_;

    my $rv = $self->{result_values};
    my $checked = defined($instance_mode->{option_results}->{per_sec})
        ? $rv->{value_per_sec}
        : $rv->{value};
    my $suffix = lc($rv->{metric}) . '-' . lc($rv->{stat});

    return $self->{perfdata}->threshold_check(
        value     => $checked,
        threshold => [
            { label => 'critical-' . $suffix, exit_litteral => 'critical' },
            { label => 'warning-' . $suffix, exit_litteral => 'warning' },
        ]
    );
}
# Emit the perfdata entry for one metric/statistic pair; the instance
# name is appended to the label when several instances are reported.
sub custom_metric_perfdata {
    my ($self, %options) = @_;

    my $rv      = $self->{result_values};
    my $metric  = lc($rv->{metric});
    my $stat    = lc($rv->{stat});
    my $per_sec = defined($instance_mode->{option_results}->{per_sec});

    my $extra_label = '';
    if (!defined($options{extra_instance}) || $options{extra_instance} != 0) {
        $extra_label = '_' . lc($rv->{display});
    }

    $self->{output}->perfdata_add(
        label    => $metric . "_" . $stat . $extra_label,
        unit     => $per_sec ? 'requests/s' : 'requests',
        value    => sprintf("%.2f", $per_sec ? $rv->{value_per_sec} : $rv->{value}),
        warning  => $self->{perfdata}->get_perfdata_for_output(label => 'warning-' . $metric . "-" . $stat),
        critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical-' . $metric . "-" . $stat),
    );
}
# Human-readable output line for one metric, e.g. "Requests: 12requests".
# NOTE(review): change_bytes() scales the value as if it were a byte
# count (the returned $unit is discarded), so e.g. 2048 requests would
# be printed with a scaled-down number — presumably the raw value (or
# the $unit suffix) was intended; confirm against sibling cloudwatch
# modes before changing the output format.
sub custom_metric_output {
    my ($self, %options) = @_;
    my $msg = "";

    if (defined($instance_mode->{option_results}->{per_sec})) {
        # --per-sec: report the rate computed in custom_metric_calc.
        my ($value, $unit) = $self->{perfdata}->change_bytes(value => $self->{result_values}->{value_per_sec});
        $msg = $self->{result_values}->{metric} . ": " . $value . "requests/s";
    } else {
        my ($value, $unit) = $self->{perfdata}->change_bytes(value => $self->{result_values}->{value});
        $msg = $self->{result_values}->{metric} . ": " . $value . "requests";
    }
    return $msg;
}
# Register one counter per metric/statistic combination; every counter
# routes its calc/output/perfdata/threshold handling through the
# custom_metric_* callbacks above.
sub set_counters {
    my ($self, %options) = @_;

    $self->{maps_counters_type} = [
        { name => 'metric', type => 1, cb_prefix_output => 'prefix_metric_output', message_multiple => "All requests metrics are ok", skipped_code => { -10 => 1 } },
    ];

    foreach my $stat ('minimum', 'maximum', 'average', 'sum') {
        foreach my $metric ('Requests') {
            push @{$self->{maps_counters}->{metric}}, {
                label => lc($metric) . '-' . lc($stat),
                set   => {
                    key_values => [
                        { name => $metric . '_' . $stat },
                        { name => 'display' },
                        { name => 'stat' },
                        { name => 'timeframe' },
                    ],
                    closure_custom_calc               => $self->can('custom_metric_calc'),
                    closure_custom_calc_extra_options => { metric => $metric, stat => $stat },
                    closure_custom_output             => $self->can('custom_metric_output'),
                    closure_custom_perfdata           => $self->can('custom_metric_perfdata'),
                    closure_custom_threshold_check    => $self->can('custom_metric_threshold'),
                },
            };
        }
    }
}
# Constructor: delegate to the counter template, then declare the
# mode-specific command-line options.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $self->{version} = '1.0';
    $options{options}->add_options(arguments => {
        "id:s@"   => { name => 'id' },
        "per-sec" => { name => 'per_sec' },
    });

    return $self;
}
# Validate command-line options and derive the CloudWatch query
# parameters (instances, timeframe, period, statistics, metrics).
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::check_options(%options);

    # --id is mandatory and may be given several times.
    if (!defined($self->{option_results}->{id}) || $self->{option_results}->{id} eq '') {
        $self->{output}->add_option_msg(short_msg => "Need to specify --id option.");
        $self->{output}->option_exit();
    }
    foreach my $id (@{$self->{option_results}->{id}}) {
        push @{$self->{aws_instance}}, $id if $id ne '';
    }

    $self->{aws_timeframe} = defined($self->{option_results}->{timeframe}) ? $self->{option_results}->{timeframe} : 600;
    $self->{aws_period}    = defined($self->{option_results}->{period}) ? $self->{option_results}->{period} : 60;

    # CloudWatch statistic names are capitalised ('Sum', 'Average', ...).
    $self->{aws_statistics} = ['Sum'];
    if (defined($self->{option_results}->{statistic})) {
        $self->{aws_statistics} = [
            map { ucfirst(lc($_)) } grep { $_ ne '' } @{$self->{option_results}->{statistic}}
        ];
    }

    push @{$self->{aws_metrics}}, 'Requests';

    # File-scoped handle used by the custom_metric_* callbacks.
    $instance_mode = $self;
}
# Fetch the CloudWatch 'Requests' metric for every selected
# distribution and build the datas hash consumed by the counter
# template (one entry per instance/statistic pair).
sub manage_selection {
    my ($self, %options) = @_;

    my %metric_results;
    foreach my $instance (@{$self->{aws_instance}}) {
        # One CloudWatch call per distribution; the fixed
        # Region=Global dimension is how CloudFront publishes metrics.
        $metric_results{$instance} = $options{custom}->cloudwatch_get_metrics(
            region => $self->{option_results}->{region},
            namespace => 'AWS/CloudFront',
            dimensions => [ { Name => 'Region', Value => 'Global' }, { Name => 'DistributionId', Value => $instance } ],
            metrics => $self->{aws_metrics},
            statistics => $self->{aws_statistics},
            timeframe => $self->{aws_timeframe},
            period => $self->{aws_period},
        );
        foreach my $metric (@{$self->{aws_metrics}}) {
            foreach my $statistic (@{$self->{aws_statistics}}) {
                # Missing datapoints are skipped unless --zeroed maps
                # them to 0.
                next if (!defined($metric_results{$instance}->{$metric}->{lc($statistic)}) && !defined($self->{option_results}->{zeroed}));
                $self->{metric}->{$instance . "_" . lc($statistic)}->{display} = $instance;
                $self->{metric}->{$instance . "_" . lc($statistic)}->{stat} = lc($statistic);
                $self->{metric}->{$instance . "_" . lc($statistic)}->{timeframe} = $self->{aws_timeframe};
                $self->{metric}->{$instance . "_" . lc($statistic)}->{$metric . "_" . lc($statistic)} = defined($metric_results{$instance}->{$metric}->{lc($statistic)}) ? $metric_results{$instance}->{$metric}->{lc($statistic)} : 0;
            }
        }
    }
    # No usable datapoint at all is treated as a configuration error.
    if (scalar(keys %{$self->{metric}}) <= 0) {
        $self->{output}->add_option_msg(short_msg => 'No metrics. Check your options or use --zeroed option to set 0 on undefined values');
        $self->{output}->option_exit();
    }
}
1;
__END__
=head1 MODE
Check CloudFront instances requests.
Example:
perl centreon_plugins.pl --plugin=cloud::aws::cloudfront::plugin --custommode=paws --mode=requests --region='eu-west-1'
--id='E8T734E1AF1L4' --statistic='sum' --critical-requests-sum='10' --verbose
See 'https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/cf-metricscollected.html' for more information.
Default statistic: 'sum' / Valid statistic: 'sum'.
=over 8
=item B<--id>
Set the instance id (Required) (Can be multiple).
=item B<--warning-$metric$-$statistic$>
Thresholds warning ($metric$ can be: 'requests',
$statistic$ can be: 'minimum', 'maximum', 'average', 'sum').
=item B<--critical-$metric$-$statistic$>
Thresholds critical ($metric$ can be: 'requests',
$statistic$ can be: 'minimum', 'maximum', 'average', 'sum').
=back
=cut
| wilfriedcomte/centreon-plugins | cloud/aws/cloudfront/mode/requests.pm | Perl | apache-2.0 | 9,488 |
package Paws::S3::AbortIncompleteMultipartUpload;
use Moose;
# Number of days after a multipart upload is initiated before the S3
# lifecycle rule may abort it and remove its uploaded parts.
has DaysAfterInitiation => (is => 'ro', isa => 'Int');
1;
### main pod documentation begin ###
=head1 NAME
Paws::S3::AbortIncompleteMultipartUpload
=head1 USAGE
This class represents one of two things:
=head3 Arguments in a call to a service
Use the attributes of this class as arguments to methods. You shouldn't make instances of this class.
Each attribute should be used as a named argument in the calls that expect this type of object.
As an example, if Att1 is expected to be a Paws::S3::AbortIncompleteMultipartUpload object:
$service_obj->Method(Att1 => { DaysAfterInitiation => $value, ..., DaysAfterInitiation => $value });
=head3 Results returned from an API call
Use accessors for each attribute. If Att1 is expected to be an Paws::S3::AbortIncompleteMultipartUpload object:
$result = $service_obj->Method(...);
$result->Att1->DaysAfterInitiation
=head1 DESCRIPTION
Specifies the days since the initiation of an Incomplete Multipart
Upload that Lifecycle will wait before permanently removing all parts
of the upload.
=head1 ATTRIBUTES
=head2 DaysAfterInitiation => Int
Indicates the number of days that must pass since initiation for
Lifecycle to abort an Incomplete Multipart Upload.
=head1 SEE ALSO
This class forms part of L<Paws>, describing an object used in L<Paws::S3>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/S3/AbortIncompleteMultipartUpload.pm | Perl | apache-2.0 | 1,562 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 DESCRIPTION
A database class that creates the DBIC schema in a test database, and then
cleans up after itself.
Convenience methods prevent having to delve into DBIC guts for common activities
=head1 SYNOPSIS
my $testdb = Xref::Test::TestDB->new(
config_file => 'filename_if_not_default.conf',
reuse => 1 # This prevents the test DB cleanup, so you can debug database content
);
=cut
package Xref::Test::TestDB;
use strict;
use warnings;
use Moose;
use namespace::autoclean;
extends 'Xref::DB';
# When true, DEMOLISH() leaves the throw-away test database in place
# so its contents can be inspected for debugging after a test run.
has reuse => (
  isa => 'Bool',
  is => 'ro',
  default => 0,
);
# Default config filename used when the caller provides none.
sub _guess_config { return 'testdb.conf'; }
# On top of default config validator, inject a randomised test DB name
# Wraps Xref::DB::_init_config: when the config carries no 'db' entry,
# invent a per-user randomised database name and request its creation.
# NOTE(review): @args are captured but not forwarded to the inner
# initializer — confirm the wrapped method takes no arguments.
around '_init_config' => sub {
  my ($sub, $self, @args) = @_;
  my $proto_config = $self->$sub();
  if (! exists $proto_config->{db}) {
    # Randomised name keeps parallel test runs from colliding.
    $proto_config->{db} = sprintf '%s_xref_test_%s',$ENV{USER},int(rand(100000));
    $proto_config->{create} = 1;
  }
  # return $proto_config;
  $self->config($proto_config);
  return;
};
=head2 DEMOLISH
Description: It's a destructor. It cleans up databases left behind by the test
Behaviour is overridden with $self->reuse(1)
=cut
# Destructor: drop the throw-away test database unless reuse was
# requested (or no config was ever built).
sub DEMOLISH {
  my $self = shift;

  return if $self->reuse != 0 || !defined $self->config;

  my $driver = $self->config->{driver};
  if ($driver eq 'SQLite') {
    unlink $self->config->{file};
  } elsif ($driver eq 'mysql') {
    $self->schema->storage->dbh->do('drop database ' . $self->config->{db});
  }
  return;
}
__PACKAGE__->meta->make_immutable;
1;
| Ensembl/ensembl | misc-scripts/xref_mapping/Xref/Test/TestDB.pm | Perl | apache-2.0 | 2,261 |
package VMOMI::GatewayOperationRefused;
use parent 'VMOMI::GatewayConnectFault';

use strict;
use warnings;

# Fault inheritance chain, nearest ancestor first.
our @class_ancestors = qw(
    GatewayConnectFault
    HostConnectFault
    VimFault
    MethodFault
);

# This fault type declares no members of its own.
our @class_members = ();

sub get_class_ancestors {
    return @class_ancestors;
}

sub get_class_members {
    my $class = shift;
    return ($class->SUPER::get_class_members(), @class_members);
}

1;
| stumpr/p5-vmomi | lib/VMOMI/GatewayOperationRefused.pm | Perl | apache-2.0 | 468 |
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::alcatel::omniswitch::snmp::mode::hardware;
use base qw(centreon::plugins::templates::hardware);
use network::alcatel::omniswitch::snmp::mode::components::resources qw(%oids);
# Declare the SNMP execution hook, the default status->severity
# mappings and the component submodules handled by this mode.
sub set_system {
    my ($self, %options) = @_;

    $self->{cb_hook2} = 'snmp_execute';

    # Patterns are matched against the textual status values and can
    # be overridden on the command line with --threshold-overload.
    # NOTE(review): 'takeoverWithFabrc' looks misspelled — verify
    # against the AlcatelIND1Chassis MIB enumeration before changing.
    $self->{thresholds} = {
        admin => [
            ['^(reset|takeover|resetWithFabric|takeoverWithFabrc)$', 'WARNING'],
            ['^(powerOff)$', 'CRITICAL'],
            ['powerOn', 'OK'],
            ['standby', 'OK'],
        ],
        oper => [
            ['^(testing)$', 'WARNING'],
            ['^(unpowered|down|notpresent)$', 'CRITICAL'],
            ['up', 'OK'],
            ['secondary', 'OK'],
            ['master', 'OK'],
            ['idle', 'OK'],
        ],
        fan => [
            ['^noStatus$', 'UNKNOWN'],
            ['^notRunning$', 'CRITICAL'],
            ['running', 'OK'],
        ],
    };

    $self->{components_path} = 'network::alcatel::omniswitch::snmp::mode::components';
    $self->{components_module} = ['backplane', 'chassis', 'container', 'fan', 'module', 'other', 'port', 'psu', 'sensor', 'stack', 'unknown'];
}
# Fetch the entity tables plus the AOS6/AOS7 fan status tables, then
# decide which AOS generation answered by looking for OIDs rooted in
# the AOS7 enterprise subtree.
sub snmp_execute {
    my ($self, %options) = @_;

    $self->{snmp} = $options{snmp};
    $self->{results} = $self->{snmp}->get_multiple_table(oids => [
        { oid => $oids{common}->{entPhysicalClass} },
        { oid => $oids{aos6}->{alaChasEntPhysFanStatus} },
        { oid => $oids{aos7}->{alaChasEntPhysFanStatus} },
    ]);
    $self->{results}->{entity} = $self->{snmp}->get_multiple_table(
        oids => [
            map { { oid => $_ } } (
                $oids{common}->{entPhysicalDescr},
                $oids{common}->{entPhysicalName},
                $oids{aos6}->{chasEntPhysAdminStatus},
                $oids{aos6}->{chasEntPhysOperStatus},
                $oids{aos6}->{chasEntPhysPower},
                $oids{aos7}->{chasEntPhysAdminStatus},
                $oids{aos7}->{chasEntPhysOperStatus},
                $oids{aos7}->{chasEntPhysPower},
            )
        ],
        return_type => 1
    );

    # Default to AOS6 unless any returned OID lives under the AOS7
    # enterprise base.
    $self->{type} = 'aos6';
    foreach my $oid_key (keys %{$self->{results}->{entity}}) {
        next unless $oid_key =~ /^$oids{aos7}->{entreprise_alcatel_base}\./;
        $self->{type} = 'aos7';
        last;
    }
}
# Plain constructor: delegate to the hardware template; this mode
# registers no additional command-line options.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    $options{options}->add_options(arguments => {});
    return $self;
}
1;
__END__
=head1 MODE
Check status of alcatel hardware (AlcatelIND1Chassis.mib).
=over 8
=item B<--component>
Which component to check (Default: '.*').
Can be: 'other', 'unknown', 'chassis', 'backplane', 'container', 'psu', 'fan',
'sensor', 'module', 'port, 'stack'.
Some not exists ;)
=item B<--filter>
Exclude some parts (comma separated list) (Example: --filter=fan)
Can also exclude specific instance: --filter=fan,1.2
=item B<--no-component>
Return an error if no components are checked.
If total (with skipped) is 0. (Default: 'critical' returns).
=item B<--threshold-overload>
Set to overload default threshold values (syntax: section,[instance,]status,regexp)
It used before default thresholds (order stays).
Example: --threshold-overload='psu.oper,CRITICAL,standby'
=back
=cut
| Tpo76/centreon-plugins | network/alcatel/omniswitch/snmp/mode/hardware.pm | Perl | apache-2.0 | 4,031 |
package VMOMI::OvfConsumerOvfSection;
use parent 'VMOMI::DynamicData';

use strict;
use warnings;

# Single ancestor: this type derives directly from DynamicData.
our @class_ancestors = qw(DynamicData);

# Member layout: [name, type-override, optional-flag].
our @class_members = (
    ['lineNumber', undef, 0],
    ['xml',        undef, 0],
);

sub get_class_ancestors {
    return @class_ancestors;
}

sub get_class_members {
    my $class = shift;
    return ($class->SUPER::get_class_members(), @class_members);
}

1;
| stumpr/p5-vmomi | lib/VMOMI/OvfConsumerOvfSection.pm | Perl | apache-2.0 | 449 |
=head1 LICENSE
Copyright (c) 1999-2011 The European Bioinformatics Institute and
Genome Research Limited. All rights reserved.
This software is distributed under a modified Apache license.
For license details, please see
http://www.ensembl.org/info/about/code_licence.html
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <dev@ensembl.org>.
Questions may also be sent to the Ensembl help desk at
<helpdesk@ensembl.org>.
=cut
=head1 NAME
Bio::EnsEMBL::Map::DBSQL::DitagFeatureAdaptor
=head1 SYNOPSIS
my $dfa = $db->get_DitagFeatureAdaptor;
my $ditagFeatures = $dfa->fetch_all_by_Slice( $slice, "SME005" );
foreach my $ditagFeature (@$ditagFeatures) {
print $ditagFeature->ditag_id . " "
. $ditagFeature->slice . " "
. $ditagFeature->start . "-"
. $ditagFeature->end . " "
. $ditagFeature->strand;
}
=head1 DESCRIPTION
Provides database interaction for the Bio::EnsEMBL::Map::DitagFeature
object
=head1 METHODS
=cut
package Bio::EnsEMBL::Map::DBSQL::DitagFeatureAdaptor;
use strict;
use vars ('@ISA');
use Bio::EnsEMBL::Map::Ditag;
use Bio::EnsEMBL::Map::DitagFeature;
use Bio::EnsEMBL::DBSQL::BaseAdaptor;
use Bio::EnsEMBL::Utils::Exception qw( throw warning );
@ISA = qw(Bio::EnsEMBL::DBSQL::BaseAdaptor);
=head2 fetch_all
Arg [1] : none
Example : @all_tags = @{$ditagfeature_adaptor->fetch_all};
Description: Retrieves all ditagFeatures from the database;
Usually not a good idea, use fetch_all_by_Slice instead.
Returntype : listref of Bio::EnsEMBL::Map::DitagFeature
Caller : general
Status : At Risk
=cut
sub fetch_all {
  my $self = shift;

  # Join to ditag so tag_count can be returned with each feature row.
  my $sth = $self->prepare("SELECT df.ditag_feature_id, df.ditag_id, df.seq_region_id,
                            df.seq_region_start, df.seq_region_end, df.seq_region_strand,
                            df.analysis_id, df.hit_start, df.hit_end, df.hit_strand,
                            df.cigar_line, df.ditag_side, df.ditag_pair_id, d.tag_count
                            FROM ditag_feature df, ditag d
                            WHERE df.ditag_id=d.ditag_id" );
  $sth->execute;
  # _fetch turns the rows into DitagFeature objects.
  my $result = $self->_fetch($sth);
  return $result;
}
=head2 fetch_by_dbID
Arg [1] : ditagFeature dbID
Example : @my_tags = @{$ditagfeature_adaptor->fetch_by_dbID($my_id)};
Description: Retrieves a ditagFeature from the database.
Returntype : Bio::EnsEMBL::Map::DitagFeature
Caller : general
Status : At Risk
=cut
sub fetch_by_dbID {
  my ($self, $dbid) = @_;

  my $sth = $self->prepare("SELECT df.ditag_feature_id, df.ditag_id, df.seq_region_id,
                            df.seq_region_start, df.seq_region_end, df.seq_region_strand,
                            df.analysis_id, df.hit_start, df.hit_end, df.hit_strand,
                            df.cigar_line, df.ditag_side, df.ditag_pair_id, d.tag_count
                            FROM ditag_feature df, ditag d
                            WHERE df.ditag_id=d.ditag_id AND df.ditag_feature_id = ?" );
  $sth->execute($dbid);
  # _fetch returns a listref; the dbID is unique, so the first (only)
  # element is returned (undef when no row matched).
  my $result = $self->_fetch($sth);
  return $result->[0];
}
=head2 fetch_all_by_ditagID
Arg [1] : ditag dbID
Arg [2] : (optional) ditag-pair dbID
Arg [3] : (optional) analysis ID
Example : @my_tags = @{$ditagfeature_adaptor->fetch_all_by_ditag_id($my_id)};
Description: Retrieves all ditagFeatures from the database linking to a specific ditag-id
Returntype : listref of Bio::EnsEMBL::Map::DitagFeature
Caller : general
Status : At Risk
=cut
# Retrieve all DitagFeatures for one ditag id, optionally narrowed to
# a ditag_pair_id and/or analysis id; ordered by ditag_pair_id.
sub fetch_all_by_ditagID {
  my ($self, $ditag_id, $ditag_pair_id, $analysis_id) = @_;

  # Bind values are collected in a list alongside the growing SQL.
  # (Previously they were joined into a comma-separated string and
  # re-split at execute time, which handed leading-space values to the
  # driver and would break on any value containing a comma.)
  my @bind = ($ditag_id);

  my $sql = "SELECT df.ditag_feature_id, df.ditag_id, df.seq_region_id,
                    df.seq_region_start, df.seq_region_end, df.seq_region_strand,
                    df.analysis_id, df.hit_start, df.hit_end, df.hit_strand,
                    df.cigar_line, df.ditag_side, df.ditag_pair_id, d.tag_count
             FROM ditag_feature df, ditag d
             WHERE df.ditag_id=d.ditag_id AND df.ditag_id = ? ";
  if($ditag_pair_id){
    $sql .= "AND df.ditag_pair_id = ? ";
    push @bind, $ditag_pair_id;
  }
  if($analysis_id){
    $sql .= "AND df.analysis_id = ? ";
    push @bind, $analysis_id;
  }
  $sql .= "ORDER BY df.ditag_pair_id";

  my $sth = $self->prepare($sql);
  $sth->execute(@bind);
  my $result = $self->_fetch($sth);
  return $result;
}
=head2 fetch_all_by_type
Arg [1] : ditag type
Example : @my_tags = @{$ditagfeature_adaptor->fetch_all_by_type($type)};
Description: Retrieves all ditagFeatures from the database linking to a specific ditag-type
Returntype : listref of Bio::EnsEMBL::Map::DitagFeature
Caller : general
Status : At Risk
=cut
sub fetch_all_by_type {
  my ($self, $ditag_type) = @_;

  # Type lives on the parent ditag row; results are grouped so paired
  # features come out adjacent.
  my $sth = $self->prepare("SELECT df.ditag_feature_id, df.ditag_id, df.seq_region_id,
                            df.seq_region_start, df.seq_region_end, df.seq_region_strand,
                            df.analysis_id, df.hit_start, df.hit_end, df.hit_strand,
                            df.cigar_line, df.ditag_side, df.ditag_pair_id, d.tag_count
                            FROM ditag_feature df, ditag d
                            WHERE df.ditag_id=d.ditag_id AND d.type = ?
                            ORDER BY df.ditag_id, df.ditag_pair_id" );
  $sth->execute($ditag_type);
  my $result = $self->_fetch($sth);
  return $result;
}
=head2 fetch_all_by_Slice
Arg [1] : Bio::EnsEMBL::Slice
Arg [2] : (optional) ditag type name (specific library) or an aray ref with multiple type names
Arg [3] : (optional) analysis logic_name
Example : $tags = $ditagfeature_adaptor->fetch_all_by_Slice($slice, "SME005");
Description: Retrieves ditagFeatures from the database overlapping a specific region
and (optional) of a specific ditag type or analysis.
Start & end locations are returned in slice coordinates, now.
Returntype : listref of Bio::EnsEMBL::Map::DitagFeatures
Caller : general
Status : At Risk
=cut
sub fetch_all_by_Slice {
  my ($self, $slice, $tagtype, $logic_name) = @_;

  my @result;
  # NOTE(review): $moresql is never used in this sub.
  my $moresql;

  if(!ref($slice) || !$slice->isa("Bio::EnsEMBL::Slice")) {
    throw("Bio::EnsEMBL::Slice argument expected not $slice.");
  }

  #get affected ditag_feature_ids
  my $sql = "SELECT df.ditag_feature_id, df.ditag_id, df.seq_region_id, df.seq_region_start,
             df.seq_region_end, df.seq_region_strand, df.analysis_id, df.hit_start, df.hit_end,
             df.hit_strand, df.cigar_line, df.ditag_side, df.ditag_pair_id,
             d.tag_count
             FROM ditag_feature df, ditag d
             WHERE df.ditag_id=d.ditag_id";
  if($tagtype){
    my $tagtypes = '';
    #check if array
    # Accept either a single type name or an array ref of names; each
    # is double-quoted and joined into an IN(...) clause.
    # NOTE(review): type names are interpolated into the SQL, not
    # bound — safe only for trusted library names; placeholders would
    # be more robust.
    if(ref $tagtype eq 'ARRAY'){
      my @arraytype_mod;
      foreach my $arraytype (@$tagtype){ push @arraytype_mod, '"'.$arraytype.'"' }
      $tagtypes = join(", ", @arraytype_mod);
    }
    else{
      $tagtypes = '"'.$tagtype.'"';
    }
    $sql .= " AND d.type IN(".$tagtypes.")";
  }
  if($logic_name){
    # Unknown logic_name short-circuits with undef rather than an
    # empty list.
    my $analysis = $self->db->get_AnalysisAdaptor->fetch_by_logic_name($logic_name);
    if(!$analysis) {
      return undef;
    }
    $sql .= " AND df.analysis_id = ".$analysis->dbID();
  }
  # Overlap condition in seq_region coordinates of the slice's region.
  $sql .= " AND df.seq_region_id = ".$slice->get_seq_region_id.
          " AND df.seq_region_start <= ".$slice->end.
          " AND df.seq_region_end >= ".$slice->start;
  my $sth = $self->prepare($sql);
  $sth->execute();
  # Passing the slice makes _fetch map features into slice-relative
  # coordinates.
  my $result = $self->_fetch($sth, $slice);
  push(@result, @$result);
  return \@result;
}
=head2 fetch_pairs_by_Slice
Arg [1] : Bio::EnsEMBL::Slice
Arg [2] : (optional) ditag type (specific library)
Arg [3] : (optional) analysis logic_name
Example : my $ditagfeatures = $dfa->fetch_pairs_by_Slice($slice);
foreach my $ditagfeature (@$ditagfeatures){
$minstart = $$ditagfeature2{'start'};
$maxend = $$ditagfeature2{'end'};
$bothstrand = $$ditagfeature2{'strand'};
$tag_count = $$ditagfeature2{'tag_count'};
print "$minstart, $maxend, $bothstrand, $tag_count\n";
}
Description: Retrieves ditagFeature information in pairs from the database overlapping a specific region
and (optional) of a specific ditag type or analysis. The absotute start and end points are
fetched.
Slices should be SMALL!
Returntype : array ref with hash ref of artificial DitagFeature object
Caller : general
Status : At Risk
=cut
# Retrieve per-pair summaries (outermost start/end, strand, tag_count)
# of DitagFeatures overlapping a slice, optionally filtered by ditag
# type and analysis logic_name. Coordinates are returned relative to
# the slice. Returns an array ref of plain hash refs.
sub fetch_pairs_by_Slice {
  my ($self, $slice, $tagtype, $logic_name) = @_;

  my ($tag_id, $pair_id, $seq_region_id, $start, $end, $strand, $analysis_id, $tag_count);
  my @result;

  my $sql = "SELECT df.ditag_id, df.ditag_pair_id, df.seq_region_id, MIN(df.seq_region_start), ".
            "MAX(df.seq_region_end), df.seq_region_strand, df.analysis_id, d.tag_count ".
            "FROM ditag_feature df, ditag d ".
            "WHERE df.ditag_id=d.ditag_id ";
  # NOTE(review): $tagtype is interpolated into the SQL, not bound —
  # safe only for trusted library names.
  if($tagtype){
    $sql .= "AND d.type = \"".$tagtype."\"";
  }
  $sql .= " AND df.seq_region_id = ".$slice->get_seq_region_id.
          " AND df.seq_region_start <= ".$slice->end.
          " AND df.seq_region_end >= ".$slice->start;
  if($logic_name){
    # Unknown logic_name short-circuits with undef.
    my $analysis = $self->db->get_AnalysisAdaptor->fetch_by_logic_name($logic_name);
    if(!$analysis) {
      return undef;
    }
    $sql .= " AND df.analysis_id = ".$analysis->dbID();
  }
  $sql .= " GROUP BY df.ditag_id, df.ditag_pair_id;";

  my $sth = $self->prepare($sql);
  $sth->execute();
  $sth->bind_columns( \$tag_id, \$pair_id, \$seq_region_id, \$start, \$end, \$strand, \$analysis_id ,\$tag_count);

  while ( $sth->fetch ) {
    #convert into relative slice coordinates
    if($slice->strand == 1) {
      $start = $start - $slice->start + 1;
      $end   = $end   - $slice->start + 1;
    }
    else{
      # BUGFIX: the reverse-strand conversion previously computed the
      # new $end from the already-overwritten $start, producing a
      # corrupt end coordinate. Keep the original start in a temporary
      # first (this mirrors the identical conversion in _fetch()).
      my $tmp_region_start = $start;
      $start  = $slice->end - $end + 1;
      $end    = $slice->end - $tmp_region_start + 1;
      $strand *= -1;
    }

    my %ditag_feature_pair = (
      ditag     => $tag_id,
      pair_id   => $pair_id,
      region    => $seq_region_id,
      start     => $start,
      end       => $end,
      strand    => $strand,
      analysis  => $analysis_id,
      tag_count => $tag_count
    );
    push(@result, \%ditag_feature_pair);
  }

  return \@result;
}
=head2 _fetch
Arg [1] : statement handler
Arg [2] : (optional) target-slice for the feature
Description: generic sql-fetch function for the DitagFeature fetch methods
Returntype : listref of Bio::EnsEMBL::Map::DitagFeatures
Caller : private
Status : At Risk
=cut
# Shared row-to-object mapper for the fetch_* methods: binds the
# standard column list, optionally remaps coordinates onto $dest_slice
# and builds one DitagFeature per row.
sub _fetch {
  my ($self, $sth, $dest_slice) = @_;

  my ( $tag_id, $mothertag_id, $seqreg, $seqstart, $seqend, $strand, $analysis_id, $hit_start,
       $hit_end, $hit_strand, $cigar_line, $ditag_side, $ditag_pair_id, $tag_count );
  # Column order must match the SELECT lists used by all callers.
  $sth->bind_columns( \$tag_id, \$mothertag_id, \$seqreg,
                      \$seqstart, \$seqend, \$strand,
                      \$analysis_id, \$hit_start, \$hit_end,
                      \$hit_strand, \$cigar_line, \$ditag_side,
                      \$ditag_pair_id, \$tag_count );

  my @ditag_features;
  my $dest_slice_start;
  my $dest_slice_end;
  my $dest_slice_strand;
  if($dest_slice) {
    $dest_slice_start  = $dest_slice->start();
    $dest_slice_end    = $dest_slice->end();
    $dest_slice_strand = $dest_slice->strand();
  }
  while ( $sth->fetch ) {
    my $analysis_obj = $self->db->get_AnalysisAdaptor->fetch_by_dbID($analysis_id);
    my $slice = $self->db->get_SliceAdaptor->fetch_by_seq_region_id($seqreg);
    if($dest_slice) {
      # A full-length forward destination slice needs no remapping.
      if($dest_slice_start != 1 || $dest_slice_strand != 1) {
        if($dest_slice_strand == 1) {
          $seqstart = $seqstart - $dest_slice_start + 1;
          $seqend   = $seqend   - $dest_slice_start + 1;
        } else {
          # Reverse strand: swap via a temporary so the original start
          # is still available when computing the new end.
          my $tmp_seq_region_start = $seqstart;
          $seqstart = $dest_slice_end - $seqend + 1;
          $seqend   = $dest_slice_end - $tmp_seq_region_start + 1;
          $strand *= -1;
        }
        $slice = $dest_slice;
      }
    }
    # The parent Ditag object is fetched lazily (-ditag => undef).
    push @ditag_features,
      Bio::EnsEMBL::Map::DitagFeature->new( -dbid          => $tag_id,
                                            -slice         => $slice,
                                            -start         => $seqstart,
                                            -end           => $seqend,
                                            -strand        => $strand,
                                            -analysis      => $analysis_obj,
                                            -hit_start     => $hit_start,
                                            -hit_end       => $hit_end,
                                            -hit_strand    => $hit_strand,
                                            -ditag_id      => $mothertag_id,
                                            -cigar_line    => $cigar_line,
                                            -ditag_side    => $ditag_side,
                                            -ditag_pair_id => $ditag_pair_id,
                                            -ditag         => undef,
                                            -tag_count     => $tag_count,
                                            -adaptor       => $self,
                                            );
  }
  return \@ditag_features;
}
=head2 sequence
Arg [1] : dbID of DitagFeature
Example : $ditagfeature_adaptor->get_sequence($ditagFeature->dbID)
Description: get the part of the sequence of a ditag,
that is actully aligned to the genome.
Returntype : string
Exceptions : thrown if not all data needed for storing is populated in the
ditag features
Caller : Bio::EnsEMBL::Map::DitagFeature
Status : At Risk
=cut
sub sequence {
  my ($self, $dbID) = @_;

  my $sequence = undef;
  my $db = $self->db() or throw "Couldn t get database connection.";
  my $sql = "SELECT d.sequence, df.hit_start, df.hit_end, df.hit_strand ".
            "FROM ditag d, ditag_feature df ".
            "WHERE df.ditag_id=d.ditag_id and df.ditag_feature_id = ?";
  my $sth = $db->dbc->prepare($sql);
  $sth->execute( $dbID );
  my ($seq, $start, $end, $strand) = $sth->fetchrow_array();
  # Truthiness check also skips hit_start/hit_end/hit_strand of 0.
  if($seq and $start and $end and $strand){
    # Extract the aligned part of the ditag sequence.
    # NOTE(review): the substring length is ($end - $strand), i.e.
    # $end-1 or $end+1 depending on strand — presumably
    # ($end - $start + 1) was intended; confirm against the hit
    # coordinate convention before changing.
    $sequence = substr($seq, ($start-1), ($end-$strand));
    if($strand == -1) {
      # Reverse-strand hits are reverse-complemented (IUPAC-aware).
      $sequence =~ tr/acgtrymkswhbvdnxACGTRYMKSWHBVDNX/tgcayrkmswdvbhnxTGCAYRKMSWDVBHNX/;
    }
  }
  return $sequence;
}
=head2 store
Arg [1] : (Array ref of) Bio::EnsEMBL::Map::DitagFeature
Example : $ditagfeature_adaptor->store(@ditag_features);
Description: Stores a single ditagFeature or
a list of ditagFeatures in this database.
Returntype : none
Exceptions : thrown if not all data needed for storing is populated in the
ditag features
Caller : general
Status : At Risk
=cut
# Store one DitagFeature or a list ref of them. Features that already
# carry a dbID are inserted with that id ($sth2); otherwise the id is
# auto-generated ($sth1) and written back onto the object.
sub store {
  my ( $self, $ditag_features ) = @_;

  # Normalise the argument to an array ref.
  if ( ref $ditag_features eq 'ARRAY' ) {
    if ( scalar(@$ditag_features) == 0 ) {
      throw( "Must call store with ditag_feature or list ref of ditags_features" );
    }
  } elsif ($ditag_features) {
    my @ditag_features;
    push @ditag_features, $ditag_features;
    $ditag_features = \@ditag_features;
  } else {
    throw( "Must call store with ditag_feature or list ref of ditag_features." );
  }

  my $db = $self->db() or throw "Couldn t get database connection.";

  # Insert without explicit dbID (auto-increment).
  my $sth1 = $self->prepare( "INSERT INTO ditag_feature( ditag_id, seq_region_id, seq_region_start,
                             seq_region_end, seq_region_strand, analysis_id, hit_start, hit_end,
                             hit_strand, cigar_line, ditag_side, ditag_pair_id )
                             VALUES( ?,?,?,?,?,?,?,?,?,?,?,? )" );
  # Insert preserving a caller-supplied dbID.
  my $sth2 = $self->prepare( "INSERT INTO ditag_feature( ditag_feature_ID, ditag_id, seq_region_id,
                             seq_region_start, seq_region_end, seq_region_strand, analysis_id, hit_start,
                             hit_end, hit_strand, cigar_line, ditag_side, ditag_pair_id )
                             VALUES( ?,?,?,?,?,?,?,?,?,?,?,?,? )" );
#  my $sth3 = $self->prepare( "SELECT COUNT(*) FROM ditag_feature
#                             WHERE ditag_id = ?" );

 TAG:
  foreach my $ditag_feature (@$ditag_features) {
    # Validate type, storage state and the link to a real Ditag row.
    if ( !ref $ditag_feature || !$ditag_feature->isa("Bio::EnsEMBL::Map::DitagFeature") ) {
      throw( "Object must be an Ensembl DitagFeature, "
             . "not a " . ref($ditag_feature) );
    }
    if ( $ditag_feature->is_stored($db) ) {
      warning( "DitagFeature " . $ditag_feature->dbID .
               " is already stored in this database,".
               " maybe you need to use the update() method?" );
      next TAG;
    }
    if(!$ditag_feature->ditag_id or !($self->db->get_DitagAdaptor->fetch_by_dbID($ditag_feature->ditag_id))){
      throw("DitagFeature must be supplied with the id of a corresponding Ditag object.");
    }
    if(!$ditag_feature->ditag or !$ditag_feature->ditag->isa("Bio::EnsEMBL::Map::Ditag")){
      throw("DitagFeature must be linked to a valid Ditag object.");
    }

    # Disabled duplicate-count safeguard kept for reference.
#    #check if more than x tags with this ditag id exist
#    $sth3->execute( $ditag_feature->ditag_id );
#    my ($num) = $sth3->fetchrow_array();
#    if ( ($num) and ($num > 1) ) {
#      warning( "There are already at least 2 DitagFeatures relating to Ditag ".
#               $ditag->ditag_id." stored in this database." );
#      if ( $num > 4 ) {
#        warning( "not storing" );
#        next TAG;
#      }
#    }

    if ( $ditag_feature->dbID ) {
      $sth2->bind_param( 1, $ditag_feature->dbID, SQL_INTEGER );
      $sth2->bind_param( 2, $ditag_feature->ditag_id, SQL_INTEGER );
      $sth2->bind_param( 3, ($ditag_feature->slice->get_seq_region_id), SQL_INTEGER );
      $sth2->bind_param( 4, $ditag_feature->start, SQL_INTEGER );
      $sth2->bind_param( 5, $ditag_feature->end, SQL_INTEGER );
      $sth2->bind_param( 6, $ditag_feature->strand, SQL_VARCHAR );
      $sth2->bind_param( 7, $ditag_feature->analysis->dbID, SQL_INTEGER );
      $sth2->bind_param( 8, $ditag_feature->hit_start, SQL_INTEGER );
      $sth2->bind_param( 9, $ditag_feature->hit_end, SQL_INTEGER );
      $sth2->bind_param( 10, $ditag_feature->hit_strand, SQL_VARCHAR );
      $sth2->bind_param( 11, $ditag_feature->cigar_line, SQL_VARCHAR );
      $sth2->bind_param( 12, $ditag_feature->ditag_side, SQL_VARCHAR );
      $sth2->bind_param( 13, $ditag_feature->ditag_pair_id, SQL_VARCHAR );
      $sth2->execute();
    }
    else{
      $sth1->bind_param( 1, $ditag_feature->ditag_id, SQL_INTEGER );
      $sth1->bind_param( 2, ($ditag_feature->slice->get_seq_region_id), SQL_INTEGER );
      $sth1->bind_param( 3, $ditag_feature->start, SQL_INTEGER );
      $sth1->bind_param( 4, $ditag_feature->end, SQL_INTEGER );
      $sth1->bind_param( 5, $ditag_feature->strand, SQL_VARCHAR );
      $sth1->bind_param( 6, $ditag_feature->analysis->dbID, SQL_INTEGER );
      $sth1->bind_param( 7, $ditag_feature->hit_start, SQL_INTEGER );
      $sth1->bind_param( 8, $ditag_feature->hit_end, SQL_INTEGER );
      $sth1->bind_param( 9, $ditag_feature->hit_strand, SQL_VARCHAR );
      $sth1->bind_param( 10, $ditag_feature->cigar_line, SQL_VARCHAR );
      $sth1->bind_param( 11, $ditag_feature->ditag_side, SQL_VARCHAR );
      $sth1->bind_param( 12, $ditag_feature->ditag_pair_id, SQL_VARCHAR );
      $sth1->execute();
      # Write the generated id back onto the object.
      # NOTE(review): mysql_insertid ties this adaptor to the MySQL
      # driver; confirm whether last_insert_id() portability matters.
      my $dbID = $sth1->{'mysql_insertid'};
      $ditag_feature->dbID($dbID);
      $ditag_feature->adaptor($self);
    }
  }
}
=head2 batch_store
Arg [1] : (Array ref of) Bio::EnsEMBL::Map::DitagFeatures
Arg [2] : bool have_dbIDs
Example : $ditagfeature_adaptor->batch_store(\@ditag_features);
Description: Stores a list of ditagFeatures in this database.
DitagFeatures are expected to have no dbID yet unless flag "have_dbIDs" is true.
They are inserted in one combined INSERT for better performance.
Returntype : none
Exceptions : thrown if not all data needed for storing is given for the
ditag features
Caller : general
Status : At Risk
=cut
sub batch_store {
  my ( $self, $ditag_features, $have_dbIDs ) = @_;
  my @good_ditag_features;
  my ($sql, $sqladd);
  my $inserts = 0;
  # Accept either an array ref of DitagFeatures or a single DitagFeature,
  # normalising the latter into a one-element list ref.
  if ( ref $ditag_features eq 'ARRAY' ) {
    if ( scalar(@$ditag_features) == 0 ) {
      throw( "Must call store with ditag_feature or list ref of ditag_features." );
    }
  } elsif ($ditag_features) {
    my @ditag_features;
    push @ditag_features, $ditag_features;
    $ditag_features = \@ditag_features;
  } else {
    throw( "Must call store with ditag_feature or list ref of ditag_features." );
  }
  my $db = $self->db() or throw "Couldn t get database connection.";
  # Check whether it's a DitagFeature object, is linked to a valid stored
  # Ditag, and is not stored already; only features passing every check are
  # kept in @good_ditag_features for insertion.
  foreach my $ditag_feature (@$ditag_features) {
    if ( !ref $ditag_feature || !$ditag_feature->isa("Bio::EnsEMBL::Map::DitagFeature") ) {
      throw( "Object must be an Ensembl DitagFeature, "
             . "not a " . ref($ditag_feature) );
    }
    if(!$ditag_feature->ditag_id or !($self->db->get_DitagAdaptor->fetch_by_dbID($ditag_feature->ditag_id))){
      throw("DitagFeature must be supplied with the id of a corresponding Ditag object.");
    }
    if(!$ditag_feature->ditag or !$ditag_feature->ditag->isa("Bio::EnsEMBL::Map::Ditag")){
      throw("DitagFeature must be linked to a valid Ditag object.");
    }
    if ( $ditag_feature->is_stored($db) ) {
      warning( "DitagFeature " . $ditag_feature->dbID
               . " is already stored in this database." );
      next;
    }
    push(@good_ditag_features, $ditag_feature);
  }
  $ditag_features = undef;
  # Create a single multi-row batch INSERT for performance.
  # NOTE(review): values are interpolated directly into the SQL string
  # rather than bound as placeholders; this is only safe while accessor
  # values (e.g. cigar_line, ditag_side) never contain quote characters --
  # consider switching to placeholders. TODO confirm.
  if($have_dbIDs){
    # Caller supplies explicit ditag_feature_id values (13 columns).
    $sql = "INSERT INTO ditag_feature ( ditag_feature_id, ditag_id, seq_region_id, seq_region_start, ".
           "seq_region_end, seq_region_strand, analysis_id, hit_start, hit_end, ".
           "hit_strand, cigar_line, ditag_side, ditag_pair_id ) VALUES ";
    foreach my $ditag_feature (@good_ditag_features) {
      $sqladd = "";
      if($inserts){ $sqladd = ", " }
      $sqladd .= "(". $ditag_feature->ditag_feature_id.", ".$ditag_feature->ditag_id.", ".
                 ($ditag_feature->slice->get_seq_region_id).", ". $ditag_feature->start.", ".
                 $ditag_feature->end.", '".$ditag_feature->strand."', ".$ditag_feature->analysis->dbID.", ".
                 $ditag_feature->hit_start.", ".$ditag_feature->hit_end.", '".$ditag_feature->hit_strand.
                 "', '".$ditag_feature->cigar_line."', '".$ditag_feature->ditag_side."', ".
                 $ditag_feature->ditag_pair_id.")";
      $sql .= $sqladd;
      $inserts++;
    }
  }
  else{
    # Database assigns ditag_feature_id via auto-increment (12 columns).
    $sql = "INSERT INTO ditag_feature ( ditag_id, seq_region_id, seq_region_start, ".
           "seq_region_end, seq_region_strand, analysis_id, hit_start, hit_end, ".
           "hit_strand, cigar_line, ditag_side, ditag_pair_id ) VALUES ";
    foreach my $ditag_feature (@good_ditag_features) {
      $sqladd = "";
      if($inserts){ $sqladd = ", " }
      $sqladd .= "(". $ditag_feature->ditag_id.", ".($ditag_feature->slice->get_seq_region_id).", ".
                 $ditag_feature->start.", ".$ditag_feature->end.", '".$ditag_feature->strand."', ".
                 $ditag_feature->analysis->dbID.", ".$ditag_feature->hit_start.", ".$ditag_feature->hit_end.
                 ", '".$ditag_feature->hit_strand."', '".$ditag_feature->cigar_line."', '".
                 $ditag_feature->ditag_side."', ".$ditag_feature->ditag_pair_id.")";
      $sql .= $sqladd;
      $inserts++;
    }
  }
  # Execute the combined statement; failures are downgraded to a warning.
  if($inserts){
    print STDERR "\nHave $inserts Features.\n";
    eval{
      $db->dbc->do($sql);
    };
    if($@){
      warning("Problem inserting ditag feature batch!".$@."\n");
    }
  }
  else{
    warn "Nothing stored!";
  }
}
=head2 update
Arg [1] : ditagFeature to update
Description: update an existing ditagFeature with new values
Returntype : 1 on success
Status : At Risk
=cut
sub update {
  my ($self, $ditagFeature) = @_;

  # Update an existing ditag_feature row in place, keyed on its dbID.
  #
  # BUG FIX: the original code bound every value to placeholder 1 and never
  # bound analysis_id at all, although the statement has 13 placeholders.
  # Each value is now bound to its own (1-based) position, and the missing
  # analysis_id binding (position 6) has been added.
  my $sth = $self->prepare( "UPDATE ditag_feature
               SET ditag_id=?, seq_region_id=?, seq_region_start=?, seq_region_end=?,
               seq_region_strand=?, analysis_id=?, hit_start=?, hit_end=?, hit_strand=?,
               cigar_line=?, ditag_side=?, ditag_pair_id=?
               where ditag_feature_id=?;" );

  $sth->bind_param( 1, $ditagFeature->ditag_id,          SQL_INTEGER);
  $sth->bind_param( 2, $ditagFeature->seq_region_id,     SQL_INTEGER);
  $sth->bind_param( 3, $ditagFeature->seq_region_start,  SQL_INTEGER);
  $sth->bind_param( 4, $ditagFeature->seq_region_end,    SQL_INTEGER);
  $sth->bind_param( 5, $ditagFeature->seq_region_strand, SQL_TINYINT);
  $sth->bind_param( 6, $ditagFeature->analysis->dbID,    SQL_INTEGER);
  $sth->bind_param( 7, $ditagFeature->hit_start,         SQL_INTEGER);
  $sth->bind_param( 8, $ditagFeature->hit_end,           SQL_INTEGER);
  $sth->bind_param( 9, $ditagFeature->hit_strand,        SQL_TINYINT);
  $sth->bind_param(10, $ditagFeature->cigar_line,        SQL_LONGVARCHAR);
  $sth->bind_param(11, $ditagFeature->ditag_side,        SQL_VARCHAR);
  $sth->bind_param(12, $ditagFeature->ditag_pair_id,     SQL_INTEGER);
  $sth->bind_param(13, $ditagFeature->dbID,              SQL_INTEGER);

  # Returns the DBI execute() result (true on success).
  my $result = $sth->execute();
  return $result;
}
=head2 list_dbIDs
Args : None
Example : my @feature_ids = @{$dfa->list_dbIDs()};
Description: Gets an array of internal IDs for all DitagFeature objects in
the current database.
  Arg[1]     : <optional> int. If not 0, the ids are sorted by seq_region.
Returntype : List of ints
Exceptions : None
Status : Stable
=cut
sub list_dbIDs {
  # BUG FIX: the original read "my ($self, $ordered) = shift;", which only
  # assigned $self and left the optional $ordered flag always undef, so the
  # documented seq_region ordering could never be requested.  Unpack @_.
  my ($self, $ordered) = @_;
  return $self->_list_dbIDs('ditag_feature', undef, $ordered);
}
1;
| adamsardar/perl-libs-custom | EnsemblAPI/ensembl/modules/Bio/EnsEMBL/Map/DBSQL/DitagFeatureAdaptor.pm | Perl | apache-2.0 | 26,453 |
#
# Copyright 2014 MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package MongoDB::Op::_GetMore;
# Encapsulate a cursor fetch operation; returns raw results object
# (after inflation from BSON)
use version;
our $VERSION = 'v0.999.999.2'; # TRIAL
use Moose;
use MongoDB::_Types -types;
use Types::Standard -types;
use MongoDB::_Protocol;
use namespace::clean -except => 'meta';
# Full namespace of the collection being iterated ("<db>.<collection>").
has ns => (
    is => 'ro',
    isa => Str,
    required => 1,
);
# Client object associated with this operation.
has client => (
    is => 'ro',
    isa => InstanceOf['MongoDB::MongoClient'],
    required => 1,
);
# Server-side cursor id to continue fetching from.  Typed Str here;
# presumably the packed id from the reply -- TODO confirm against _Protocol.
has cursor_id => (
    is => 'ro',
    isa => Str,
    required => 1,
);
# Number of documents to request per batch; defaults to 0.
has batch_size => (
    is => 'ro',
    isa => Num,
    default => 0,
);
# Role supplying the shared database-op plumbing (presumably bson_codec and
# _query_and_receive used by execute() below -- defined elsewhere).
with 'MongoDB::Role::_DatabaseOp';
# Run the get-more against the given wire link and return the decoded
# result hash, annotated with the address of the server that answered.
sub execute {
    my ( $self, $link ) = @_;

    # Build the wire message from our attributes.
    my ( $op_bson, $request_id ) =
      MongoDB::_Protocol::write_get_more( $self->ns, $self->cursor_id,
        $self->batch_size );

    # Send the message and decode the reply with our BSON codec.
    my $result =
      $self->_query_and_receive( $link, $op_bson, $request_id, $self->bson_codec );

    # Record which server produced this batch.
    $result->{address} = $link->address;

    return $result;
}
1;
| kainwinterheart/mongo-perl-driver | lib/MongoDB/Op/_GetMore.pm | Perl | apache-2.0 | 1,645 |
## OpenXPKI::Workflow::Config
##
package OpenXPKI::Workflow::Context;
use strict;
use warnings;
use English;
use Encode;
use Data::Dumper;
use Workflow 1.39;
use OpenXPKI::Debug;
use OpenXPKI::Exception;
use OpenXPKI::Serialization::Simple;
use OpenXPKI::Server::Context qw( CTX );
use base qw( Workflow::Context );
# Initialize a fresh context: reset the change-tracking bookkeeping and
# delegate the rest to Workflow::Context.
sub init {
    my ( $self, @args ) = @_;
    # No parameters have been touched yet; _init marks the bootstrap phase
    # (checked in param() to skip utf8 decoding during initialization).
    $self->{_updated} = {};
    $self->{_init}    = 0;
    ##! 1: 'Initialize empty context'
    return $self->SUPER::init( @args );
}
# Forget which context keys have been marked as updated; returns $self so
# calls can be chained.
sub reset_updated {
    my ($self) = @_;
    $self->{_updated} = {};
    return $self;
}
# Get/set context parameters, layering change tracking, workflow_id
# protection and UTF-8 decoding on top of Workflow::Context::param().
sub param {
    my $self = shift;
    my @arg = @_;
    my $name = shift @arg;
    # Hash argument: set each key/value pair individually (recursing so the
    # tracking/decoding below applies), then return the full parameter set.
    if ( ref $name eq 'HASH' ) {
        ##! 16: 'Mark updated values from hash: ' . join (",", keys %{$name})
        ##! 64: 'value hash ' . Dumper $name
        map {
            $self->param( $_ => $name->{$_} );
        } keys %{$name};
        return $self->SUPER::param();
    } elsif (defined $arg[0]) {
        # Scalar setter: workflow_id is read-only once it has been set.
        if ($name eq 'workflow_id' && $self->{PARAMS}{'workflow_id'}) {
            OpenXPKI::Exception->throw( message => "You are not allowed to set workflow_id in context" );
        }
        $self->{_updated}->{$name} = 1;
        ##! 16: 'Mark updated value from scalar: ' . $name
        my $value = $arg[0];
        ##! 64: 'value is ' . Dumper $value
        # scalar items are not set with the correct utf8 encoding so we fix them here
        # non scalars are magically fixed by the JSON encoding later
        # do not run utf8 encoding on binary data (the \x00 test is a
        # heuristic for binary content); skip it during init() as well
        if ( !ref $value && !$self->{_init} && $value !~ m{\x00}xms ) {
            eval {
                $value = Encode::decode("UTF-8", $value, Encode::LEAVE_SRC | Encode::FB_CROAK);
            };
            # FB_CROAK makes a failed decode die; we log and keep the
            # original (LEAVE_SRC leaves $value untouched on failure).
            if ($EVAL_ERROR) {
                ##! 64: 'Decode error on ' . $value
                CTX('log')->workflow()->debug("Unable to decode value for $name");
            }
        }
        return $self->SUPER::param( $name => $value );
    } elsif ( exists $arg[0] ) {
        # Explicit undef: param(key => undef).  Mark as updated and forward
        # in hash form, which the superclass accepts for undef values.
        ##! 16: 'Mark updated value (undef) from scalar: ' . $name
        $self->{_updated}->{$name} = 1;
        # the superclass does not handle key => undef but does handle
        # { key => undef } so we translate this here to have the short
        # syntax available in our application
        return $self->SUPER::param({ $name => undef });
    } else {
        # Pure getter (or call without any arguments): note @_ still holds
        # $name here, since only @arg was shifted above.
        ##! 16: 'Call without value'
        return $self->SUPER::param( @_ );
    }
}
1;
__END__ | openxpki/openxpki | core/server/OpenXPKI/Workflow/Context.pm | Perl | apache-2.0 | 2,490 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V8::Services::AdService::MutateAdResult;
use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);
use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;
# Construct a MutateAdResult from a hash ref of arguments.  Only the fields
# actually supplied by the caller survive, keeping the JSON payload concise.
sub new {
  my ( $class, $args ) = @_;

  my $self = {};
  $self->{ad}           = $args->{ad};
  $self->{resourceName} = $args->{resourceName};

  # Drop any keys whose values were never assigned by the caller.
  remove_unassigned_fields( $self, $args );

  return bless $self, $class;
}
1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V8/Services/AdService/MutateAdResult.pm | Perl | apache-2.0 | 1,077 |
% CVS: $Id: bryant.pl,v 1.3 1998/10/19 06:35:06 pets Exp $
% File : bryant
% Authors: Peter Schachte
% Purpose: Manipulate ordered binary decision diagrams
%
% Abstract
%
% This code manipulates ordered binary decision graphs using variations on
% Bryant's algorithms. I've concentrated on being efficient here, at the
% price of using some non-logical Prolog features. In particular, I have
% several var/1 tests. These are necessary to avoid much structure copying.
/*************************************************************************
Introduction
This code manipulates ordered binary decision graphs, also known as Bryant
graphs, using variations on Bryant's algorithms. We represent nodes in a
Bryant graph as one of:
++itemize
'true'
'false'
ite(*Nodenum*, *Variable*, *TrueChild*, *FalseChild*)
--itemize
where
++description
*Nodenum* is an integer uniquely identifying this node
*Variable* is the number of the variable this node decides upon
*TrueChild* is the bryant graph to use if Variable is true
*FalseChild* is the bryant graph to use if Variable is false
--description
Note that 'ite' means "if-then-else," which is what the nodes of a decision
tree mean.
In this code, I've folded the reduction of the Bryant graph into the code for
applying an operation on two graphs. I've also folded variable restriction
(i.e., "existentially quantifying away" variables) into the code for
conjoining two graphs. This avoids building a lot of graph that we're just
going to throw away anyway.
All the code in this files works on reduced ordered binary decision graphs.
If the inputs are not reduced, the outputs may not be.
*************************************************************************/
/*************************************************************************
Disjunction
This code computes the reduced disjunction of two Bryant graphs in a single
step. The reduction of the graph is folded into the construction of the
graph, which is implemented by compose_node/7.
*************************************************************************/
% goal
% to make this useful as a benchmark, we add an initial goal that calls much
% of the code in this file. This code does the work of one of the steps in
% analyzing append/3.
% NOTE(review): ground/1 is assumed to be provided by the benchmark harness
% and to instantiate I1..I9 to valid arguments -- TODO confirm.
goal :-
	ground([I1,I2,I3,I4,I5,I6,I7,I8,I9]),
	iff_conj(I1, I2, G1),
	iff_conj(I3, I4, G2),
	bryant_and(G1, G2, I5, G3),
	iff_conj(I6, I7, G4),
	bryant_and(I8, G4, G5),
	bryant_and(G3, G5, I9, G6),
	bryant_or(G5, G6, G),
	write(G).
% Concrete variant of the benchmark goal, kept for reference (disabled).
/*RB
goal :-
	iff_conj(1, [6,5], G1),
	iff_conj(3, [6,4], G2),
	bryant_and(G1, G2, 5, G3),
	iff_conj(2, [3], G4),
	bryant_and(ite(1,1,true,false), G4, G5),
	bryant_and(G3, G5, 3, G6),
	bryant_or(G5, G6, G),
	write(G).
*/
% bryant_or(+Graph1, +Graph2, -Graph)
% bryant_or(+Graph1, +Graph2, -Graph, +Id0, -Id, +-Generated, +-Done)
% Graph is the disjunction of Graph1 and Graph2. Id0 is the first node
% number to use, and Id is the lowest node number not used in Graph.
% Generated stores the nodes we've generated so far in constructing Graph,
% indexed by the node numbers of their two children. This lets us avoid
% generating the same node twice. Done stores the pairs of nodes of Graph1
% and Graph2 we've already disjoined, and the disjunction we've computed for
% them. This lets us avoid redoing work. Both Generated and Done can be
% viewed as cheap memoization tables. Both are hash tables represented as
% terms of arity 127, with each argument of the term a list of entries that
% hash to the same hash key. In the case of Generated, the elements of each
% hash bucket are simply nodes; for Done, the elements are result(N1, N2, N)
% terms, where the result of disjoining node N1 with node N2 was node N.
% Both Generated and Done are incomplete structures, ie, the tails of the
% lists representing hash buckets are unbound. This isn't very clean, but is
% much more efficient, in the absence of copy avoidance, than copying large
% terms on each update.
bryant_or(G1, G2, G) :-
	functor(Generated, generated, 127),	% fresh node table
	functor(Done, done, 127),		% fresh memo table
	bryant_or(G1, G2, G, 2, _, Generated, Done).
% Terminal cases: 'true' absorbs a disjunction; 'false' is its identity.
bryant_or(true, _, G, Id, Id, _, _) :-
	!,
	G = true.
bryant_or(false, G2, G, Id0, Id, Generated, _) :-
	!,
	copy_graph(G2, G, Id0, Id, Generated).
bryant_or(G1, G2, G, Id0, Id, Generated, Done) :-
	bryant_or_1(G2, G1, G, Id0, Id, Generated, Done).
% bryant_or_1(+Graph2, +Graph1, -Graph, +Id0, -Id, +-Generated, +-Done)
% Same as bryant_or/7, except that first two args are swapped so
% first-argument indexing also applies to Graph2.
bryant_or_1(true, _, G, Id, Id, _, _) :-
	!,
	G = true.
bryant_or_1(false, G1, G, Id0, Id, Generated, _) :-
	!,
	copy_graph(G1, G, Id0, Id, Generated).
bryant_or_1(G2, G1, G, Id0, Id, Generated, Done) :-
	G1 = ite(N1,_,_,_),
	G2 = ite(N2,_,_,_),
	hash_key(N1, N2, Key),
	arg(Key, Done, DoneBucket),
	% member/2 on the open-ended bucket either finds an existing entry or
	% appends a fresh one with unbound G0 (lookup-or-insert in one call).
	member(result(N1,N2,G0), DoneBucket),	% lookup or add node
	!,					% consider no alternative nodes
	(   nonvar(G0) ->			% memoization hit: all done
	    G = G0,
	    Id = Id0
	;   G = G0,
	    or_nodes(G1, G2, G, Id0, Id, Generated, Done)
						% memo miss: do the work
	).
% or_nodes(+G1, +G2, -G, +Id0, -Id, +-Generated, +-Done)
% Arguments are as for bryant_or/7. We actually disjoin two nodes. If the
% two nodes represent the same variable, we recursively disjoin their
% respective 'then' and 'else' nodes and construct a node from them.
% Otherwise we recursively disjoin the 'then' and 'else' nodes of the node
% with the earlier variable with the other node, and construct a node from
% them. This is Bryant's algorithm.
or_nodes(G1, G2, G, Id0, Id, Generated, Done) :-
	G1 = ite(_,V1,T1,F1),
	G2 = ite(_,V2,T2,F2),
	(   V1 < V2 ->
	    V = V1,
	    bryant_or(T1, G2, Gt, Id0, Id1, Generated, Done),
	    bryant_or(F1, G2, Gf, Id1, Id2, Generated, Done)
	;   V1 > V2 ->
	    V = V2,
	    bryant_or(G1, T2, Gt, Id0, Id1, Generated, Done),
	    bryant_or(G1, F2, Gf, Id1, Id2, Generated, Done)
	;   % else V1 =:= V2
	    V = V1,
	    bryant_or(T1, T2, Gt, Id0, Id1, Generated, Done),
	    bryant_or(F1, F2, Gf, Id1, Id2, Generated, Done)
	),
	compose_node(V, Gt, Gf, G, Id2, Id, Generated).
/*************************************************************************
Conjunction
This code computes the reduced conjunction of two Bryant graphs, possibly with
some variables restricted away, in a single step.
*************************************************************************/
% bryant_and(+Graph1, +Graph2, -Graph)
% bryant_and(+Graph1, +Graph2, +Restriction, -Graph)
% bryant_and(+Graph1, +Graph2, +Mapping, +Restriction, -Graph)
% bryant_and(+Graph1, +Graph2, +Mapping, +Restriction, -Graph, +Id0, -Id,
%	+-Generated, +-Done)
% Graph is the conjunction of Graph1 and Graph2. Restriction is the highest
% variable number to include in the resulting graph; higher variables are
% extentially quantified away. This is done by replacing any subgraph that
% is not false with true. Mapping is a term whose arguments specify the
% "real" node numbers to use for nodes in Graph1. Where Graph1 has a node
% with variable number N, instead consider the variable to be the Nth
% argument of Mapping. The arguments of Mapping must appear in strictly
% increasing order. Mapping may also be the atom 'identity', signifying that
% no argument mapping is to be performed. Other arguments are as for
% bryant_or/[3,7].
% (999999 below acts as "no restriction": no variable number exceeds it.)
bryant_and(G1, G2, G) :-
	functor(Generated, generated, 127),
	functor(Done, done, 127),
	bryant_and(G1, G2, identity, 999999, G, 2, _, Generated, Done).
bryant_and(G1, G2, Restriction, G) :-
	functor(Generated, generated, 127),
	functor(Done, done, 127),
	bryant_and(G1, G2, identity, Restriction, G, 2, _, Generated, Done).
bryant_and(G1, G2, Mapping, Restriction, G) :-
	functor(Generated, generated, 127),
	functor(Done, done, 127),
	bryant_and(G1, G2, Mapping, Restriction, G, 2, _, Generated, Done).
% Terminal cases: 'true' is the identity of conjunction (but the surviving
% operand must still be restricted); 'false' absorbs it.
bryant_and(true, G2, _, Restriction, G, Id0, Id, Generated, _) :-
	!,
	restrict_graph(G2, identity, Restriction, G, Id0, Id, Generated).
bryant_and(false, _, _, _, G, Id, Id, _, _) :-
	!,
	G = false.
bryant_and(G1, G2, Mapping, Restriction, G, Id0, Id, Generated, Done) :-
	bryant_and_1(G2, G1, Mapping, Restriction, G, Id0, Id, Generated, Done).
% bryant_and_1(+Graph2, +Graph1, +Mapping, +Restriction, -Graph, +Id0, -Id,
%	+-Generated, +-Done)
% Same as bryant_and/9, except that first two args are swapped for indexing.
bryant_and_1(true, G1, Mapping, Restriction, G, Id0, Id, Generated, _) :-
	!,
	restrict_graph(G1, Mapping, Restriction, G, Id0, Id, Generated).
bryant_and_1(false, _, _, _, G, Id, Id, _, _) :-
	!,
	G = false.
bryant_and_1(G2, G1, Mapping, Restriction, G, Id0, Id, Generated, Done) :-
	G1 = ite(N1,_,_,_),
	G2 = ite(N2,_,_,_),
	hash_key(N1, N2, Key),
	arg(Key, Done, DoneBucket),
	member(result(N1,N2,G0), DoneBucket),	% lookup or add node
	!,					% consider no alternative nodes
	(   nonvar(G0) ->			% memoization hit: we've
						% already anded these nodes:
						% just reuse previous result.
	    G = G0,
	    Id = Id0
	;   G = G0,
	    and_nodes(G1, G2, Mapping, Restriction, G, Id0, Id, Generated, Done)
						% memo miss: do the work
	).
% and_nodes(+G1, +G2, +Mapping, +Restriction, -G, +Id0, -Id, +-Generated,
%	+-Done)
% Arguments are as for bryant_and/9. We actually conjoin two nodes. If the
% two nodes represent the same variable, we recursively conjoin their
% respective 'then' and 'else' nodes and construct a node from them.
% Otherwise we recursively conjoin the 'then' and 'else' nodes of the node
% with the earlier variable with the other node, and construct a node from
% them. This is Bryant's algorithm.
and_nodes(G1, G2, Mapping, Restriction, G, Id0, Id, Gen, Done) :-
	G1 = ite(_,V1a,T1,F1),
	G2 = ite(_,V2,T2,F2),
	(   Mapping == identity ->		% translate G1's variable
	    V1 = V1a				% number, if requested
	;   arg(V1a, Mapping, V1)
	),
	(   V1 > Restriction, V2 > Restriction ->
	    % everything below this point would be restricted away, so only
	    % satisfiability matters: compute true/false directly and
	    Id = Id0,
	    restricted_and(G1, G2, Mapping, G, Done)
						% don't build any nodes
	;   (   V1 < V2 ->
		V = V1,
		bryant_and(T1, G2, Mapping, Restriction, Gt, Id0, Id1,
			Gen, Done),
		bryant_and(F1, G2, Mapping, Restriction, Gf, Id1, Id2,
			Gen, Done)
	    ;   V1 > V2 ->
		V = V2,
		bryant_and(G1, T2, Mapping, Restriction, Gt, Id0, Id1,
			Gen, Done),
		bryant_and(G1, F2, Mapping, Restriction, Gf, Id1, Id2,
			Gen, Done)
	    ;   % else V1 =:= V2
		V = V1,
		bryant_and(T1, T2, Mapping, Restriction, Gt, Id0, Id1, Gen,
			Done),
		bryant_and(F1, F2, Mapping, Restriction, Gf, Id1, Id2, Gen,
			Done)
	    ),
	    compose_node(V, Gt, Gf, G, Id2, Id, Gen)
	).
% restricted_and(+G1, +G2, +Mapping, -G, +-Done)
% restricted_and_1(+G2, +G1, +Mapping, -G, +-Done)
% G is true if G1 & G2 is satisifiable, else false. There are two predicates
% in order to get indexing on both G1 and G2. Done and Mapping are as above.
restricted_and(true, G2, _, G, _) :-
	!,
	(   G2 == false ->
	    G = false
	;   G = true
	).
restricted_and(false, _, _, G, _) :-
	!,
	G = false.
restricted_and(G1, G2, Mapping, G, Done) :-
	restricted_and_1(G2, G1, Mapping, G, Done).
restricted_and_1(true, _, _, G, _) :-
	!,
	G = true.
restricted_and_1(false, _, _, G, _) :-
	!,
	G = false.
restricted_and_1(G2, G1, Mapping, G, Done) :-
	G1 = ite(N1,_,_,_),
	G2 = ite(N2,_,_,_),
	hash_key(N1, N2, Key),
	arg(Key, Done, DoneBucket),
	member(result(N1,N2,G0), DoneBucket),	% lookup or add node
	!,					% consider no alternative nodes
	(   nonvar(G0) ->			% memoization hit: we've
	    G = G0				% already anded these nodes:
						% just reuse previous result.
	;   G = G0,
	    restricted_and_nodes(G1, G2, Mapping, G, Done)
						% memo miss: do the work
	).
% restricted_and_nodes(+G1, +G2, +Mapping, -G, +-Done)
% Just like and_nodes/8, except that G is always either 'true' or 'false'.
% Therefore, no *Id* argument pair is needed, nor the *Generated* table.
% Also, since we know we're restricting away all arguments in G1 and G2, we
% don't need the *Restriction* limit variable.
restricted_and_nodes(G1, G2, Mapping, G, Done) :-
	G1 = ite(_,V1a,T1,F1),
	G2 = ite(_,V2,T2,F2),
	(   Mapping == identity ->
	    V1 = V1a
	;   arg(V1a, Mapping, V1)
	),
	% NOTE(review): V is bound in each branch but never used afterwards;
	% apparently kept only for symmetry with and_nodes/9.
	(   V1 < V2 ->
	    V = V1,
	    restricted_and_pair(T1, G2, F1, G2, Mapping, G, Done)
	;   V1 > V2 ->
	    V = V2,
	    restricted_and_pair(G1, T2, G1, F2, Mapping, G, Done)
	;   % else V1 =:= V2
	    V = V1,
	    restricted_and_pair(T1, T2, F1, F2, Mapping, G, Done)
	).
% restricted_and_pair(+G1a, +G2a, +G1b, +G2b, +Mapping, -G, +-Done)
% G is true iff there exists some truth value assignment for the variables in
% G1a, G2a, G1b, and G2b such that (G1a & G2a) or (G1b & G2b) is true. Other
% args are as above.
restricted_and_pair(G1a, G2a, G1b, G2b, Mapping, G, Done) :-
	restricted_and(G1a, G2a, Mapping, G0, Done),
	(   G0 == true ->
	    G = true				% don't need to evaluate G1b &
						% G2b
	;   restricted_and(G1b, G2b, Mapping, G, Done)
	).
/*************************************************************************
Graph Building
This code is responsible for building reduced Bryant graphs.
*************************************************************************/
% compose_node(+V, +Gt, +Gf, -G, +Id0, -Id, +-Generated)
% G is the node with variable V, then node Gt, and else node Gf. Id0 is the
% first node number to use in G, and Id is the next node number after
% constructing node G. If Gt and Gf are the same, then we just return it as
% the node, since the value of V at this node is immaterial. Generated is
% our memoization table of nodes already generated.
% (These two rules are what keep the output graphs reduced.)
compose_node(V, Gt, Gf, G, Id0, Id, Generated) :-
	(   same_node(Gt, Gf) ->		% graph doesn't depend on
						% value of V: elide node
	    G = Gt,
	    Id = Id0
	;   new_node(V, Gt, Gf, G, Id0, Id, Generated)
	).
% new_node(+V, +Gt, +Gf, -G, +Id0, -Id, +-Generated)
% Arguments are the same as for compose_node/7. Here we check our
% memoization table Generated to see if we've already generated a node for
% variable V with Gt and Gf as its then and else subnodes. If so, then we
% just return that node, else we must make a new one.
new_node(V, Gt, Gf, G, Id0, Id, Generated) :-
	node_id(Gt, Tid),
	node_id(Gf, Fid),
	hash_node(V, Tid, Fid, Hash),
	arg(Hash, Generated, Bucket),
	member(G, Bucket),
	G = ite(N,V,Gt,Gf),			% after member call to avoid
						% creating term unless needed
	!,					% consider no alternative nodes
	(   nonvar(N) ->
	    Id = Id0				% reuse existing node
	;   N = Id0,				% fresh node: assign next id
	    Id is Id0+1
	).
% copy_graph(+G0, -G, +Id0, -Id, +-Generated)
% G is a copy of graph G0, with nodes numbered from Id0 through Id - 1,
% reusing nodes memoized in Generated, and memoizing all new nodes.
copy_graph(true, true, Id, Id, _).
copy_graph(false, false, Id, Id, _).
copy_graph(ite(_,V,T,F), G, Id0, Id, Generated) :-
	copy_graph(T, Gt, Id0, Id1, Generated),
	copy_graph(F, Gf, Id1, Id2, Generated),
	% since input graph is minimized, T and F can't be the same, so we
	% call new_node/7 instead of compose_node/7.
	new_node(V, Gt, Gf, G, Id2, Id, Generated).
% restrict_graph(+G0, +Restriction, -G)
% restrict_graph(+G0, +Mapping, +Restriction, -G)
% restrict_graph(+G0, +Mapping, +Restriction, -G, +Id0, -Id, +-Generated)
% G is a copy of graph G0 restricted to variables =< Restriction, with nodes
% numbered from Id0 through Id - 1, and reusing nodes memoized in Generated.
% Mapping is as above.
restrict_graph(G0, Restriction, G) :-
	functor(Generated, generated, 127),
	restrict_graph(G0, identity, Restriction, G, 2, _, Generated).
restrict_graph(G0, Mapping, Restriction, G) :-
	functor(Generated, generated, 127),
	restrict_graph(G0, Mapping, Restriction, G, 2, _, Generated).
restrict_graph(true, _, _, true, Id, Id, _).
restrict_graph(false, _, _, false, Id, Id, _).
restrict_graph(ite(_,V0,T,F), Mapping, Restriction, G, Id0, Id, Generated) :-
	(   Mapping == identity ->
	    V = V0
	;   arg(V0, Mapping, V)
	),
	(   V > Restriction ->
	    % input graph is reduced, so if it's not false, it must have
	    % at least one true leaf.
	    G = true,
	    Id = Id0
	;   restrict_graph(T, Mapping, Restriction, Gt, Id0, Id1, Generated),
	    restrict_graph(F, Mapping, Restriction, Gf, Id1, Id2, Generated),
	    compose_node(V, Gt, Gf, G, Id2, Id, Generated)
	).
% iff_conj(+V1, +Vs, -G)
% iff_conj(+V1, +Vs, -G, +Id)
% G is the reduced ordered binary decision graph representing V1 if and only
% if the conjunction of the variables on the list Vs. V1 must not be on the
% list Vs. Id is the lowest node number used in G. This code is very
% specialized, and is optimized to generate this type of graph which we will
% need often.
iff_conj(V1, Vs, G) :-
	iff_conj(V1, Vs, G, 2).
iff_conj(V1, Vs, G, Id0) :-
	sort(Vs, Sorted),			% this removes duplicates
	iff_conj_1(Sorted, V1, G, Id0, []).
% iff_conj_1(+Vs, +V1, -G, +Id0, +Node)
% Like iff_conj/4, but Vs is known to be sorted. Node is either [] or a Node
% is a node whose meaning is ~V1 (i.e., ite(_,V1,false,true)). This is the
% only node which can be shared; by explicitly handling this, we avoid a lot
% of the overhead of the usual new_node/7 code.
iff_conj_1([], V1, ite(Id,V1,true,false), Id, _).
iff_conj_1([V|Vs], V1, G, Id0, Node) :-
	(   V < V1 ->
	    (   Node == [] ->			% create the shared ~V1 node
		Node1 = ite(Id0,V1,false,true),	% on first use
		Id1 is Id0 + 1
	    ;   Node1 = Node,
		Id1 = Id0
	    ),
	    G = ite(Id1,V,G1,Node1),
	    Id2 is Id1 + 1,
	    iff_conj_1(Vs, V1, G1, Id2, Node1)
	;   G = ite(Id0,V1,Trues,Falses),	% V > V1
	    Id1 is Id0 + 1,
	    build_and_chain([V|Vs], true, false, Id1, Id2, Trues),
	    build_and_chain([V|Vs], false, true, Id2, _, Falses)
	).
% build_and_chain(+Vs, +Pos, +Neg, +Id0, -Id, -Conj)
% Conj is a reduced bryant graph representing the conjunction, or the
% negation of the conjunction, of the variables on Vs. Pos and Neg determine
% whether it is the conjunction or its negation: Pos is the boolean value for
% when the conjunction is true, and Neg is the value when it is false. They
% each must be 'true' or 'false', and must not be the same. Id0 is the
% lowest node number used in G, and Id is 1 + the greatest.
build_and_chain([], Pos, _, Id, Id, Pos).
build_and_chain([V|Vs], Pos, Neg, Id0, Id, ite(Id0,V,Conj,Neg)) :-
	Id1 is Id0 + 1,
	build_and_chain(Vs, Pos, Neg, Id1, Id, Conj).
/*************************************************************************
Comparing Graphs
*************************************************************************/
% identical_graphs(+G1, +G2)
% identical_graphs(+G1, +G2, +-Compared)
% Graphs G1 and G2 represent the same boolean function. Compared is a
% memoization table of nodes we've already compared and know to be identical.
% Since the input graphs are known to be acyclical, we don't need to worry
% about loops.
identical_graphs(G1, G2) :-
	functor(Compared, compared, 127),
	identical_graphs(G1, G2, Compared).
identical_graphs(true, true, _).
identical_graphs(false, false, _).
identical_graphs(ite(N1,V,T1,F1), ite(N2,V,T2,F2), Compared) :-
	hash_key(N1, N2, Hash),
	arg(Hash, Compared, Bucket),
	member(N1=X, Bucket),			% delay binding X to see if
	!,					% N1=N2 was already in bucket
	(   nonvar(X) ->			% it was:
	    X=N2				% X must be N2
	;   X=N2,				% it wasn't: add it, and
	    identical_graphs(T1, T2, Compared),	% make sure T1=T2 and
	    identical_graphs(F1, F2, Compared)	% F1=F2.
	).
% same_node(+N1, +N2)
% Nodes N1 and N2 are the same node. This only works if they're nodes of the
% same graph, and assumes that identical graphs have the same root node ids.
% This, of course, is the invariant we are working to enforce.
same_node(true, true).
same_node(false, false).
same_node(ite(N,_,_,_), ite(N,_,_,_)).
/*************************************************************************
Graph Handling Primitives
*************************************************************************/
% node_id(+Node, -Id)
% Id is the integer id of Node. true and false are automatically numbered 1
% and 0, respectively.  (This is why graph construction starts ids at 2.)
node_id(true, 1).
node_id(false, 0).
node_id(ite(Id,_,_,_), Id).
% hash_key(+N1, +N2, -Hash)
% Hash is a numeric hash key constructed from node ids N1 and N2.
% The result is always in 1..127, matching the arity of the table terms.
hash_key(N1, N2, Hash) :-
	Hash is 1+(((N1+17)*(N2+19)) mod 127).
% hash_node(+V, +N1, +N2, -Hash)
% Hash is a numeric hash key constructed from variable number V and node ids
% N1 and N2.  Also always in 1..127.
hash_node(V, N1, N2, Hash) :-
	Hash is 1+(((V+7)*(N1+13)*(N2+17)) mod 127).
% member(+X, +-Y)
% X is a member of list Y.  Used throughout on partial (open-tailed) hash
% buckets, where a failed search extends the list -- i.e. lookup-or-insert.
member(X, [X|_]).
member(X, [_|Y]) :-
	member(X, Y).
/*************************************************************************
Test Cases
The test case from Bryant's paper:
++verbatim
?- bryant_or(ite(2,0,ite(3,2,false,true),true),
ite(2,1,ite(3,2,true,false),false), G).
G = ite(4,0,ite(3,1,true,ite(2,2,false,true)),true)
--verbatim
Other tests:
(X & (Y<->Z)) | ((X&Y) <-> Z) ==> ((X&Y) <-> Z)
++verbatim
?- NOTZ = ite(5,2,false,true),
bryant_or(ite(2,0,ite(3,1,ite(4,2,true,false),ite(5,2,false,true)),false),
ite(2,0,ite(3,1,ite(4,2,true,false),NOTZ),NOTZ),
G).
NOTZ = ite(5,2,false,true),
G = ite(5,0,ite(4,1,ite(2,2,true,false),ite(3,2,false,true)),ite(3,2,false,true))
--verbatim
(X<->~Y) & Z ==>
++verbatim
?- bryant_and(ite(2,0,ite(3,1,false,true),ite(4,1,true,false)),
ite(2,2,true,false), G).
G = ite(5,0,ite(3,1,false,ite(2,2,true,false)),ite(4,1,ite(2,2,true,false),false))
--verbatim
Big test:
++verbatim
?- iff_conj(0,[3,4],G1), iff_conj(2,[3,5],G2),
bryant_and(G1,G2,G3),
G4 = ite(6,1,ite(3,4,ite(2,5,true,false),false),ite(5,4,ite(4,5,false,true),false)),
bryant_and(G3,G4,2,G).
--verbatim
*************************************************************************/
| pschachte/groundness | benchmarks/bryant.pl | Perl | apache-2.0 | 21,927 |
%% :- module(modules, [], [assertions]).
:- use_package([assertions]).
:- comment(title, "The module system").
:- comment(author,"Daniel Cabeza").
:- comment(usage, "Modules are an intrinsic feature of CIAO, so nothing
special has to be done to use them.").
:- comment(module, "Modularity is a basic notion in a modern computer
language. Modules allow dividing programs in several parts, which
have its own independent name spaces. The module system in CIAO, as
in many other Prolog implementations, is procedure based. This means
that predicate names are local to a module, but functor/atom names in
data are shared.
The predicates visible in a module are the predicates defined in that
module, plus the predicates imported from other modules. Only
predicates exported by a module can be imported from other modules.
The default module of a given predicate name is the local one if the
predicate is defined locally, else the first module from which the
predicate is imported. To refer to a predicate from a module which
is not the default for that predicate the name has to be module
@cindex{module qualification}qualified. A module qualified predicate
name has the form @var{Module}:@var{Predicate} as in the call
@tt{debugger:debug_module(M)}. Note that this does not allow having
access to predicates not imported, nor defining clauses of other
modules.
When a module exports a predicate which is not defined in the module,
but imported from other module, a @index{bridge predicate} is
automatically added which connects the exported predicate and the
imported predicate. Note that this makes that the exported predicate
does not inherit the dynamic properties of the imported predicate.
All predicates defined in files with no module declaration belong to
a special module called @cindex{user module} @tt{user}, and all are
implicitly exported. This allows dividing programs in several files
without being aware of the module system at all. Note that this
feature is only supported for compatibility reasons, being its use
discouraged. Many attractive compilation features of CIAO cannot be
performed in @tt{user} modules.
The case of multifile predicates (defined with the declaration
@decl{multifile/1}) is also special. Multifile predicates can be
defined by clauses in several modules, and all modules which define a
predicate as multifile can use that predicate. The name space of
multifile predicates is independent, as if they belonged to special
module @tt{multifile}.
Every @tt{user} or module file imports implicitly a number of modules
called @concept{builtin modules}. They are imported after all other
importations of the module, allowing thus redefining any of their
predicates (with the exception of @pred{true/0}) by defining local
versions or importing them from other modules. Importing explicitly
from a builtin module, however, disables the implicit importation of
the rest (this feature is used by package @lib{library(pure)} to
define pure prolog code).").
:- true decl module(Name, Exports, Packages)
: modulename * list(predname) * list(sourcename)
# "Declares a module of name @var{Name} which exports the
predicates in @var{Exports}, and uses the packages in
@var{Packages}. @var{Name} must match with the name of the
file where the module resides, without extension. For each
source in @var{Packages}, a @concept{package file} is
included, as if by an @decl{include/1} declaration. If the
source is specified with a @concept{path alias}, this is
the file included, if it is an atom, the library paths are
searched. Package files provide functionalities by
declaring imports from other modules, defining operators, new
declarations, translations of code, etc.
This directive must appear the first in the file.
Also, if the compiler finds an unknown declaration as the
first term in a file, the name of the declaration is regarded
as a package library to be included, and the arguments of the
declaration (if present) are interpreted like the arguments of
@decl{module/3}.".
:- true decl module(Name, Exports) : modulename * list(predname)
# "Same as directive @decl{module/3}, with an implicit package
@tt{iso}, which enables to include @concept{ISO-Prolog}
compatible code (compatibility not 100\% yet).".
:- true decl export(Exports) : list(predname)
# "Adds @var{Exports} to the set of exported predicates.".
:- true decl use_module(Module, Imports) : sourcename * list(predname)
# "Specifies that this code imports from the module defined in
@var{Module} the predicates in @var{Imports}. The imported
predicates must be exported by the other module.".
:- true decl use_module(Module) : sourcename
# "Specifies that this code imports from the module defined in
@var{Module} all the predicates exported by it. The previous
version with the explicit import list is preferred to this as
it minimizes the chances to have to recompile this code if the
other module changes.".
:- true decl import(Module, Imports) : modulename * list(predname)
# "Declares that this code imports from the module with name
@var{Module} the predicates in @var{Imports}.
@bf{Important note:} this declaration is intended to be used
when the current module or the imported module is going to
be dynamically loaded, and so the compiler does not include
the code of the imported module in the current executable
(if only because the compiler cannot know the location of
the module file at the time of compilation). For the same
reason the predicates imported are not checked to be
exported by @var{Module}. Its use in other cases is
strongly discouraged.".
:- true decl meta_predicate(MetaSpecs) : sequence(metaspec)
# "Specifies that the predicates in @var{MetaSpecs} have
arguments which represent predicates and thus have to be
module expanded. The directive is not mandatory in programs
which do not use modules. This directive is defined as a
prefix operator.".
:- comment(doinclude, modulename/1).
% Type definition used by the decl assertions above: a module name is any atom.
:- prop modulename(M) # "@var{M} is a module name (an atom).".
modulename(M) :- atm(M).
:- comment(doinclude, metaspec/1). %%%%%% add pred(N) %%%%%%
:- comment(metaspec/1, "A meta-predicate specification for a predicate
is the functor of that predicate applied to atoms which
represent the kind of module expansion that should be done with
the arguments. Possible contents are represented as:
@begin{itemize}
@item @tt{goal} This argument will be a term denoting a goal
(either a simple or complex one) which will be called. For
compatibility reasons it can be named as @tt{:} as well.
@item @tt{clause} This argument will be a term denoting a clause.
@item @tt{fact} This argument should be instantiated to a term
denoting a fact (head-only clause).
@item @tt{spec} This argument should be instantiated to a predicate
name, as Functor/Arity.
@item @tt{pred(@em{N})} This argument should be instantiated to
a predicate construct to be called by means of a
@tt{call/@em{N}} predicate call (see @pred{call/2}). Thus, it
should be an atom equal to the name of a predicate of arity
@em{N}, or a structure with functor the name of a predicate of
arity @em{M} (greater than @em{N}) and with @em{M}-@em{N}
arguments.
@item @tt{?,+,-,_} This other values denote that this argument is not
module expanded.
@end{itemize}").
% Type definition for meta_predicate/1 arguments: any structure is accepted
% here; the admissible argument markers are described in the comment above.
:- prop metaspec(M) # "@var{M} is a meta-predicate specification.".
metaspec(M) :- struct(M).
| leuschel/ecce | www/CiaoDE/ciao/lib/assertions/test/modules_user.pl | Perl | apache-2.0 | 8,134 |
package API::DeliveryService;
#
# Copyright 2015 Comcast Cable Communications Management, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
#
# JvD Note: you always want to put Utils as the first use. Sh*t don't work if it's after the Mojo lines.
use UI::Utils;
use Mojo::Base 'Mojolicious::Controller';
use Data::Dumper;
use POSIX qw(strftime);
use Utils::Helper::Datasource;
use Time::HiRes qw(gettimeofday tv_interval);
use Math::Round qw(nearest);
my $valid_server_types = {
edge => "EDGE",
mid => "MID",
};
# this structure maps the above types to the allowed metrics below
my $valid_metric_types = {
origin_tps => "mid",
ooff => "mid",
};
# Entry point for delivery service listing/detail requests.
# With an 'id' param: respond 404 for an unknown delivery service, 403 unless
# the service is assigned to the current user or the user has the oper role,
# otherwise return the detail data. Without an id, return the listing.
sub delivery_services {
	my $self   = shift;
	my $ds_id  = $self->param('id');
	my $helper = new Utils::Helper( { mojo => $self } );
	if ( defined $ds_id ) {
		return $self->not_found() unless $helper->is_valid_delivery_service($ds_id);
		return ( $helper->is_delivery_service_assigned($ds_id) || is_oper($self) )
			? $self->get_data()
			: $self->forbidden();
	}
	return $self->get_data();
}
# Build the response body for delivery_services(): an array of hashes, one
# per visible delivery service. Users with the 'portal' role only see the
# delivery services explicitly assigned to them; all other users see every
# delivery service. With an 'id' param only that one service is returned,
# otherwise all services ordered by xml_id.
sub get_data {
	my $self = shift;
	my $id = $self->param('id');
	my @data;
	# Resolve the 'portal' role id and the current user's row so we can
	# decide below whether visibility must be restricted.
	my $portal_role = $self->db->resultset('Role')->search( { name => 'portal' } )->get_column('id')->single();
	my $tm_user = $self->db->resultset('TmUser')->search( { username => $self->current_user()->{username} } )->single();
	my $tm_user_id = $tm_user->id;
	my @ds_ids = ();
	if ( defined($tm_user_id) && $tm_user->role->id eq $portal_role ) {
		# Portal user: restrict to assigned delivery services only.
		@ds_ids = $self->db->resultset('DeliveryserviceTmuser')->search( { tm_user_id => $tm_user_id } )->get_column('deliveryservice')->all();
	}
	else {
		@ds_ids = $self->db->resultset('Deliveryservice')->search(undef)->get_column('id')->all();
	}
	# Lookup set of visible delivery service ids for the filter in the loop.
	my %ds_hash = map { $_ => 1 } @ds_ids;
	my $rs;
	if ( defined($id) ) {
		$rs = $self->db->resultset("Deliveryservice")->search( { id => $id }, { prefetch => ['deliveryservice_regexes'] } );
	}
	else {
		$rs = $self->db->resultset("Deliveryservice")->search( undef, { prefetch => ['deliveryservice_regexes'], order_by => 'xml_id' } );
	}
	while ( my $row = $rs->next ) {
		# Skip delivery services the user is not allowed to see.
		next if ( defined($tm_user_id) && !defined( $ds_hash{ $row->id } ) );
		# Collect the routing regexes attached to this delivery service.
		my $re_rs = $row->deliveryservice_regexes;
		my @matchlist = ();
		while ( my $re_row = $re_rs->next ) {
			push(
				@matchlist, {
					type => $re_row->regex->type->name,
					pattern => $re_row->regex->pattern,
					setNumber => $re_row->set_number,
				}
			);
		}
		push(
			@data, {
				"id" => $row->id,
				"xmlId" => $row->xml_id,
				"dscp" => $row->dscp,
				# Scalar refs (\...) are serialized as JSON booleans.
				"signed" => \$row->signed,
				"qstringIgnore" => $row->qstring_ignore,
				"geoLimit" => $row->geo_limit,
				"httpBypassFqdn" => $row->http_bypass_fqdn,
				"dnsBypassIp" => $row->dns_bypass_ip,
				"dnsBypassIp6" => $row->dns_bypass_ip6,
				"dnsBypassTtl" => $row->dns_bypass_ttl,
				"orgServerFqdn" => $row->org_server_fqdn,
				"ccrDnsTtl" => $row->ccr_dns_ttl,
				"type" => $row->type->name,
				"profileName" => $row->profile->name,
				"profileDescription" => $row->profile->description,
				"globalMaxMbps" => $row->global_max_mbps,
				"globalMaxTps" => $row->global_max_tps,
				# "headerRewrite" duplicates edge_header_rewrite -- presumably
				# kept for backward compatibility with older clients.
				"headerRewrite" => $row->edge_header_rewrite,
				"edgeHeaderRewrite" => $row->edge_header_rewrite,
				"midHeaderRewrite" => $row->mid_header_rewrite,
				"regexRemap" => $row->regex_remap,
				"longDesc" => $row->long_desc,
				"longDesc1" => $row->long_desc_1,
				"longDesc2" => $row->long_desc_2,
				"maxDnsAnswers" => $row->max_dns_answers,
				"infoUrl" => $row->info_url,
				"missLat" => $row->miss_lat,
				"missLong" => $row->miss_long,
				"checkPath" => $row->check_path,
				"matchList" => \@matchlist,
				"active" => \$row->active,
				"protocol" => $row->protocol,
				"ipv6RoutingEnabled" => \$row->ipv6_routing_enabled,
				"rangeRequestHandling" => $row->range_request_handling,
				"cacheurl" => $row->cacheurl,
			}
		);
	}
	return $self->success( \@data );
}
# Return summary statistics for one metric of a delivery service over a time
# window. Route param: id. Query params: metric, start, end, interval,
# window_start, window_end.
# Responses: { summary => ... } when stats exist, all-zero summary values
# when no data is available, 403 when the delivery service exists but is not
# assigned to the user, and an empty success body for an invalid id.
sub get_summary {
	my $self = shift;
	my $metric_type = $self->param("metric");
	my $start = $self->param("start");
	my $end = $self->param("end");
	my $interval = $self->param("interval");
	my $window_start = $self->param("window_start");
	my $window_end = $self->param("window_end");
	my $location = "all"; # NOTE: We can easily turn this into a param in the future if/when necessary
	my $id = $self->param('id');
	my $helper = new Utils::Helper( { mojo => $self } );
	if ( $helper->is_valid_delivery_service($id) ) {
		if ( $helper->is_delivery_service_assigned($id) ) {
			my $result = $self->db->resultset("Deliveryservice")->search( { id => $id } )->single();
			# Look up the CDN name via the delivery service's profile.
			# (The original -and list carried the 'parameter.name' condition
			# twice; the redundant copy/paste duplicate has been removed.)
			my $param =
				$self->db->resultset('ProfileParameter')
				->search( { -and => [ 'parameter.name' => 'CDN_name', 'me.profile' => $result->profile->id ] },
				{ prefetch => [ 'parameter', 'profile' ] } )->single();
			my $cdn_name = $param->parameter->value;
			# Stat key format: <cdn>:<ds>:<cachegroup>:<host>:<metric>
			my $match = $cdn_name . ":" . $result->xml_id . ":all:all:" . $metric_type;
			my $data = $self->get_stats( $match, $start, $end, $interval, $window_start, $window_end );
			if ( defined($data) && ref($data) eq "HASH" && exists( $data->{summary} ) ) {
				$self->success( { summary => $data->{summary} } );
			}
			else {
				# No data points in the window; respond with zeroed summary.
				$self->success( get_summary_zero_values() );
			}
		}
		else {
			$self->forbidden();
		}
	}
	else {
		$self->success( {} );
	}
}
# Build the all-zero summary structure returned when no statistics are
# available for the requested window.
# Returns: hashref with ninetyFifth, average, min, max and total set to 0.
sub get_summary_zero_values {
	my %summary = map { $_ => 0 } qw(ninetyFifth average min max total);
	return \%summary;
}
# Report routing statistics for one delivery service by delegating to
# get_routing_stats() with the CDN name and the service's routing regex
# patterns. Responds 403 when the service is not assigned to the user and
# 404 for an invalid id.
sub routing {
	my $self = shift;
	# get and pass { cdn_name => $foo } into get_routing_stats
	my $id = $self->param('id');
	my $helper = new Utils::Helper( { mojo => $self } );
	if ( $helper->is_valid_delivery_service($id) ) {
		if ( $helper->is_delivery_service_assigned($id) ) {
			my $result = $self->db->resultset("Deliveryservice")->search( { id => $self->param('id') } )->single();
			# Resolve the CDN name from the delivery service's profile.
			# NOTE(review): the 'parameter.name' condition appears twice in
			# this -and list; harmless (array form), but likely a copy/paste
			# slip -- confirm and deduplicate.
			my $param =
				$self->db->resultset('ProfileParameter')
				->search( { -and => [ 'parameter.name' => 'CDN_name', 'parameter.name' => 'CDN_name', 'me.profile' => $result->profile->id ] },
				{ prefetch => [ 'parameter', 'profile' ] } )->single();
			my $cdn_name = $param->parameter->value;
			my $stat_key = lc( $result->type->name ) . "Map"; # dnsMap/httpMap in /crs/stats
			# Collect all routing regex patterns attached to this service.
			my $re_rs = $result->deliveryservice_regexes;
			my @patterns;
			while ( my $re_row = $re_rs->next ) {
				push( @patterns, $re_row->regex->pattern );
			}
			$self->get_routing_stats( { stat_key => $stat_key, patterns => \@patterns, cdn_name => $cdn_name } );
		}
		else {
			$self->forbidden();
		}
	}
	else {
		$self->not_found();
	}
}
# Serve ETL metrics for a delivery service, restricted to the server types
# in %$valid_server_types (edge/mid) and the metrics known to get_config().
# Query params: metric, start, end (epoch secs), stats (stats only),
# data (data only), server_type. Responds 403 when the service is not
# assigned to the user, and zeroed values for any invalid combination.
sub metrics {
	my $self = shift;
	my $id = $self->param("id");
	my $metric = $self->param("metric");
	my $start = $self->param("start"); # start time in secs since 1970
	my $end = $self->param("end"); # end time in secs since 1970
	my $stats_only = $self->param("stats") || 0; # stats only
	my $data_only = $self->param("data") || 0; # data only
	my $type = $self->param("server_type"); # mid or edge
	my $config = $self->get_config($metric);
	my $helper = new Utils::Helper::Datasource( { mojo => $self } );
	if ( $valid_server_types->{$type} && defined($config) && $helper->is_valid_delivery_service($id) ) {
		if ( $helper->is_delivery_service_assigned($id) ) {
			# Strip fractional seconds from the epoch timestamps.
			$start =~ s/\.\d+$//g;
			$end =~ s/\.\d+$//g;
			# Collect the key/value pairs the datasource needs for this query.
			for my $kvp ( @{ $config->{get_kvp}->( $helper->get_delivery_service_name($id), $valid_server_types->{$type}, $start, $end ) } ) {
				$helper->kv( $kvp->{key}, $kvp->{value} );
			}
			return $self->build_etl_metrics_response( $helper, $config, $start, $end, $stats_only, $data_only );
		}
		else {
			$self->forbidden();
		}
	}
	else {
		$self->success( get_zero_values( $stats_only, $data_only ) );
	}
}
# Build the zeroed metrics response: a one-element arrayref whose hashref
# carries an all-zero "stats" section and an empty "data" list. With
# $stats_only true the "data" key is removed; otherwise, with $data_only
# true, the "stats" key is removed.
sub get_zero_values {
	my ( $stats_only, $data_only ) = @_;
	my %stats = map { $_ => 0 }
		( "95thPercentile", "98thPercentile", "5thPercentile", "mean", "count", "min", "max", "sum" );
	my $response = { stats => \%stats, data => [] };
	if ($stats_only) {
		delete $response->{data};
	}
	elsif ($data_only) {
		delete $response->{stats};
	}
	return [$response];
}
# Report cache capacity for one delivery service by delegating to
# get_cache_capacity() with the service's xml_id and CDN name.
# Responds 403 when the service is not assigned to the user and 404 for an
# invalid id.
sub capacity {
	my $self = shift;
	# get and pass { cdn_name => $foo } into get_cache_capacity
	my $id = $self->param('id');
	my $helper = new Utils::Helper( { mojo => $self } );
	if ( $helper->is_valid_delivery_service($id) ) {
		if ( $helper->is_delivery_service_assigned($id) ) {
			my $result = $self->db->resultset("Deliveryservice")->search( { id => $self->param('id') } )->single();
			# Resolve the CDN name from the delivery service's profile.
			# NOTE(review): duplicated 'parameter.name' condition, same
			# copy/paste pattern as in the sibling subs -- harmless.
			my $param =
				$self->db->resultset('ProfileParameter')
				->search( { -and => [ 'parameter.name' => 'CDN_name', 'parameter.name' => 'CDN_name', 'me.profile' => $result->profile->id ] },
				{ prefetch => [ 'parameter', 'profile' ] } )->single();
			my $cdn_name = $param->parameter->value;
			$self->get_cache_capacity( { delivery_service => $result->xml_id, cdn_name => $cdn_name } );
		}
		else {
			$self->forbidden();
		}
	}
	else {
		$self->not_found();
	}
}
# Report cache health for one delivery service by delegating to
# get_cache_health() with server_type "caches", the service's xml_id and its
# CDN name. Responds 403 when the service is not assigned to the user and
# 404 for an invalid id.
sub health {
	my $self = shift;
	my $id = $self->param('id');
	my $helper = new Utils::Helper( { mojo => $self } );
	if ( $helper->is_valid_delivery_service($id) ) {
		if ( $helper->is_delivery_service_assigned($id) ) {
			my $result = $self->db->resultset("Deliveryservice")->search( { id => $self->param('id') } )->single();
			# Resolve the CDN name from the delivery service's profile.
			# NOTE(review): duplicated 'parameter.name' condition, same
			# copy/paste pattern as in the sibling subs -- harmless.
			my $param =
				$self->db->resultset('ProfileParameter')
				->search( { -and => [ 'parameter.name' => 'CDN_name', 'parameter.name' => 'CDN_name', 'me.profile' => $result->profile->id ] },
				{ prefetch => [ 'parameter', 'profile' ] } )->single();
			my $cdn_name = $param->parameter->value;
			return ( $self->get_cache_health( { server_type => "caches", delivery_service => $result->xml_id, cdn_name => $cdn_name } ) );
		}
		else {
			$self->forbidden();
		}
	}
	else {
		$self->not_found();
	}
}
# Report the failover state of one delivery service as seen by the Rascal
# (traffic monitor) data: whether the service is enabled, whether failover
# is active/configured, which cache locations are disabled, and the bypass
# destination if one is configured. Oper users may view any service;
# otherwise the service must be assigned to the user. 404 for invalid ids.
sub state {
	my $self = shift;
	my $id = $self->param('id');
	my $helper = new Utils::Helper( { mojo => $self } );
	if ( $helper->is_valid_delivery_service($id) ) {
		if ( $helper->is_delivery_service_assigned($id) || &is_oper($self) ) {
			my $result = $self->db->resultset("Deliveryservice")->search( { id => $self->param('id') } )->single();
			# Resolve the CDN name from the delivery service's profile.
			# NOTE(review): duplicated 'parameter.name' condition, same
			# copy/paste pattern as in the sibling subs -- harmless.
			my $param =
				$self->db->resultset('ProfileParameter')
				->search( { -and => [ 'parameter.name' => 'CDN_name', 'parameter.name' => 'CDN_name', 'me.profile' => $result->profile->id ] },
				{ prefetch => [ 'parameter', 'profile' ] } )->single();
			my $cdn_name = $param->parameter->value;
			my $ds_name = $result->xml_id;
			my $rascal_data = $self->get_rascal_state_data( { type => "RASCAL", state_type => "deliveryServices", cdn_name => $cdn_name } );
			# scalar refs get converted into json booleans
			# Default response: everything disabled/unconfigured.
			my $data = {
				enabled => \0,
				failover => {
					enabled => \0,
					configured => \0,
					destination => undef,
					locations => []
				}
			};
			# Only fill in details when the monitor actually knows this CDN
			# and this delivery service.
			if ( exists( $rascal_data->{$cdn_name} ) && exists( $rascal_data->{$cdn_name}->{state}->{$ds_name} ) ) {
				my $health_config = $self->get_health_config($cdn_name);
				my $c = $rascal_data->{$cdn_name}->{config}->{deliveryServices}->{$ds_name};
				my $r = $rascal_data->{$cdn_name}->{state}->{$ds_name};
				if ( exists( $health_config->{deliveryServices}->{$ds_name} ) ) {
					my $h = $health_config->{deliveryServices}->{$ds_name};
					if ( $h->{status} eq "REPORTED" ) {
						$data->{enabled} = \1;
					}
					# Failover is considered active when the monitor reports
					# the service unavailable.
					if ( !$r->{isAvailable} ) {
						$data->{failover}->{enabled} = \1;
						$data->{failover}->{locations} = $r->{disabledLocations};
					}
					if ( exists( $h->{"health.threshold.total.kbps"} ) ) {
						# get current kbps, calculate percent used
						$data->{failover}->{configured} = \1;
						push( @{ $data->{failover}->{limits} }, { metric => "total_kbps", limit => $h->{"health.threshold.total.kbps"} } );
					}
					if ( exists( $h->{"health.threshold.total.tps_total"} ) ) {
						# get current tps, calculate percent used
						$data->{failover}->{configured} = \1;
						push( @{ $data->{failover}->{limits} }, { metric => "total_tps", limit => $h->{"health.threshold.total.tps_total"} } );
					}
					# Bypass destination: DNS gives an IP, HTTP gives a URL.
					# NOTE(review): only the first key of bypassDestination is
					# used -- presumably at most one type is ever configured.
					if ( exists( $c->{bypassDestination} ) ) {
						my @k = keys( %{ $c->{bypassDestination} } );
						my $type = shift(@k);
						my $location = undef;
						if ( $type eq "DNS" ) {
							$location = $c->{bypassDestination}->{$type}->{ip};
						}
						elsif ( $type eq "HTTP" ) {
							my $port = ( exists( $c->{bypassDestination}->{$type}->{port} ) ) ? ":" . $c->{bypassDestination}->{$type}->{port} : "";
							$location = sprintf( "http://%s%s", $c->{bypassDestination}->{$type}->{fqdn}, $port );
						}
						$data->{failover}->{destination} = {
							type => $type,
							location => $location
						};
					}
				}
			}
			$self->success($data);
		}
		else {
			$self->forbidden();
		}
	}
	else {
		$self->not_found();
	}
}
# Serve peak usage data for one delivery service / cache group by delegating
# to get_ds_usage(). Guard order: an invalid ds id yields an empty success
# body; an unassigned ds yields 403.
sub peakusage {
	my $self = shift;
	my $ds_id           = $self->param('ds');
	my $cachegroup_name = $self->param('name');
	my $peak_usage_type = $self->param('peak_usage_type');
	my $start           = $self->param('start');
	my $end             = $self->param('end');
	my $interval        = $self->param('interval');
	my $helper = new Utils::Helper( { mojo => $self } );
	return $self->success( {} ) unless $helper->is_valid_delivery_service($ds_id);
	return $self->forbidden() unless $helper->is_delivery_service_assigned($ds_id);
	return $self->get_ds_usage( $ds_id, $cachegroup_name, $peak_usage_type, $start, $end, $interval );
}
1;
| cjqian/traffic_control | traffic_ops/app/lib/API/DeliveryService.pm | Perl | apache-2.0 | 15,024 |
# Example: capture everything printed to the default output handle into an
# in-memory string by select()ing an IO::Scalar handle, then restore the
# previous default handle and print the captured text to real STDOUT.
use IO::Scalar;
my $dump_str;
my $io = IO::Scalar->new(\$dump_str);
my $oio = select($io); # remember the old default handle, redirect to $io
print '<pre>',"\n"; # goes to $dump_str
$d->dumpvalue(\$someref); # as does this -- NOTE(review): $d and $someref are not defined in this snippet; presumably a Dumpvalue object and a data reference from the surrounding program
print '</pre>'; # and this too
select($oio); # old filehandle
print $dump_str; # stdout again when you want it to
| jmcveigh/komodo-tools | scripts/perl/inspect_your_data_structures/dump_to_string.pl | Perl | bsd-2-clause | 327 |
#
# Copyright (c) 1997-2012 The Protein Laboratory, University of Copenhagen
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#
# Created by Anton Berezin <tobez@plab.ku.dk>
#
# $Id$
package Prima;
use strict;
require DynaLoader;
use vars qw($VERSION @ISA $__import @preload);
@ISA = qw(DynaLoader);
# No special DynaLoader flags when loading the XS core.
sub dl_load_flags { 0x00 }
$VERSION = '1.36';
bootstrap Prima $VERSION;
# If the XS bootstrap did not register Prima::init, the toolkit core is not
# usable: mark $::application as false and stop loading this module.
unless ( UNIVERSAL::can('Prima', 'init')) {
	$::application = 0;
	return 0;
}
# undef means "not created yet" (as opposed to 0 = "core unusable" above).
$::application = undef;
require Prima::Classes;
# Scan an argument list for toolkit options of the form --opt or --opt=value.
# Recognized options are forwarded to Prima::options(); --help prints the
# option table and exits; everything that does not look like an option is
# returned for the caller to keep (e.g. reassigned to @ARGV).
sub parse_argv
{
	my %options = Prima::options();
	my @passthrough;
	ARG: for my $arg (@_) {
		unless ( $arg =~ m/^--(?:([^\=]+)\=)?(.*)$/) {
			push @passthrough, $arg;
			next ARG;
		}
		my ( $opt, $val) = ( defined( $1) ? ( $1, $2) : ( $2, undef));
		last ARG unless defined($opt);
		if ( $opt eq 'help') {
			my @help = Prima::options();
			printf " --%-10s - %s\n", shift @help, shift @help
				while @help;
			exit(0);
		}
		# Silently ignore options the toolkit does not know about.
		next ARG unless exists $options{$opt};
		Prima::options( $opt, $val);
	}
	return @passthrough;
}
# Process load-time directives accumulated in @preload by the bootstrap:
# an 'argv' entry is followed by an extra option string to parse, and
# 'noargv' suppresses parsing of the program's own @ARGV.
{
	my ( $i, $skip_argv, @argv);
	for ( $i = 0; $i < @preload; $i++) {
		if ( $preload[$i] eq 'argv') {
			# 'argv' carries its value in the next element; consume both.
			push @argv, $preload[++$i];
		} elsif ( $preload[$i] eq 'noargv') {
			$skip_argv++;
		}
	}
	parse_argv( @argv) if @argv;
	@ARGV = parse_argv( @ARGV) if @ARGV and not $skip_argv;
}
# Hand control to the C core, passing the expected API version.
Prima::init($VERSION);
# Global-destruction hook: shut the C core down, but only when the
# bootstrap actually installed a cleanup routine.
sub END
{
	Prima::cleanup() if UNIVERSAL::can( 'Prima', 'cleanup');
}
# Enter the toolkit event loop. Dies unless an application object exists;
# afterwards drops the application reference once it is no longer alive.
sub run
{
	die "Prima was not properly initialized\n" unless $::application;
	$::application->go if $::application->alive;
	undef $::application
		if $::application and not $::application->alive;
}
# Custom importer: each argument names a Prima module (the Prima:: prefix
# may be omitted), optionally followed by a hashref of parameters passed to
# that module's own import. The package global $__import records the
# original caller for the duration of each load, then is reset to 0.
sub import
{
	my @module = @_;
	while (@module) {
		my $module = shift @module;
		my %parameters = ();
		# A hashref immediately after a module name holds its parameters.
		%parameters = %{shift @module} if @module && ref($module[0]) eq 'HASH';
		next if $module eq 'Prima' || $module eq '';
		$module = "Prima::$module" unless $module =~ /^Prima::/;
		$__import = caller;
		if ( $module) {
			# String eval so the module's own import() runs with %parameters.
			eval "use $module \%parameters;";
			die $@ if $@;
		}
		$__import = 0;
	}
}
1;
__END__
=pod
=head1 NAME
Prima - a perl graphic toolkit
=head1 SYNOPSIS
use Prima qw(Application Buttons);
new Prima::MainWindow(
text => 'Hello world!',
size => [ 200, 200],
)-> insert( Button =>
centered => 1,
text => 'Hello world!',
onClick => sub { $::application-> close },
);
run Prima;
=head1 DESCRIPTION
The toolkit is combined from two basic sets of classes - core and external. The
core classes are coded in C and form a base line for every Prima object
written in perl. The usage of C is possible together with the toolkit; however,
its full power is revealed in the perl domain. The external classes present
easily expandable set of widgets, written completely in perl and communicating
with the system using Prima library calls.
The core classes form an hierarchy, which is displayed below:
Prima::Object
Prima::Component
Prima::AbstractMenu
Prima::AccelTable
Prima::Menu
Prima::Popup
Prima::Clipboard
Prima::Drawable
Prima::DeviceBitmap
Prima::Printer
Prima::Image
Prima::Icon
Prima::File
Prima::Timer
Prima::Widget
Prima::Application
Prima::Window
The external classes are derived from these; the list of widget classes
can be found below in L</SEE ALSO>.
=head1 BASIC PROGRAM
The very basic code shown in L<"SYNOPSIS"> is explained here.
The code creates a window with 'Hello,
world' title and a centered button with the same text. The program
terminates after the button is pressed.
A basic construct for a program written with Prima obviously requires
use Prima;
code; however, the effective programming requires usage of the other
modules, for example, C<Prima::Buttons>, which contains set of
button widgets. C<Prima.pm> module can be
invoked with a list of such modules, which makes the construction
use Prima;
use Prima::Application;
use Prima::Buttons;
shorter by using the following scheme:
use Prima qw(Application Buttons);
Another basic issue is the event loop, which is called by
run Prima;
sentence and requires a C<Prima::Application> object to be created beforehand.
Invoking C<Prima::Application> standard module is one of the possible ways to
create an application object. The program usually terminates after the event loop
is finished.
The window is created by invoking
new Prima::Window();
or
Prima::Window-> create()
code with the additional parameters. Actually, all Prima objects are created by such a
scheme. The class name is passed as the first parameter, and a custom set
of parameters is passed afterwards. These parameters are usually
represented in a hash syntax, although actually passed as an array.
The hash syntax is preferred for the code readability:
$new_object = new Class(
parameter => value,
parameter => value,
...
);
Here, parameters are the class properties names, and differ from class to
class. Classes often have common properties, primarily due to the
object inheritance.
In the example, the following properties are set :
Window::text
Window::size
Button::text
Button::centered
Button::onClick
Property values can be of any type, given that they are scalar. As depicted
here, C<::text> property accepts a string, C<::size> - an anonymous array
of two integers and C<onClick> - a sub.
onXxxx are special properties that form a class of I<events>,
which share the C<new>/C<create> syntax, and are additive when
the regular properties are substitutive (read more in L<Prima::Object>).
Events are called in the object context when a specific condition occurs.
The C<onClick> event here, for example, is called when the
user presses (or otherwise activates) the button.
=head1 API
This section describes miscellaneous methods, registered in C<Prima::>
namespace.
=over
=item message TEXT
Displays a system message box with TEXT.
=item run
Enters the program event loop. The loop is ended when C<Prima::Application>'s C<destroy>
or C<close> method is called.
=item parse_argv @ARGS
Parses prima options from @ARGS, returns unparsed arguments.
=back
=head1 OPTIONS
Prima applications do not have a portable set of arguments; it depends on the
particular platform. Run
perl -e '$ARGV[0]=q(--help); require Prima'
or any Prima program with C<--help> argument to get the list of supported
arguments. Programmaticaly, setting and obtaining these options can be done
by using C<Prima::options> routine.
In cases where Prima argument parsing conflicts with application options, use
L<Prima::noARGV> to disable automatic parsing; also see L<parse_argv>.
Alternatively, the construct
BEGIN { local @ARGV; require Prima; }
will also do.
=head1 SEE ALSO
The toolkit documentation is divided by several
subjects, and the information can
be found in the following files:
=over
=item Tutorials
L<Prima::tutorial> - introductory tutorial
=item Core toolkit classes
L<Prima::Object> - basic object concepts, properties, events
L<Prima::Classes> - binder module for the core classes
L<Prima::Drawable> - 2-D graphic interface
L<Prima::Image> - bitmap routines
L<Prima::image-load> - image subsystem and file operations
L<Prima::Widget> - window management
=over 2
=item *
L<Prima::Widget::pack> - Tk::pack geometry manager
=item *
L<Prima::Widget::place> - Tk::place geometry manager
=back
L<Prima::Window> - top-level window management
L<Prima::Clipboard> - GUI interprocess data exchange
L<Prima::Menu> - pull-down and pop-up menu objects
L<Prima::Timer> - programmable periodical events
L<Prima::Application> - root of widget objects hierarchy
L<Prima::Printer> - system printing services
L<Prima::File> - asynchronous stream I/O
=item Widget library
L<Prima::Buttons> - buttons and button grouping widgets
L<Prima::Calendar> - calendar widget
L<Prima::ComboBox> - combo box widget
L<Prima::DetailedList> - multi-column list viewer with controlling header widget
L<Prima::DetailedOutline> - a multi-column outline viewer with controlling header widget
L<Prima::DockManager> - advanced dockable widgets
L<Prima::Docks> - dockable widgets
L<Prima::Edit> - text editor widget
L<Prima::ExtLists> - listbox with checkboxes
L<Prima::FrameSet> - frameset widget class
L<Prima::Grids> - grid widgets
L<Prima::Header> - a multi-tabbed header widget
L<Prima::HelpViewer> - the built-in POD file browser
L<Prima::Image::TransparencyControl> - standard dialog for transparent color index selection
L<Prima::ImageViewer> - bitmap viewer
L<Prima::InputLine> - input line widget
L<Prima::KeySelector> - key combination widget and routines
L<Prima::Label> - static text widget
L<Prima::Lists> - user-selectable item list widgets
L<Prima::MDI> - top-level windows emulation classes
L<Prima::Notebooks> - multipage widgets
L<Prima::Outlines> - tree view widgets
L<Prima::PodView> - POD browser widget
L<Prima::ScrollBar> - scroll bars
L<Prima::ScrollWidget> - scrollable generic document widget
L<Prima::Sliders> - sliding bars, spin buttons and input lines, dial widget etc.
L<Prima::StartupWindow> - a simplistic startup banner window
L<Prima::TextView> - rich text browser widget
L<Prima::Themes> - widget themes manager
=item Standard dialogs
L<Prima::ColorDialog> - color selection facilities
L<Prima::EditDialog> - find and replace dialogs
L<Prima::FileDialog> - file system related widgets and dialogs
L<Prima::FontDialog> - font dialog
L<Prima::ImageDialog> - image file open and save dialogs
L<Prima::MsgBox> - message and input dialog boxes
L<Prima::PrintDialog> - standard printer setup dialog
L<Prima::StdDlg> - wrapper module to the toolkit standard dialogs
=item Visual Builder
L<VB> - Visual Builder for the Prima toolkit
L<Prima::VB::VBLoader> - Visual Builder file loader
L<cfgmaint> - configuration tool for Visual Builder
L<Prima::VB::CfgMaint> - maintains visual builder widget palette configuration
=item PostScript printer interface
L<Prima::PS::Drawable> - PostScript interface to C<Prima::Drawable>
L<Prima::PS::Encodings> - latin-based encodings
L<Prima::PS::Fonts> - PostScript device fonts metrics
L<Prima::PS::Printer> - PostScript interface to C<Prima::Printer>
=item C interface to the toolkit
L<Prima::internals> - Internal architecture
L<Prima::codecs> - Step-by-step image codec creation
L<gencls> - C<gencls>, a class compiler tool.
=item Miscellaneous
L<Prima::faq> - frequently asked questions
L<Prima::Const> - predefined toolkit constants
L<Prima::EventHook> - event filtering
L<Prima::Image::AnimateGIF> - animate gif files
L<Prima::IniFile> - support of Windows-like initialization files
L<Prima::IntUtils> - internal functions
L<Prima::StdBitmap> - shared access to the standard toolkit bitmaps
L<Prima::Stress> - stress test module
L<Prima::Tie> - tie widget properties to scalars or arrays
L<Prima::Utils> - miscellaneous routines
L<Prima::Widgets> - miscellaneous widget classes
L<Prima::gp-problems> - Graphic subsystem portability issues
L<Prima::X11> - usage guide for X11 environment
=item Class information
The Prima manual pages often provide information for more than one Prima class.
To quickly find out the manual page of a desired class, as well as display the
inheritance information, use C<p-class> command. The command can produce output in
text and pod formats; the latter feature is used by the standard Prima documentation
viewer C<podview> ( see File/Run/p-class ).
=back
=head1 COPYRIGHT
Copyright 1997-2003 The Protein Laboratory, University of Copenhagen. All
rights reserved.
Copyright 2004-2012 Dmitry Karasik. All rights reserved.
This program is distributed under the BSD License.
=head1 AUTHORS
Dmitry Karasik E<lt>dmitry@karasik.eu.orgE<gt>,
Anton Berezin E<lt>tobez@tobez.orgE<gt>,
Vadim Belman E<lt>voland@lflat.orgE<gt>,
=cut
| run4flat/Primo | Prima.pm | Perl | bsd-2-clause | 13,143 |
# ABSTRACT: Allows reviewers to select a commit for auditing
package Git::Code::Review::Command::pick;
use strict;
use warnings;
use CLI::Helpers qw(:all);
use Git::Code::Review::Utilities qw(:all);
use Git::Code::Review -command;
use Git::Code::Review::Notify qw(notify_enabled);
# ---------------------------------------------------------------------------
# Module-level state, initialized once at load time from the audit repository.
# ---------------------------------------------------------------------------
# Globals
my $AUDITDIR = gcr_dir();     # audit working directory (used by resign())
my %CFG      = gcr_config();  # configuration; we use $CFG{user} throughout
my $PROFILE  = gcr_profile(); # currently selected review profile

# Menu labels shown to the reviewer.  Keys starting with '_' are
# view-only actions handled inline by execute() and intentionally have
# no entry in %ACTIONS.
my %LABELS = (
    approve  => "[Approve] this commit.",
    concerns => "Raise a [concern] with this commit.",
    resign   => "[Resign] from this commit.",
    move     => "[Move] this commit to another profile.",
    skip     => "Skip (just exits unlocking the commit.)",
    _view    => "(View) Commit again.",
    _file    => "(View) A file mentioned in the commit.",
);

# Dispatch table mapping a chosen menu label to its handler sub.
my %ACTIONS = (
    approve  => \&approve,
    concerns => \&concerns,
    resign   => \&resign,
    move     => \&move,
    skip     => \&skip,
);

# Consistency check: any non-'_' label without a handler is reported via
# debug() and collected; the delete below keeps %ACTIONS and %LABELS in
# sync (it is a no-op for labels that were never in %ACTIONS).
my @_incomplete;
foreach my $action (keys %LABELS) {
    next if exists $ACTIONS{$action};
    debug("PICK|$action - Missing Action, but have label.") unless index($action,'_') == 0;
    push @_incomplete, $action;
}
delete $ACTIONS{$_} for @_incomplete;

# Resignations
my $resigned_file;   # path of Resigned/<user>, computed lazily in resign()
my %_resigned;       # commits resigned during this run
                     # NOTE(review): written in resign() but never read in
                     # this file -- confirm whether it is still needed.
# Command-line options for 'git code-review pick', in the spec format
# consumed by App::Cmd / Getopt::Long::Descriptive.
sub opt_spec {
    my @specs = (
        [ 'order:s', "How to order the commits picked: random, asc, or desc (Default: random)", { default => 'random' } ],
        [ 'since|s:s', "Commit start date, none if not specified", { default => "0000-00-00" } ],
        [ 'until|u:s', "Commit end date, none if not specified", { default => "9999-99-99" } ],
    );
    return @specs;
}
# Long help text for 'git code-review help pick'.
# The heredoc body is indented four spaces for readability; the
# substitution below strips exactly that indent from every line.
sub description {
    my $DESC = <<"    EOH";
    Reviewers performing the audit use the 'pick' command to lock a commit for review.
    The command use Term::ReadLine to prompt the end-user for answers to how to handle
    the commit.
    You can optionally pass a SHA1 of a commit in the 'review' state that you
    haven't authored to review a specific commit, e.g.
    git code-review pick <SHA1>
    EOH
    $DESC =~ s/^[ ]{4}//mg;   # drop the 4-space heredoc indent
    return $DESC;
}
# Main entry point for 'git code-review pick'.
#
# Commit selection, in priority order:
#   1. a commit this user already holds a lock on,
#   2. a (partial) SHA1 given on the command line,
#   3. an entry from the generated picklist (honoring --order/--since/--until).
# The chosen commit is moved to the 'locked' state, displayed, and the
# reviewer's chosen action is dispatched via %ACTIONS.
sub execute {
    my ($cmd,$opt,$args) = @_;
    die "Not initialized, run git-code-review init!" unless gcr_is_initialized();
    notify_enabled();

    # Grab the audit repo handle, reset
    my $audit = gcr_repo();
    gcr_reset();

    # Get a listing of available commits;
    # Locks held by this user take precedence over everything else.
    my @locked = $audit->run('ls-files', File::Spec->catdir('Locked',$CFG{user}));
    my $commit;
    if( @locked ) {
        output({color=>'red'}, "You are currently locking commits, ignoring picklist. Will continue in 1 second.");
        sleep 1;
        $commit = gcr_commit_info($locked[0]);
        if( @locked > 1 ) {
            # Multiple locks held: let the reviewer choose which to action.
            $commit = gcr_commit_info(
                prompt("!! You are currently locking the following commits, select one to action: ", menu => \@locked)
            );
        }
    }
    elsif(ref $args eq 'ARRAY' && @$args) {
        # Explicit (partial) SHA1 given: it must exist, be in 'review'
        # state, and not be authored by the current user.
        ($commit) = map { $_=gcr_commit_info($_) } $audit->run('ls-files', "*$args->[0]*.patch");
        die "no valid commits found matching $args->[0]" unless defined $commit;
        die "Commit not in review state, it is in '$commit->{state}'" unless $commit->{state} eq 'review';
        if( $commit->{author} eq $CFG{user} ) {
            output({stderr=>1,color=>'red'}, "Nice try! You can't review your own commits.");
            exit 1;
        }
    }
    else {
        # Generate an ordered picklist w/o my commits and w/o my resignations
        my @picklist = sort { $a->{date} cmp $b->{date} }
            grep { $_->{date} ge $opt->{since} && $_->{date} le $opt->{until} }
            map  { $_=gcr_commit_info($_) }
            grep { /^$PROFILE/ && gcr_not_resigned($_) && gcr_not_authored($_) }
            $audit->run('ls-files', '*Review*');
        if(!@picklist) {
            output({color=>'green'},"All reviews completed on profile: $PROFILE!");
            exit 0;
        }
        else {
            output({color=>"cyan"}, sprintf("+ Picklist currently contains %d commits.",scalar(@picklist)));
        }
        # Map the requested ordering onto an index into the sorted picklist;
        # unknown --order values fall back to random.
        my %idx = (
            asc    => 0,
            desc   => -1,
            random => int(rand(@picklist)),
        );
        $commit = exists $idx{lc $opt->{order}} ? $picklist[$idx{lc $opt->{order}}] : $picklist[$idx{random}];
    }

    # Move to the locked state
    gcr_change_state($commit,'locked', { skip => 'true', message => 'Locked.' });

    # Only show "move" unless we have > 1 profile
    my @profiles = gcr_profiles();
    delete $LABELS{move} unless @profiles > 1;

    # Show the Commit.  '_'-prefixed selections only redisplay content;
    # the loop ends when a real action is chosen.
    my $action ='_view';
    do{
        # View Files
        if($action eq '_view') {
            gcr_view_commit($commit);
        }
        elsif($action eq '_file') {
            gcr_view_commit_files($commit);
        }
        # Choose next action.
        $action = prompt("Action?", menu => \%LABELS);
    } until $action !~ /^_/;
    output({color=>'cyan'}, "We are going to $action $commit->{base}");
    $ACTIONS{$action}->($commit);
}
# Resign from reviewing a commit: record the commit in the per-user
# Resigned/<user> file, stage that file in the audit repo, and unlock
# the commit back to the 'review' state.
sub resign {
    my ($commit) = @_;
    # Ask why; choosing 'other' requires a free-form explanation.
    my $reason = prompt("Why are you resigning for this commit? ", menu => [
        q{No experience with systems covered.},
        q{I am the author.},
        q{other},
    ]);
    if( $reason eq 'other' ) {
        $reason = prompt("Explain: ", validate => { "Please type at least 5 characters." => sub { length $_ > 5; } });
    }
    # NOTE(review): $reason is collected but never written anywhere below
    # -- confirm whether it was meant to be recorded alongside the SHA1.

    # Make sure git status is clean
    my $audit = gcr_repo();
    gcr_reset();

    # Create resignation directory and append this commit's identifier.
    gcr_mkdir('Resigned');
    $resigned_file ||= File::Spec->catfile($AUDITDIR,'Resigned',$CFG{user});
    open(my $fh, '>>', $resigned_file) or die "unable to open $resigned_file for appending: $!";
    print $fh "$commit->{base}\n";
    # Check close explicitly: buffered write errors only surface here,
    # and a silently-lost resignation would put the commit back in the
    # reviewer's picklist.
    close $fh or die "unable to write $resigned_file: $!";
    $_resigned{$commit->{base}} = 1;

    debug($audit->run('add',File::Spec->catfile('Resigned',$CFG{user})));
    gcr_change_state($commit,'review','Unlocked due to resignation.');
}
# Release the lock on a commit without recording any review decision;
# it returns to the 'review' state and stays in the picklist.
sub skip {
    my $commit = shift;
    verbose("+ Skipping $commit->{base}");
    gcr_change_state($commit, 'review', 'Unlocked due to skip.');
}
# Mark a locked commit as approved, recording the reviewer's reason.
sub approve {
    my $commit = shift;
    my %approval_reasons = (
        cosmetic    => "Cosmetic change only, no functional difference.",
        correct     => "Calculations are all accurate.",
        outofbounds => "Changes are not in the bounds for the audit.",
        other       => 'Other (requires explanation)',
    );
    my $choice = prompt("Why are you approving this commit?", menu => \%approval_reasons);
    # 'other' requires a free-form explanation; every other choice keeps
    # its canned description as the recorded message.
    my $explanation = $choice eq 'other'
        ? prompt("Explain: ", validate => { "Really, not even 10 characters? " => sub { length $_ > 10; } })
        : $approval_reasons{$choice};
    # NOTE(review): sibling handlers log $commit->{base}; this one uses
    # $commit->{sha1} -- confirm gcr_commit_info() populates both keys.
    verbose("+ Approving $commit->{sha1} for $choice");
    gcr_change_state($commit, approved => { reason => $choice, message => $explanation });
}
# Raise a concern against a locked commit: record the reason, change the
# state, and send a high-priority notification email.
sub concerns {
    my $commit = shift;
    my %concern_reasons = (
        incorrect => "Calculations are incorrect.",
        unclear   => "Code is not clear, requires more information from the author.",
        other     => 'Other',
    );
    my $choice      = prompt("Why are you raising a concern with this commit?",menu => \%concern_reasons);
    my $explanation = prompt("Explain: ", validate => { "Really, not even 10 characters? " => sub { length $_ > 10; } });

    verbose("+ Raising concern with $commit->{base} for $choice");
    gcr_change_state($commit, concerns => {
        reason  => $choice,
        message => join("\n", $concern_reasons{$choice}, $explanation),
    });

    # Do notify by email
    Git::Code::Review::Notify::notify(concerns => {
        priority => 'high',
        commit   => $commit,
        reason   => {
            short   => $choice,
            details => $explanation,
        },
    });
}
# Move a locked commit to a different review profile, recording why.
sub move {
    my $commit = shift;
    verbose("+ Moving $commit->{base}");
    my $profile_list = gcr_profiles();
    my $destination  = prompt("Which profile are you moving this commit to?", menu => $profile_list);
    my $why          = prompt("Why are you moving this to $destination: ", validate => { "Really, not even 10 characters? " => sub { length $_ > 10; } });
    gcr_change_profile($commit, $destination, $why);
}
1;
__END__
=pod
=encoding UTF-8
=head1 NAME
Git::Code::Review::Command::pick - Allows reviewers to select a commit for auditing
=head1 VERSION
version 1.6
=head1 AUTHOR
Brad Lhotsky <brad@divisionbyzero.net>
=head1 COPYRIGHT AND LICENSE
This software is Copyright (c) 2014 by Brad Lhotsky.
This is free software, licensed under:
The (three-clause) BSD License
=cut
| gitpan/Git-Code-Review | lib/Git/Code/Review/Command/pick.pm | Perl | bsd-3-clause | 8,452 |
/* Part of SWI-Prolog
Author: Jan Wielemaker
E-mail: J.Wielemaker@vu.nl
WWW: http://www.swi-prolog.org
Copyright (c) 2010-2021, VU University Amsterdam
CWI, Amsterdam
SWI-Prolog Solutions b.v.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
:- module(ansi_term,
[ ansi_format/3, % +Attr, +Format, +Args
ansi_get_color/2, % +Which, -rgb(R,G,B)
ansi_hyperlink/2, % +Stream,+Location
ansi_hyperlink/3 % +Stream,+URL,+Label
]).
:- autoload(library(error),[domain_error/2,must_be/2]).
:- autoload(library(lists),[append/3]).
:- if(exists_source(library(time))).
:- autoload(library(time),[call_with_time_limit/2]).
:- endif.
/** <module> Print decorated text to ANSI consoles
This library allows for exploiting the color and attribute facilities of
most modern terminals using ANSI escape sequences. This library provides
the following:
- ansi_format/3 allows writing messages to the terminal with ansi
attributes.
- It defines the hook prolog:message_line_element/2, which provides
ansi attributes and hyperlinks for print_message/2.
The behavior of this library is controlled by two Prolog flags:
- `color_term`
When `true`, activate the color output for this library. Otherwise
simply call format/3.
- `hyperlink_term`
Emit terminal hyperlinks for url(Location) and url(URL, Label)
elements of Prolog messages.
@see http://en.wikipedia.org/wiki/ANSI_escape_code
*/
:- multifile
prolog:console_color/2, % +Term, -AnsiAttrs
supports_get_color/0,
hyperlink/2. % +Stream, +Spec
%!  color_term_flag_default(-Default) is det.
%
%   Default is `true` iff all three standard streams are attached to a
%   terminal and $TERM is not "dumb"; otherwise `false`.
color_term_flag_default(true) :-
    stream_property(user_input, tty(true)),
    stream_property(user_error, tty(true)),
    stream_property(user_output, tty(true)),
    \+ getenv('TERM', dumb),
    !.
color_term_flag_default(false).

%   Create the `color_term` and `hyperlink_term` flags.  keep(true)
%   preserves a value the user may have set before this library loads.
init_color_term_flag :-
    color_term_flag_default(Default),
    create_prolog_flag(color_term, Default,
                       [ type(boolean),
                         keep(true)
                       ]),
    create_prolog_flag(hyperlink_term, false,
                       [ type(boolean),
                         keep(true)
                       ]).

:- init_color_term_flag.

:- meta_predicate
    keep_line_pos(+, 0).

:- multifile
    user:message_property/2.
%! ansi_format(+ClassOrAttributes, +Format, +Args) is det.
%
% Format text with ANSI attributes. This predicate behaves as
% format/2 using Format and Args, but if the =current_output= is a
% terminal, it adds ANSI escape sequences according to Attributes.
% For example, to print a text in bold cyan, do
%
% ==
% ?- ansi_format([bold,fg(cyan)], 'Hello ~w', [world]).
% ==
%
% Attributes is either a single attribute, a list thereof or a term
% that is mapped to concrete attributes based on the current theme
% (see prolog:console_color/2). The attribute names are derived from
% the ANSI specification. See the source for sgr_code/2 for details.
% Some commonly used attributes are:
%
% - bold
% - underline
% - fg(Color), bg(Color), hfg(Color), hbg(Color)
% For fg(Color) and bg(Color), the colour name can be '#RGB' or
% '#RRGGBB'
% - fg8(Spec), bg8(Spec)
% 8-bit color specification. Spec is a colour name, h(Color)
% or an integer 0..255.
% - fg(R,G,B), bg(R,G,B)
% 24-bit (direct color) specification. The components are
% integers in the range 0..255.
%
% Defined color constants are below. =default= can be used to
% access the default color of the terminal.
%
% - black, red, green, yellow, blue, magenta, cyan, white
%
% ANSI sequences are sent if and only if
%
% - The =current_output= has the property tty(true) (see
% stream_property/2).
% - The Prolog flag =color_term= is =true=.
ansi_format(Attr, Format, Args) :-
    ansi_format(current_output, Attr, Format, Args).

%   ansi_format(+Stream, +ClassOrAttributes, +Format, +Args)
%
%   As ansi_format/3 on an explicit stream.  Falls through to plain
%   format/3 (last clause) when Stream is not a tty or the `color_term`
%   flag is false.  keep_line_pos/2 stops the escape sequences from
%   disturbing the stream's column bookkeeping.
ansi_format(Stream, Class, Format, Args) :-
    stream_property(Stream, tty(true)),
    current_prolog_flag(color_term, true),
    !,
    class_attrs(Class, Attr),
    phrase(sgr_codes_ex(Attr), Codes),
    atomic_list_concat(Codes, ;, Code),
    with_output_to(
        Stream,
        ( keep_line_pos(current_output, format('\e[~wm', [Code])),
          format(Format, Args),
          keep_line_pos(current_output, format('\e[0m'))
        )
    ),
    flush_output.
ansi_format(Stream, _Attr, Format, Args) :-
    format(Stream, Format, Args).

%   sgr_codes_ex(+Attrs)//
%
%   Flatten a (possibly nested) attribute specification into SGR codes,
%   raising an instantiation or domain error for bad input.
sgr_codes_ex(X) -->
    { var(X),
      !,
      instantiation_error(X)
    }.
sgr_codes_ex([]) -->
    !.
sgr_codes_ex([H|T]) -->
    !,
    sgr_codes_ex(H),
    sgr_codes_ex(T).
sgr_codes_ex(Attr) -->
    (   { sgr_code(Attr, Code) }
    ->  (   { is_list(Code) }        % 8/24-bit colors yield code lists
        ->  list(Code)
        ;   [Code]
        )
    ;   { domain_error(sgr_code, Attr) }
    ).

list([]) --> [].
list([H|T]) --> [H], list(T).
%! sgr_code(+Name, -Code)
%
% True when code is the Select Graphic Rendition code for Name.
% The defined names are given below. Note that most terminals only
% implement this partially.
%
% | reset | all attributes off |
% | bold | |
% | faint | |
% | italic | |
% | underline | |
% | blink(slow) | |
% | blink(rapid) | |
% | negative | |
% | conceal | |
% | crossed_out | |
% | font(primary) | |
% | font(N) | Alternate font (1..8) |
% | fraktur | |
% | underline(double) | |
% | intensity(normal) | |
% | fg(Name) | Color name |
% | bg(Name) | Color name |
% | framed | |
% | encircled | |
% | overlined | |
% | ideogram(underline) | |
% | right_side_line | |
% | ideogram(underline(double)) | |
% | right_side_line(double) | |
% | ideogram(overlined) | |
% | left_side_line | |
% | ideogram(stress_marking) | |
% | -Off | Switch attributes off |
% | hfg(Name) | Color name |
% | hbg(Name) | Color name |
%
% @see http://en.wikipedia.org/wiki/ANSI_escape_code
%   Clauses for sgr_code/2 (see the table above).  Clause order matters:
%   compound forms carry cuts to commit once matched.
sgr_code(reset, 0).
sgr_code(bold,  1).
sgr_code(faint, 2).
sgr_code(italic, 3).
sgr_code(underline, 4).
sgr_code(blink(slow), 5).
sgr_code(blink(rapid), 6).
sgr_code(negative, 7).
sgr_code(conceal, 8).
sgr_code(crossed_out, 9).
sgr_code(font(primary), 10) :- !.
sgr_code(font(N), C) :-
    C is 10+N.
sgr_code(fraktur, 20).
sgr_code(underline(double), 21).
sgr_code(intensity(normal), 22).
%   Foreground/background by color name or '#RGB'/'#RRGGBB' atom.
%   NOTE(review): bg/1 and hbg/1 cut on entry while fg/1 and hfg/1 do
%   not -- appears harmless as each functor has a single clause here,
%   but confirm before relying on backtracking behaviour.
sgr_code(fg(Name), C) :-
    (   ansi_color(Name, N)
    ->  C is N+30
    ;   rgb(Name, R, G, B)
    ->  sgr_code(fg(R,G,B), C)
    ).
sgr_code(bg(Name), C) :-
    !,
    (   ansi_color(Name, N)
    ->  C is N+40
    ;   rgb(Name, R, G, B)
    ->  sgr_code(bg(R,G,B), C)
    ).
sgr_code(framed, 51).
sgr_code(encircled, 52).
sgr_code(overlined, 53).
sgr_code(ideogram(underline), 60).
sgr_code(right_side_line, 60).
sgr_code(ideogram(underline(double)), 61).
sgr_code(right_side_line(double), 61).
sgr_code(ideogram(overlined), 62).
sgr_code(left_side_line, 62).
sgr_code(ideogram(stress_marking), 64).
sgr_code(-X, Code) :-                   % -Attr switches an attribute off
    off_code(X, Code).
sgr_code(hfg(Name), C) :-               % high-intensity foreground
    ansi_color(Name, N),
    C is N+90.
sgr_code(hbg(Name), C) :-               % high-intensity background
    !,
    ansi_color(Name, N),
    C is N+100.
%   8-bit (256-color) and 24-bit (direct color) forms produce
%   multi-element code lists rather than a single integer.
sgr_code(fg8(Name), [38,5,N]) :-
    ansi_color8(Name, N).
sgr_code(bg8(Name), [48,5,N]) :-
    ansi_color8(Name, N).
sgr_code(fg(R,G,B), [38,2,R,G,B]) :-
    between(0, 255, R),
    between(0, 255, G),
    between(0, 255, B).
sgr_code(bg(R,G,B), [48,2,R,G,B]) :-
    between(0, 255, R),
    between(0, 255, G),
    between(0, 255, B).

%   off_code(+Attr, -Code): SGR codes that switch an attribute off,
%   used by the sgr_code(-X, Code) clause above.
off_code(italic_and_franktur, 23).
off_code(underline, 24).
off_code(blink, 25).
off_code(negative, 27).
off_code(conceal, 28).
off_code(crossed_out, 29).
off_code(framed, 54).
off_code(overlined, 55).

%   ansi_color8(+Spec, -N): 8-bit palette index.  h(Name) selects the
%   high-intensity variant of a named color; bare integers 0..255 pass
%   through unchanged.
ansi_color8(h(Name), N) :-
    !,
    ansi_color(Name, N0),
    N is N0+8.
ansi_color8(Name, N) :-
    atom(Name),
    !,
    ansi_color(Name, N).
ansi_color8(N, N) :-
    between(0, 255, N).

%   The 8 basic ANSI colors plus `default` (9).
ansi_color(black,   0).
ansi_color(red,     1).
ansi_color(green,   2).
ansi_color(yellow,  3).
ansi_color(blue,    4).
ansi_color(magenta, 5).
ansi_color(cyan,    6).
ansi_color(white,   7).
ansi_color(default, 9).

%   rgb(+Name, -R, -G, -B): parse '#RRGGBB' or '#RGB' atoms into
%   0..255 components.  The short form doubles each hex digit.
rgb(Name, R, G, B) :-
    atom_codes(Name, [0'#,R1,R2,G1,G2,B1,B2]),
    hex_color(R1,R2,R),
    hex_color(G1,G2,G),
    hex_color(B1,B2,B).
rgb(Name, R, G, B) :-
    atom_codes(Name, [0'#,R1,G1,B1]),
    hex_color(R1,R),
    hex_color(G1,G),
    hex_color(B1,B).

hex_color(D1,D2,V) :-
    code_type(D1, xdigit(V1)),
    code_type(D2, xdigit(V2)),
    V is 16*V1+V2.
hex_color(D1,V) :-
    code_type(D1, xdigit(V1)),
    V is 16*V1+V1.
%! prolog:console_color(+Term, -AnsiAttributes) is semidet.
%
% Hook that allows for mapping abstract terms to concrete ANSI
% attributes. This hook is used by _theme_ files to adjust the
% rendering based on user preferences and context. Defaults are
% defined in the file `boot/messages.pl`.
%
% @see library(theme/dark) for an example implementation and the Term
% values used by the system messages.
/*******************************
* HOOK *
*******************************/
%! prolog:message_line_element(+Stream, +Term) is semidet.
%
% Hook implementation that deals with ansi(+Attr, +Fmt, +Args) in
% message specifications.
%   ansi(Class, Fmt, Args[, Ctx]): colored text.  The 4-argument form
%   additionally restores the enclosing region's color (carried in Ctx
%   by the begin/2 clause below) after printing.
prolog:message_line_element(S, ansi(Class, Fmt, Args)) :-
    class_attrs(Class, Attr),
    ansi_format(S, Attr, Fmt, Args).
prolog:message_line_element(S, ansi(Class, Fmt, Args, Ctx)) :-
    class_attrs(Class, Attr),
    ansi_format(S, Attr, Fmt, Args),
    (   nonvar(Ctx),
        Ctx = ansi(_, RI-RA)
    ->  keep_line_pos(S, format(S, RI, RA))
    ;   true
    ).
%   url(...): emit terminal hyperlinks.
prolog:message_line_element(S, url(Location)) :-
    ansi_hyperlink(S, Location).
prolog:message_line_element(S, url(URL, Label)) :-
    ansi_hyperlink(S, URL, Label).
%   begin(Level, Ctx)/end(Ctx): color an entire message region; only
%   active when S is a tty and the color_term flag is true.  Ctx passes
%   the reset and re-enable sequences to nested elements and to end/1.
prolog:message_line_element(S, begin(Level, Ctx)) :-
    level_attrs(Level, Attr),
    stream_property(S, tty(true)),
    current_prolog_flag(color_term, true),
    !,
    (   is_list(Attr)
    ->  sgr_codes(Attr, Codes),
        atomic_list_concat(Codes, ;, Code)
    ;   sgr_code(Attr, Code)
    ),
    keep_line_pos(S, format(S, '\e[~wm', [Code])),
    Ctx = ansi('\e[0m', '\e[0m\e[~wm'-[Code]).
prolog:message_line_element(S, end(Ctx)) :-
    nonvar(Ctx),
    Ctx = ansi(Reset, _),
    keep_line_pos(S, write(S, Reset)).

%   sgr_codes(+Attrs, -Codes): map a list of attributes to codes
%   (no error reporting; compare sgr_codes_ex//1).
sgr_codes([], []).
sgr_codes([H0|T0], [H|T]) :-
    sgr_code(H0, H),
    sgr_codes(T0, T).

%   Attribute lookup.  user:message_property/2 takes precedence, then
%   the prolog:console_color/2 theme hook, then the system default
%   theme; as a last resort the class term itself is used as the
%   attribute specification.
level_attrs(Level, Attrs) :-
    user:message_property(Level, color(Attrs)),
    !.
level_attrs(Level, Attrs) :-
    class_attrs(message(Level), Attrs).

class_attrs(Class, Attrs) :-
    user:message_property(Class, color(Attrs)),
    !.
class_attrs(Class, Attrs) :-
    prolog:console_color(Class, Attrs),
    !.
class_attrs(Class, Attrs) :-
    '$messages':default_theme(Class, Attrs),
    !.
class_attrs(Attrs, Attrs).
%! ansi_hyperlink(+Stream, +Location) is det.
%! ansi_hyperlink(+Stream, +URL, +Label) is det.
%
% Create a hyperlink for a terminal emulator. The file is fairly easy,
% but getting the line and column across is not as there seems to be
% no established standard. The current implementation emits, i.e.,
% inserting a capital ``L`` before the line.
%
% ``file://AbsFileName[#LLine[:Column]]``
%
% @see https://gist.github.com/egmontkob/eb114294efbcd5adb1944c9f3cb5feda
%   The user-definable hyperlink/2 hook takes precedence in all cases.
ansi_hyperlink(Stream, Location) :-
    hyperlink(Stream, url(Location)),
    !.
%   File:Line:Column and File:Line forms append "#Line[:Column]" to the
%   file URI; plain-text fallback is used when no URI can be made.
ansi_hyperlink(Stream, File:Line:Column) :-
    !,
    (   url_file_name(URI, File)
    ->  format(Stream, '\e]8;;~w#~d:~d\e\\~w:~d:~d\e]8;;\e\\',
               [ URI, Line, Column, File, Line, Column ])
    ;   format(Stream, '~w:~w:~w', [File, Line, Column])
    ).
ansi_hyperlink(Stream, File:Line) :-
    !,
    (   url_file_name(URI, File)
    ->  format(Stream, '\e]8;;~w#~w\e\\~w:~d\e]8;;\e\\',
               [ URI, Line, File, Line ])
    ;   format(Stream, '~w:~w', [File, Line])
    ).
ansi_hyperlink(Stream, File) :-
    (   url_file_name(URI, File)
    ->  format(Stream, '\e]8;;~w\e\\~w\e]8;;\e\\',
               [ URI, File ])
    ;   format(Stream, '~w', [File])
    ).

ansi_hyperlink(Stream, URL, Label) :-
    hyperlink(Stream, url(URL, Label)),
    !.
ansi_hyperlink(Stream, URL, Label) :-
    (   current_prolog_flag(hyperlink_term, true)
    ->  format(Stream, '\e]8;;~w\e\\~w\e]8;;\e\\',
               [ URL, Label ])
    ;   format(Stream, '~w', [Label])
    ).

%!  hyperlink(+Stream, +Spec) is semidet.
%
%   Multifile hook that may be used to redefine ansi_hyperlink/2,3.  If
%   this predicate succeeds the system assumes the link has been written
%   to Stream.
%
%   @arg Spec is either url(Location) or url(URL, Label).  See
%        ansi_hyperlink/2,3 for details.

:- dynamic has_lib_uri/1 as volatile.

%   url_file_name(-URL, +File): translate File into a file:// URL iff
%   the hyperlink_term flag is set and library(uri) is available.  The
%   availability result is cached in has_lib_uri/1 so the library is
%   probed at most once.
url_file_name(URL, File) :-
    current_prolog_flag(hyperlink_term, true),
    (   has_lib_uri(true)
    ->  uri_file_name(URL, File)
    ;   exists_source(library(uri))
    ->  use_module(library(uri), [uri_file_name/2]),
        uri_file_name(URL, File),
        asserta(has_lib_uri(true))
    ;   asserta(has_lib_uri(false)),
        fail
    ).

%!  keep_line_pos(+Stream, :Goal)
%
%   Run Goal, then restore Stream's line_position so that the escape
%   sequences Goal emits do not disturb column bookkeeping.  The second
%   clause handles streams without position info.
keep_line_pos(S, G) :-
    stream_property(S, position(Pos)),
    !,
    setup_call_cleanup(
        stream_position_data(line_position, Pos, LPos),
        G,
        set_stream(S, line_position(LPos))).
keep_line_pos(_, G) :-
    call(G).
%! ansi_get_color(+Which, -RGB) is semidet.
%
% Obtain the RGB color for an ANSI color parameter. Which is either a
% color alias or an integer ANSI color id. Defined aliases are
% `foreground` and `background`. This predicate sends a request to the
% console (`user_output`) and reads the reply. This assumes an `xterm`
% compatible terminal.
%
% @arg RGB is a term rgb(Red,Green,Blue). The color components are
% integers in the range 0..65535.
%   The real implementation needs call_with_time_limit/2 from
%   library(time); a failing stub is compiled otherwise (see :- else).
:- if(current_predicate(call_with_time_limit/2)).
ansi_get_color(Which0, RGB) :-
    stream_property(user_input, tty(true)),
    stream_property(user_output, tty(true)),
    stream_property(user_error, tty(true)),
    supports_get_color,
    (   color_alias(Which0, Which)
    ->  true
    ;   must_be(between(0,15),Which0)   % integer ANSI color id
    ->  Which = Which0
    ),
    catch(keep_line_pos(user_output,
                        ansi_get_color_(Which, RGB)),
          time_limit_exceeded,
          no_xterm).

%   Heuristic: only xterm-compatible terminals answer the color query;
%   Apple's Terminal claims xterm but does not respond.
supports_get_color :-
    getenv('TERM', Term),
    sub_atom(Term, 0, _, _, xterm),
    \+ getenv('TERM_PROGRAM', 'Apple_Terminal').

color_alias(foreground, 10).
color_alias(background, 11).

%   Send the xterm query and parse the reply
%   "\e]<id>;rgb:RRRR/GGGG/BBBB\a" with a 50ms time limit, in raw tty
%   mode so the reply is not echoed or line-buffered.
ansi_get_color_(Which, rgb(R,G,B)) :-
    format(codes(Id), '~w', [Which]),
    hex4(RH),
    hex4(GH),
    hex4(BH),
    phrase(("\e]", Id, ";rgb:", RH, "/", GH, "/", BH, "\a"), Pattern),
    call_with_time_limit(0.05,
                         with_tty_raw(exchange_pattern(Which, Pattern))),
    !,
    hex_val(RH, R),
    hex_val(GH, G),
    hex_val(BH, B).

no_xterm :-
    print_message(warning, ansi(no_xterm_get_colour)),
    fail.

%   A four-element code list; filled in by matching the reply and
%   converted to an integer by hex_val/2.
hex4([_,_,_,_]).

hex_val([D1,D2,D3,D4], V) :-
    code_type(D1, xdigit(V1)),
    code_type(D2, xdigit(V2)),
    code_type(D3, xdigit(V3)),
    code_type(D4, xdigit(V4)),
    V is (V1<<12)+(V2<<8)+(V3<<4)+V4.

exchange_pattern(Which, Pattern) :-
    format(user_output, '\e]~w;?\a', [Which]),
    flush_output(user_output),
    read_pattern(user_input, Pattern, []).

%   Incrementally match the terminal's reply against Pattern, echoing
%   any bytes that precede a (re)match attempt.
read_pattern(From, Pattern, NotMatched0) :-
    copy_term(Pattern, TryPattern),
    append(Skip, Rest, NotMatched0),
    append(Rest, RestPattern, TryPattern),
    !,
    echo(Skip),
    try_read_pattern(From, RestPattern, NotMatched, Done),
    (   Done == true
    ->  Pattern = TryPattern
    ;   read_pattern(From, Pattern, NotMatched)
    ).

%! try_read_pattern(+From, +Pattern, -NotMatched)
try_read_pattern(_, [], [], true) :-
    !.
try_read_pattern(From, [H|T], [C|RT], Done) :-
    get_code(C),   % NOTE(review): reads the *current input*, not From;
                   % works because From is always user_input -- confirm.
    (   C = H
    ->  try_read_pattern(From, T, RT, Done)
    ;   RT = [],
        Done = false
    ).

echo([]).
echo([H|T]) :-
    put_code(user_output, H),
    echo(T).
:- else.
%   Fallback when library(time) is unavailable: queries always fail.
ansi_get_color(_Which0, _RGB) :-
    fail.
:- endif.

:- multifile prolog:message//1.

prolog:message(ansi(no_xterm_get_colour)) -->
    [ 'Terminal claims to be xterm compatible,'-[], nl,
      'but does not report colour info'-[]
    ].
| josd/eye | eye-wasm/swipl-wasm/home/library/ansi_term.pl | Perl | mit | 18,693 |
#!/usr/bin/perl
# Copyright 2010-2011 Microsoft Corporation
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
# WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
# MERCHANTABLITY OR NON-INFRINGEMENT.
# See the Apache 2 License for the specific language governing permissions and
# limitations under the License.
# converts an utt2spk file to a spk2utt file.
# Takes input from the stdin or from a file argument;
# output goes to the standard out.
use strict;
use warnings;

# Read "utterance speaker" pairs (one per line, from a file argument or
# stdin) and print one "speaker utt1 utt2 ..." line per speaker, in the
# order each speaker is first seen.
if ( @ARGV > 1 ) {
    die "Usage: utt2spk_to_spk2utt.pl [ utt2spk ] > spk2utt";
}

my %utts_for;    # speaker => array-ref of that speaker's utterances
my @speakers;    # speakers in first-seen order, for deterministic output
while (my $line = <>) {
    my @fields = split(" ", $line);
    @fields == 2 or die "Invalid line in utt2spk file: $line";
    my ($utt, $spk) = @fields;
    push @speakers, $spk unless exists $utts_for{$spk};
    push @{ $utts_for{$spk} }, $utt;
}

for my $spk (@speakers) {
    # Array interpolation joins with single spaces, matching the
    # original accumulate-then-strip-trailing-space output exactly.
    print "$spk @{ $utts_for{$spk} }\n";
}
| chagge/Kaldi-timit | s5/utils/utt2spk_to_spk2utt.pl | Perl | apache-2.0 | 1,234 |
package #
Date::Manip::Offset::off487;
# Copyright (c) 2008-2013 Sullivan Beck. All rights reserved.
# This program is free software; you can redistribute it and/or modify it
# under the same terms as Perl itself.
# This file was automatically generated. Any changes to this file will
# be lost the next time 'tzdata' is run.
# Generated on: Mon Jun 3 12:55:41 EDT 2013
# Data version: tzdata2013c
# Code version: tzcode2013c
# This module contains data from the zoneinfo time zone database. The original
# data was obtained from the URL:
# ftp://ftp.iana.org/tz
use strict;
use warnings;
require 5.010000;

our ($VERSION);
$VERSION='6.40';
# Generated-module convention: release package globals at shutdown.
END { undef $VERSION; }

our ($Offset,%Offset);
END {
    undef $Offset;
    undef %Offset;
}

# The UTC offset described by this module, as [-+]HH:MM:SS.
$Offset = '-14:17:00';

# Zones that use this offset.  NOTE(review): the integer keys index
# offset variants (0 appears to denote standard time) -- confirm
# against the Date::Manip::Zones documentation before relying on it.
%Offset = (
    0 => [
        'pacific/saipan',
    ],
);

1;
| nriley/Pester | Source/Manip/Offset/off487.pm | Perl | bsd-2-clause | 852 |
# !!!!!!! DO NOT EDIT THIS FILE !!!!!!!
# This file is machine-generated by lib/unicore/mktables from the Unicode
# database, Version 6.2.0. Any changes made here will be lost!
# !!!!!!! INTERNAL PERL USE ONLY !!!!!!!
# This file is for internal use by core Perl only. The format and even the
# name or existence of this file are subject to change without notice. Don't
# use it directly.
# Return the raw match table as a heredoc; the unicore machinery parses
# one hexadecimal code point (or range start/end pair) per line.
# NOTE(review): per the file's generated header this is internal-only
# data; the listed points presumably carry canonical combining class 18
# -- do not edit by hand (regenerated by lib/unicore/mktables).
return <<'END';
05B8
05C7
END
| Bjay1435/capstone | rootfs/usr/share/perl/5.18.2/unicore/lib/Ccc/CCC18.pl | Perl | mit | 437 |
#!/usr/bin/env perl
use Net::SNMP::Security::USM 4.0;
use warnings;
use strict;
# Print the usage banner and terminate with a non-zero exit status.
# The empty prototype on the original declaration ("sub myHelp()")
# altered parsing rather than validating arguments, so it was removed;
# printf-with-no-arguments calls are replaced by print (the banner
# contains no format directives).
sub myHelp {
    print "\n _____ _____ _____ _____ _____ \n";
    print "| __| | | | _ |_ _ ___| | |\n";
    print "|__ | | | | | | | __| | | | -|\n";
    print "|_____|_|___|_|_|_|__| |___|_|_|__|__| Key List Generator v1.0\n\n";
    print "snmpunk-klg.pl <authProto> <wordList> <msgAuthoritativeEngineID> [privProto]\n";
    print " - authProto : md5|sha\n";
    print " - wordList : wordlist file\n";
    print " - msgAuthoritativeEngineID : field in SNMPv3 packet\n";
    print " - privProto : des|des3|aes\n\n";
    exit 1;
}
# Require: authProto, wordlist, engineID; optional: privProto.
if (@ARGV < 3 || @ARGV > 4) {
    myHelp();
}

# Lexical filehandle, 3-arg open, and an error check: the original
# 2-arg bareword open silently ignored a missing wordlist file.
open my $wordlist_fh, '<', $ARGV[1]
    or die "unable to open wordlist $ARGV[1]: $!";

while (my $word = <$wordlist_fh>) {
    $word =~ s/\r|\n//g;    # strip CR/LF (wordlists may be DOS-formatted)

    # Derive localized auth/priv keys for this candidate password.
    my ($usm, $err) = Net::SNMP::Security::USM->new(
        -authoritative => 1,
        -username      => '0x0ff',
        -authprotocol  => $ARGV[0],
        -authpassword  => $word,
        -engineid      => $ARGV[2],
        -privprotocol  => (@ARGV > 3) ? $ARGV[3] : 'des',
        -privpassword  => $word,
    );
    if (!defined $usm) {
        # print, not printf: $err is data and could contain '%', which
        # would corrupt a printf format string.
        print "$err\n";
        exit(1);
    }

    if (@ARGV < 4) {
        # Without privProto
        printf "%s:%s:00\n", $word, unpack('H*', $usm->auth_key());
    }
    else {
        # With privProto
        printf "%s:%s:%s\n", $word, unpack('H*', $usm->auth_key()), unpack('H*', $usm->priv_key());
    }
}
close $wordlist_fh;
| AnyMaster/Ctoollhu | snmpunk/snmpunk-klg.pl | Perl | mit | 1,379 |
#! /usr/bin/perl -w
#
# Copyright 2005-2016 The Mumble Developers. All rights reserved.
# Use of this source code is governed by a BSD-style license
# that can be found in the LICENSE file at the root of the
# Mumble source tree or at <https://www.mumble.info/LICENSE>.
use strict;
use warnings;
use Carp;
use POSIX;
# Build the Mumble release tarball and zip.
#
# Usage: release.pl [version]
# With no argument the version is derived from `git describe
# origin/master` and normalized below (first '-' becomes '~', the
# describe separators become '-').
my $ver;

# Clean previous artifacts and regenerate the ini file.  These calls
# are deliberately unchecked (best-effort), as in the original.
system("rm mumble-*");
chdir("scripts");
system("bash mkini.sh");
chdir("..");

if ($#ARGV < 0) {
    # List-form pipe open: lexical handle, no shell involved.
    open my $git, '-|', 'git', 'describe', 'origin/master'
        or croak "Failed to get version string";
    while (my $line = <$git>) {
        chomp($line);
        $line =~ s/^(.+)-([0-9]+)-(g.+)$/$1|$2|$3/;
        $line =~ s/-/~/;
        $line =~ s/\|/-/g;
        $ver = $line;
    }
    close($git);
    print "REVISION $ver\n";
} elsif ($#ARGV == 0) {
    $ver = $ARGV[0];
}

# Splice "#define MUMBLE_VERSION <ver>" into src/Version.h.
# NOTE(review): the progress message says Version.cpp but the file
# edited is Version.h -- confirm which is intended.
print "Adjusting Version.cpp\n";
open my $in, '<', 'src/Version.h' or croak "Could not open src/Version.h for reading";
my @lines = <$in>;
close($in);
my $content = join('', @lines);
$content =~ s/(\#ifndef MUMBLE_VERSION)/$1\n\#define MUMBLE_VERSION $ver\n\#endif\n$1/;
open my $out, '>', 'src/Version.h' or croak "Could not open src/Version.h for writing";
print {$out} $content;
# Buffered write errors only surface at close on a write handle.
close($out) or croak "Could not write src/Version.h: $!";

print "Compressing tree\n";
my $ballname = "mumble-${ver}";
# Archive exclusions: VCS metadata, the archive being written, and
# files with Debian licensing issues (#1230).
my $exclusions = join(" --exclude=", ("",
    "*/.git*",
    # Exclude the archive we are currently writing to
    "${ballname}.*",
    # Exclude files with Debian FSG licensing issues (#1230)
    "${ballname}/3rdparty/speex-src/doc/draft-herlein-avt-rtp-speex-00.txt",
    "${ballname}/3rdparty/speex-src/doc/draft-herlein-speex-rtp-profile-02.txt",
    "${ballname}/3rdparty/speex-src/doc/draft-herlein-speex-rtp-profile-03.txt",
    "${ballname}/3rdparty/speex-src/doc/draft-ietf-avt-rtp-speex-00.txt",
    "${ballname}/3rdparty/speex-src/doc/draft-ietf-avt-rtp-speex-01-tmp.txt",
    "${ballname}/3rdparty/speex-src/doc/draft-ietf-avt-rtp-speex-05-tmp.txt",
    "${ballname}/3rdparty/speex-src/doc/manual.lyx",
    "${ballname}/3rdparty/celt-0.11.0-src/doc/ietf/draft-valin-celt-rtp-profile-01.txt",
    "${ballname}/3rdparty/celt-0.7.0-src/doc/ietf/draft-valin-celt-rtp-profile-01.txt"
    )
);

# Stage the tree under ${ballname}/, archive it, and always restore the
# original layout afterwards (the eval confines croak from the
# archivers so the mv/rmdir cleanup still runs).
system("mkdir ${ballname}") == 0 or croak "Could not create target directory ${ballname}";
system("mv * ${ballname}/");
eval {
    system("tar ${exclusions} -zchvf ${ballname}.tar.gz ${ballname}") == 0 or croak "Failed to create tar.gz";
    system("zip -9 -r ${exclusions} ${ballname}.zip ${ballname}") == 0 or croak "Failed to create zip";
};
system("mv ${ballname}/* .");
system("rmdir ${ballname}");
print "Done\n";
| SuperNascher/mumble | scripts/release.pl | Perl | bsd-3-clause | 2,467 |
#!/usr/bin/env perl
use strict;
use warnings;
use FindBin;
use File::Basename;
use Cwd;
use Carp;
use Getopt::Long qw(:config no_ignore_case bundling pass_through);
# Usage text shown when the required options are missing.
my $usage = <<__EOUSAGE__;
######################################################################
#
# Required:
#  --genome <string>           target genome to align to
#  --transcripts <string>      cdna sequences to align
#
# Optional:
#  -N <int>                    number of top hits (default: 1)
#  -I <int>                    max intron length
#  --CPU <int>                 number of threads (default: 2)
#  --no_chimera                do not report chimeric alignmetnts
#  --SAM                       output in SAM format
#
#######################################################################
__EOUSAGE__
;

# Option storage; undef until set on the command line.
my ($genome, $transcriptDB, $max_intron);
my $CPU = 2;
my $help_flag;
my $number_top_hits = 1;
my $no_chimera_flag = 0;
my $SAM_flag = 0;

&GetOptions( 'h' => \$help_flag,
             'genome=s' => \$genome,
             'transcripts=s' => \$transcriptDB,
             'I=i' => \$max_intron,
             'CPU=i' => \$CPU,
             'N=i' => \$number_top_hits,
             'no_chimera' => \$no_chimera_flag,
             'SAM' => \$SAM_flag,
    );

# Both --genome and --transcripts are required.
unless ($genome && $transcriptDB) {
    die $usage;
}
# Build the GMAP index for the genome (once) and run the alignment.
main: {
    my $genomeName = basename($genome);
    my $genomeDir = $genomeName . ".gmap";
    my $genomeBaseDir = dirname($genome);
    my $cwd = cwd();   # NOTE(review): unused in this block -- confirm removable.

    # Build the GMAP database next to the genome fasta, only if missing.
    unless (-d "$genomeBaseDir/$genomeDir") {
        #my $cmd = "gmap_build -D $genomeBaseDir -d $genomeBaseDir/$genomeDir -k 13 $genome >&2";
        my $cmd = "gmap_build -D $genomeBaseDir -T $genomeBaseDir -d $genomeDir -k 13 $genome >&2";
        &process_cmd($cmd);
    }

    ## run GMAP
    # With chimera reporting enabled and a single requested hit, pass
    # -n 0 so GMAP may emit both pieces of a chimeric alignment.
    my $num_gmap_top_hits = $number_top_hits;
    if ((! $no_chimera_flag) && $num_gmap_top_hits == 1) {
        $num_gmap_top_hits = 0; # reports two hits if chimera with this setting.
    }

    # Output format: "samse" (SAM, single-end) or gmap format "3" (gff3-style).
    my $format = ($SAM_flag) ? "samse" : "3";
    my $cmd = "gmap -D $genomeBaseDir -d $genomeDir $transcriptDB -f $format -n $num_gmap_top_hits -x 50 -t $CPU -B 5 ";
    if ($max_intron) {
        $cmd .= " --intronlength=$max_intron ";
    }

    &process_cmd($cmd);
    exit(0);
}
####
sub process_cmd {
my ($cmd) = @_;
print STDERR "CMD: $cmd\n";
#return;
my $ret = system($cmd);
if ($ret) {
die "Error, cmd: $cmd died with ret ($ret)";
}
return;
}
| HPCHub/trinityrnaseq | util/misc/process_GMAP_alignments_gff3_chimeras_ok.pl | Perl | bsd-3-clause | 2,420 |
#############################################################################
# Pod/InputObjects.pm -- package which defines objects for input streams
# and paragraphs and commands when parsing POD docs.
#
# Copyright (C) 1996-2000 by Bradford Appleton. All rights reserved.
# This file is part of "PodParser". PodParser is free software;
# you can redistribute it and/or modify it under the same terms
# as Perl itself.
#############################################################################
package Pod::InputObjects;
use strict;
# `use vars` (not `our`) is intentional: this module still targets
# Perl 5.005 (see the `require` below), which predates `our`.
use vars qw($VERSION);
$VERSION = '1.51'; ## Current version of this package
require 5.005; ## requires this Perl version or later
#############################################################################
=head1 NAME
Pod::InputObjects - objects representing POD input paragraphs, commands, etc.
=head1 SYNOPSIS
use Pod::InputObjects;
=head1 REQUIRES
perl5.004, Carp
=head1 EXPORTS
Nothing.
=head1 DESCRIPTION
This module defines some basic input objects used by B<Pod::Parser> when
reading and parsing POD text from an input source. The following objects
are defined:
=begin __PRIVATE__
=over 4
=item package B<Pod::InputSource>
An object corresponding to a source of POD input text. It is mostly a
wrapper around a filehandle or C<IO::Handle>-type object (or anything
that implements the C<getline()> method) which keeps track of some
additional information relevant to the parsing of PODs.
=back
=end __PRIVATE__
=over 4
=item package B<Pod::Paragraph>
An object corresponding to a paragraph of POD input text. It may be a
plain paragraph, a verbatim paragraph, or a command paragraph (see
L<perlpod>).
=item package B<Pod::InteriorSequence>
An object corresponding to an interior sequence command from the POD
input text (see L<perlpod>).
=item package B<Pod::ParseTree>
An object corresponding to a tree of parsed POD text. Each "node" in
a parse-tree (or I<ptree>) is either a text-string or a reference to
a B<Pod::InteriorSequence> object. The nodes appear in the parse-tree
in the order in which they were parsed from left-to-right.
=back
Each of these input objects are described in further detail in the
sections which follow.
=cut
#############################################################################
package Pod::InputSource;
##---------------------------------------------------------------------------
=begin __PRIVATE__
=head1 B<Pod::InputSource>
This object corresponds to an input source or stream of POD
documentation. When parsing PODs, it is necessary to associate and store
certain context information with each input source. All of this
information is kept together with the stream itself in one of these
C<Pod::InputSource> objects. Each such object is merely a wrapper around
an C<IO::Handle> object of some kind (or at least something that
implements the C<getline()> method). They have the following
methods/attributes:
=end __PRIVATE__
=cut
##---------------------------------------------------------------------------
=begin __PRIVATE__
=head2 B<new()>
my $pod_input1 = Pod::InputSource->new(-handle => $filehandle);
my $pod_input2 = new Pod::InputSource(-handle => $filehandle,
-name => $name);
my $pod_input3 = new Pod::InputSource(-handle => \*STDIN);
my $pod_input4 = Pod::InputSource->new(-handle => \*STDIN,
-name => "(STDIN)");
This is a class method that constructs a C<Pod::InputSource> object and
returns a reference to the new input source object. It takes one or more
keyword arguments in the form of a hash. The keyword C<-handle> is
required and designates the corresponding input handle. The keyword
C<-name> is optional and specifies the name associated with the input
handle (typically a file name).
=end __PRIVATE__
=cut
# Construct a Pod::InputSource (usable as class or instance method).
# Accepts keyword args (-name, -handle, -was_cutting) as a flat list.
sub new {
    my $this  = shift;
    my $class = ref($this) || $this;

    # Defaults come first so caller-supplied key/value pairs override them.
    my $self = {
        -name        => '(unknown)',
        -handle      => undef,
        -was_cutting => 0,
        @_,
    };

    return bless $self, $class;
}
##---------------------------------------------------------------------------
=begin __PRIVATE__
=head2 B<name()>
my $filename = $pod_input->name();
$pod_input->name($new_filename_to_use);
This method gets/sets the name of the input source (usually a filename).
If no argument is given, it returns a string containing the name of
the input source; otherwise it sets the name of the input source to the
contents of the given argument.
=end __PRIVATE__
=cut
# Get/set the name of this input source (usually a filename).
sub name {
    my $self = shift;
    $self->{'-name'} = shift if @_;
    return $self->{'-name'};
}

## allow 'filename' as an alias for 'name'
*filename = \&name;
##---------------------------------------------------------------------------
=begin __PRIVATE__
=head2 B<handle()>
my $handle = $pod_input->handle();
Returns a reference to the handle object from which input is read (the
one used to construct this input source object).
=end __PRIVATE__
=cut
# Read-only accessor for the underlying input handle object.
sub handle {
    my ($self) = @_;
    return $self->{'-handle'};
}
##---------------------------------------------------------------------------
=begin __PRIVATE__
=head2 B<was_cutting()>
print "Yes.\n" if ($pod_input->was_cutting());
The value of the C<cutting> state (that the B<cutting()> method would
have returned) immediately before any input was read from this input
stream. After all input from this stream has been read, the C<cutting>
state is restored to this value.
=end __PRIVATE__
=cut
# Get/set the `cutting` state recorded before reading from this stream.
sub was_cutting {
    my $self = shift;
    $self->{'-was_cutting'} = shift if @_;
    return $self->{'-was_cutting'};
}
##---------------------------------------------------------------------------
#############################################################################
package Pod::Paragraph;
##---------------------------------------------------------------------------
=head1 B<Pod::Paragraph>
An object representing a paragraph of POD input text.
It has the following methods/attributes:
=cut
##---------------------------------------------------------------------------
=head2 Pod::Paragraph-E<gt>B<new()>
my $pod_para1 = Pod::Paragraph->new(-text => $text);
my $pod_para2 = Pod::Paragraph->new(-name => $cmd,
-text => $text);
my $pod_para3 = new Pod::Paragraph(-text => $text);
my $pod_para4 = new Pod::Paragraph(-name => $cmd,
-text => $text);
my $pod_para5 = Pod::Paragraph->new(-name => $cmd,
-text => $text,
-file => $filename,
-line => $line_number);
This is a class method that constructs a C<Pod::Paragraph> object and
returns a reference to the new paragraph object. It may be given one or
two keyword arguments. The C<-text> keyword indicates the corresponding
text of the POD paragraph. The C<-name> keyword indicates the name of
the corresponding POD command, such as C<head1> or C<item> (it should
I<not> contain the C<=> prefix); this is needed only if the POD
paragraph corresponds to a command paragraph. The C<-file> and C<-line>
keywords indicate the filename and line number corresponding to the
beginning of the paragraph.
=cut
# Construct a Pod::Paragraph. A single non-keyword argument is taken
# as the paragraph text; otherwise keyword pairs (-name, -text, -file,
# -line, ...) override the defaults below.
sub new {
    my $this  = shift;
    my $class = ref($this) || $this;

    # A lone argument is shorthand for `-text => ...`.
    my $text = (@_ == 1) ? shift : undef;

    my $self = {
        -name      => undef,
        -text      => $text,
        -file      => '<unknown-file>',
        -line      => 0,
        -prefix    => '=',
        -separator => ' ',
        -ptree     => [],
        @_,
    };

    return bless $self, $class;
}
##---------------------------------------------------------------------------
=head2 $pod_para-E<gt>B<cmd_name()>
my $para_cmd = $pod_para->cmd_name();
If this paragraph is a command paragraph, then this method will return
the name of the command (I<without> any leading C<=> prefix).
=cut
# Get/set the command name (without the leading '=') of this paragraph.
sub cmd_name {
    my $self = shift;
    $self->{'-name'} = shift if @_;
    return $self->{'-name'};
}

## let name() be an alias for cmd_name()
*name = \&cmd_name;
##---------------------------------------------------------------------------
=head2 $pod_para-E<gt>B<text()>
my $para_text = $pod_para->text();
This method will return the corresponding text of the paragraph.
=cut
# Get/set the paragraph's text.
sub text {
    my $self = shift;
    $self->{'-text'} = shift if @_;
    return $self->{'-text'};
}
##---------------------------------------------------------------------------
=head2 $pod_para-E<gt>B<raw_text()>
my $raw_pod_para = $pod_para->raw_text();
This method will return the I<raw> text of the POD paragraph, exactly
as it appeared in the input.
=cut
# Reconstruct the paragraph exactly as it appeared in the input:
# plain paragraphs are just their text, command paragraphs are
# prefix + name + separator + text.
sub raw_text {
    my $self = shift;
    return $self->{'-text'} unless defined $self->{'-name'};
    return join '', @{$self}{qw(-prefix -name -separator -text)};
}
##---------------------------------------------------------------------------
=head2 $pod_para-E<gt>B<cmd_prefix()>
my $prefix = $pod_para->cmd_prefix();
If this paragraph is a command paragraph, then this method will return
the prefix used to denote the command (which should be the string "="
or "==").
=cut
# The prefix ("=" or "==") that introduced this command paragraph.
sub cmd_prefix {
    my ($self) = @_;
    return $self->{'-prefix'};
}
##---------------------------------------------------------------------------
=head2 $pod_para-E<gt>B<cmd_separator()>
my $separator = $pod_para->cmd_separator();
If this paragraph is a command paragraph, then this method will return
the text used to separate the command name from the rest of the
paragraph (if any).
=cut
# The text separating the command name from the rest of the paragraph.
sub cmd_separator {
    my ($self) = @_;
    return $self->{'-separator'};
}
##---------------------------------------------------------------------------
=head2 $pod_para-E<gt>B<parse_tree()>
my $ptree = $pod_parser->parse_text( $pod_para->text() );
$pod_para->parse_tree( $ptree );
$ptree = $pod_para->parse_tree();
This method will get/set the corresponding parse-tree of the paragraph's text.
=cut
# Get/set the parse-tree corresponding to this paragraph's text.
sub parse_tree {
    my $self = shift;
    $self->{'-ptree'} = shift if @_;
    return $self->{'-ptree'};
}

## let ptree() be an alias for parse_tree()
*ptree = \&parse_tree;
##---------------------------------------------------------------------------
=head2 $pod_para-E<gt>B<file_line()>
my ($filename, $line_number) = $pod_para->file_line();
my $position = $pod_para->file_line();
Returns the current filename and line number for the paragraph
object. If called in a list context, it returns a list of two
elements: first the filename, then the line number. If called in
a scalar context, it returns a string containing the filename, followed
by a colon (':'), followed by the line number.
=cut
# Return the (filename, line) pair in list context, or "file:line"
# in scalar context; missing fields fall back to placeholders.
sub file_line {
    my $self = shift;
    my $file = $self->{'-file'} || '<unknown-file>';
    my $line = $self->{'-line'} || 0;
    return wantarray ? ($file, $line) : "$file:$line";
}
##---------------------------------------------------------------------------
#############################################################################
package Pod::InteriorSequence;
##---------------------------------------------------------------------------
=head1 B<Pod::InteriorSequence>
An object representing a POD interior sequence command.
It has the following methods/attributes:
=cut
##---------------------------------------------------------------------------
=head2 Pod::InteriorSequence-E<gt>B<new()>
my $pod_seq1 = Pod::InteriorSequence->new(-name => $cmd
-ldelim => $delimiter);
my $pod_seq2 = new Pod::InteriorSequence(-name => $cmd,
-ldelim => $delimiter);
my $pod_seq3 = new Pod::InteriorSequence(-name => $cmd,
-ldelim => $delimiter,
-file => $filename,
-line => $line_number);
my $pod_seq4 = new Pod::InteriorSequence(-name => $cmd, $ptree);
my $pod_seq5 = new Pod::InteriorSequence($cmd, $ptree);
This is a class method that constructs a C<Pod::InteriorSequence> object
and returns a reference to the new interior sequence object. It should
be given two keyword arguments. The C<-ldelim> keyword indicates the
corresponding left-delimiter of the interior sequence (e.g. 'E<lt>').
The C<-name> keyword indicates the name of the corresponding interior
sequence command, such as C<I> or C<B> or C<C>. The C<-file> and
C<-line> keywords indicate the filename and line number corresponding
to the beginning of the interior sequence. If the C<$ptree> argument is
given, it must be the last argument, and it must be either a string, or
else an array-ref suitable for passing to B<Pod::ParseTree::new> (or
it may be a reference to a Pod::ParseTree object).
=cut
# Construct a Pod::InteriorSequence. Supports implicit arguments:
# a leading non-keyword is the -name, and a trailing odd argument is
# the -ptree (a string, an array-ref, or a Pod::ParseTree).
sub new {
    ## Determine if we were called via an object-ref or a classname
    my $this = shift;
    my $class = ref($this) || $this;

    ## See if first argument has no keyword
    if (((@_ <= 2) or (@_ % 2)) and $_[0] !~ /^-\w/) {
        ## Yup - need an implicit '-name' before first parameter
        unshift @_, '-name';
    }

    ## See if odd number of args
    if ((@_ % 2) != 0) {
        ## Yup - need an implicit '-ptree' before the last parameter
        splice @_, $#_, 0, '-ptree';
    }

    ## Any remaining arguments are treated as initial values for the
    ## hash that is used to represent this object. Note that we default
    ## certain values by specifying them *before* the arguments passed.
    ## If they are in the argument list, they will override the defaults.
    my $self = {
        -name   => (@_ == 1) ? $_[0] : undef,
        -file   => '<unknown-file>',
        -line   => 0,
        -ldelim => '<',
        -rdelim => '>',
        @_
    };

    ## Initialize contents if they haven't been already
    my $ptree = $self->{'-ptree'} || new Pod::ParseTree();
    if ( ref $ptree =~ /^(ARRAY)?$/ ) {
        ## We have an array-ref or a plain scalar. An array-ref is
        ## already suitable for the ptree-constructor; a plain scalar
        ## must be wrapped in an array-ref so its text becomes the
        ## tree's single child. (The two cases were previously swapped,
        ## which dropped a scalar's text entirely and nested an
        ## array-ref one level too deep.)
        $ptree = new Pod::ParseTree($1 ? $ptree : [$ptree]);
    }
    $self->{'-ptree'} = $ptree;

    ## Bless ourselves into the desired class and perform any initialization
    bless $self, $class;
    return $self;
}
##---------------------------------------------------------------------------
=head2 $pod_seq-E<gt>B<cmd_name()>
my $seq_cmd = $pod_seq->cmd_name();
The name of the interior sequence command.
=cut
# Get/set the name of this interior sequence command (e.g. 'B', 'I').
sub cmd_name {
    my $self = shift;
    $self->{'-name'} = shift if @_;
    return $self->{'-name'};
}

## let name() be an alias for cmd_name()
*name = \&cmd_name;
##---------------------------------------------------------------------------
## Private subroutine to set the parent pointer of all the given
## children that are interior-sequences to be $self
sub _set_child2parent_links {
my ($self, @children) = @_;
## Make sure any sequences know who their parent is
for (@children) {
## Skip empty strings, plain scalars, and scalar-refs: only
## container-like refs can be (or mimic) interior sequences.
next unless (length and ref and ref ne 'SCALAR');
## Accept real Pod::InteriorSequence objects, or duck-type on any
## object that can('nested') so subclasses/third-party nodes work.
if (UNIVERSAL::isa($_, 'Pod::InteriorSequence') or
UNIVERSAL::can($_, 'nested'))
{
$_->nested($self);
}
}
}
## Private subroutine to unset child->parent links
## Drops our own parent pointer, then recurses into any interior
## sequences among our children so the whole subtree is detached and
## reference counts can fall to zero (see DESTROY below).
sub _unset_child2parent_links {
my $self = shift;
$self->{'-parent_sequence'} = undef;
my $ptree = $self->{'-ptree'};
for (@$ptree) {
## Only container-like refs can be interior sequences.
next unless (length and ref and ref ne 'SCALAR');
$_->_unset_child2parent_links()
if UNIVERSAL::isa($_, 'Pod::InteriorSequence');
}
}
##---------------------------------------------------------------------------
=head2 $pod_seq-E<gt>B<prepend()>
$pod_seq->prepend($text);
$pod_seq1->prepend($pod_seq2);
Prepends the given string or parse-tree or sequence object to the parse-tree
of this interior sequence.
=cut
# Prepend text/trees/sequences to this sequence's parse-tree and
# claim parenthood of any sequence children. Returns $self.
sub prepend {
    my ($self, @items) = @_;
    $self->{'-ptree'}->prepend(@items);
    _set_child2parent_links($self, @items);
    return $self;
}
##---------------------------------------------------------------------------
=head2 $pod_seq-E<gt>B<append()>
$pod_seq->append($text);
$pod_seq1->append($pod_seq2);
Appends the given string or parse-tree or sequence object to the parse-tree
of this interior sequence.
=cut
# Append text/trees/sequences to this sequence's parse-tree and
# claim parenthood of any sequence children. Returns $self.
sub append {
    my ($self, @items) = @_;
    $self->{'-ptree'}->append(@items);
    _set_child2parent_links($self, @items);
    return $self;
}
##---------------------------------------------------------------------------
=head2 $pod_seq-E<gt>B<nested()>
$outer_seq = $pod_seq->nested || print "not nested";
If this interior sequence is nested inside of another interior
sequence, then the outer/parent sequence that contains it is
returned. Otherwise C<undef> is returned.
=cut
# Get/set the enclosing (parent) interior sequence; undef when this
# sequence is not nested inside another one.
sub nested {
    my ($self, @arg) = @_;
    if (@arg == 1) {
        $self->{'-parent_sequence'} = $arg[0];
    }
    my $parent = $self->{'-parent_sequence'};
    return $parent ? $parent : undef;
}
##---------------------------------------------------------------------------
=head2 $pod_seq-E<gt>B<raw_text()>
my $seq_raw_text = $pod_seq->raw_text();
This method will return the I<raw> text of the POD interior sequence,
exactly as it appeared in the input.
=cut
# Reconstruct the sequence exactly as it appeared in the input:
# name, left delimiter, flattened children, right delimiter.
sub raw_text {
    my $self = shift;
    my $body = join '',
        map { ref($_) ? $_->raw_text : $_ } $self->{'-ptree'}->children;
    return $self->{'-name'} . $self->{'-ldelim'} . $body . $self->{'-rdelim'};
}
##---------------------------------------------------------------------------
=head2 $pod_seq-E<gt>B<left_delimiter()>
my $ldelim = $pod_seq->left_delimiter();
The leftmost delimiter beginning the argument text to the interior
sequence (should be "<").
=cut
# Get/set the left delimiter of the sequence's argument text ("<").
sub left_delimiter {
    my $self = shift;
    $self->{'-ldelim'} = shift if @_;
    return $self->{'-ldelim'};
}

## let ldelim() be an alias for left_delimiter()
*ldelim = \&left_delimiter;
##---------------------------------------------------------------------------
=head2 $pod_seq-E<gt>B<right_delimiter()>
The rightmost delimiter ending the argument text to the interior
sequence (should be ">").
=cut
# Get/set the right delimiter of the sequence's argument text (">").
sub right_delimiter {
    my $self = shift;
    $self->{'-rdelim'} = shift if @_;
    return $self->{'-rdelim'};
}

## let rdelim() be an alias for right_delimiter()
*rdelim = \&right_delimiter;
##---------------------------------------------------------------------------
=head2 $pod_seq-E<gt>B<parse_tree()>
my $ptree = $pod_parser->parse_text($paragraph_text);
$pod_seq->parse_tree( $ptree );
$ptree = $pod_seq->parse_tree();
This method will get/set the corresponding parse-tree of the interior
sequence's text.
=cut
# Get/set the parse-tree holding this sequence's argument text.
sub parse_tree {
    my $self = shift;
    $self->{'-ptree'} = shift if @_;
    return $self->{'-ptree'};
}

## let ptree() be an alias for parse_tree()
*ptree = \&parse_tree;
##---------------------------------------------------------------------------
=head2 $pod_seq-E<gt>B<file_line()>
my ($filename, $line_number) = $pod_seq->file_line();
my $position = $pod_seq->file_line();
Returns the current filename and line number for the interior sequence
object. If called in a list context, it returns a list of two
elements: first the filename, then the line number. If called in
a scalar context, it returns a string containing the filename, followed
by a colon (':'), followed by the line number.
=cut
# Return the (filename, line) pair in list context, or "file:line"
# in scalar context; missing fields fall back to placeholders.
sub file_line {
    my $self = shift;
    my $file = $self->{'-file'} || '<unknown-file>';
    my $line = $self->{'-line'} || 0;
    return wantarray ? ($file, $line) : "$file:$line";
}
##---------------------------------------------------------------------------
=head2 Pod::InteriorSequence::B<DESTROY()>
This method performs any necessary cleanup for the interior-sequence.
If you override this method then it is B<imperative> that you invoke
the parent method from within your own method, otherwise
I<interior-sequence storage will not be reclaimed upon destruction!>
=cut
## Destructor: invoked automatically when the sequence is reclaimed.
sub DESTROY {
## We need to get rid of all child->parent pointers throughout the
## tree so their reference counts will go to zero and they can be
## garbage-collected
_unset_child2parent_links(@_);
}
##---------------------------------------------------------------------------
#############################################################################
package Pod::ParseTree;
##---------------------------------------------------------------------------
=head1 B<Pod::ParseTree>
This object corresponds to a tree of parsed POD text. As POD text is
scanned from left to right, it is parsed into an ordered list of
text-strings and B<Pod::InteriorSequence> objects (in order of
appearance). A B<Pod::ParseTree> object corresponds to this list of
strings and sequences. Each interior sequence in the parse-tree may
itself contain a parse-tree (since interior sequences may be nested).
=cut
##---------------------------------------------------------------------------
=head2 Pod::ParseTree-E<gt>B<new()>
my $ptree1 = Pod::ParseTree->new;
my $ptree2 = new Pod::ParseTree;
my $ptree4 = Pod::ParseTree->new($array_ref);
my $ptree3 = new Pod::ParseTree($array_ref);
This is a class method that constructs a C<Pod::ParseTree> object and
returns a reference to the new parse-tree. If a single-argument is given,
it must be a reference to an array, and is used to initialize the root
(top) of the parse tree.
=cut
# Construct a Pod::ParseTree. An optional single array-ref argument
# becomes the tree itself (its elements are the initial children).
sub new {
    my $this  = shift;
    my $class = ref($this) || $this;

    my $self = (@_ == 1 && ref $_[0]) ? $_[0] : [];

    return bless $self, $class;
}
##---------------------------------------------------------------------------
=head2 $ptree-E<gt>B<top()>
my $top_node = $ptree->top();
$ptree->top( $top_node );
$ptree->top( @children );
This method gets/sets the top node of the parse-tree. If no arguments are
given, it returns the topmost node in the tree (the root), which is also
a B<Pod::ParseTree>. If it is given a single argument that is a reference,
then the reference is assumed to be a parse-tree and becomes the new top node.
Otherwise, if arguments are given, they are treated as the new list of
children for the top node.
=cut
# Get/set the top node of the parse-tree. With no arguments, returns
# the tree itself. A single reference argument supplies a whole new
# parse-tree; otherwise the arguments become the new list of children.
sub top {
    my $self = shift;
    if (@_ > 0) {
        ## (Was `${ @_ }`, a symbolic dereference that dies under the
        ## file-wide `use strict` and never copied the caller's tree.)
        @{ $self } = (@_ == 1 and ref $_[0]) ? @{ $_[0] } : @_;
    }
    return $self;
}
## let parse_tree() & ptree() be aliases for the 'top' method
## (the `\&top;` below was previously mojibaked to `\⊤` -- the HTML
## entity rendering of `&top;`)
*parse_tree = *ptree = \&top;
##---------------------------------------------------------------------------
=head2 $ptree-E<gt>B<children()>
This method gets/sets the children of the top node in the parse-tree.
If no arguments are given, it returns the list (array) of children
(each of which should be either a string or a B<Pod::InteriorSequence>).
Otherwise, if arguments are given, they are treated as the new list of
children for the top node.
=cut
# Get/set the children of the top node. With no arguments, returns the
# list of children (strings and/or Pod::InteriorSequence objects).
# A single reference argument replaces the whole child list; otherwise
# the arguments themselves become the new children.
sub children {
    my $self = shift;
    if (@_ > 0) {
        ## (Was `${ @_ }`, a symbolic dereference that is fatal under
        ## the file-wide `use strict 'refs'`.)
        @{ $self } = (@_ == 1 and ref $_[0]) ? @{ $_[0] } : @_;
    }
    return @{ $self };
}
##---------------------------------------------------------------------------
=head2 $ptree-E<gt>B<prepend()>
This method prepends the given text or parse-tree to the current parse-tree.
If the first item on the parse-tree is text and the argument is also text,
then the text is prepended to the first item (not added as a separate string).
Otherwise the argument is added as a new string or parse-tree I<before>
the current one.
=cut
use vars qw(@ptree); ## an alias used for performance reasons

# Prepend items to this tree. Leading text merges into an existing
# leading text node; anything else (refs, or text before a ref) is
# unshifted as a new node. Empty/undef items are skipped.
sub prepend {
    my $self = shift;
    for my $item (@_) {
        next unless length $item;
        if (@$self and !ref($self->[0]) and !ref($item)) {
            $self->[0] = $item . $self->[0];
        }
        else {
            unshift @$self, $item;
        }
    }
}
##---------------------------------------------------------------------------
=head2 $ptree-E<gt>B<append()>
This method appends the given text or parse-tree to the current parse-tree.
If the last item on the parse-tree is text and the argument is also text,
then the text is appended to the last item (not added as a separate string).
Otherwise the argument is added as a new string or parse-tree I<after>
the current one.
=cut
# Append items to this tree. Text merges into a trailing text node
# *if the tree already ended in text when append() was called* (the
# flag is computed once, deliberately, matching historic behaviour);
# refs are always pushed; empty/undef items are skipped.
sub append {
    my $self = shift;
    my $tail_is_text = @$self && !ref($self->[-1]);
    for my $item (@_) {
        if (ref $item) {
            push @$self, $item;
        }
        elsif (!length $item) {
            next;
        }
        elsif ($tail_is_text) {
            $self->[-1] .= $item;
        }
        else {
            push @$self, $item;
        }
    }
}
=head2 $ptree-E<gt>B<raw_text()>
my $ptree_raw_text = $ptree->raw_text();
This method will return the I<raw> text of the POD parse-tree
exactly as it appeared in the input.
=cut
# Flatten the tree back to its raw input text: strings verbatim,
# interior sequences via their own raw_text().
sub raw_text {
    my $self = shift;
    return join '', map { ref($_) ? $_->raw_text : $_ } @$self;
}
##---------------------------------------------------------------------------
## Private routines to set/unset child->parent links
## Recursively detach parent pointers from any interior sequences in
## this tree so reference counts can drop to zero (see DESTROY below).
sub _unset_child2parent_links {
my $self = shift;
## Alias the package array @ptree to our own array for the loop.
local *ptree = $self;
for (@ptree) {
## Only container-like refs can be interior sequences.
next unless (defined and length and ref and ref ne 'SCALAR');
$_->_unset_child2parent_links()
if UNIVERSAL::isa($_, 'Pod::InteriorSequence');
}
}
## Intentional no-op, kept so ParseTree and InteriorSequence share a
## common private interface.
sub _set_child2parent_links {
## nothing to do, Pod::ParseTrees can't have parent pointers
}
=head2 Pod::ParseTree::B<DESTROY()>
This method performs any necessary cleanup for the parse-tree.
If you override this method then it is B<imperative>
that you invoke the parent method from within your own method,
otherwise I<parse-tree storage will not be reclaimed upon destruction!>
=cut
## Destructor: invoked automatically when the tree is reclaimed.
sub DESTROY {
## We need to get rid of all child->parent pointers throughout the
## tree so their reference counts will go to zero and they can be
## garbage-collected
_unset_child2parent_links(@_);
}
#############################################################################
=head1 SEE ALSO
B<Pod::InputObjects> is part of the L<Pod::Parser> distribution.
See L<Pod::Parser>, L<Pod::Select>
=head1 AUTHOR
Please report bugs using L<http://rt.cpan.org>.
Brad Appleton E<lt>bradapp@enteract.comE<gt>
=cut
1;
| leighpauls/k2cro4 | third_party/perl/perl/lib/Pod/InputObjects.pm | Perl | bsd-3-clause | 27,347 |
###########################################################################
#
# This file is auto-generated by the Perl DateTime Suite locale
# generator (0.05). This code generator comes with the
# DateTime::Locale distribution in the tools/ directory, and is called
# generate-from-cldr.
#
# This file as generated from the CLDR XML locale data. See the
# LICENSE.cldr file included in this distribution for license details.
#
# This file was generated from the source file root.xml
# The source file version number was 1.192, generated on
# 2009/06/15 21:39:59.
#
# Do not edit this file directly.
#
###########################################################################
package DateTime::Locale::root;
use strict;
use warnings;
use utf8;
use base 'DateTime::Locale::Base';
# NOTE: this file is auto-generated from CLDR data (see the header at
# the top of the file); regenerate rather than editing by hand.
# Pattern used throughout: each bare block below closes over a single
# shared datum, so every call to its accessor returns the *same*
# scalar/reference.
sub cldr_version { return "1\.7\.1" }
{
my $am_pm_abbreviated = [ "AM", "PM" ];
sub am_pm_abbreviated { return $am_pm_abbreviated }
}
{
my $date_format_full = "EEEE\,\ y\ MMMM\ dd";
sub date_format_full { return $date_format_full }
}
{
my $date_format_long = "y\ MMMM\ d";
sub date_format_long { return $date_format_long }
}
{
my $date_format_medium = "y\ MMM\ d";
sub date_format_medium { return $date_format_medium }
}
{
my $date_format_short = "yyyy\-MM\-dd";
sub date_format_short { return $date_format_short }
}
{
my $datetime_format = "\{1\}\ \{0\}";
sub datetime_format { return $datetime_format }
}
# The "root" locale has no language data, so day/month names are just
# their CLDR numeric codes.
{
my $day_format_abbreviated = [ "2", "3", "4", "5", "6", "7", "1" ];
sub day_format_abbreviated { return $day_format_abbreviated }
}
{
my $day_format_narrow = [ "2", "3", "4", "5", "6", "7", "1" ];
sub day_format_narrow { return $day_format_narrow }
}
{
my $day_format_wide = [ "2", "3", "4", "5", "6", "7", "1" ];
sub day_format_wide { return $day_format_wide }
}
{
my $day_stand_alone_abbreviated = [ "2", "3", "4", "5", "6", "7", "1" ];
sub day_stand_alone_abbreviated { return $day_stand_alone_abbreviated }
}
{
my $day_stand_alone_narrow = [ "2", "3", "4", "5", "6", "7", "1" ];
sub day_stand_alone_narrow { return $day_stand_alone_narrow }
}
{
my $day_stand_alone_wide = [ "2", "3", "4", "5", "6", "7", "1" ];
sub day_stand_alone_wide { return $day_stand_alone_wide }
}
{
my $era_abbreviated = [ "BCE", "CE" ];
sub era_abbreviated { return $era_abbreviated }
}
{
my $era_narrow = [ "BCE", "CE" ];
sub era_narrow { return $era_narrow }
}
{
my $era_wide = [ "BCE", "CE" ];
sub era_wide { return $era_wide }
}
{
my $first_day_of_week = "1";
sub first_day_of_week { return $first_day_of_week }
}
{
my $month_format_abbreviated = [ "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12" ];
sub month_format_abbreviated { return $month_format_abbreviated }
}
{
my $month_format_narrow = [ "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12" ];
sub month_format_narrow { return $month_format_narrow }
}
{
my $month_format_wide = [ "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12" ];
sub month_format_wide { return $month_format_wide }
}
{
my $month_stand_alone_abbreviated = [ "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12" ];
sub month_stand_alone_abbreviated { return $month_stand_alone_abbreviated }
}
{
my $month_stand_alone_narrow = [ "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12" ];
sub month_stand_alone_narrow { return $month_stand_alone_narrow }
}
{
my $month_stand_alone_wide = [ "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12" ];
sub month_stand_alone_wide { return $month_stand_alone_wide }
}
{
my $quarter_format_abbreviated = [ "Q1", "Q2", "Q3", "Q4" ];
sub quarter_format_abbreviated { return $quarter_format_abbreviated }
}
{
my $quarter_format_narrow = [ "1", "2", "3", "4" ];
sub quarter_format_narrow { return $quarter_format_narrow }
}
{
my $quarter_format_wide = [ "Q1", "Q2", "Q3", "Q4" ];
sub quarter_format_wide { return $quarter_format_wide }
}
{
my $quarter_stand_alone_abbreviated = [ "Q1", "Q2", "Q3", "Q4" ];
sub quarter_stand_alone_abbreviated { return $quarter_stand_alone_abbreviated }
}
{
my $quarter_stand_alone_narrow = [ "1", "2", "3", "4" ];
sub quarter_stand_alone_narrow { return $quarter_stand_alone_narrow }
}
{
my $quarter_stand_alone_wide = [ "Q1", "Q2", "Q3", "Q4" ];
sub quarter_stand_alone_wide { return $quarter_stand_alone_wide }
}
{
my $time_format_full = "HH\:mm\:ss\ zzzz";
sub time_format_full { return $time_format_full }
}
{
my $time_format_long = "HH\:mm\:ss\ z";
sub time_format_long { return $time_format_long }
}
{
my $time_format_medium = "HH\:mm\:ss";
sub time_format_medium { return $time_format_medium }
}
{
my $time_format_short = "HH\:mm";
sub time_format_short { return $time_format_short }
}
{
my $_default_date_format_length = "medium";
sub _default_date_format_length { return $_default_date_format_length }
}
{
my $_default_time_format_length = "medium";
sub _default_time_format_length { return $_default_time_format_length }
}
# CLDR "available formats": one accessor per skeleton, plus the
# combined hash in _available_formats below.
{
my $_format_for_EEEd = "d\ EEE";
sub _format_for_EEEd { return $_format_for_EEEd }
}
{
my $_format_for_Hm = "H\:mm";
sub _format_for_Hm { return $_format_for_Hm }
}
{
my $_format_for_Hms = "H\:mm\:ss";
sub _format_for_Hms { return $_format_for_Hms }
}
{
my $_format_for_M = "L";
sub _format_for_M { return $_format_for_M }
}
{
my $_format_for_MEd = "E\,\ M\-d";
sub _format_for_MEd { return $_format_for_MEd }
}
{
my $_format_for_MMM = "LLL";
sub _format_for_MMM { return $_format_for_MMM }
}
{
my $_format_for_MMMEd = "E\ MMM\ d";
sub _format_for_MMMEd { return $_format_for_MMMEd }
}
{
my $_format_for_MMMMEd = "E\ MMMM\ d";
sub _format_for_MMMMEd { return $_format_for_MMMMEd }
}
{
my $_format_for_MMMMd = "MMMM\ d";
sub _format_for_MMMMd { return $_format_for_MMMMd }
}
{
my $_format_for_MMMd = "MMM\ d";
sub _format_for_MMMd { return $_format_for_MMMd }
}
{
my $_format_for_Md = "M\-d";
sub _format_for_Md { return $_format_for_Md }
}
{
my $_format_for_d = "d";
sub _format_for_d { return $_format_for_d }
}
{
my $_format_for_hm = "h\:mm\ a";
sub _format_for_hm { return $_format_for_hm }
}
{
my $_format_for_hms = "h\:mm\:ss\ a";
sub _format_for_hms { return $_format_for_hms }
}
{
my $_format_for_ms = "mm\:ss";
sub _format_for_ms { return $_format_for_ms }
}
{
my $_format_for_y = "y";
sub _format_for_y { return $_format_for_y }
}
{
my $_format_for_yM = "y\-M";
sub _format_for_yM { return $_format_for_yM }
}
{
my $_format_for_yMEd = "EEE\,\ y\-M\-d";
sub _format_for_yMEd { return $_format_for_yMEd }
}
{
my $_format_for_yMMM = "y\ MMM";
sub _format_for_yMMM { return $_format_for_yMMM }
}
{
my $_format_for_yMMMEd = "EEE\,\ y\ MMM\ d";
sub _format_for_yMMMEd { return $_format_for_yMMMEd }
}
{
my $_format_for_yMMMM = "y\ MMMM";
sub _format_for_yMMMM { return $_format_for_yMMMM }
}
{
my $_format_for_yQ = "y\ Q";
sub _format_for_yQ { return $_format_for_yQ }
}
{
my $_format_for_yQQQ = "y\ QQQ";
sub _format_for_yQQQ { return $_format_for_yQQQ }
}
{
my $_available_formats =
{
"EEEd" => "d\ EEE",
"Hm" => "H\:mm",
"Hms" => "H\:mm\:ss",
"M" => "L",
"MEd" => "E\,\ M\-d",
"MMM" => "LLL",
"MMMEd" => "E\ MMM\ d",
"MMMMEd" => "E\ MMMM\ d",
"MMMMd" => "MMMM\ d",
"MMMd" => "MMM\ d",
"Md" => "M\-d",
"d" => "d",
"hm" => "h\:mm\ a",
"hms" => "h\:mm\:ss\ a",
"ms" => "mm\:ss",
"y" => "y",
"yM" => "y\-M",
"yMEd" => "EEE\,\ y\-M\-d",
"yMMM" => "y\ MMM",
"yMMMEd" => "EEE\,\ y\ MMM\ d",
"yMMMM" => "y\ MMMM",
"yQ" => "y\ Q",
"yQQQ" => "y\ QQQ"
};
sub _available_formats { return $_available_formats }
}
# strftime-style patterns for glibc-compatible consumers.
{
my $glibc_date_format = "\%m\/\%d\/\%y";
sub glibc_date_format { return $glibc_date_format }
}
{
my $glibc_date_1_format = "\%a\ \%b\ \%e\ \%H\:\%M\:\%S\ \%Z\ \%Y";
sub glibc_date_1_format { return $glibc_date_1_format }
}
{
my $glibc_datetime_format = "\%a\ \%b\ \%e\ \%H\:\%M\:\%S\ \%Y";
sub glibc_datetime_format { return $glibc_datetime_format }
}
{
my $glibc_time_format = "\%H\:\%M\:\%S";
sub glibc_time_format { return $glibc_time_format }
}
{
my $glibc_time_12_format = "\%I\:\%M\:\%S\ \%p";
sub glibc_time_12_format { return $glibc_time_12_format }
}
1;
__END__
=pod
=encoding utf8
=head1 NAME
DateTime::Locale::root
=head1 SYNOPSIS
use DateTime;
my $dt = DateTime->now( locale => 'root' );
print $dt->month_name();
=head1 DESCRIPTION
This is the DateTime locale package for Root.
=head1 DATA
It contains the following data.
=head2 Days
=head3 Wide (format)
2
3
4
5
6
7
1
=head3 Abbreviated (format)
2
3
4
5
6
7
1
=head3 Narrow (format)
2
3
4
5
6
7
1
=head3 Wide (stand-alone)
2
3
4
5
6
7
1
=head3 Abbreviated (stand-alone)
2
3
4
5
6
7
1
=head3 Narrow (stand-alone)
2
3
4
5
6
7
1
=head2 Months
=head3 Wide (format)
1
2
3
4
5
6
7
8
9
10
11
12
=head3 Abbreviated (format)
1
2
3
4
5
6
7
8
9
10
11
12
=head3 Narrow (format)
1
2
3
4
5
6
7
8
9
10
11
12
=head3 Wide (stand-alone)
1
2
3
4
5
6
7
8
9
10
11
12
=head3 Abbreviated (stand-alone)
1
2
3
4
5
6
7
8
9
10
11
12
=head3 Narrow (stand-alone)
1
2
3
4
5
6
7
8
9
10
11
12
=head2 Quarters
=head3 Wide (format)
Q1
Q2
Q3
Q4
=head3 Abbreviated (format)
Q1
Q2
Q3
Q4
=head3 Narrow (format)
1
2
3
4
=head3 Wide (stand-alone)
Q1
Q2
Q3
Q4
=head3 Abbreviated (stand-alone)
Q1
Q2
Q3
Q4
=head3 Narrow (stand-alone)
1
2
3
4
=head2 Eras
=head3 Wide
BCE
CE
=head3 Abbreviated
BCE
CE
=head3 Narrow
BCE
CE
=head2 Date Formats
=head3 Full
2008-02-05T18:30:30 = 3, 2008 2 05
1995-12-22T09:05:02 = 6, 1995 12 22
-0010-09-15T04:44:23 = 7, -10 9 15
=head3 Long
2008-02-05T18:30:30 = 2008 2 5
1995-12-22T09:05:02 = 1995 12 22
-0010-09-15T04:44:23 = -10 9 15
=head3 Medium
2008-02-05T18:30:30 = 2008 2 5
1995-12-22T09:05:02 = 1995 12 22
-0010-09-15T04:44:23 = -10 9 15
=head3 Short
2008-02-05T18:30:30 = 2008-02-05
1995-12-22T09:05:02 = 1995-12-22
-0010-09-15T04:44:23 = -010-09-15
=head3 Default
2008-02-05T18:30:30 = 2008 2 5
1995-12-22T09:05:02 = 1995 12 22
-0010-09-15T04:44:23 = -10 9 15
=head2 Time Formats
=head3 Full
2008-02-05T18:30:30 = 18:30:30 UTC
1995-12-22T09:05:02 = 09:05:02 UTC
-0010-09-15T04:44:23 = 04:44:23 UTC
=head3 Long
2008-02-05T18:30:30 = 18:30:30 UTC
1995-12-22T09:05:02 = 09:05:02 UTC
-0010-09-15T04:44:23 = 04:44:23 UTC
=head3 Medium
2008-02-05T18:30:30 = 18:30:30
1995-12-22T09:05:02 = 09:05:02
-0010-09-15T04:44:23 = 04:44:23
=head3 Short
2008-02-05T18:30:30 = 18:30
1995-12-22T09:05:02 = 09:05
-0010-09-15T04:44:23 = 04:44
=head3 Default
2008-02-05T18:30:30 = 18:30:30
1995-12-22T09:05:02 = 09:05:02
-0010-09-15T04:44:23 = 04:44:23
=head2 Datetime Formats
=head3 Full
2008-02-05T18:30:30 = 3, 2008 2 05 18:30:30 UTC
1995-12-22T09:05:02 = 6, 1995 12 22 09:05:02 UTC
-0010-09-15T04:44:23 = 7, -10 9 15 04:44:23 UTC
=head3 Long
2008-02-05T18:30:30 = 2008 2 5 18:30:30 UTC
1995-12-22T09:05:02 = 1995 12 22 09:05:02 UTC
-0010-09-15T04:44:23 = -10 9 15 04:44:23 UTC
=head3 Medium
2008-02-05T18:30:30 = 2008 2 5 18:30:30
1995-12-22T09:05:02 = 1995 12 22 09:05:02
-0010-09-15T04:44:23 = -10 9 15 04:44:23
=head3 Short
2008-02-05T18:30:30 = 2008-02-05 18:30
1995-12-22T09:05:02 = 1995-12-22 09:05
-0010-09-15T04:44:23 = -010-09-15 04:44
=head3 Default
2008-02-05T18:30:30 = 2008 2 5 18:30:30
1995-12-22T09:05:02 = 1995 12 22 09:05:02
-0010-09-15T04:44:23 = -10 9 15 04:44:23
=head2 Available Formats
=head3 d (d)
2008-02-05T18:30:30 = 5
1995-12-22T09:05:02 = 22
-0010-09-15T04:44:23 = 15
=head3 EEEd (d EEE)
2008-02-05T18:30:30 = 5 3
1995-12-22T09:05:02 = 22 6
-0010-09-15T04:44:23 = 15 7
=head3 Hm (H:mm)
2008-02-05T18:30:30 = 18:30
1995-12-22T09:05:02 = 9:05
-0010-09-15T04:44:23 = 4:44
=head3 hm (h:mm a)
2008-02-05T18:30:30 = 6:30 PM
1995-12-22T09:05:02 = 9:05 AM
-0010-09-15T04:44:23 = 4:44 AM
=head3 Hms (H:mm:ss)
2008-02-05T18:30:30 = 18:30:30
1995-12-22T09:05:02 = 9:05:02
-0010-09-15T04:44:23 = 4:44:23
=head3 hms (h:mm:ss a)
2008-02-05T18:30:30 = 6:30:30 PM
1995-12-22T09:05:02 = 9:05:02 AM
-0010-09-15T04:44:23 = 4:44:23 AM
=head3 M (L)
2008-02-05T18:30:30 = 2
1995-12-22T09:05:02 = 12
-0010-09-15T04:44:23 = 9
=head3 Md (M-d)
2008-02-05T18:30:30 = 2-5
1995-12-22T09:05:02 = 12-22
-0010-09-15T04:44:23 = 9-15
=head3 MEd (E, M-d)
2008-02-05T18:30:30 = 3, 2-5
1995-12-22T09:05:02 = 6, 12-22
-0010-09-15T04:44:23 = 7, 9-15
=head3 MMM (LLL)
2008-02-05T18:30:30 = 2
1995-12-22T09:05:02 = 12
-0010-09-15T04:44:23 = 9
=head3 MMMd (MMM d)
2008-02-05T18:30:30 = 2 5
1995-12-22T09:05:02 = 12 22
-0010-09-15T04:44:23 = 9 15
=head3 MMMEd (E MMM d)
2008-02-05T18:30:30 = 3 2 5
1995-12-22T09:05:02 = 6 12 22
-0010-09-15T04:44:23 = 7 9 15
=head3 MMMMd (MMMM d)
2008-02-05T18:30:30 = 2 5
1995-12-22T09:05:02 = 12 22
-0010-09-15T04:44:23 = 9 15
=head3 MMMMEd (E MMMM d)
2008-02-05T18:30:30 = 3 2 5
1995-12-22T09:05:02 = 6 12 22
-0010-09-15T04:44:23 = 7 9 15
=head3 ms (mm:ss)
2008-02-05T18:30:30 = 30:30
1995-12-22T09:05:02 = 05:02
-0010-09-15T04:44:23 = 44:23
=head3 y (y)
2008-02-05T18:30:30 = 2008
1995-12-22T09:05:02 = 1995
-0010-09-15T04:44:23 = -10
=head3 yM (y-M)
2008-02-05T18:30:30 = 2008-2
1995-12-22T09:05:02 = 1995-12
-0010-09-15T04:44:23 = -10-9
=head3 yMEd (EEE, y-M-d)
2008-02-05T18:30:30 = 3, 2008-2-5
1995-12-22T09:05:02 = 6, 1995-12-22
-0010-09-15T04:44:23 = 7, -10-9-15
=head3 yMMM (y MMM)
2008-02-05T18:30:30 = 2008 2
1995-12-22T09:05:02 = 1995 12
-0010-09-15T04:44:23 = -10 9
=head3 yMMMEd (EEE, y MMM d)
2008-02-05T18:30:30 = 3, 2008 2 5
1995-12-22T09:05:02 = 6, 1995 12 22
-0010-09-15T04:44:23 = 7, -10 9 15
=head3 yMMMM (y MMMM)
2008-02-05T18:30:30 = 2008 2
1995-12-22T09:05:02 = 1995 12
-0010-09-15T04:44:23 = -10 9
=head3 yQ (y Q)
2008-02-05T18:30:30 = 2008 1
1995-12-22T09:05:02 = 1995 4
-0010-09-15T04:44:23 = -10 3
=head3 yQQQ (y QQQ)
2008-02-05T18:30:30 = 2008 Q1
1995-12-22T09:05:02 = 1995 Q4
-0010-09-15T04:44:23 = -10 Q3
=head2 Miscellaneous
=head3 Prefers 24 hour time?
Yes
=head3 Local first day of the week
2
=head1 SUPPORT
See L<DateTime::Locale>.
=head1 AUTHOR
Dave Rolsky <autarch@urth.org>
=head1 COPYRIGHT
Copyright (c) 2008 David Rolsky. All rights reserved. This program is
free software; you can redistribute it and/or modify it under the same
terms as Perl itself.
This module was generated from data provided by the CLDR project, see
the LICENSE.cldr in this distribution for details on the CLDR data's
license.
=cut
| Dokaponteam/ITF_Project | xampp/perl/vendor/lib/DateTime/Locale/root.pm | Perl | mit | 15,529 |
# Pintos helper subroutines.

# Number of bytes available for the loader at the beginning of the MBR.
# Kernel command-line arguments follow the loader.
# (446 bytes precede the partition table; 128 of them hold the command
# line and 4 its argument count, leaving 314 for loader code -- see
# make_kernel_command_line below.)
our $LOADER_SIZE = 314;

# Partition types: MBR partition-type byte for each Pintos role.
# File-lexical: only the subroutines in this module can see these maps.
my (%role2type) = (KERNEL => 0x20,
                   FILESYS => 0x21,
                   SCRATCH => 0x22,
                   SWAP => 0x23);
my (%type2role) = reverse %role2type;    # type byte => role name

# Order of roles within a given disk.
our (@role_order) = qw (KERNEL FILESYS SCRATCH SWAP);

# Partitions.
#
# Valid keys are KERNEL, FILESYS, SCRATCH, SWAP.  Only those
# partitions which are in use are included.
#
# Each value is a reference to a hash.  If the partition's contents
# are to be obtained from a file (that will be copied into a new
# virtual disk), then the hash contains:
#
# FILE => name of file from which the partition's contents are copied
#         (perhaps "/dev/zero"),
# OFFSET => offset in bytes in FILE,
# BYTES => size in bytes of contents from FILE,
#
# If the partition is taken from a virtual disk directly, then it
# contains the following.  The same keys are also filled in once a
# file-based partition has been copied into a new virtual disk:
#
# DISK => name of virtual disk file,
# START => sector offset of start of partition within DISK,
# SECTORS => number of sectors of partition within DISK, which is usually
#            greater than round_up (BYTES, 512) due to padding.
our (%parts);
# set_part($opt, $arg)
#
# For use as a helper function for Getopt::Long::GetOptions to set
# disk sources.  $opt is the option name, e.g. "kernel", "kernel-from",
# or "kernel-size"; the suffix after the dash selects the source kind.
sub set_part {
    my ($opt, $arg) = @_;
    my ($role, $source) = $opt =~ /^([a-z]+)(?:-([a-z]+))?/ or die;
    $role = uc $role;

    # A bare option name (e.g. --kernel=FILE) means the argument names a
    # file.  do_set_part() accepts only the lowercase keywords 'file',
    # 'from', and 'size'; the old code passed uppercase 'FILE' (which
    # made do_set_part() die) and compared an undef $source with eq,
    # provoking an uninitialized-value warning.
    $source = 'file' if !defined ($source) || $source eq '';

    die "can't have two sources for \L$role\E partition"
      if exists $parts{$role};
    do_set_part ($role, $source, $arg);
}
# do_set_part($role, $source, $arg)
#
# Sets partition $role as coming from $source (one of 'file', 'from',
# or 'size').  $arg is a file name for 'file' or 'from', a size in
# megabytes for 'size'.
sub do_set_part {
    my ($role, $source, $arg) = @_;

    my ($p) = $parts{$role} = {};
    if ($source eq 'file') {
        # Warn if the "file" is itself a partitioned disk image; the
        # caller probably wanted --ROLE-from or --disk instead.
        if (read_mbr ($arg)) {
            print STDERR "warning: $arg looks like a partitioned disk ";
            print STDERR "(did you want --$role-from=$arg or --disk=$arg?)\n"
        }

        $p->{FILE} = $arg;
        $p->{OFFSET} = 0;
        $p->{BYTES} = -s $arg;
    } elsif ($source eq 'from') {
        # Extract the matching partition from an existing disk image.
        my (%pt) = read_partition_table ($arg);
        my ($sp) = $pt{$role};
        die "$arg: does not contain \L$role\E partition\n" if !defined $sp;

        $p->{FILE} = $arg;
        $p->{OFFSET} = $sp->{START} * 512;
        $p->{BYTES} = $sp->{SECTORS} * 512;
    } elsif ($source eq 'size') {
        # Anchor the whole string.  The original pattern,
        # /^\d+(\.\d+)?|\.\d+$/, alternated an anchored prefix with an
        # anchored suffix, so junk such as "5abc" or "x.5" passed.
        $arg =~ /^(\d+(\.\d+)?|\.\d+)$/ or die "$arg: not a valid size in MB\n";

        $p->{FILE} = "/dev/zero";
        $p->{OFFSET} = 0;
        $p->{BYTES} = ceil ($arg * 1024 * 1024);
    } else {
        die;
    }
}
# set_geometry('HEADS,SPT')
# set_geometry('zip')
#
# For use as a helper function for Getopt::Long::GetOptions to set
# disk geometry.  Stores the result in the file-global %geometry.
sub set_geometry {
    my $spec = $_[1];
    if ($spec eq 'zip') {
        # Standard Zip-disk geometry.
        @geometry{qw(H S)} = (64, 32);
    } else {
        # List assignment in boolean context is false when the match
        # yields no captures, so a malformed spec dies here.
        @geometry{qw(H S)} = $spec =~ /^(\d+)[,\s]+(\d+)$/
          or die "bad syntax for geometry\n";
        die "heads limited to 255\n" if $geometry{H} > 255;
        die "sectors per track limited to 63\n" if $geometry{S} > 63;
    }
}
# set_align('bochs|full|none')
#
# For use as a helper function for Getopt::Long::GetOptions to set
# partition alignment.  Stores the value in the file-global $align.
sub set_align {
    $align = $_[1];
    my %known = map { $_ => 1 } qw(bochs full none);
    die "unknown alignment type \"$align\"\n" if !$known{$align};
}
# assemble_disk(%args)
#
# Creates a virtual disk $args{DISK} containing the partitions
# described by @args{KERNEL, FILESYS, SCRATCH, SWAP}.
#
# Required arguments:
#   DISK => output disk file name
#   HANDLE => output file handle (will be closed)
#
# Normally at least one of the following is included:
#   KERNEL, FILESYS, SCRATCH, SWAP => {input:
#                                      FILE => file to read,
#                                      OFFSET => byte offset in file,
#                                      BYTES => byte count from file,
#
#                                      output:
#                                      DISK => output disk file name,
#                                      START => sector offset in DISK,
#                                      SECTORS => sector count in DISK},
#
# Optional arguments:
#   ALIGN => 'bochs' (default), 'full', or 'none'
#   GEOMETRY => {H => heads, S => sectors per track} (default 16, 63)
#   FORMAT => 'partitioned' (default) or 'raw'
#   LOADER => $LOADER_SIZE-byte string containing the loader binary
#   ARGS => ['arg 1', 'arg 2', ...]
sub assemble_disk {
    my (%args) = @_;

    # GEOMETRY is a hash *reference* (see above).  The previous code did
    # "my (%geometry) = $args{GEOMETRY} || (...)", which, when a caller
    # supplied a geometry, stored the reference itself as a hash key and
    # left $geometry{H}/$geometry{S} undefined.
    my (%geometry) = $args{GEOMETRY} ? %{ $args{GEOMETRY} } : (H => 16, S => 63);

    my ($align);    # Align partition start, end to cylinder boundary?
    my ($pad);      # Pad end of disk out to cylinder boundary?
    if (!defined ($args{ALIGN}) || $args{ALIGN} eq 'bochs') {
        $align = 0;
        $pad = 1;
    } elsif ($args{ALIGN} eq 'full') {
        $align = 1;
        $pad = 0;
    } elsif ($args{ALIGN} eq 'none') {
        $align = $pad = 0;
    } else {
        die;
    }

    my ($format) = $args{FORMAT} || 'partitioned';
    die if $format ne 'partitioned' && $format ne 'raw';

    # Check that we have partitions to copy in.
    my $part_cnt = grep (defined ($args{$_}), keys %role2type);
    die "must have exactly one partition for raw output\n"
      if $format eq 'raw' && $part_cnt != 1;

    # Calculate the disk size, assigning each partition its place.
    my ($total_sectors) = 0;
    if ($format eq 'partitioned') {
        # Reserve the MBR sector (or all of track 0 when aligning).
        $total_sectors += $align ? $geometry{S} : 1;
    }
    for my $role (@role_order) {
        my ($p) = $args{$role};
        next if !defined $p;
        die if $p->{DISK};

        my ($bytes) = $p->{BYTES};
        my ($start) = $total_sectors;
        my ($end) = $start + div_round_up ($bytes, 512);
        $end = round_up ($end, cyl_sectors (%geometry)) if $align;

        $p->{DISK} = $args{DISK};
        $p->{START} = $start;
        $p->{SECTORS} = $end - $start;
        $total_sectors = $end;
    }

    # Write the disk.
    my ($disk_fn) = $args{DISK};
    my ($disk) = $args{HANDLE};
    if ($format eq 'partitioned') {
        # Pack loader into MBR.  The default loader is just "int $0x18"
        # ("boot failure" on a PC BIOS).
        my ($loader) = $args{LOADER} || "\xcd\x18";
        my ($mbr) = pack ("a$LOADER_SIZE", $loader);
        $mbr .= make_kernel_command_line (@{$args{ARGS}});

        # Pack partition table into MBR.
        $mbr .= make_partition_table (\%geometry, \%args);

        # Add signature to MBR.
        $mbr .= pack ("v", 0xaa55);

        die if length ($mbr) != 512;
        write_fully ($disk, $disk_fn, $mbr);
        write_zeros ($disk, $disk_fn, 512 * ($geometry{S} - 1)) if $align;
    }
    for my $role (@role_order) {
        my ($p) = $args{$role};
        next if !defined $p;

        my ($source);
        my ($fn) = $p->{FILE};
        open ($source, '<', $fn) or die "$fn: open: $!\n";
        if ($p->{OFFSET}) {
            sysseek ($source, $p->{OFFSET}, 0) == $p->{OFFSET}
              or die "$fn: seek: $!\n";
        }
        copy_file ($source, $fn, $disk, $disk_fn, $p->{BYTES});
        close ($source) or die "$fn: close: $!\n";

        # Zero-fill up to the sector/cylinder boundary chosen above.
        write_zeros ($disk, $disk_fn, $p->{SECTORS} * 512 - $p->{BYTES});
    }
    if ($pad) {
        my ($pad_sectors) = round_up ($total_sectors, cyl_sectors (%geometry));
        write_zeros ($disk, $disk_fn, ($pad_sectors - $total_sectors) * 512);
    }
    close ($disk) or die "$disk: close: $!\n";
}
# make_partition_table({H => heads, S => sectors}, {KERNEL => ..., ...})
#
# Creates and returns a 64-byte partition table for the given
# partitions and disk geometry.  Roles absent from %$partitions are
# omitted; the result is zero-padded to 64 bytes.
sub make_partition_table {
    my ($geometry, $partitions) = @_;
    my $table = '';
    for my $role (@role_order) {
        my $p = $partitions->{$role};
        next if !defined $p;

        my $last_sector = $p->{START} + $p->{SECTORS} - 1;
        my $is_bootable = $role eq 'KERNEL';        # only the kernel boots

        $table .= pack ("C", $is_bootable ? 0x80 : 0); # Bootable?
        $table .= pack_chs ($p->{START}, $geometry);   # CHS of partition start
        $table .= pack ("C", $role2type{$role});       # Partition type
        $table .= pack_chs ($last_sector, $geometry);  # CHS of partition end
        $table .= pack ("V", $p->{START});             # LBA of partition start
        $table .= pack ("V", $p->{SECTORS});           # Length in sectors
        die if length ($table) % 16;                   # entries are 16 bytes
    }
    return pack ("a64", $table);
}
# make_kernel_command_line(@args)
#
# Returns the raw bytes to write to an MBR at offset $LOADER_SIZE to
# set a Pintos kernel command line: a little-endian 32-bit argument
# count followed by 128 bytes of NUL-terminated argument strings.
sub make_kernel_command_line {
    my (@args) = @_;
    my $argv = join ('', map { "$_\0" } @args);
    die "command line exceeds 128 bytes" if length ($argv) > 128;
    return pack ("V a128", scalar (@args), $argv);
}
# copy_file($from_handle, $from_file_name, $to_handle, $to_file_name, $size)
#
# Copies $size bytes from $from_handle to $to_handle in chunks of at
# most 4 kB.  $from_file_name and $to_file_name are used in error
# messages only.
sub copy_file {
    my ($from_handle, $from_file_name, $to_handle, $to_file_name, $size) = @_;
    my $remaining = $size;
    while ($remaining > 0) {
        my $chunk = $remaining < 4096 ? $remaining : 4096;
        write_fully ($to_handle, $to_file_name,
                     read_fully ($from_handle, $from_file_name, $chunk));
        $remaining -= $chunk;
    }
}
# read_fully($handle, $file_name, $bytes)
#
# Reads exactly $bytes bytes from $handle and returns the data read;
# dies on a read error or premature end of file.  $file_name is used
# in error messages only.
sub read_fully {
    my ($handle, $file_name, $bytes) = @_;
    my $buffer;
    my $got = sysread ($handle, $buffer, $bytes);
    die "$file_name: read: $!\n" if !defined $got;
    die "$file_name: unexpected end of file\n" if $got != $bytes;
    return $buffer;
}
# write_fully($handle, $file_name, $data)
#
# Writes all of $data to $handle; dies on a write error or short
# write.  $file_name is used in error messages only.
sub write_fully {
    my ($handle, $file_name, $data) = @_;
    my $put = syswrite ($handle, $data);
    die "$file_name: write: $!\n" if !defined $put;
    die "$file_name: short write\n" if $put != length $data;
}
# write_zeros($handle, $file_name, $size)
#
# Writes $size zero bytes to $handle in chunks of at most 4 kB.
# $file_name is used in error messages only.
sub write_zeros {
    my ($handle, $file_name, $size) = @_;
    my $remaining = $size;
    while ($remaining > 0) {
        my $chunk = $remaining < 4096 ? $remaining : 4096;
        write_fully ($handle, $file_name, "\0" x $chunk);
        $remaining -= $chunk;
    }
}
# div_round_up($x,$y)
#
# Returns $x / $y, rounded up to the nearest integer.
# $y must be a positive integer; $x may be fractional (it is first
# rounded up to a whole number).
sub div_round_up {
    my ($x, $y) = @_;
    my $whole = ceil ($x);
    return int (($whole + $y - 1) / $y);
}
# round_up($x, $y)
#
# Returns $x rounded up to the nearest multiple of $y.
# $y must be a positive integer.
sub round_up {
    my ($x, $y) = @_;
    return $y * div_round_up ($x, $y);
}
# cyl_sectors(H => heads, S => sectors)
#
# Returns the number of sectors in one cylinder of a disk with the
# given geometry: heads per cylinder times sectors per track.
sub cyl_sectors {
    my (%geom) = @_;
    return $geom{H} * $geom{S};
}
# read_loader($file_name)
#
# Reads and returns the first $LOADER_SIZE bytes in $file_name.
# If $file_name is undefined, tries to find the default loader.
# Makes sure that the loader is a reasonable size ($LOADER_SIZE bytes,
# or a full 512-byte sector of which only the first part is used).
sub read_loader {
    my ($name) = @_;
    $name = find_file ("loader.bin") if !defined $name;
    die "Cannot find loader\n" if !defined $name;

    my ($handle);
    open ($handle, '<', $name) or die "$name: open: $!\n";
    -s $handle == $LOADER_SIZE || -s $handle == 512
      or die "$name: must be exactly $LOADER_SIZE or 512 bytes long\n";

    # Declare $loader lexically: the old code assigned an undeclared
    # package global, which leaks state and fails under "use strict".
    my ($loader) = read_fully ($handle, $name, $LOADER_SIZE);
    close ($handle) or die "$name: close: $!\n";
    return $loader;
}
# pack_chs($lba, {H => heads, S => sectors})
#
# Converts logical sector $lba to a 3-byte packed geometrical sector
# in the format used in PC partition tables (see [Partitions]) and
# returns the geometrical sector as a 3-byte string.
sub pack_chs {
    my ($lba, $geometry) = @_;
    my ($cyl, $head, $sect) = lba_to_chs ($lba, $geometry);
    # On-disk byte layout: head; then the 6-bit sector number with the
    # cylinder's bits 8-9 folded into the top two bits, i.e.
    # ((cyl >> 8) & 0x3) << 6 == (cyl >> 2) & 0xc0; then the low 8 bits
    # of the cylinder.
    return pack ("CCC", $head, $sect | (($cyl >> 2) & 0xc0), $cyl & 0xff);
}
# lba_to_chs($lba, {H => heads, S => sectors})
#
# Returns the (cylinder, head, sector) triple corresponding to logical
# sector $lba under the given geometry.  Addresses beyond cylinder
# 1023 (unrepresentable in 10 bits) are clamped to the conventional
# (1023, 254, 63) sentinel.
#
# Sources:
# http://en.wikipedia.org/wiki/CHS_conversion
# http://www.cgsecurity.org/wiki/Intel_Partition_Table
sub lba_to_chs {
    my ($lba, $geometry) = @_;
    my $heads_per_cyl = $geometry->{H};
    my $sects_per_track = $geometry->{S};

    use integer;
    my $sects_per_cyl = $heads_per_cyl * $sects_per_track;
    my $cyl = $lba / $sects_per_cyl;
    my $rest = $lba % $sects_per_cyl;
    my $head = $rest / $sects_per_track;
    my $sect = $rest % $sects_per_track + 1;    # sectors are 1-based

    return $cyl <= 1023 ? ($cyl, $head, $sect) : (1023, 254, 63);
}
# read_mbr($file)
#
# Tries to read an MBR from $file.  Returns the 512-byte MBR if the
# file is at least one sector long and ends its first sector with the
# 0xaa55 boot signature, otherwise numeric 0.  Dies on open failure or
# a zero-length file.
sub read_mbr {
    my ($file) = @_;
    my ($retval) = 0;

    # Lexical filehandle instead of the old package-global bareword
    # FILE, and binmode so the sector is read verbatim on platforms
    # with CRLF translation.
    open (my $handle, '<', $file) or die "$file: open: $!\n";
    binmode ($handle);
    if (-s $handle == 0) {
        die "$file: file has zero size\n";
    } elsif (-s $handle >= 512) {
        my ($mbr);
        sysread ($handle, $mbr, 512) == 512 or die "$file: read: $!\n";
        # A valid MBR ends with the little-endian signature 0xaa55.
        $retval = $mbr if unpack ("v", substr ($mbr, 510)) == 0xaa55;
    }
    close ($handle);
    return $retval;
}
# interpret_partition_table($mbr, $disk)
#
# Parses the partition-table in the specified 512-byte $mbr and
# returns the partitions as a hash mapping each recognized role
# (KERNEL, FILESYS, SCRATCH, SWAP) to {START, SECTORS} in sectors.
# Malformed, non-Pintos, or duplicate entries are warned about on
# STDERR and skipped.  $disk is used for error messages only.
sub interpret_partition_table {
    my ($mbr, $disk) = @_;
    my (%parts);
    for my $i (0...3) {
        # Each 16-byte entry starts at offset 446.  Template trick:
        # "C X V" reads the status byte, backs up one byte (X), then
        # rereads the same four bytes as a little-endian word, so
        # $valid is true iff the entry's first four bytes are not all
        # zero (an all-zero entry is unused).  Then C = type byte at
        # offset 4, x3 skips the ending CHS, and V V are the start LBA
        # and sector count.
        my ($bootable, $valid, $type, $lba_start, $lba_length)
          = unpack ("C X V C x3 V V", substr ($mbr, 446 + 16 * $i, 16));
        next if !$valid;

        # In each case below, the comma operator emits the diagnostic
        # and then the postfix "next" skips the entry.
        (print STDERR "warning: invalid partition entry $i in $disk\n"),
          next if $bootable != 0 && $bootable != 0x80;
        my ($role) = $type2role{$type};
        (printf STDERR "warning: non-Pintos partition type 0x%02x in %s\n",
                $type, $disk),
          next if !defined $role;
        (print STDERR "warning: duplicate \L$role\E partition in $disk\n"),
          next if exists $parts{$role};
        $parts{$role} = {START => $lba_start,
                         SECTORS => $lba_length};
    }
    return %parts;
}
# find_file($base_name)
#
# Looks for a file named $base_name, first as given and then under
# build/.  If found, returns the name; otherwise, returns undef.
sub find_file {
    my ($base_name) = @_;
    for my $candidate ($base_name, "build/$base_name") {
        return $candidate if -e $candidate;
    }
    return undef;
}
# read_partition_table($file)
#
# Reads a partition table from $file and returns the parsed
# partitions.  Dies if $file does not start with a valid MBR.
sub read_partition_table {
    my ($file) = @_;
    my $mbr = read_mbr ($file)
      or die "$file: not a partitioned disk\n";
    return interpret_partition_table ($mbr, $file);
}
# max(@args)
#
# Returns the numerically largest value in @args
# (undef when @args is empty, as before).
sub max {
    my (@values) = @_;
    my $best = shift @values;
    for my $value (@values) {
        $best = $value if $value > $best;
    }
    return $best;
}
1;
| pindexis/pintos-tn | src/utils/Pintos.pm | Perl | apache-2.0 | 14,562 |
# $Id: Number.pm 709 2008-01-29 21:01:32Z pajas $
package XML::LibXML::Number;
use XML::LibXML::Boolean;
use XML::LibXML::Literal;
use strict;
use vars qw ($VERSION);
$VERSION = "1.66"; # VERSION TEMPLATE: DO NOT CHANGE
# Overload stringification, numification, and numeric comparison so a
# Number object can be used directly in Perl expressions; all three
# delegate to the value()/cmp() methods defined below.
use overload
        '""' => \&value,
        '0+' => \&value,
        '<=>' => \&cmp;
# new($class, $number)
#
# Constructs a Number holding $number, which must be an optionally
# negated decimal with optional surrounding whitespace; anything else
# is stored as undef (rendered as 'NaN').  The object is a blessed
# scalar reference.
sub new {
    my ($class, $number) = @_;
    if ($number =~ /^\s*(-\s*)?(\d+(\.\d*)?|\.\d+)\s*$/) {
        # Canonicalize: drop all whitespace (including between the
        # minus sign and the digits).
        $number =~ s/\s+//g;
    }
    else {
        $number = undef;
    }
    return bless \$number, $class;
}
# Returns the stored value, or the string 'NaN' when no valid number
# was supplied to the constructor.
sub as_string {
    my ($self) = @_;
    return defined $$self ? $$self : 'NaN';
}
# Returns a simple XML rendering of the value: "<Number>N</Number>\n",
# with 'NaN' in place of an undefined value.
sub as_xml {
    my ($self) = @_;
    my $text = defined ($$self) ? $$self : 'NaN';
    return "<Number>$text</Number>\n";
}
# Returns the raw stored value (possibly undef).  Also serves as the
# '""' and '0+' overload target.
sub value {
    my ($self) = @_;
    return $$self;
}
# Numeric comparison, used as the '<=>' overload target.  $swap is set
# by the overload machinery when the object was the right-hand
# operand, in which case the comparison is reversed.
sub cmp {
    my ($self, $other, $swap) = @_;
    return $swap ? ($other <=> $$self) : ($$self <=> $other);
}
# A Number evaluates to itself (XPath expression-evaluation hook).
sub evaluate {
    my ($self) = @_;
    return $self;
}
# XPath boolean(): zero, empty, or undefined values are false;
# everything else is true.
sub to_boolean {
    my ($self) = @_;
    return $$self ? XML::LibXML::Boolean->True : XML::LibXML::Boolean->False;
}
# Conversions mandated by the XPath data model.
sub to_literal   { return XML::LibXML::Literal->new( $_[0]->as_string ); }
sub to_number    { return $_[0]; }    # already a number
sub string_value { return $_[0]->value; }
1;
__END__
=head1 NAME
XML::LibXML::Number - Simple numeric values.
=head1 DESCRIPTION
This class holds simple numeric values. It doesn't support -0, +/- Infinity,
or NaN, as the XPath spec says it should, but I'm not hurting anyone I don't think.
=head1 API
=head2 new($num)
Creates a new XML::LibXML::Number object, with the value in $num. Does some
rudimentary numeric checking on $num to ensure it actually is a number.
=head2 value()
Also as overloaded stringification. Returns the numeric value held.
=cut
| leighpauls/k2cro4 | third_party/cygwin/lib/perl5/vendor_perl/5.10/i686-cygwin/XML/LibXML/Number.pm | Perl | bsd-3-clause | 1,752 |
package AutoSsl;
use strict;
use warnings;
use Cwd qw(abs_path);
use Expect;
use File::Basename;
use File::Find qw(finddepth);
use File::Spec::Functions qw(canonpath);
use File::stat;
# Prepares the resty-auto-ssl test environment: starts an ngrok tunnel
# that exposes the local test server (port 9080) under a public
# hostname, exports the TEST_NGINX_* environment variables the test
# harness reads, and clears state left behind by earlier runs.
sub setup {
    # 'our' keeps the Expect handle alive in a package variable, so the
    # ngrok child is not reaped when setup() returns.
    our $ngrok = Expect->spawn("ngrok http 9080 --log stdout --log-format logfmt --log-level debug") or die "failed to spawn ngrok: $!";
    $ngrok->log_stdout(0);
    # Wait up to 10 seconds for ngrok's debug log to reveal the
    # assigned public hostname.
    $ngrok->expect(10, "-re", "Hostname:([a-z0-9]+.ngrok.io)") or die "failed to find hostname for ngrok";

    # Repo root is three directories up from this file.
    $ENV{TEST_NGINX_ROOT_DIR} ||= dirname(dirname(dirname(abs_path(__FILE__))));
    # First capture group from the expect() match above.
    $ENV{TEST_NGINX_NGROK_HOSTNAME} = ($ngrok->matchlist())[0] or die "failed to extract hostname for ngrok";
    $ENV{TEST_NGINX_RESTY_AUTO_SSL_DIR} ||= "/tmp/resty-auto-ssl-test";
    $ENV{TEST_NGINX_RESOLVER} ||= "8.8.8.8 8.8.4.4";

    # If the tests have previously been run, wipe out any test data.
    if(-d $ENV{TEST_NGINX_RESTY_AUTO_SSL_DIR}) {
        # Keep existing account keys registered with Let's Encrypt for about 1 day.
        # This prevents us from running into Let's Encrypt's account registration
        # rate limits (that are still low enough in the Let's Encrypt staging
        # environment that we can hit these on staging).
        #
        # But still re-register once a day to deal with issues like new license
        # terms, where old accounts may behave differently than new accounts
        # (https://community.letsencrypt.org/t/lets-encrypt-subscriber-agreement-v1-1-1/17409/7).
        my $existing_accounts_path = canonpath("$ENV{TEST_NGINX_RESTY_AUTO_SSL_DIR}/letsencrypt/accounts");
        my $keep_existing_accounts = 0;
        if(-d $existing_accounts_path) {
            my $current_time = time;
            my $account_time = stat($existing_accounts_path)->mtime;
            my $time_diff = 60 * 60 * 23; # 23 hours
            if($current_time - $account_time < $time_diff) {
                $keep_existing_accounts = 1;
            }
        }

        # finddepth() callback: decides, path by path, what to delete.
        my $wanted = sub {
            my $find_path = canonpath($File::Find::name);
            my $delete = 1;

            # Keep recent account files.
            if($keep_existing_accounts) {
                if(index($existing_accounts_path, $find_path) != -1 || index($find_path, $existing_accounts_path) != -1) {
                    $delete = 0;
                }
            }

            # Always keep the root directory for the "worker-perms" test, so we
            # retain the special permissions created on this by sudo in the Makefile.
            if($find_path eq "/tmp/resty-auto-ssl-test-worker-perms") {
                $delete = 0;
            }

            if($delete) {
                if(-d $find_path) {
                    rmdir $find_path;
                } else {
                    unlink $find_path;
                }
            }
        };
        finddepth(\&$wanted, $ENV{TEST_NGINX_RESTY_AUTO_SSL_DIR});
    }
}
1;
| UseFedora/lua-resty-auto-ssl | t/inc/setup.pl | Perl | mit | 2,640 |
#!/usr/bin/perl
#
# ***** BEGIN LICENSE BLOCK *****
# Zimbra Collaboration Suite Server
# Copyright (C) 2006, 2007, 2008, 2009, 2010 Zimbra, Inc.
#
# The contents of this file are subject to the Zimbra Public License
# Version 1.3 ("License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.zimbra.com/license.
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied.
# ***** END LICENSE BLOCK *****
#
use strict;
use Migrate;
my $CONCURRENCY = 10;    # parallel SQL worker count
my $ID = 14; # mail_item_id of new folder
my $METADATA = 'd1:ai1e4:unxti14e1:vi9e2:vti5ee';    # bencoded folder metadata (decoded in the notes below)
my $NOW = time();    # single timestamp stamped on every touched row

Migrate::verifySchemaVersion(29);
# Give every mailbox headroom in its change sequence before stamping rows.
bumpUpMailboxChangeCheckpoints();

#
# Rename existing 'Chats' folder, if there is one
#
my @sqlRename;
my %mailboxes = Migrate::getMailboxes();    # mailbox id => mailbox group id
foreach my $mboxId (sort(keys %mailboxes)) {
    my $gid = $mailboxes{$mboxId};
    my $sql = renameExistingChatsFolder($mboxId, $gid);
    push(@sqlRename, $sql);
}
Migrate::runSqlParallel($CONCURRENCY, @sqlRename);

#
# Create a new 'Chats' folder
#
# Reduce the mailbox list to the distinct set of groups; the
# INSERT ... SELECT in createChatsFolder() covers every mailbox of a
# group in one statement.
my %uniqueGroups;
foreach my $gid (values %mailboxes) {
    if (!exists($uniqueGroups{$gid})) {
        $uniqueGroups{$gid} = $gid;
    }
}
my @sqlInsert;
my @groups = sort(keys %uniqueGroups);
foreach my $gid (sort @groups) {
    my $sql = createChatsFolder($gid);
    push(@sqlInsert, $sql);
}
Migrate::runSqlParallel($CONCURRENCY, @sqlInsert);

#foreach my $cur (sort(keys %mailboxes)) {
# createIMsFolder("mboxgroup".$mailboxes{$cur}, $cur);
#}

Migrate::updateSchemaVersion(29, 30);
exit(0);
#####################
# Increment change_checkpoint column for all rows in mailbox table.
# This SQL must be executed immediately rather than queued.
sub bumpUpMailboxChangeCheckpoints() {
    # The +100 margin guarantees that the mod_metadata/mod_content
    # values copied from change_checkpoint later in this migration are
    # higher than any change number already handed out.
    my $sql =<<_SQL_;
UPDATE mailbox
SET change_checkpoint = change_checkpoint + 100;
_SQL_
    Migrate::runSql($sql);
}
# Rename any existing Chats folder at root level.
#
# Renaming is done per mailbox rather than per group to force the use of
# the (mailbox_id, folder_id) index on mail_item table. This should be
# more efficient than full table scan of mail_item tables most of whose
# rows have folder_id != 1.
#
# Returns the SQL text (to be queued by the caller); does not execute it.
# The new name embeds the item id and $NOW so it is unique per run.
sub renameExistingChatsFolder($$) {
    my ($mboxId, $gid) = @_;
    my $sql =<<_SQL_;
UPDATE mboxgroup$gid.mail_item mi, mailbox mb
SET mi.subject = CONCAT('Chats - renamed (', mi.id, ' - $NOW)'),
    mi.mod_metadata = mb.change_checkpoint,
    mi.mod_content = mb.change_checkpoint,
    mi.change_date = $NOW
WHERE mi.mailbox_id = $mboxId AND mi.folder_id = 1 AND
      mi.id != $ID AND LOWER(mi.subject) = 'chats' AND
      mb.id = mi.mailbox_id;
_SQL_
    return $sql;
}
# Create the system Chats folder for each mailbox in the specified
# mailbox group.
#
# Returns the SQL text (to be queued by the caller); does not execute
# it.  The INSERT ... SELECT creates folder id $ID under the root
# folder (parent_id/folder_id 1) for every mailbox in the group at
# once; ON DUPLICATE KEY keeps reruns of the migration idempotent.
sub createChatsFolder($) {
    my $gid = shift;
    my $sql = <<_SQL_;
INSERT INTO mboxgroup$gid.mail_item (
  mailbox_id, id, type, parent_id, folder_id, index_id, imap_id,
  date, size, volume_id, blob_digest,
  unread, flags, tags, sender,
  subject, metadata,
  mod_metadata, change_date, mod_content
)
SELECT
  id, $ID, 1, 1, 1, null, null,
  $NOW, 0, null, null,
  0, 0, 0, null,
  'Chats', '$METADATA',
  change_checkpoint, $NOW, change_checkpoint
FROM mailbox
WHERE group_id = $gid
ON DUPLICATE KEY UPDATE subject = 'Chats';
_SQL_
    return $sql;
}
# Metadata: "d1:ai1e4:unxti14e1:vi9e2:vti5ee"
# d #is map
# ###
# #
# # FN_ATTRS = 1 (FOLDER_IS_IMMUTABLE)
# #
# 1:
# a # FN_ATTRS (See Folder.java, FOLDER_IS_IMMUTABLE)
# i #int
# 1
# e #end
# ###
# #
# # UID_NEXT = 14 (for imap, this should just start as the type-id of the folder)
# #
# 4: # is string
# unxt # value
# i # is number
#                14
# e # end
# ###
# #
# # MD_VERSION = 9 (current MD version from source code)
# #
# 1:
# v
# i # number
# 9
# e #end
# ###
# #
# # FN_VIEW = 5 (MailItem TYPE_* entry, this one means MESSAGE)
# #
# 2:
# vt
# i # int
# 5
# e #int end
# e # map end
| nico01f/z-pec | ZimbraServer/src/db/migration/migrate20061101-IMFolder.pl | Perl | mit | 4,071 |
use utf8;
package CSN::Schema::Result::Profilefieldoption;
# Created by DBIx::Class::Schema::Loader
# DO NOT MODIFY THE FIRST PART OF THIS FILE
=head1 NAME
CSN::Schema::Result::Profilefieldoption
=cut
use strict;
use warnings;
use base 'DBIx::Class::Core';
=head1 COMPONENTS LOADED
=over 4
=item * L<DBIx::Class::InflateColumn::DateTime>
=back
=cut
__PACKAGE__->load_components("InflateColumn::DateTime");
=head1 TABLE: C<profilefieldoption>
=cut
__PACKAGE__->table("profilefieldoption");
=head1 ACCESSORS
=head2 option_id
data_type: 'integer'
is_auto_increment: 1
is_nullable: 0
sequence: 'profilefieldoption_option_id_seq'
=head2 field_id
data_type: 'integer'
is_foreign_key: 1
is_nullable: 0
=head2 language_key
data_type: 'varchar'
is_nullable: 0
size: 80
=cut
__PACKAGE__->add_columns(
"option_id",
{
data_type => "integer",
is_auto_increment => 1,
is_nullable => 0,
sequence => "profilefieldoption_option_id_seq",
},
"field_id",
{ data_type => "integer", is_foreign_key => 1, is_nullable => 0 },
"language_key",
{ data_type => "varchar", is_nullable => 0, size => 80 },
);
=head1 PRIMARY KEY
=over 4
=item * L</option_id>
=back
=cut
__PACKAGE__->set_primary_key("option_id");
=head1 RELATIONS
=head2 field
Type: belongs_to
Related object: L<CSN::Schema::Result::Profilefield>
=cut
__PACKAGE__->belongs_to(
"field",
"CSN::Schema::Result::Profilefield",
{ field_id => "field_id" },
{ is_deferrable => 0, on_delete => "CASCADE", on_update => "NO ACTION" },
);
# Created by DBIx::Class::Schema::Loader v0.07040 @ 2014-10-29 23:21:31
# DO NOT MODIFY THIS OR ANYTHING ABOVE! md5sum:M1McMq8AtB6swUBLo8gflw
# You can replace this text with custom code or comments, and it will be preserved on regeneration
1;
| CodingMinds/CSN | lib/CSN/Schema/Result/Profilefieldoption.pm | Perl | mit | 1,828 |
=pod
=head1 NAME
SSL_get_ex_data_X509_STORE_CTX_idx,
SSL_CTX_set_verify, SSL_set_verify,
SSL_CTX_set_verify_depth, SSL_set_verify_depth,
SSL_verify_cb,
SSL_verify_client_post_handshake,
SSL_set_post_handshake_auth,
SSL_CTX_set_post_handshake_auth
- set various SSL/TLS parameters for peer certificate verification
=head1 SYNOPSIS
#include <openssl/ssl.h>
typedef int (*SSL_verify_cb)(int preverify_ok, X509_STORE_CTX *x509_ctx);
void SSL_CTX_set_verify(SSL_CTX *ctx, int mode, SSL_verify_cb verify_callback);
void SSL_set_verify(SSL *ssl, int mode, SSL_verify_cb verify_callback);
int SSL_get_ex_data_X509_STORE_CTX_idx(void);
void SSL_CTX_set_verify_depth(SSL_CTX *ctx, int depth);
void SSL_set_verify_depth(SSL *ssl, int depth);
int SSL_verify_client_post_handshake(SSL *ssl);
void SSL_CTX_set_post_handshake_auth(SSL_CTX *ctx, int val);
void SSL_set_post_handshake_auth(SSL *ssl, int val);
=head1 DESCRIPTION
SSL_CTX_set_verify() sets the verification flags for B<ctx> to be B<mode> and
specifies the B<verify_callback> function to be used. If no callback function
shall be specified, the NULL pointer can be used for B<verify_callback>.
SSL_set_verify() sets the verification flags for B<ssl> to be B<mode> and
specifies the B<verify_callback> function to be used. If no callback function
shall be specified, the NULL pointer can be used for B<verify_callback>. In
this case last B<verify_callback> set specifically for this B<ssl> remains. If
no special B<callback> was set before, the default callback for the underlying
B<ctx> is used, that was valid at the time B<ssl> was created with
L<SSL_new(3)>. Within the callback function,
B<SSL_get_ex_data_X509_STORE_CTX_idx> can be called to get the data index
of the current SSL object that is doing the verification.
SSL_CTX_set_verify_depth() sets the maximum B<depth> for the certificate chain
verification that shall be allowed for B<ctx>.
SSL_set_verify_depth() sets the maximum B<depth> for the certificate chain
verification that shall be allowed for B<ssl>.
SSL_CTX_set_post_handshake_auth() and SSL_set_post_handshake_auth() enable the
Post-Handshake Authentication extension to be added to the ClientHello such that
post-handshake authentication can be requested by the server. If B<val> is 0
then the extension is not sent, otherwise it is. By default the extension is not
sent. A certificate callback will need to be set via
SSL_CTX_set_client_cert_cb() if no certificate is provided at initialization.
SSL_verify_client_post_handshake() causes a CertificateRequest message to be
sent by a server on the given B<ssl> connection. The SSL_VERIFY_PEER flag must
be set; the SSL_VERIFY_POST_HANDSHAKE flag is optional.
=head1 NOTES
The verification of certificates can be controlled by a set of logically
or'ed B<mode> flags:
=over 4
=item SSL_VERIFY_NONE
B<Server mode:> the server will not send a client certificate request to the
client, so the client will not send a certificate.
B<Client mode:> if not using an anonymous cipher (by default disabled), the
server will send a certificate which will be checked. The result of the
certificate verification process can be checked after the TLS/SSL handshake
using the L<SSL_get_verify_result(3)> function.
The handshake will be continued regardless of the verification result.
=item SSL_VERIFY_PEER
B<Server mode:> the server sends a client certificate request to the client.
The certificate returned (if any) is checked. If the verification process
fails, the TLS/SSL handshake is
immediately terminated with an alert message containing the reason for
the verification failure.
The behaviour can be controlled by the additional
SSL_VERIFY_FAIL_IF_NO_PEER_CERT, SSL_VERIFY_CLIENT_ONCE and
SSL_VERIFY_POST_HANDSHAKE flags.
B<Client mode:> the server certificate is verified. If the verification process
fails, the TLS/SSL handshake is
immediately terminated with an alert message containing the reason for
the verification failure. If no server certificate is sent, because an
anonymous cipher is used, SSL_VERIFY_PEER is ignored.
=item SSL_VERIFY_FAIL_IF_NO_PEER_CERT
B<Server mode:> if the client did not return a certificate, the TLS/SSL
handshake is immediately terminated with a "handshake failure" alert.
This flag must be used together with SSL_VERIFY_PEER.
B<Client mode:> ignored (see BUGS)
=item SSL_VERIFY_CLIENT_ONCE
B<Server mode:> only request a client certificate once during the
connection. Do not ask for a client certificate again during
renegotiation or post-authentication if a certificate was requested
during the initial handshake. This flag must be used together with
SSL_VERIFY_PEER.
B<Client mode:> ignored (see BUGS)
=item SSL_VERIFY_POST_HANDSHAKE
B<Server mode:> the server will not send a client certificate request
during the initial handshake, but will send the request via
SSL_verify_client_post_handshake(). This allows the SSL_CTX or SSL
to be configured for post-handshake peer verification before the
handshake occurs. This flag must be used together with
SSL_VERIFY_PEER. TLSv1.3 only; no effect on pre-TLSv1.3 connections.
B<Client mode:> ignored (see BUGS)
=back
If the B<mode> is SSL_VERIFY_NONE none of the other flags may be set.
The actual verification procedure is performed either using the built-in
verification procedure or using another application provided verification
function set with
L<SSL_CTX_set_cert_verify_callback(3)>.
The following descriptions apply in the case of the built-in procedure. An
application provided procedure also has access to the verify depth information
and the verify_callback() function, but the way this information is used
may be different.
SSL_CTX_set_verify_depth() and SSL_set_verify_depth() set a limit on the
number of certificates between the end-entity and trust-anchor certificates.
Neither the
end-entity nor the trust-anchor certificates count against B<depth>. If the
certificate chain needed to reach a trusted issuer is longer than B<depth+2>,
X509_V_ERR_CERT_CHAIN_TOO_LONG will be issued.
The depth count is "level 0: peer certificate", "level 1: CA certificate",
"level 2: higher level CA certificate", and so on. Setting the maximum
depth to 2 allows the levels 0, 1, 2 and 3 (0 being the end-entity and 3 the
trust-anchor).
The default depth limit is 100,
allowing for the peer certificate, at most 100 intermediate CA certificates and
a final trust anchor certificate.
The B<verify_callback> function is used to control the behaviour when the
SSL_VERIFY_PEER flag is set. It must be supplied by the application and
receives two arguments: B<preverify_ok> indicates, whether the verification of
the certificate in question was passed (preverify_ok=1) or not
(preverify_ok=0). B<x509_ctx> is a pointer to the complete context used
for the certificate chain verification.
The certificate chain is checked starting with the deepest nesting level
(the root CA certificate) and worked upward to the peer's certificate.
At each level signatures and issuer attributes are checked. Whenever
a verification error is found, the error number is stored in B<x509_ctx>
and B<verify_callback> is called with B<preverify_ok>=0. By applying
X509_CTX_store_* functions B<verify_callback> can locate the certificate
in question and perform additional steps (see EXAMPLES). If no error is
found for a certificate, B<verify_callback> is called with B<preverify_ok>=1
before advancing to the next level.
The return value of B<verify_callback> controls the strategy of the further
verification process. If B<verify_callback> returns 0, the verification
process is immediately stopped with "verification failed" state. If
SSL_VERIFY_PEER is set, a verification failure alert is sent to the peer and
the TLS/SSL handshake is terminated. If B<verify_callback> returns 1,
the verification process is continued. If B<verify_callback> always returns
1, the TLS/SSL handshake will not be terminated with respect to verification
failures and the connection will be established. The calling process can
however retrieve the error code of the last verification error using
L<SSL_get_verify_result(3)> or by maintaining its
own error storage managed by B<verify_callback>.
If no B<verify_callback> is specified, the default callback will be used.
Its return value is identical to B<preverify_ok>, so that any verification
failure will lead to a termination of the TLS/SSL handshake with an
alert message, if SSL_VERIFY_PEER is set.
After calling SSL_set_post_handshake_auth(), the client will need to add a
certificate or certificate callback to its configuration before it can
successfully authenticate. This must be called before SSL_connect().
SSL_verify_client_post_handshake() requires that verify flags have been
previously set, and that a client sent the post-handshake authentication
extension. When the client returns a certificate the verify callback will be
invoked. A write operation must take place for the Certificate Request to be
sent to the client, this can be done with SSL_do_handshake() or SSL_write_ex().
Only one certificate request may be outstanding at any time.
When post-handshake authentication occurs, a refreshed NewSessionTicket
message is sent to the client.
=head1 BUGS
In client mode, it is not checked whether the SSL_VERIFY_PEER flag
is set, but whether any flags other than SSL_VERIFY_NONE are set. This can
lead to unexpected behaviour if SSL_VERIFY_PEER and other flags are not used as
required.
=head1 RETURN VALUES
The SSL*_set_verify*() functions do not provide diagnostic information.
The SSL_verify_client_post_handshake() function returns 1 if the request
succeeded, and 0 if the request failed. The error stack can be examined
to determine the failure reason.
=head1 EXAMPLES
The following code sequence realizes an example B<verify_callback> function
that will always continue the TLS/SSL handshake regardless of verification
failure, if wished. The callback realizes a verification depth limit with
more informational output.
All verification errors are printed; information about the certificate chain
is printed on request.
The example is realized for a server that does allow but not require client
certificates.
The example makes use of the ex_data technique to store application data
into/retrieve application data from the SSL structure
(see L<CRYPTO_get_ex_new_index(3)>,
L<SSL_get_ex_data_X509_STORE_CTX_idx(3)>).
...
typedef struct {
int verbose_mode;
int verify_depth;
int always_continue;
} mydata_t;
int mydata_index;
...
static int verify_callback(int preverify_ok, X509_STORE_CTX *ctx)
{
char buf[256];
X509 *err_cert;
int err, depth;
SSL *ssl;
mydata_t *mydata;
err_cert = X509_STORE_CTX_get_current_cert(ctx);
err = X509_STORE_CTX_get_error(ctx);
depth = X509_STORE_CTX_get_error_depth(ctx);
/*
* Retrieve the pointer to the SSL of the connection currently treated
* and the application specific data stored into the SSL object.
*/
ssl = X509_STORE_CTX_get_ex_data(ctx, SSL_get_ex_data_X509_STORE_CTX_idx());
mydata = SSL_get_ex_data(ssl, mydata_index);
X509_NAME_oneline(X509_get_subject_name(err_cert), buf, 256);
/*
* Catch a too long certificate chain. The depth limit set using
* SSL_CTX_set_verify_depth() is by purpose set to "limit+1" so
* that whenever the "depth>verify_depth" condition is met, we
* have violated the limit and want to log this error condition.
* We must do it here, because the CHAIN_TOO_LONG error would not
* be found explicitly; only errors introduced by cutting off the
* additional certificates would be logged.
*/
if (depth > mydata->verify_depth) {
preverify_ok = 0;
err = X509_V_ERR_CERT_CHAIN_TOO_LONG;
X509_STORE_CTX_set_error(ctx, err);
}
if (!preverify_ok) {
printf("verify error:num=%d:%s:depth=%d:%s\n", err,
X509_verify_cert_error_string(err), depth, buf);
} else if (mydata->verbose_mode) {
printf("depth=%d:%s\n", depth, buf);
}
/*
* At this point, err contains the last verification error. We can use
* it for something special
*/
if (!preverify_ok && (err == X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT)) {
X509_NAME_oneline(X509_get_issuer_name(err_cert), buf, 256);
printf("issuer= %s\n", buf);
}
if (mydata->always_continue)
return 1;
else
return preverify_ok;
}
...
mydata_t mydata;
...
mydata_index = SSL_get_ex_new_index(0, "mydata index", NULL, NULL, NULL);
...
SSL_CTX_set_verify(ctx, SSL_VERIFY_PEER | SSL_VERIFY_CLIENT_ONCE,
verify_callback);
/*
* Let the verify_callback catch the verify_depth error so that we get
* an appropriate error in the logfile.
*/
SSL_CTX_set_verify_depth(verify_depth + 1);
/*
* Set up the SSL specific data into "mydata" and store it into the SSL
* structure.
*/
mydata.verify_depth = verify_depth; ...
SSL_set_ex_data(ssl, mydata_index, &mydata);
...
SSL_accept(ssl); /* check of success left out for clarity */
if (peer = SSL_get_peer_certificate(ssl)) {
if (SSL_get_verify_result(ssl) == X509_V_OK) {
/* The client sent a certificate which verified OK */
}
}
=head1 SEE ALSO
L<ssl(7)>, L<SSL_new(3)>,
L<SSL_CTX_get_verify_mode(3)>,
L<SSL_get_verify_result(3)>,
L<SSL_CTX_load_verify_locations(3)>,
L<SSL_get_peer_certificate(3)>,
L<SSL_CTX_set_cert_verify_callback(3)>,
L<SSL_get_ex_data_X509_STORE_CTX_idx(3)>,
L<SSL_CTX_set_client_cert_cb(3)>,
L<CRYPTO_get_ex_new_index(3)>
=head1 HISTORY
The SSL_VERIFY_POST_HANDSHAKE option, and the SSL_verify_client_post_handshake()
and SSL_set_post_handshake_auth() functions were added in OpenSSL 1.1.1.
=head1 COPYRIGHT
Copyright 2000-2018 The OpenSSL Project Authors. All Rights Reserved.
Licensed under the Apache License 2.0 (the "License"). You may not use
this file except in compliance with the License. You can obtain a copy
in the file LICENSE in the source distribution or at
L<https://www.openssl.org/source/license.html>.
=cut
| kipid/blog | nodejs/openssl-master/doc/man3/SSL_CTX_set_verify.pod | Perl | mit | 14,179 |
#!/usr/bin/env perl
use strict;
use warnings;
use Getopt::Long;
use Bio::KBase::PROM::Client;
use Bio::KBase::PROM::Util qw(get_prom_client get_auth_token);
use Bio::KBase::workspace::ScriptHelpers qw(workspace);
my $DESCRIPTION =
"
NAME
prom-create-constraints -- create fba constraints from data
SYNOPSIS
prom-create-constraints [OPTIONS]
DESCRIPTION
With a genome object in the workspace, gene expression data in the
same namespace as the genome, and a regulatory network in the same
namespace as a genome, you can create a set of FBA model constraints
that can be used to predict transcription factor knockouts using the
PROM method (see Chandrasekarana and Price, 2010 PNAS). This method
will return the ID of the new PROM model constraints object if
successful, or an error message if something failed. This method
will exit with zero if the method was successful, or one if something
failed. Run with the verbose option to get a log and status message
of the steps involved in the constraints construction.
-g [GENOME_ID], --genome [GENOME_ID]
indicate the genome object id of a genome in the
workspace to link the constraints to; a genome object
can be created with the kbfba-loadgenome script.
-e [EXP_ID], --expression-data [EXP_ID]
indicate the id of the expression data collection with
which to use
-r [REG_NET_ID], --regulatory-network [REG_NET_ID]
indicate the id of the regulatory network with which
to use
-w [WORKSPACE_ID], --workspace [WORKSPACE_ID]
specify the workspace to use. If left blank, the default
workspace that is configured by the workspace service
scripts is used
-v, --verbose
in addition to the workspace regulatory network ID, which
will be on the last line, status messages are displayed; more
verbose errors are also displayed, which may be useful for
debugging
-h, --help
display this help message, ignore all arguments
EXAMPLES
Create a PROM model constraints object:
> prom-create-constraints -g 'kb|g.20848'
SEE ALSO
prom-load-expression-data
prom-load-regulatory-network
kbfba-loadgenome
AUTHORS
Michael Sneddon (mwsneddon\@lbl.gov)
Matt DeJongh (dejongh\@hope.edu)
Shinnosuke Kondo (shinnosuke.kondo\@hope.edu)
Christopher Henry (chenry\@mcs.anl.gov)
with help from Sriram Chandrasekaran
";
my $help         = '';
my $verbose      = '';
my $genomeId     = '';
my $expressionId = '';
my $regNetworkId = '';
my $ws = workspace(); # defaults to the workspace configured by the workspace service

my $opt = GetOptions (
    "help"                 => \$help,
    "verbose"              => \$verbose,
    "genome=s"             => \$genomeId,
    "expression-data=s"    => \$expressionId,
    "regulatory-network=s" => \$regNetworkId,
    "workspace=s"          => \$ws
);

if ($help) {
    print $DESCRIPTION;
    exit 0;
}

# Any leftover positional arguments are a usage error (all inputs are options).
my $n_args = $#ARGV + 1;
if ($n_args != 0) {
    print "Bad options / Invalid number of arguments. Run with --help for usage.\n";
    exit 1;
}

# All three data-object ids are mandatory; fail early with a specific message.
if (!$genomeId) {
    print "FAILURE - no genome specified. Run with --help for usage.\n";
    exit 1;
}
if (!$expressionId) {
    print "FAILURE - no expression data collection specified. Run with --help for usage.\n";
    exit 1;
}
if (!$regNetworkId) {
    # BUGFIX: this branch previously reported "no expression data collection
    # specified", masking the real cause (a missing regulatory network id).
    print "FAILURE - no regulatory network specified. Run with --help for usage.\n";
    exit 1;
}

# Create the PROM service client; failure usually means a bad service URL.
my $prom;
eval { $prom = get_prom_client(); };
if (!$prom) {
    # BUGFIX: fixed "Is you" typo in the user-facing message.
    print "FAILURE - unable to create prom service client. Is your PROM URL correct? see prom-url.\n";
    exit 1;
}

# Grab auth info.
my $token = get_auth_token();

# Make the call.
my $status;
my $prom_id;
my $create_prom_constraints_parameters = {
    genome_object_id              => $genomeId,
    expression_data_collection_id => $expressionId,
    regulatory_network_id         => $regNetworkId,
    workspace_name                => $ws,
    token                         => $token
};

if ($verbose) {
    # In verbose mode let any server-side error propagate uncaught.
    ($status, $prom_id) = $prom->create_prom_constraints($create_prom_constraints_parameters);
} else {
    eval {
        ($status, $prom_id) = $prom->create_prom_constraints($create_prom_constraints_parameters);
    };
    if (!$status) {
        print "FAILURE - unknown internal server error. Run with --help for usage.\n";
        print "This error is often caused if you provided an ID of an expression data or regulatory network\n";
        print "data object which is not of the correct type. Check the ids you provided.\n";
        exit 1;
    }
}

if ($verbose) { print $status."\n"; }

# A non-empty id signals success; otherwise report the status text and fail.
if ($prom_id ne '') {
    print $prom_id."\n";
    exit 0;
} else {
    print $status."\n";
    exit 1;
}
| kbase/prom_service | scripts/prom-create-constraints.pl | Perl | mit | 5,787 |
#!/usr/bin/perl -wl
# CommonAccord - bringing the world to agreement
# Written in 2014 by Primavera De Filippi To the extent possible under law, the author(s) have dedicated all copyright and related and neighboring rights to this software to the public domain worldwide. This software is distributed without any warranty.
# You should have received a copy of the CC0 Public Domain Dedication along with this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
use warnings;
use strict;
my %remote;
my $remote_cnt = 0;
my $path = "./Doc/";
my $orig;
# Resolve $root within $file (a filename or an already-open glob), expanding
# any embedded {Placeholder} fields, and return the fully expanded text.
# The first handle ever opened is remembered in the file-global $orig so that
# later placeholder expansion always restarts from the top-level document.
sub parse {
    my ($source, $field, $section) = @_;

    my $handle;
    if (ref($source) eq "GLOB") {
        $handle = $source;
    }
    else {
        # NOTE(review): two-arg open on caller-supplied names — kept as-is,
        # but it allows mode characters to leak in from the filename.
        open $handle, $source or die $!;
    }

    $orig ||= $handle;

    my $resolved = parse_root($handle, $field, $section);
    return unless $resolved;

    expand_fields($handle, \$resolved, $section);
    return $resolved;
}
# Look up the value of $field in the open handle $f.
# Pass 1: scan for a direct "Field = value" line and return its value.
# Pass 2: follow include lines of the form "Prefix=[target]" whose Prefix is
#         a prefix of $field, recursing into the named target file. A target
#         prefixed with "?" is treated as a URL: it is fetched once via curl
#         into a numbered temp file under $path and cached in %remote.
# Returns the resolved value, or undef when nothing matches.
sub parse_root {
    my ($f, $field, $oldpart) = @_; my $root;
    # Pass 1: direct assignment in this file.
    seek($f, 0, 0);
    while(<$f>) {
        return $root if ($root) = $_ =~ /^\Q$field\E\s*=\s*(.*?)$/;
    }
    # Pass 2: follow include lines.
    seek($f, 0, 0);
    while(<$f>) {
        my($part,$what, $newfield);
        # if( (($part, $what) = $_ =~ /^([^=]*)=\[(.+?)\]/) and ($field =~ s/^\Q$part\E//) ) {
        if( (($part, $what) = $_ =~ /^([^=]*)=\[(.+?)\]/) and ($field =~ /^\Q$part\E/ )) {
            # Strip the matched prefix to get the remainder to look up
            # inside the included document.
            if ( $part && ($field =~ /^\Q$part\E(.+?)$/) ){ $newfield = $1;}
            # Accumulate the prefix chain for nested includes.
            $part = $oldpart . $part if $oldpart;
            if($what =~ s/^\?//) {
                # Remote include: fetch once, then reuse the temp file.
                # NOTE(review): the URL is interpolated into a shell command —
                # untrusted input here allows shell injection.
                if(! $remote{$path.$what}) { $remote_cnt++;
                    `curl '$what' > '$path/tmp$remote_cnt.cmacc'`;
                    $remote{$path.$what} = "$path/tmp$remote_cnt.cmacc";
                }
                $root = parse($remote{$path.$what}, $newfield || $field, $part);
            }
            else {
                # Local include, relative to $path.
                $root = parse($path.$what, $newfield || $field, $part);
            }
            # First successful resolution wins.
            return $root if $root;
        }
    }
    return $root;
}
# Expand every {Placeholder} occurring in the text referenced by $field by
# recursively resolving it against the original top-level document ($orig).
# $part, when set, is the include-prefix under which the placeholder was found.
sub expand_fields {
    my($f,$field,$part) = @_;
    foreach( $$field =~ /\{([^}]+)\}/g ) {
        my $ex = $_;
        my $ox = $part ? $part . $ex : $ex;
        my $value = parse($orig, $ox);
        # BUGFIX: the substitution previously carried a doubled "/gg" modifier;
        # a repeated regexp modifier is redundant at best and is rejected as an
        # error by recent perls. A single /g replaces every occurrence.
        $$field =~ s/\{\Q$ex\E\}/$value/g if $value;
    }
}
# Resolve the document named on the command line, starting from its
# "Model.Root" key.
my $output = parse($ARGV[0], "Model.Root");
# print $output;

# XXX FIX ME XXX This is horrible - but I'm just dead tired :(
# Collect the placeholders that remained unexpanded, de-duplicated while
# preserving first-appearance order.
my %seen; my @arr = $output=~/\{([^}]+)\}/g;
@arr = grep { ! $seen{$_}++ } @arr;

# select one:
# Key=
print "$_=\n" foreach @arr;
# Key=Key;
# print "$_=$_\n" foreach @arr;
# To make a new DefinedTerm, with a hyperlink to the definition:
# print "$_=<a href='#Def.$_.Sec' class='definedterm'>$_</a>\n" foreach @arr;
# to mark the place a defined term is defined inline.
# print "$_=\{_" . substr($_, 4, -4) ."\}\n" foreach @arr;

# clean up the temporary files (remote fetching)
`rm $_` for values %remote;
| CommonAccord/Cmacc-Source | vendor/cmacc-app/openedit-parser.pl | Perl | mit | 2,642 |
# This file was created by configpm when Perl was built. Any changes
# made to this file will be lost the next time perl is built.
# for a description of the variables, please have a look at the
# Glossary file, as written in the Porting folder, or use the url:
# http://perl5.git.perl.org/perl.git/blob/HEAD:/Porting/Glossary
package Config;
use strict;
use warnings;
use vars '%Config';
# Skip @Config::EXPORT because it only contains %Config, which we special
# case below as it's not a function. @Config::EXPORT won't change in the
# lifetime of Perl 5.
my %Export_Cache = (myconfig => 1, config_sh => 1, config_vars => 1,
config_re => 1, compile_date => 1, local_patches => 1,
bincompat_options => 1, non_bincompat_options => 1,
header_files => 1);
@Config::EXPORT = qw(%Config);
@Config::EXPORT_OK = keys %Export_Cache;
# Need to stub all the functions to make code such as print Config::config_sh
# keep working
sub bincompat_options;
sub compile_date;
sub config_re;
sub config_sh;
sub config_vars;
sub header_files;
sub local_patches;
sub myconfig;
sub non_bincompat_options;
# Define our own import method to avoid pulling in the full Exporter:
# Custom import(): exports %Config (the default) and/or any requested helper
# functions into the caller's namespace without loading the full Exporter.
sub import {
    shift;
    # No explicit request: fall back to the default export list (%Config only).
    @_ = @Config::EXPORT unless @_;

    # %Config is special-cased below because it is a hash, not a function.
    my @funcs = grep $_ ne '%Config', @_;
    my $export_Config = @funcs < @_ ? 1 : 0;

    no strict 'refs';
    my $callpkg = caller(0);
    foreach my $func (@funcs) {
        die qq{"$func" is not exported by the Config module\n}
            unless $Export_Cache{$func};
        # Alias the function into the caller's symbol table.
        *{$callpkg.'::'.$func} = \&{$func};
    }

    *{"$callpkg\::Config"} = \%Config if $export_Config;
    return;
}
# Guard against running this generated library under a different perl binary
# than the one it was built for (both checks kept exactly as generated).
die "Perl lib version (5.16.3) doesn't match executable '$0' version ($])"
    unless $^V;

$^V eq 5.16.3
    or die "Perl lib version (5.16.3) doesn't match executable '$0' version (" .
    sprintf("v%vd",$^V) . ")";
# Tied-hash read: serve a key from the in-memory bootstrap subset when it is
# present, otherwise delegate to the lazy loader in Config_heavy.pl.
sub FETCH {
    my ($self, $key) = @_;

    # A cached entry may legitimately hold undef, so presence is tested with
    # exists rather than definedness.
    return $self->{$key} if exists $self->{$key};

    return $self->fetch_string($key);
}
# Tie constructor: bless the supplied bootstrap hashref into this class.
sub TIEHASH {
    my ($class, $self) = @_;
    return bless $self, $class;
}
# Explicit no-op destructor so object destruction is not routed through
# AUTOLOAD (which would needlessly load Config_heavy.pl).
sub DESTROY { }
# First call to any not-yet-defined function lazily loads Config_heavy.pl
# (which presumably defines launcher()) and re-dispatches the call to it.
sub AUTOLOAD {
    require 'Config_heavy.pl';
    # Avoid an infinite loop if launcher itself was the missing symbol.
    goto \&launcher unless $Config::AUTOLOAD =~ /launcher$/;
    die "&Config::AUTOLOAD failed on $Config::AUTOLOAD";
}
# tie returns the object, so the value returned to require will be true.
# Bootstrap subset of the build configuration; keys not listed here are
# fetched lazily via FETCH -> fetch_string (loaded from Config_heavy.pl).
tie %Config, 'Config', {
    archlibexp => 'G:\\WXweb\\WX_web\\WX_web\\xampp\\perl\\lib',
    archname => 'MSWin32-x86-multi-thread',
    cc => 'gcc',
    d_readlink => undef,
    d_symlink => undef,
    dlext => 'dll',
    dlsrc => 'dl_win32.xs',
    dont_use_nlink => undef,
    exe_ext => '.exe',
    inc_version_list => '',
    intsize => '4',
    ldlibpthname => '',
    libpth => 'G:\\WXweb\\WX_web\\WX_web\\xampp\\c\\lib \\xampp\\c\\i686-w64-mingw32\\lib',
    osname => 'MSWin32',
    osvers => '4.0',
    path_sep => ';',
    privlibexp => 'G:\\WXweb\\WX_web\\WX_web\\xampp\\perl\\lib',
    scriptdir => 'G:\\WXweb\\WX_web\\WX_web\\xampp\\perl\\bin',
    sitearchexp => 'G:\\WXweb\\WX_web\\WX_web\\xampp\\perl\\site\\lib',
    sitelibexp => 'G:\\WXweb\\WX_web\\WX_web\\xampp\\perl\\site\\lib',
    so => 'dll',
    useithreads => 'define',
    usevendorprefix => 'define',
    version => '5.16.3',
};
# If the Portable module is available (relocatable distributions), let it
# rewrite the hard-coded paths above; failure is deliberately ignored.
eval {
    require Portable;
    Portable->import('Config');
};

1;
| liuyangning/WX_web | xampp/perl/lib/Config.pm | Perl | mit | 3,303 |
package DeviewSched::ScheduleCrawler::2015;
use 5.010;
use utf8;
use strict;
use warnings;
use DateTime;
use LWP::UserAgent;
use JSON qw/decode_json/;
use Data::Dumper;
use Moose;
with 'DeviewSched::ScheduleCrawler';
# Endpoint and schedule constants; empty-prototype subs act as inlinable
# constants in perl.
sub URL_TIMETABLE () { 'http://deview.kr/2015/timetable' }
sub YEAR () { 2015 }
sub DEFAULT_TZ () { 'Asia/Seoul' }

# Program "feature" discriminators as they appear in the timetable JSON.
sub PROGRAM_TYPE_REGISTER () { "REGISTER" }
sub PROGRAM_TYPE_KEYNOTE () { "KEYNOTE" }
sub PROGRAM_TYPE_SESSION () { "SESSION" }
sub PROGRAM_TYPE_BOF () { "BOF" } # what is BOF?!
# Key-rename map applied to raw session hashes: old key => new key.
sub REBUILD_DATA_SESSION_INFO () {
    {
        'name' => 'title',
        'content' => 'description',
    }
}

# Key-rename map for speaker hashes; an undef target drops the key entirely
# (see _rebuild_hash).
sub REBUILD_DATA_SPEAKER_INFO () {
    {
        'belong' => 'organization',
        'contact' => 'email',
        'profileImageUrl' => 'picture',
        'id' => undef
    }
}
# Raw decoded timetable JSON, exactly as returned by _request_raw_timetable.
has '_cache_raw_timetable' => (
    is => 'rw',
    isa => 'HashRef'
);

# NOTE(review): never populated within this module — apparently reserved.
has '_cache_category_list' => (
    is => 'rw',
    isa => 'ArrayRef',
    default => sub { [] },
);

# Flat list of session hashrefs accumulated by _parse_raw_day().
has '_cache_schedule_list' => (
    is => 'rw',
    isa => 'ArrayRef',
    default => sub { [] },
);
# Moose post-construction hook: prime the timetable caches immediately so a
# broken feed fails object construction rather than a later accessor call.
sub BUILD {
    my ($self) = @_;

    eval { $self->_refresh_cache };
    if (my $error = $@) {
        # TODO: produce a more detailed error message
        die $error;
    }
}
# Return the ids of every cached schedule entry.
sub schedule_list {
    my ($self) = @_;
    return map { $_->{id} } @{ $self->_cache_schedule_list };
}
# Return ($session, \@speakers) for the given session id: the cached raw
# entry is run through the key-rename maps and then through the builder
# helpers (_build_session_info/_build_speaker_info, defined outside this
# module — presumably by the ScheduleCrawler role).
sub session_detail {
    my $self = shift;
    my $session_id = shift;

    # Locate the cached raw entry matching this id (undef if not found).
    my ($session_info) = grep { $_->{id} == $session_id } @{ $self->_cache_schedule_list };

    my $session = $self->_build_session_info(
        $session_id,
        $self->_rebuild_hash($session_info, REBUILD_DATA_SESSION_INFO)
    );

    # Each raw speaker hash gets its keys remapped before being built.
    my @speakers = map {
        $self->_build_speaker_info(
            $session_id,
            $self->_rebuild_hash($_, REBUILD_DATA_SPEAKER_INFO)
        );
    } @{ $session_info->{speakerList} };

    return ($session, \@speakers);
}
# Return a shallow copy of $source with its keys renamed per $key_map
# (old key => new key). A map entry whose target is undef removes the key.
# Keys with false values are left untouched (matches existing behaviour).
sub _rebuild_hash {
    my ($self, $source, $key_map) = @_;

    # Shallow-copy so the caller's hash is never mutated.
    my %rebuilt = %{$source};

    while (my ($old_key, $new_key) = each %{$key_map}) {
        next unless $rebuilt{$old_key};
        my $value = delete $rebuilt{$old_key};
        $rebuilt{$new_key} = $value if defined $new_key;
    }

    return \%rebuilt;
}
# Fetch and parse the timetable exactly once; subsequent calls are no-ops
# while the raw timetable cache is populated.
sub _refresh_cache {
    my ($self) = @_;

    return if $self->_cache_raw_timetable;

    $self->_cache_raw_timetable( $self->_request_raw_timetable );
    $self->_parse_raw_timetable( $self->_cache_raw_timetable );
}
# GET the timetable endpoint and return the decoded JSON hashref.
# Returns nothing on an unsuccessful HTTP response; dies if the body is
# present but not valid JSON.
sub _request_raw_timetable {
    my ($self) = @_;

    my $response = $self->_request('get', URL_TIMETABLE);
    return unless $response->is_success;

    my $decoded = eval { decode_json($response->decoded_content) };
    die "json_decode failed: $@" if $@;
    return $decoded;
}
# Walk every day entry of the raw timetable and flatten it into the
# schedule cache via _parse_raw_day.
sub _parse_raw_timetable {
    my ($self, $raw_timetable) = @_;

    foreach my $day (@{ $raw_timetable->{dayList} }) {
        $self->_parse_raw_day($day);
    }
    return;
}
# Flatten one "day" entry: for every SESSION-type program slot, annotate each
# of its sessions with the day id, a track number, and the slot's start/end
# DateTimes, then append them all to _cache_schedule_list.
sub _parse_raw_day {
    my $self = shift;
    my $day = shift;

    my $day_id = $day->{id};
    my @program_list = @{ $day->{programList} };
    my @schedule_list;

    for my $program (@program_list) {
        # TODO: should probably add a schedule-type column so sessions,
        # registration, keynotes etc. can be distinguished. Ugh! > <);
        #
        # For now only SESSION entries are collected.

        # Start / end time of the whole program slot.
        my ($starts_at, $ends_at) = $self->_parse_raw_program_time($day, $program);

        if ($program->{feature} eq PROGRAM_TYPE_SESSION) {
            # Track numbering restarts at 1 within each program slot.
            my $track = 1;
            my @raw_session_list = @{ $program->{sessionList} };

            for my $session (@raw_session_list) {
                # Attach derived fields (mutates the raw session hash).
                $session->{track} = $track++;
                $session->{day} = $day_id;
                $session->{starts_at} = $starts_at;
                $session->{ends_at} = $ends_at;

                push @schedule_list, $session;
            }
        }
    }

    push @{ $self->_cache_schedule_list }, @schedule_list;
}
# Extract the numeric part of a title shaped like "세션 N" ("Session N").
# Returns the captured number in list context, or an empty list / false when
# the title does not match.
sub _parse_session_num {
    # maybe useless — not referenced anywhere else in this module
    my ($self, $title) = @_;

    return $title =~ m/^세션 (\d+)$/;
}
# Convert a program slot's date/time strings into a pair of DateTime objects
# (start, end) in the module's default time zone.
sub _parse_raw_program_time {
    my $self = shift;
    my ($day, $program) = @_;

    # Assumes $day->{date} looks like "9.14 <weekday word>" — TODO confirm
    # against the live feed (\w+ also matches the Korean weekday under utf8).
    my ($date_month, $date_day) = $day->{date} =~ m/^(\d{1,2})\.(\d{1,2})\s+\w+$/;

    # Build one DateTime per "HH:MM" string, sharing the parsed month/day.
    return map {
        my ($hour, $min) = $_ =~ m/(\d{2}):(\d{2})/;

        DateTime->new (
            year => $self->YEAR,
            month => $date_month,
            day => $date_day,
            hour => $hour,
            minute => $min,
            time_zone => DEFAULT_TZ
        );
    } ($program->{startTime}, $program->{endTime});
}
1;
__END__
=encoding utf-8
| GDG-SSU/deviewsched-backend | lib/DeviewSched/ScheduleCrawler/2015.pm | Perl | mit | 5,004 |
package O2Plugin::Shop::Obj::Product::Variant::Option;
use strict;
use base 'O2::Obj::Object';
#-----------------------------------------------------------------------------
# Validate and persist the ex-VAT price modifier (e.g. "+10", "*1.5").
# _validatePriceModifier dies on malformed input before anything is stored.
sub setPriceModifierExVat {
    my ($self, $modifier) = @_;

    $self->_validatePriceModifier($modifier);
    return $self->setModelValue('priceModifierExVat', $modifier);
}
#-----------------------------------------------------------------------------
# Store the inc-VAT price modifier directly on the object; it takes
# precedence in getPriceModifierIncVat over the derived ex-VAT value.
sub setPriceModifierIncVat {
    my ($self, $price_modifier) = @_;
    $self->{priceModifierIncVat} = $price_modifier;
}
#-----------------------------------------------------------------------------
# Return the inc-VAT price modifier. An explicitly stored value wins;
# otherwise it is derived from the ex-VAT modifier: multiplicative modifiers
# (* and /) are VAT-neutral and pass through unchanged, while additive ones
# (+ and -) are scaled by the product's VAT percentage.
sub getPriceModifierIncVat {
    my ($obj) = @_;
    return $obj->{priceModifierIncVat} if $obj->{priceModifierIncVat};

    my ($how, $amount) = $obj->_validatePriceModifier( $obj->getPriceModifierExVat() );
    return "$how$amount" if $how eq '*' || $how eq '/';

    my $product = $obj->getProduct();
    # BUGFIX: this die was unconditional, which made the conversion below
    # unreachable; it now fires only when no product is available.
    die "Can't convert from priceModifierExVat to priceModifierIncVat without being able to get the vatPercentage from the product."
        unless $product;

    $amount *= ( 100+$product->getVatPercentage() ) / 100;
    return "$how$amount";
}
#-----------------------------------------------------------------------------
# Split a price modifier such as "+10,5" or "*2" into (operator, amount).
# Accepts a decimal comma, dies on anything that is not a numeric amount
# preceded by one of + - * /, and rejects division by zero.
sub _validatePriceModifier {
    my ($self, $modifier) = @_;

    my ($operator, $amount) = $modifier =~ m{ \A ( [-+/*] ) (.+) \z }xms;
    $amount =~ s{,}{.}xmsg;    # normalise a decimal comma to a dot

    my $is_invalid = !$operator
        || $amount ne 1 * $amount
        || ($operator eq '/' && $amount == 0);
    die "Price modifier ($modifier) not valid" if $is_invalid;

    return ($operator, $amount);
}
#-----------------------------------------------------------------------------
sub getModifiedPriceExVat {
my ($obj, $originalPrice) = @_;
return eval "$originalPrice" . $obj->getPriceModifierExVat();
}
#-----------------------------------------------------------------------------
1;
| haakonsk/O2-CMS | plugins/shop/lib/O2Plugin/Shop/Obj/Product/Variant/Option.pm | Perl | mit | 1,819 |
#!/usr/bin/perl
# BUGFIX: the require statement was missing its terminating semicolon, which
# made the whole script fail to compile.
require 'quickies.pl';

$user_information = $MasterPath . "/User Information";
$worlddir = $MasterPath . "/se/Planets/";

# Refuse service to banned network ranges.
# NOTE(review): the dots are unescaped, so /209.156/ also matches "209x156";
# kept as-is to avoid silently changing the ban list.
if($ENV{'REMOTE_HOST'} =~ /209.156/) {die;}
if($ENV{'REMOTE_HOST'} =~ /209.252/) {die;}
if($ENV{'REMOTE_HOST'} =~ /216.70/) {die;}

# Decode the POSTed form into the global %data and normalise the country name.
parse_form();
$data{'cname'} =~ tr/ /_/;

if ($data{'cname'} eq "Imperiumz") {print "content-type: text/html\n\n";}

dbmopen(%password, "$user_information/password", 0777) or print "Error 1- $!";

# NOTE(review): passwords are stored and compared in plain text.
if ($data{'pword1'} eq $password{$data{'cname'}}) {
    dbmclose(%password);
    dbmopen(%planet, "$user_information/planet", 0777) or print "Error 2- $!";
    use File::Find;
    $userdir = $MasterPath . "/se/Planets/$planet{$data{'cname'}}/users/$data{'cname'}";
    if ($data{'cname'} eq "Imperiumz") {
        print "$userdir<BR>";
    }
    unless (-d "$userdir") {
        print "<SCRIPT>alert(\"That country does not exist. Please confirm your country name and try again.\");history.back();</SCRIPT>";
        die;
    }
    if (-f "$userdir/dupe.txt") {
        print "<SCRIPT>alert(\"Your nation has been locked down for security reasons. Please contact the GSD team at shattered.empires\@canada.com for details.\");history.back();</SCRIPT>";
        die;
    }
    # BUGFIX: this path was single-quoted, so $planet{...} was never
    # interpolated and chdir always targeted a literal, non-existent directory.
    chdir("$MasterPath/se/Planets/$planet{$data{'cname'}}/users");
    # Recursively delete the user's directory tree, then its now-empty root.
    finddepth(\&deltree,"$userdir") or print "Error 3- $!";
    rmdir("$userdir");
    dbmclose(%planet);
    RemoveInfo();
    print "Location: http://www.bluewand.com/seclassic.php\n\n";
    die;
} else {
    print "Location: http://www.bluewand.com/seclassic2.php\n\n";
    die;
}
# Decode an application/x-www-form-urlencoded POST body from STDIN into the
# global %data hash, stripping HTML comments/tags from every value as a crude
# markup-injection mitigation.
sub parse_form {
    # Get the input
    read(STDIN, $buffer, $ENV{'CONTENT_LENGTH'});
    # Split the name-value pairs
    @pairs = split(/&/, $buffer);
    foreach $pair (@pairs) {
        ($name, $value) = split(/=/, $pair);
        # '+' encodes a space; then decode %XX escapes.
        $value =~ tr/+/ /;
        $value =~ s/%([a-fA-F0-9][a-fA-F0-9])/pack("C", hex($1))/eg;
        # Strip HTML comments and tags.
        $value =~ s/<!--(.|\n)*-->//g;
        $value =~ s/<([^>]|\n)*>//g;
        $data{$name} = $value;
    }
}
# File::Find callback used with finddepth(): removes each visited entry,
# trying unlink() first (files) and falling back to rmdir() (directories).
sub deltree {
    $file = "$File::Find::dir/$_";
    unlink("$File::Find::dir/$_") or rmdir("$File::Find::dir/$_")
}
# Remove every per-country record from the account DBM files, keyed by the
# (already normalised) country name in $data{'cname'}.
# NOTE(review): every failure message prints the same "Code - $!" text, so
# the failing file cannot be identified; dbmclose() results are unchecked.
sub RemoveInfo
{
    #AccessCode
    dbmopen(%datain, "$user_information/accesscode", 0777) or print "Code - $!<BR>";
    $Code = ($datain{$data{'cname'}});
    delete($datain{$data{'cname'}});
    dbmclose(%datain);
    #Email Address
    dbmopen(%datain, "$user_information/emailaddress",0777) or print "Code - $!<BR>";;
    $Email = ($datain{$data{'cname'}});
    delete($datain{$data{'cname'}});
    dbmclose(%datain);
    #Password
    dbmopen(%password, "$user_information/password",0777) or print "Code - $!<BR>";;
    delete($password{$data{'cname'}});
    dbmclose(%password);
    #Planet
    dbmopen(%planet, "$user_information/planet",0777) or print "Code - $!<BR>";;
    $Planet = ($planet{$data{'cname'}});
    delete($planet{$data{'cname'}});
    dbmclose(%planet);
    #IP
    dbmopen(%ip, "$user_information/ip",0777) or print "Code - $!<BR>";;
    delete($ip{$data{'cname'}});
    dbmclose(%ip);
    #Date
    dbmopen(%date, "$user_information/date",0777) or print "Code - $!<BR>";;
    delete($date{$data{'cname'}});
    dbmclose(%date);
    #Httphost
    dbmopen(%httphost, "$user_information/httphost",0777) or print "Code - $!<BR>";;
    delete($httphost{$data{'cname'}});
    dbmclose(%httphost);
}
| cpraught/shattered-empires | countryreset.pl | Perl | mit | 3,168 |
#!/u01/app/oracle/product/11.1.0.7/perl -w
use strict;
use DBI;
$SIG{INT} = \&cleanup;
# WatchLongops.pl
# This program runs continuously to show current Long Operations on
# an Oracle Instance.
# David Mann
# http://ba6.us
# dmann99@gmail.com
# BUGFIX: the usage guard previously tested "@ARGV < 0", which can never be
# true (an array in numeric context is its non-negative element count); a
# connect string is required, so test for fewer than one argument.
die "\nWatchInstanceLongops.pl\n\n" .
    "This procedure watches long operations on an instance.\n\n" .
    "syntax: perl $0 [SCHEMA_NAME]/[SCHEMA_PASSWORD]\@[TNSNAME]\n"
    if @ARGV < 1;

# Get command line arguments
my ( $connect_string ) = @ARGV;

# Connect to DB
print "Connecting to DB...\n";
my $dbh = getDBConnection( $connect_string );

### SQL Query that drives this procedure ###
# Long operations still in progress (sofar <> totalwork), with percent done.
my $sql = qq{ select ROUND(sofar/totalwork*100,2) as pct,
                     elapsed_seconds,
                     time_remaining,
                     v\$session_longops.message
              from v\$session_longops
              where sofar<>totalwork
              order by target, sid
            };

### Main Program : Begin ###
my ($percent, $secselap, $secsleft, $message);
my ($count);

my $sth = $dbh->prepare($sql);

# Redraw loop; only exits via the SIGINT handler (cleanup), which dies.
while (1) {
    writeHeader($connect_string);

    # Prepare and execute SELECT
    $sth->execute();

    # Declare and Bind Columns
    $sth->bind_columns(undef, \$percent, \$secselap, \$secsleft, \$message);

    # Fetch rows from DB
    while( $sth->fetch() ) {
        writeOutputBlock($percent, $message, $secselap, $secsleft);
    }
    sleep(5);
}

# NOTE(review): everything below is unreachable — the loop above never ends
# and SIGINT dies — kept to document the intended shutdown path.
# (close(OUT) refers to a handle that is never opened anywhere in this file.)
print "\ndone\n";

# Close cursor
$sth->finish();
# BUGFIX: the warning interpolated the nonexistent variable $DBI::errstrn;
# it meant $DBI::errstr followed by a newline.
$dbh->disconnect() or warn "DB disconnection failed: $DBI::errstr\n";
close(OUT);
### Main Program : End ###
# Function: getDBConnection
# Parameters: $connectstring - connection string in format user/password@tnsname
# Returns a connected DBI handle (RaiseError on, AutoCommit off) or dies.
# NOTE(review): the first '@' is taken as the separator, so passwords
# containing '@' will be mis-parsed.
sub getDBConnection {
    my ($connectstring) = @_;

    my $slash_at = index($connectstring, '/');
    my $at_sign  = index($connectstring, '@');
    die "missing /" if $slash_at == -1;
    die "missing \@" if $at_sign == -1;

    my $user = substr($connectstring, 0, $slash_at);
    my $pass = substr($connectstring, $slash_at + 1, $at_sign - $slash_at - 1);
    my $tns  = substr($connectstring, $at_sign + 1);

    return DBI->connect(
        "dbi:Oracle:" . $tns,
        $user,
        $pass,
        {
            RaiseError => 1,
            AutoCommit => 0
        }
    ) || die "Database connection not made: $DBI::errstr";
};
# Function: writeHeader
# Parameters: $connectstring - connection string in format user/password@tnsname
# Clears the terminal (both Windows "cls" and *nix "clear" are attempted)
# and prints the monitored TNS name extracted from the connect string.
sub writeHeader {
    my ($connectstring) = @_;

    my $at_sign = index($connectstring, '@');
    die "missing \@" if $at_sign == -1;

    system "cls";
    system "clear";
    print "Monitoring Instance: " . substr($connectstring, $at_sign + 1) . "\n\n";
};
# Function: writeOutputBlock
# Parameters: $percent  - percent done (0 - 100)
#             $message  - Text message from v$session_longops
#             $secselap - Seconds elapsed from v$session_longops
#             $secsleft - Estimated seconds left from v$session_longops
# Prints a two-line summary (message + timing) followed by a progress bar.
sub writeOutputBlock {
    my ($pct, $msg, $elapsed, $remaining) = @_;

    # Format a seconds count as M:SS.
    my $as_mmss = sub {
        my ($secs) = @_;
        return int($secs / 60) . ":" . sprintf("%02s", $secs % 60);
    };

    print substr($msg, 0, 79) . "\n";
    print "Percent : $pct ";
    print "Minutes elapsed : " . $as_mmss->($elapsed) . " ";
    print "Est. Minutes left : " . $as_mmss->($remaining) . "\n";
    writeProgressBar($pct, 80);
};
# Function: writeProgressBar
# Parameters: $perc  - percent done (0 - 100)
#             $width - Total width of the Progress Bar (including | borders)
# Renders a bar such as |XXXX======| followed by a newline.
sub writeProgressBar {
    my ($perc, $width) = @_;

    my $inner  = $width - 2;                   # cells between the | borders
    my $filled = int($perc / 100 * $inner);    # cells rendered as "X"

    my $bar = '';
    for my $cell (1 .. $inner) {
        $bar .= ($cell < $filled) ? 'X' : '=';
    }
    print "|$bar|\n";
};
# SIGINT handler: terminate the redraw loop by dying when Ctrl-C is pressed.
sub cleanup {die "\nending\n";}
| dmann99/oscripts | longops-perl/longops.pl | Perl | mit | 3,732 |
#!/usr/bin/perl -w
#
# Drop the named database from a MongoDB server.
#
# usage: <script> db host [port [user [passwd]]]
#
use strict;
#
use MongoDB;
#
use Try::Tiny;
use Safe::Isa;
#
# With no arguments at all, print usage and exit cleanly.
if (scalar(@ARGV) == 0)
{
    printf "usage: $0 db host [port [user [passwd]]]\n";
    exit 0;
}
#
my $db_to_drop = undef;
my $host_name = undef;
my $port = undef;
my $user_name = undef;
my $password = undef;
#
# db and host are mandatory; port, user and password are optional.
die "missing host or db: $!" unless (scalar(@ARGV) >= 2);
#
$db_to_drop = shift @ARGV;
$host_name = shift @ARGV;
#
$port = shift @ARGV if (scalar(@ARGV) > 0);
$user_name = shift @ARGV if (scalar(@ARGV) > 0);
$password = shift @ARGV if (scalar(@ARGV) > 0);
#
# Build the connection URI from whichever optional parts were supplied.
#
# NOTE: the '@' separating credentials from the host must be escaped
# (\@) inside these double-quoted strings. An unescaped @${host_name}
# is interpolated as an array dereference of $host_name and dies at
# run time under 'use strict'.
my $mongo_uri = undef;
#
if (defined($port))
{
    if (defined($user_name))
    {
        if (defined($password))
        {
            $mongo_uri = "mongodb://${user_name}:${password}\@${host_name}:${port}";
        }
        else
        {
            $mongo_uri = "mongodb://${user_name}\@${host_name}:${port}";
        }
    }
    else
    {
        $mongo_uri = "mongodb://${host_name}:${port}";
    }
}
else
{
    $mongo_uri = "mongodb://${host_name}";
}
#
my $client = undef;
#
$client = MongoDB->connect($mongo_uri);
# NOTE(review): connections are established lazily by the driver; this
# explicit connect() call looks redundant -- kept for compatibility with
# the driver version this was written against.
$client->connect;
#
# Only drop the database if the server actually reports it.
my @db_names = $client->database_names;
foreach my $db_name (@db_names)
{
    if ($db_name eq $db_to_drop)
    {
        my $db = $client->get_database($db_to_drop);
        $db->drop;
        last;
    }
}
#
$client->disconnect;
#
exit 0;
| ombt/analytics | apex/testbin/mongo/mongo.drop.db.pl | Perl | mit | 1,336 |
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package hardware::server::hp::ilo::xmlapi::mode::components::ctrl;
use strict;
use warnings;
# No data to preload for this component.
sub load { }

# Checks the health status of every storage controller reported in the
# iLO embedded-health XML; emits one long-msg line per controller and a
# short-msg alert when a controller's status maps to warning/critical
# via the 'default' severity table for the 'ctrl' section.
sub check {
    my ($self) = @_;

    $self->{output}->output_add(long_msg => "Checking controllers");
    $self->{components}->{ctrl} = {name => 'ctrl', total => 0, skip => 0};
    return if ($self->check_filter(section => 'ctrl'));
    return if (!defined($self->{xml_result}->{GET_EMBEDDED_HEALTH_DATA}->{STORAGE}->{CONTROLLER}));

    #<STORAGE>
    # <CONTROLLER>
    # <LABEL VALUE = "Controller on System Board"/>
    # <STATUS VALUE = "OK"/>
    # <CONTROLLER_STATUS VALUE = "OK"/>
    # <SERIAL_NUMBER VALUE = "001438031632F40"/>
    # <MODEL VALUE = "HP Smart Array P420i Controller"/>
    # <FW_VERSION VALUE = "5.42"/>
    # <CACHE_MODULE_STATUS VALUE = "OK"/>
    # <CACHE_MODULE_SERIAL_NUM VALUE = "PBKUC0BRH6V822"/>
    # <CACHE_MODULE_MEMORY VALUE = "1048576 KB"/>
    #

    # Normalize to an array reference: when the server reports a single
    # controller, XML parsers commonly return a lone hashref rather than
    # a list, and the dereference below would otherwise crash
    # (NOTE(review): assumes no ForceArray on CONTROLLER -- confirm
    # against the XML loading code).
    my $controllers = $self->{xml_result}->{GET_EMBEDDED_HEALTH_DATA}->{STORAGE}->{CONTROLLER};
    $controllers = [$controllers] if (ref($controllers) ne 'ARRAY');

    foreach my $result (@$controllers) {
        my $instance = $result->{LABEL}->{VALUE};

        next if ($self->check_filter(section => 'ctrl', instance => $instance));
        # Skip controllers reported as absent unless absent is a problem.
        next if ($result->{STATUS}->{VALUE} =~ /not installed|n\/a|not present|not applicable/i &&
                 $self->absent_problem(section => 'ctrl', instance => $instance));

        $self->{components}->{ctrl}->{total}++;

        $self->{output}->output_add(long_msg => sprintf("controller '%s' status is '%s' [instance = %s]",
                                    $result->{LABEL}->{VALUE}, $result->{STATUS}->{VALUE}, $instance));
        my $exit = $self->get_severity(label => 'default', section => 'ctrl', value => $result->{STATUS}->{VALUE});
        if (!$self->{output}->is_status(value => $exit, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity => $exit,
                                        short_msg => sprintf("Controller '%s' status is '%s'", $result->{LABEL}->{VALUE}, $result->{STATUS}->{VALUE}));
        }
    }
}
1; | Tpo76/centreon-plugins | hardware/server/hp/ilo/xmlapi/mode/components/ctrl.pm | Perl | apache-2.0 | 2,892 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
package Bio::EnsEMBL::Variation::Pipeline::ProteinFunction::RunSift;
use strict;
use File::Path qw(make_path remove_tree);
use Data::Dumper;
use Bio::EnsEMBL::Registry;
use Bio::EnsEMBL::Variation::ProteinFunctionPredictionMatrix qw(@ALL_AAS);
use Bio::EnsEMBL::Variation::Utils::ComparaUtils qw(dump_alignment_for_sift);
use base ('Bio::EnsEMBL::Variation::Pipeline::ProteinFunction::BaseProteinFunction');
my $MEDIAN_CUTOFF = 2.75; # as per README
# run
#
# Entry point for the eHive job: computes SIFT predictions for a single
# protein (identified by its translation MD5) and stores the resulting
# prediction matrix in the variation database.
#
# Outline:
#   1. validate the blast db and the NCBI tool directory
#   2. create/enter a per-protein scratch directory, restoring any
#      scratch tarball left by a previous run
#   3. obtain a multiple alignment, either from compara or by running
#      SIFT's PSI-BLAST-based alignment script
#   4. write every possible single amino-acid substitution to subs.txt
#      and run SIFT's info_on_seqs on it
#   5. parse the predictions into a ProteinFunctionPredictionMatrix and
#      store it (only if at least one usable prediction was parsed)
#   6. tar the scratch files back up
#
# "Acceptable failure" cases (not enough PSI-BLAST hits / too few
# sequences chosen) are recorded via _insert_error_msg and the job
# returns early without storing a matrix; any other tool failure dies.
sub run {
    my $self = shift;
    my $translation_md5 = $self->required_param('translation_md5');
    my $sift_dir = $self->required_param('sift_dir');
    my $working_dir = $self->required_param('sift_working');
    my $ncbi_dir = $self->required_param('ncbi_dir');
    my $blastdb = $self->required_param('blastdb');
    # The blast db may be a prefix of several index files, hence the glob.
    if (! -e $blastdb && !glob("$blastdb*")) {
        die("Blastdb ($blastdb) does not exist");
    }
    if (! -d $ncbi_dir) {
        die("Ncbi_dir ($ncbi_dir) does not exist");
    }
    # Shard scratch directories by the first two MD5 characters to avoid
    # one huge flat directory.
    my $dir = substr($translation_md5, 0, 2);
    my $output_dir = "$working_dir/$dir/$translation_md5";
    my $tarball = 'scratch.tgz';
    unless (-d $output_dir) {
        my $err;
        make_path($output_dir, {error => \$err});
        die "make_path failed: ".Dumper($err) if $err && @$err;
    }
    # All tool invocations below assume the scratch dir is the cwd.
    chdir $output_dir or die "Failed to chdir to $output_dir";
    my $fasta_file = "protein.fa";
    my $aln_file = "protein.alignedfasta";
    my $res_file = "protein.SIFTprediction";
    my $subs_file = "subs.txt";
    # Restore scratch files left by a previous (possibly partial) run so
    # the alignment/substitution steps can be skipped if already done.
    if (-e "$output_dir/$tarball") {
        system("tar zxvf $tarball > /dev/null") == 0
            or die "Failed to untar $output_dir/$tarball: $!";
    }
    # set necessary environment variables for sift
    $ENV{NCBI} = $ncbi_dir;
    $ENV{BLIMPS_DIR} = $sift_dir.'/blimps';
    $ENV{SIFT_DIR} = $sift_dir;
    $ENV{tmpdir} = $output_dir;
    # fetch our protein
    my $peptide = $self->get_protein_sequence($translation_md5);
    # Release the DB connection before the long-running external steps.
    $self->dbc and $self->dbc->disconnect_if_idle();
    # Assume success until an alignment step says otherwise.
    my $alignment_ok = 1;
    unless (-e $aln_file) {
        # we need to get the multiple alignment
        if ($self->param('use_compara')) {
            my $stable_id = $self->get_stable_id_for_md5($translation_md5);
            eval {
                dump_alignment_for_sift($stable_id, $aln_file);
            };
            if ($@) {
                warn "Failed to get a compara alignment for $stable_id: $@";
                $alignment_ok = 0;
            }
        }
        else {
            # do the alignment ourselves
            # first create a fasta file for the protein sequence
            # NOTE(review): bareword 2-arg open without an error check; a
            # failure here would only surface later as an empty query file.
            open (FASTA_FILE, ">$fasta_file");
            my $pep_copy = $peptide;
            # Wrap the sequence at 80 characters per FASTA convention.
            $pep_copy =~ s/(.{80})/$1\n/g;
            chomp $pep_copy;
            print FASTA_FILE ">$translation_md5\n$pep_copy\n";
            close FASTA_FILE;
            # and run the alignment program
            $self->dbc->disconnect_when_inactive(1);
            my $cmd = "$sift_dir/bin/ensembl_seqs_chosen_via_median_info.csh $fasta_file $blastdb $MEDIAN_CUTOFF";
            my ($exit_code, $stderr, $flat_cmd) = $self->run_system_command($cmd);
            #die `env`."\n".$cmd;
            # my $exit_code = system($cmd);
            $self->dbc->disconnect_when_inactive(0);
            if ($exit_code == 0) {
                $alignment_ok = 1;
            }
            else {
                # If we just did not find enough hits, that's ok, just skip this
                my $error_file = $fasta_file . ".query.globalX.error";
                if (-s $error_file) {
                    open my $error_fh, "<", $error_file or die $!;
                    # Only the first line of the error file is inspected.
                    my $error_msg = <$error_fh>;
                    close $error_fh;
                    chomp $error_msg;
                    if ($error_msg =~ /Not enough sequences \(only \d\) found by the PSI-BLAST search!|PSI-BLAST found no hits/) {
                        $self->_insert_error_msg($translation_md5, 'Not enough sequences found by PSI-BLAST search', 'sift');
                        return;
                    } else {
                        die "Alignment for $translation_md5 failed - cmd: $flat_cmd: $stderr";
                    }
                } else {
                    # the alignment failed for some reason, what to do?
                    die "Alignment for $translation_md5 failed - cmd: $flat_cmd: $stderr";
                }
            }
        }
    }
    if ($alignment_ok) {
        # work out the sift score for each possible amino acid substitution
        unless (-e $subs_file) {
            # create our substitution file
            # Positions are 1-based; every non-identical ref->alt pair over
            # @ALL_AAS is written as e.g. "A17G", one per line.
            my $pos = 0;
            open SUBS, ">$subs_file" or die "Failed to open $subs_file: $!";
            my @aas = split //, $peptide;
            for my $ref (@aas) {
                $pos++;
                for my $alt (@ALL_AAS) {
                    unless ($ref eq $alt) {
                        print SUBS $ref.$pos.$alt."\n";
                    }
                }
            }
            close SUBS;
        }
        # and run sift on it
        $self->dbc->disconnect_when_inactive(1);
        my $cmd = "$sift_dir/bin/info_on_seqs $aln_file $subs_file $res_file";
        my ($exit_code, $stderr, $flat_cmd) = $self->run_system_command($cmd);
        if ($exit_code != 0) {
            # If there was not enough sequences selected, skip it
            my $error_file = "protein.alignedfasta.error";
            if (-s $error_file) {
                open my $error_fh, "<", $error_file or die $!;
                my $error_msg = <$error_fh>;
                close $error_fh;
                chomp $error_msg;
                if ($error_msg =~ /\d sequence\(s\) were chosen\. Less than the minimum number of sequences required/) {
                    $self->_insert_error_msg($translation_md5, 'Less than the minimum number of sequences required', 'sift');
                    return;
                } else {
                    die("Failed to run for $translation_md5 with $flat_cmd: error $exit_code = [$stderr]\n");
                }
            } else {
                # Otherwise die with a reason
                die("Failed to run for $translation_md5 with $flat_cmd: error $exit_code = [$stderr]\n");
            }
        }
        $self->dbc->disconnect_when_inactive(0);
        # parse and store the results
        open (RESULTS, "<$res_file") or die "Failed to open $res_file: $!";
        # parse the results file
        my $pred_matrix = Bio::EnsEMBL::Variation::ProteinFunctionPredictionMatrix->new(
            -analysis => 'sift',
            -peptide_length => length($peptide),
            -translation_md5 => $translation_md5,
        );
        my %evidence_stored;
        my $results_available = 0;
        while (<RESULTS>) {
            chomp;
            next if /WARNING/;
            next if /NOT SCORED/;
            # Whitespace-separated fields: substitution, prediction,
            # score, median conservation, #sequences, #blocks.
            my ($subst, $prediction, $score, $median_cons, $num_seqs, $blocks) = split;
            my ($ref_aa, $pos, $alt_aa) = $subst =~ /([A-Z])(\d+)([A-Z])/;
            # Skip lines that don't look like a substitution result.
            next unless $ref_aa && $alt_aa && defined $pos;
            $results_available = 1;
            # Mark the prediction low-quality when the alignment is
            # shallow (< 10 sequences) or median conservation is high
            # (> 3.25).
            my $low_quality = 0;
            $low_quality = 1 if $median_cons > 3.25 || $num_seqs < 10;
            $pred_matrix->add_prediction(
                $pos,
                $alt_aa,
                $prediction,
                $score,
                $low_quality
            );
            # Store per-position evidence only once, on the first
            # prediction seen for that position.
            # NOTE(review): the first test compares undef with '==',
            # which warns under 'use warnings'; a simple truth test
            # would be cleaner.
            unless ($evidence_stored{$pos} ==1) {
                ## add attribs by position
                $pred_matrix->add_evidence( 'sequence_number', $pos, $num_seqs );
                $pred_matrix->add_evidence( 'conservation_score', $pos, $median_cons );
                $evidence_stored{$pos} = 1;
            }
        }
        if ($results_available == 1 ){
            # avoid entering null matrices
            my $var_dba = $self->get_species_adaptor('variation');
            my $pfpma = $var_dba->get_ProteinFunctionPredictionMatrixAdaptor
                or die "Failed to get matrix adaptor";
            $pfpma->store($pred_matrix);
            $var_dba->dbc and $var_dba->dbc->disconnect_if_idle();
            $self->dbc and $self->dbc->disconnect_if_idle();
        }
    }
    # tar up the files
    my ($exit_code, $stderr, $flat_cmd) = $self->run_system_command(
        "tar --remove-files --exclude *.tgz -czvf $tarball * > /dev/null"
    );
    die "Failed to create $output_dir/$tarball: $stderr" unless $exit_code == 0;
}
# Record in the failure_reason table why a prediction could not be
# computed for the given translation (e.g. too few PSI-BLAST hits).
sub _insert_error_msg {
    my ($self, $translation_md5, $error, $analysis) = @_;
    my $insert_sth = $self->data_dbc->prepare(
        "INSERT INTO failure_reason (translation_md5,error_msg,analysis) VALUES (?,?,?)"
    );
    $insert_sth->execute($translation_md5, $error, $analysis);
    $insert_sth->finish();
}
| Ensembl/ensembl-variation | modules/Bio/EnsEMBL/Variation/Pipeline/ProteinFunction/RunSift.pm | Perl | apache-2.0 | 8,881 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
package EnsEMBL::Web::Component::Gene::RegulationImage;
use strict;
use warnings;
no warnings "uninitialized";
use base qw(EnsEMBL::Web::Component::Gene);
# Component setup: regenerate the image on every request (no caching),
# load it via AJAX, and mark the component as producing an image.
sub _init {
  my $self = shift;
  $self->cacheable(0);
  $self->ajaxable(1);
  $self->has_image(1);
}
# Renders the regulation image for the current gene: the gene's extended
# regulatory region slice with the transcript track collapsed and, when a
# functional-genomics database with cell types is configured, the per
# cell-line regulatory data attached to the image config.
sub content {
  my $self = shift;
  my $object = $self->object || $self->hub->core_object('gene');

  my $extended_slice = $object->get_extended_reg_region_slice;
  my $wuc = $object->get_imageconfig( 'generegview' );

  # The stray comma after 'image_width' in the original was a typo
  # (Perl ignores consecutive list commas, but it obscured intent).
  $wuc->set_parameters({
    'container_width' => $extended_slice->length,
    'image_width'     => $self->image_width || 800,
  });

  ## Turn gene display on....
  my $key = $wuc->get_track_key( 'transcript', $object );
  $wuc->modify_configs( [$key], {qw(display collapsed_label)} );

  if ( $self->hub->species_defs->databases->{'DATABASE_FUNCGEN'} ) {
    $wuc->{'data_by_cell_line'} = $self->new_object('Slice', $extended_slice, $object->__data)->get_cell_line_data($wuc) if keys %{$self->hub->species_defs->databases->{'DATABASE_FUNCGEN'}{'tables'}{'cell_type'}{'ids'}};
  }

  my $image = $self->new_image( $extended_slice, $wuc, [] );

  # NOTE(review): imagemap appears to be an lvalue accessor -- confirm.
  $image->imagemap = 'yes';
  $image->set_button( 'drag', 'title' => 'Drag to select region' );

  # _export_image returns true when the request was an image export, in
  # which case nothing more is rendered here.
  return if $self->_export_image( $image );
  return $image->render;
}
1;
| Ensembl/ensembl-webcode | modules/EnsEMBL/Web/Component/Gene/RegulationImage.pm | Perl | apache-2.0 | 2,026 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Enum of the dimensions on which a conversion value rule set can match
# (mirrors the google.ads.googleads.v8 ValueRuleSetDimensionEnum proto).
package Google::Ads::GoogleAds::V8::Enums::ValueRuleSetDimensionEnum;

use strict;
use warnings;

# Const::Exporter both defines each name as a constant and exports it;
# each constant's value is the string the API expects.
use Const::Exporter enums => [
  UNSPECIFIED  => "UNSPECIFIED",
  UNKNOWN      => "UNKNOWN",
  GEO_LOCATION => "GEO_LOCATION",
  DEVICE       => "DEVICE",
  AUDIENCE     => "AUDIENCE"
];

1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V8/Enums/ValueRuleSetDimensionEnum.pm | Perl | apache-2.0 | 865 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2020] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 NAME
XrefParser::EntrezGeneParser
=head1 DESCRIPTION
This parser will read and create dependent xrefs from a simple
comma-delimited file downloaded from the EntrezGene database.
=head1 SYNOPSIS
my $parser = XrefParser::EntrezGeneParser->new($db->dbh);
$parser->run({
source_id => 11,
species_id => 9606,
files => [ "gene_info.gz" ],
});
=cut
package XrefParser::EntrezGeneParser;
use strict;
use warnings;
use Carp;
use Text::CSV;
use parent qw( XrefParser::BaseParser );
my $EXPECTED_NUMBER_OF_COLUMNS = 16;
=head2 run
Arg [1] : HashRef standard list of arguments from ParseSource
Description: Add dependent xrefs from EntrezGene to the xref database
Return type: Int; 0 upon success
Exceptions : throws on all processing errors
Caller : ParseSource in the xref pipeline
=cut
# run
#
# Reads a tab-separated EntrezGene gene_info file and creates one
# EntrezGene xref plus one parallel WikiGene xref per gene of the
# requested species, registering any synonyms against the EntrezGene
# source. Records are de-duplicated by accession. Returns 0 on success
# and confesses on any processing error.
sub run {
  my ( $self, $ref_arg ) = @_;
  my $source_id = $ref_arg->{source_id};
  my $species_id = $ref_arg->{species_id};
  my $species_name = $ref_arg->{species};
  my $files = $ref_arg->{files};
  my $verbose = $ref_arg->{verbose} // 0;
  my $dbi = $ref_arg->{dbi} // $self->dbi;

  if ( ( !defined $source_id ) or
       ( !defined $species_id ) or
       ( !defined $files ) )
  {
    confess 'Need to pass source_id, species_id and files';
  }

  my $file = @{$files}[0];
  # Robustness: an empty files array would otherwise surface later as a
  # confusing get_filehandle() failure.
  if ( !defined $file ) {
    confess 'No file provided in the files argument';
  }

  # WikiGene xrefs are created alongside the EntrezGene ones below.
  my $wiki_source_id =
    $self->get_source_id_for_source_name( 'WikiGene', undef, $dbi );

  my $eg_io = $self->get_filehandle($file);
  if ( !defined $eg_io ) {
    confess "Could not open $file";
  }

  my $input_file = Text::CSV->new({
    sep_char => "\t",
    empty_is_undef => 1,
    allow_loose_quotes => 1
  })
    || confess "Cannot use file $file: " . Text::CSV->error_diag();

  # process header
  if ( ! is_file_header_valid( $input_file->header( $eg_io ) ) ) {
    confess "Malformed or unexpected header in EntrezGene file '${file}'";
  }

  my $xref_count = 0;
  my $syn_count = 0;
  my %seen; # record already processed xrefs

  # read data and load xrefs
 RECORD:
  while ( my $data = $input_file->getline($eg_io) ) {
    my ( $tax_id, $acc, $symbol, undef, $synonyms, undef, undef, undef, $desc ) = @{ $data };

    # species_id corresponds to the species taxonomy id, see:
    # https://github.com/Ensembl/ensembl-xref/pull/31#issuecomment-445838474
    if ( $tax_id ne $species_id ) {
      next RECORD;
    }
    if ( exists $seen{$acc} ) {
      next RECORD;
    }

    $self->add_xref({
      acc => $acc,
      label => $symbol,
      desc => $desc,
      source_id => $source_id,
      species_id => $species_id,
      dbi => $dbi,
      info_type => 'DEPENDENT'
    });
    $self->add_xref({
      acc => $acc,
      label => $symbol,
      desc => $desc,
      source_id => $wiki_source_id,
      species_id => $species_id,
      dbi => $dbi,
      info_type => 'DEPENDENT'
    });
    $xref_count += 1;

    # Synonyms are pipe-separated; a lone '-' marks "no synonyms".
    my @syn = split qr{ \| }msx, $synonyms;
    foreach my $synonym ( @syn ) {
      if ( $synonym ne q{-} ) {
        $self->add_to_syn( $acc, $source_id, $synonym, $species_id, $dbi );
        $syn_count += 1;
      }
    }

    $seen{$acc} = 1;
  } ## end while ( my $data = $input_file...)

  # getline() returning undef before EOF indicates a parse error.
  $input_file->eof ||
    confess "Error parsing file $file, should be EOF: " . $input_file->error_diag();
  $eg_io->close();

  if ( $verbose ) {
    print $xref_count . " EntrezGene Xrefs added with $syn_count synonyms\n";
  }

  return 0;
} ## end sub run
=head2 is_file_header_valid
Arg [1..N] : list of column names provided by Text::CSV::getline()
Example : if ( ! is_file_header_valid( $csv->getline( $fh ) ) {
confess 'Bad header';
}
Description: Verifies if the header of a EntrezGene file follows expected
syntax.
Return type: boolean
Exceptions : none
Caller : internal
Status : Stable
=cut
sub is_file_header_valid {
  my ( @header ) = @_;

  # Expected column-name patterns, in file order. Matching is loose on
  # purpose: only the first is anchored, and names are lower-case, as
  # produced by Text::CSV's header() column-name munging.
  my @field_patterns
    = (
        qr{ \A [#]? \s* tax_id }msx,
        qr{ geneid }msx,
        qr{ symbol }msx,
        qr{ locustag }msx,
        qr{ synonyms }msx,
        qr{ dbxrefs }msx,
        qr{ chromosome }msx,
        qr{ map_location }msx,
        qr{ description }msx,
        qr{ type_of_gene }msx,
        qr{ symbol_from_nomenclature_authority }msx,
        qr{ full_name_from_nomenclature_authority }msx,
        qr{ nomenclature_status }msx,
        qr{ other_designations }msx,
        qr{ modification_date }msx,
        qr{ feature_type }msx,
      );

  # A header with the wrong number of columns can never be valid.
  # Comparing against the pattern list keeps this check in lockstep
  # with the patterns themselves.
  if ( scalar @header != scalar @field_patterns ) {
    return 0;
  }

  # Every column must match its pattern, position by position.
  for my $i ( 0 .. $#field_patterns ) {
    my $pattern = $field_patterns[$i];
    if ( !scalar ( $header[$i] =~ m{ $pattern }msx ) ) {
      return 0;
    }
  }

  # If we have made it this far, all should be in order
  return 1;
}
1;
| james-monkeyshines/ensembl | misc-scripts/xref_mapping/XrefParser/EntrezGeneParser.pm | Perl | apache-2.0 | 5,728 |
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package cloud::aws::ec2::mode::status;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
my %map_type = (
'instance' => 'InstanceId',
'asg' => 'AutoScalingGroupName'
);
my %map_status = (
0 => 'passed',
1 => 'failed'
);
# Build the per-instance output prefix, e.g. "Asg 'centreon-middleware' ".
sub prefix_metric_output {
    my ($self, %options) = @_;

    my $instance = $options{instance_value};
    return ucfirst($instance->{type}) . " '" . $instance->{display} . "' ";
}
# Evaluate the user-supplied status expressions; critical takes
# precedence over warning, and the default severity is 'ok'.
sub custom_status_threshold {
    my ($self, %options) = @_;

    my $opts = $self->{instance_mode}->{option_results};
    foreach my $severity ('critical', 'warning') {
        my $expr = $opts->{$severity . '_status'};
        next if (!defined($expr) || $expr eq '');
        return $severity if ($self->eval(value => $expr));
    }
    return 'ok';
}
# Format one status line, e.g. "StatusCheckFailed_System: passed".
sub custom_status_output {
    my ($self, %options) = @_;

    return sprintf('%s: %s',
        $self->{result_values}->{metric}, $self->{result_values}->{status});
}
# Translate the raw CloudWatch value (0/1) into 'passed'/'failed' via
# %map_status and remember which metric is being evaluated.
sub custom_status_calc {
    my ($self, %options) = @_;

    my $metric = $options{extra_options}->{metric};
    my $raw = $options{new_datas}->{$self->{instance} . '_' . $metric};
    $self->{result_values}->{status} = $map_status{$raw};
    $self->{result_values}->{metric} = $metric;
    return 0;
}
# Declare the counter tree: one 'metric' group whose entries are the two
# CloudWatch status-check metrics, each evaluated through the custom
# status closures (expression thresholds, no perfdata).
sub set_counters {
    my ($self, %options) = @_;

    $self->{maps_counters_type} = [
        { name => 'metric', type => 1, cb_prefix_output => 'prefix_metric_output', message_multiple => "All status metrics are ok", skipped_code => { -10 => 1 } },
    ];

    foreach my $status_metric ('StatusCheckFailed_Instance', 'StatusCheckFailed_System') {
        push @{$self->{maps_counters}->{metric}}, {
            label => lc($status_metric), threshold => 0, set => {
                key_values => [ { name => $status_metric }, { name => 'display' } ],
                closure_custom_calc => $self->can('custom_status_calc'),
                closure_custom_calc_extra_options => { metric => $status_metric },
                closure_custom_output => $self->can('custom_status_output'),
                closure_custom_perfdata => sub { return 0; },
                closure_custom_threshold_check => $self->can('custom_status_threshold')
            }
        };
    }
}
# Constructor: registers the mode's command-line options.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    # --critical-status defaults to alerting whenever a check reports
    # 'failed'; both thresholds are status expressions, not numbers.
    my %arguments = (
        'type:s' => { name => 'type' },
        'name:s@' => { name => 'name' },
        'warning-status:s' => { name => 'warning_status', default => '' },
        'critical-status:s' => { name => 'critical_status', default => '%{status} =~ /failed/i' }
    );
    $options{options}->add_options(arguments => { %arguments });

    return $self;
}
# Validate --type and --name, collect the non-empty instance names and
# set up the CloudWatch query parameters (timeframe/period defaults,
# statistics, metric list).
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::check_options(%options);

    my $type = $self->{option_results}->{type};
    if (!defined($type) || $type eq '') {
        $self->{output}->add_option_msg(short_msg => "Need to specify --type option.");
        $self->{output}->option_exit();
    }
    if ($type ne 'asg' && $type ne 'instance') {
        $self->{output}->add_option_msg(short_msg => "Instance type '" . $type . "' is not handled for this mode");
        $self->{output}->option_exit();
    }

    if (!defined($self->{option_results}->{name}) || $self->{option_results}->{name} eq '') {
        $self->{output}->add_option_msg(short_msg => "Need to specify --name option.");
        $self->{output}->option_exit();
    }
    # Keep only non-empty instance names.
    foreach my $instance (@{$self->{option_results}->{name}}) {
        push @{$self->{aws_instance}}, $instance if ($instance ne '');
    }

    $self->{aws_timeframe} = defined($self->{option_results}->{timeframe}) ? $self->{option_results}->{timeframe} : 90;
    $self->{aws_period} = defined($self->{option_results}->{period}) ? $self->{option_results}->{period} : 60;
    $self->{aws_statistics} = ['Average'];
    $self->{aws_metrics} = ['StatusCheckFailed_Instance', 'StatusCheckFailed_System'];

    $self->change_macros(macros => ['warning_status', 'critical_status']);
}
# Fetch the status-check metrics from CloudWatch for every selected
# instance and keep the averages; exit with an error when nothing at all
# was returned.
sub manage_selection {
    my ($self, %options) = @_;

    foreach my $instance (@{$self->{aws_instance}}) {
        my $results = $options{custom}->cloudwatch_get_metrics(
            namespace => 'AWS/EC2',
            dimensions => [ { Name => $map_type{$self->{option_results}->{type}}, Value => $instance } ],
            metrics => $self->{aws_metrics},
            statistics => $self->{aws_statistics},
            timeframe => $self->{aws_timeframe},
            period => $self->{aws_period},
        );

        foreach my $metric (keys %$results) {
            my $average = $results->{$metric}->{average};
            next if (!defined($average));

            $self->{metric}->{$instance}->{display} = $instance;
            $self->{metric}->{$instance}->{type} = $self->{option_results}->{type};
            $self->{metric}->{$instance}->{$metric} = $average;
        }
    }

    if (scalar(keys %{$self->{metric}}) <= 0) {
        $self->{output}->add_option_msg(short_msg => 'No metrics detected.');
        $self->{output}->option_exit();
    }
}
1;
__END__
=head1 MODE
Check EC2 instances status metrics.
Example:
perl centreon_plugins.pl --plugin=cloud::aws::ec2::plugin --custommode=paws --mode=status --region='eu-west-1'
--type='asg' --name='centreon-middleware' --verbose
See 'https://docs.aws.amazon.com/AmazonCloudWatch/latest/monitoring/ec2-metricscollected.html' for more information.
Default statistic: 'average' / Only valid statistic: 'average'.
=over 8
=item B<--type>
Set the instance type (Required) (Can be: 'asg', 'instance').
=item B<--name>
Set the instance name (Required) (Can be multiple).
=item B<--warning-status>
Set warning threshold for status.
Can use special variables like: %{status}.
'status' can be: 'passed', 'failed'.
=item B<--critical-status>
Set critical threshold for status (Default: '%{status} =~ /failed/i').
Can use special variables like: %{status}.
'status' can be: 'passed', 'failed'.
=back
=cut
| centreon/centreon-plugins | cloud/aws/ec2/mode/status.pm | Perl | apache-2.0 | 7,334 |
#!/usr/bin/perl -w
#
# Copyright 2019, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This example creates a hotel campaign, a hotel ad group and hotel ad
# group ad.
#
# Prerequisite: You need to have access to the Hotel Ads Center, which can be
# granted during integration with Google Hotels. The integration instructions
# can be found at:
# https://support.google.com/hotelprices/answer/6101897
use strict;
use warnings;
use utf8;
use FindBin qw($Bin);
use lib "$Bin/../../lib";
use Google::Ads::GoogleAds::Client;
use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;
use Google::Ads::GoogleAds::V10::Resources::CampaignBudget;
use Google::Ads::GoogleAds::V10::Resources::Campaign;
use Google::Ads::GoogleAds::V10::Resources::HotelSettingInfo;
use Google::Ads::GoogleAds::V10::Resources::NetworkSettings;
use Google::Ads::GoogleAds::V10::Resources::AdGroup;
use Google::Ads::GoogleAds::V10::Resources::AdGroupAd;
use Google::Ads::GoogleAds::V10::Resources::Ad;
use Google::Ads::GoogleAds::V10::Common::PercentCpc;
use Google::Ads::GoogleAds::V10::Common::HotelAdInfo;
use Google::Ads::GoogleAds::V10::Enums::BudgetDeliveryMethodEnum qw(STANDARD);
use Google::Ads::GoogleAds::V10::Enums::AdvertisingChannelTypeEnum qw(HOTEL);
use Google::Ads::GoogleAds::V10::Enums::AdGroupTypeEnum qw(HOTEL_ADS);
use Google::Ads::GoogleAds::V10::Enums::CampaignStatusEnum;
use Google::Ads::GoogleAds::V10::Enums::AdGroupStatusEnum;
use Google::Ads::GoogleAds::V10::Enums::AdGroupAdStatusEnum;
use
Google::Ads::GoogleAds::V10::Services::CampaignBudgetService::CampaignBudgetOperation;
use Google::Ads::GoogleAds::V10::Services::CampaignService::CampaignOperation;
use Google::Ads::GoogleAds::V10::Services::AdGroupService::AdGroupOperation;
use Google::Ads::GoogleAds::V10::Services::AdGroupAdService::AdGroupAdOperation;
use Getopt::Long qw(:config auto_help);
use Pod::Usage;
use Cwd qw(abs_path);
use Data::Uniqid qw(uniqid);
# The following parameter(s) should be provided to run the example. You can
# either specify these by changing the INSERT_XXX_ID_HERE values below, or on
# the command line.
#
# Parameters passed on the command line will override any parameters set in
# code.
#
# Running the example with -h will print the command line usage.
my $customer_id = "INSERT_CUSTOMER_ID_HERE";
# Specify your Hotels Ads Center account ID below. You can see how to find the
# account ID in the Hotel Ads Center at:
# https://support.google.com/hotelprices/answer/6399770.
# This ID is the same account ID that you use in API requests to the Travel
# Partner APIs at:
# https://developers.google.com/hotels/hotel-ads/api-reference/.
my $hotel_center_account_id = "INSERT_HOTEL_CENTER_ACCOUNT_ID_HERE";
# Specify maximum bid limit that can be set when creating a campaign using the
# Percent CPC bidding strategy.
my $cpc_bid_ceiling_micro_amount = 20000000;
# Runs the example: creates a shared budget, a hotel campaign that uses
# it, an ad group in that campaign, and finally a hotel ad in the group.
sub add_hotel_ad {
  my ($api_client, $customer_id, $hotel_center_account_id,
    $cpc_bid_ceiling_micro_amount)
    = @_;

  # Each helper returns the resource name of the object it created,
  # which the following step needs as its parent.
  my $budget_resource_name = add_campaign_budget($api_client, $customer_id);

  my $campaign_resource_name = add_hotel_campaign(
    $api_client, $customer_id, $budget_resource_name,
    $hotel_center_account_id, $cpc_bid_ceiling_micro_amount);

  my $ad_group_resource_name =
    add_hotel_ad_group($api_client, $customer_id, $campaign_resource_name);

  add_hotel_ad_group_ad($api_client, $customer_id, $ad_group_resource_name);

  return 1;
}
# Creates a new explicitly-shared campaign budget in the client account
# and returns its resource name.
sub add_campaign_budget {
  my ($api_client, $customer_id) = @_;

  # uniqid() keeps the budget name unique across repeated runs.
  my $budget = Google::Ads::GoogleAds::V10::Resources::CampaignBudget->new({
    name           => "Interplanetary Cruise Budget #" . uniqid(),
    deliveryMethod => STANDARD,
    # Budget amount, in micros of the account currency.
    amountMicros   => 5000000,
    # A shared budget can be attached to more than one campaign.
    explicitlyShared => "true"
  });

  my $operation =
    Google::Ads::GoogleAds::V10::Services::CampaignBudgetService::CampaignBudgetOperation
    ->new({create => $budget});

  my $response = $api_client->CampaignBudgetService()->mutate({
    customerId => $customer_id,
    operations => [$operation]});
  my $budget_resource_name = $response->{results}[0]{resourceName};

  printf "Added a budget with resource name: '%s'.\n", $budget_resource_name;

  return $budget_resource_name;
}
# Creates a new hotel campaign in the specified client account.
# [START add_hotel_ad]
sub add_hotel_campaign {
  my ($api_client, $customer_id, $budget_resource_name,
    $hotel_center_account_id, $cpc_bid_ceiling_micro_amount)
    = @_;

  # [START add_hotel_ad_1]
  # Hotel campaigns require the HOTEL advertising channel type, a hotel
  # setting pointing at the Hotel Ads Center account, the Percent CPC
  # bidding strategy (only Manual CPC and Percent CPC are allowed), and
  # a network configuration limited to Google Search.
  my $hotel_setting =
    Google::Ads::GoogleAds::V10::Resources::HotelSettingInfo->new({
      hotelCenterId => $hotel_center_account_id
    });
  my $bidding_strategy = Google::Ads::GoogleAds::V10::Common::PercentCpc->new(
    {cpcBidCeilingMicros => $cpc_bid_ceiling_micro_amount});
  my $network_settings =
    Google::Ads::GoogleAds::V10::Resources::NetworkSettings->new({
      targetGoogleSearch => "true"
    });

  my $campaign = Google::Ads::GoogleAds::V10::Resources::Campaign->new({
    name                   => "Interplanetary Cruise Campaign #" . uniqid(),
    advertisingChannelType => HOTEL,
    hotelSetting           => $hotel_setting,
    # Recommendation: start PAUSED so the ads don't serve before
    # targeting is in place; switch to ENABLED when ready.
    status => Google::Ads::GoogleAds::V10::Enums::CampaignStatusEnum::PAUSED,
    percentCpc      => $bidding_strategy,
    campaignBudget  => $budget_resource_name,
    networkSettings => $network_settings
  });
  # [END add_hotel_ad_1]

  my $operation =
    Google::Ads::GoogleAds::V10::Services::CampaignService::CampaignOperation->
    new({create => $campaign});

  my $campaign_resource_name = $api_client->CampaignService()->mutate({
    customerId => $customer_id,
    operations => [$operation]})->{results}[0]{resourceName};

  printf "Added a hotel campaign with resource name: '%s'.\n",
    $campaign_resource_name;

  return $campaign_resource_name;
}
# [END add_hotel_ad]
# Creates a new hotel ad group in the specified campaign.
# [START add_hotel_ad_2]
sub add_hotel_ad_group {
  my ($api_client, $customer_id, $campaign_resource_name) = @_;

  # Hotel ad groups must use the HOTEL_ADS ad group type; no other type
  # is accepted.
  my $ad_group = Google::Ads::GoogleAds::V10::Resources::AdGroup->new({
    name         => "Earth to Mars Cruise #" . uniqid(),
    campaign     => $campaign_resource_name,
    type         => HOTEL_ADS,
    cpcBidMicros => 1000000,
    status => Google::Ads::GoogleAds::V10::Enums::AdGroupStatusEnum::ENABLED
  });

  my $operation =
    Google::Ads::GoogleAds::V10::Services::AdGroupService::AdGroupOperation->
    new({create => $ad_group});

  my $response = $api_client->AdGroupService()->mutate({
    customerId => $customer_id,
    operations => [$operation]});
  my $ad_group_resource_name = $response->{results}[0]{resourceName};

  printf "Added a hotel ad group with resource name: '%s'.\n",
    $ad_group_resource_name;

  return $ad_group_resource_name;
}
# [END add_hotel_ad_2]
# Creates a new hotel ad group ad in the specified ad group.
# [START add_hotel_ad_3]
sub add_hotel_ad_group_ad {
    my ($api_client, $customer_id, $ad_group_resource_name) = @_;

    # A hotel ad carries no creative fields of its own; all displayed content
    # is derived from the linked Hotel Center account, so a bare HotelAdInfo
    # is sufficient.
    my $hotel_ad = Google::Ads::GoogleAds::V10::Resources::Ad->new({
        hotelAd => Google::Ads::GoogleAds::V10::Common::HotelAdInfo->new()});

    # Attach the ad to the target ad group, enabled from the start.
    my $new_ad_group_ad = Google::Ads::GoogleAds::V10::Resources::AdGroupAd->new({
        adGroup => $ad_group_resource_name,
        ad      => $hotel_ad,
        status  => Google::Ads::GoogleAds::V10::Enums::AdGroupAdStatusEnum::ENABLED
    });

    # Wrap it in a create operation and issue the mutate request.
    my $operation =
      Google::Ads::GoogleAds::V10::Services::AdGroupAdService::AdGroupAdOperation
      ->new({create => $new_ad_group_ad});
    my $response = $api_client->AdGroupAdService()->mutate({
        customerId => $customer_id,
        operations => [$operation]});

    # Report and return the resource name of the ad group ad just created.
    my $resource_name = $response->{results}[0]{resourceName};
    printf "Added a hotel ad group ad with resource name: '%s'.\n",
      $resource_name;
    return $resource_name;
}
# [END add_hotel_ad_3]
# Don't run the example if the file is being included (e.g. by a test
# harness via require); a true value satisfies require's contract.
if (abs_path($0) ne abs_path(__FILE__)) {
    return 1;
}
# Get Google Ads Client, credentials will be read from ~/googleads.properties.
my $api_client = Google::Ads::GoogleAds::Client->new();
# By default examples are set to die on any server returned fault.
$api_client->set_die_on_faults(1);
# Parameters passed on the command line will override any parameters set in code.
GetOptions(
    "customer_id=s" => \$customer_id,
    "hotel_center_account_id=i" => \$hotel_center_account_id,
    "cpc_bid_ceiling_micro_amount=i" => \$cpc_bid_ceiling_micro_amount
);
# Print the help message if the parameters are not initialized in the code nor
# in the command line.
pod2usage(2)
  if not check_params($customer_id, $hotel_center_account_id,
    $cpc_bid_ceiling_micro_amount);
# Call the example.  The non-destructive substitution (s///gr) strips the
# dashes from a "123-456-7890"-style customer ID without modifying the
# original variable.
add_hotel_ad($api_client, $customer_id =~ s/-//gr,
    $hotel_center_account_id, $cpc_bid_ceiling_micro_amount);
=pod
=head1 NAME
add_hotel_ad
=head1 DESCRIPTION
This example creates a hotel campaign, a hotel ad group and hotel ad group ad.
Prerequisite: You need to have access to the Hotel Ads Center, which can be granted
during integration with Google Hotels. The integration instructions can be found at:
https://support.google.com/hotelprices/answer/6101897
=head1 SYNOPSIS
add_hotel_ad.pl [options]
-help Show the help message.
-customer_id The Google Ads customer ID.
-hotel_center_account_id The Hotel Ads Center account ID.
-cpc_bid_ceiling_micro_amount [optional] The CPC bid ceiling micro amount.
=cut
| googleads/google-ads-perl | examples/hotel_ads/add_hotel_ad.pl | Perl | apache-2.0 | 11,404 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
package EnsEMBL::Web::Component::Help::Contact;
use strict;
use warnings;
use base qw(EnsEMBL::Web::Component::Help);
use URI::Escape qw(uri_unescape);
sub _init {
    ## The contact form is always generated fresh: no page caching,
    ## no AJAX loading, and no user-configurable view options.
    my ($self) = @_;
    $self->cacheable(0);
    $self->ajaxable(0);
    $self->configurable(0);
}
sub content {
  ## Renders the HelpDesk contact form.  Field values arrive URI-escaped in
  ## the request parameters (so the form can be pre-filled on a retry), and
  ## the referring page is recorded so it can be included in the email.
  my $self = shift;
  my $hub  = $self->hub;

  ## Where did the user come from?  The referer header may be absent
  ## (direct visit, privacy settings), so fall back to an empty string
  ## instead of splitting undef.
  my ($path, $query) = split('\?', $ENV{'HTTP_REFERER'} || '');
  my @A = split('/', $path || '');
  my $source = $A[-1];

  my $form = $self->new_form({'id' => 'contact', 'class' => 'compact', 'action' => "/Help/SendEmail", 'method' => 'post', 'enctype'=>'multipart/form-data', 'data-ajax'=>'false'});
  my $fieldset = $form->add_fieldset;

  if ($hub->param('strong')) {
    ## BUGFIX: a literal percent sign in an sprintf format must be written
    ## '%%'.  The previous '50% of' was parsed as a space-flagged octal
    ## conversion ('% o'), consuming a missing argument and corrupting the
    ## rendered message.
    $fieldset->add_notes(sprintf('Sorry, no pages were found containing the term <strong>%s</strong> (or more than 50%% of articles contain this term).
Please <a href="/Help/Search">try again</a> or use the form below to contact HelpDesk:', $hub->param('kw')));
  }

  ## Guard each parameter with || '' before unescaping so uri_unescape
  ## never sees undef (which would emit warnings) when a field is missing.
  $fieldset->add_field([{
    'type'     => 'String',
    'name'     => 'name',
    'label'    => 'Your name',
    'value'    => uri_unescape($hub->param('name') || ''),
    'required' => 1,
  },
  {
    'type'     => 'Email',
    'name'     => 'address',
    'label'    => 'Your Email',
    'value'    => uri_unescape($hub->param('address') || ''),
    'required' => 1,
  },
  {
    'type'  => 'String',
    'name'  => 'subject',
    'label' => 'Subject',
    'value' => uri_unescape($hub->param('subject') || ''),
  },
  {
    'type'  => 'Text',
    'name'  => 'message',
    'label' => 'Message',
    'value' => uri_unescape($hub->param('message') || ''),
    'notes' => 'Tip: drag the bottom righthand corner to make this box bigger.',
  },
  {
    'type'  => 'File',
    'name'  => 'attachment',
    'label' => 'Include a file or screenshot (optional)',
    'value' => '',
  }]);

  ## Hidden context passed straight through to the SendEmail action.
  $fieldset->add_hidden({
    'name'  => 'string',
    'value' => uri_unescape($hub->param('string') || ''),
  });
  $fieldset->add_hidden({
    'name'  => 'source',
    'value' => $source || '',
  });
  $fieldset->add_button({
    'type'  => 'Submit',
    'name'  => 'submit',
    'value' => 'Send',
  });

  ## Stop jQuery Mobile from restyling the form controls (data-role="none").
  $_->set_attribute('data-role', 'none') for @{$fieldset->get_elements_by_tag_name([qw(input select textarea)])};

  return $form->render;
}
1;
| Ensembl/ensembl-webcode | modules/EnsEMBL/Web/Component/Help/Contact.pm | Perl | apache-2.0 | 3,135 |
#
# Copyright 2021 Electric Cloud, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
use strict;
use warnings;
no warnings 'redefine';
use XML::Simple;
use Data::Dumper;
use ElectricCommander::Util;
use subs qw(debug);
use Time::HiRes qw(time gettimeofday tv_interval);
my @logs = ();

# Append a timestamped message to the in-memory log and, when the
# EC_SETUP_DEBUG environment variable is set, echo the same line to stdout.
sub debug($) {
    my ($msg) = @_;
    push @logs, scalar(time) . ": " . $msg;
    print scalar(time) . ": $msg\n" if $ENV{EC_SETUP_DEBUG};
}
# External Credential Management update:
# Retrieve the list of procedure steps that have credentials attached;
# the list is stored as JSON in a plugin property sheet and decoded by
# getStepsWithCredentials() (defined elsewhere in this script).
use JSON;
my $stepsWithCredentials = getStepsWithCredentials();
# End of External Credential Management update
# ---------------------------------------------------------------------------
# Step-picker metadata: one hash per plugin procedure.  Each entry defines
# the label, backing procedure, description and category shown in the
# "create step" picker.  All hashes are collected into
# @::createStepPickerSteps further below.
# ---------------------------------------------------------------------------

# Application lifecycle and deployment procedures.
my %startApp = (
    label => "WebLogic - Start Application",
    procedure => "StartApp",
    description => "Starts an application",
    category => "Application Server"
);
my %stopApp = (
    label => "WebLogic - Stop Application",
    procedure => "StopApp",
    description => "Stops an application",
    category => "Application Server"
);
my %checkServerStatus = (
    label => "WebLogic - Check Server Status",
    procedure => "CheckServerStatus",
    description => "Checks the status of the given server URL",
    category => "Application Server"
);
my %deployApp = (
    label => "WebLogic - Deploy Application",
    procedure => "DeployApp",
    description => "Deploys or redeploys an application or module using the weblogic",
    category => "Application Server"
);
my %runDeployer = (
    label => "WebLogic - Run Deployer",
    procedure => "RunDeployer",
    description => "Runs weblogic.Deployer in a free-mode",
    category => "Application Server"
);
my %undeployApp = (
    label => "WebLogic - Undeploy Application",
    procedure => "UndeployApp",
    description => "Stops the deployment unit and removes staged files from target servers",
    category => "Application Server"
);
# Step-picker entry for the free-form WLST runner.  The WLST tool class is
# weblogic.WLST (the description previously read "weblogic1.WLST" — typo).
my %runWLST = (
    label => "WebLogic - Run WLST",
    procedure => "RunWLST",
    description => "Runs Jython scripts using weblogic.WLST",
    category => "Application Server"
);
# Server and node-manager lifecycle procedures.
my %startAdminServer = (
    label => "WebLogic - Start Admin Server",
    procedure => "StartAdminServer",
    description => "Starts the WebLogic Admin Server",
    category => "Application Server"
);
my %stopAdminServer = (
    label => "WebLogic - Stop Admin Server",
    procedure => "StopAdminServer",
    description => "Stops the WebLogic Admin Server",
    category => "Application Server"
);
my %startManagedServer = (
    label => "WebLogic - Start Managed Server",
    procedure => "StartManagedServer",
    description => "Starts a WebLogic Managed Server",
    category => "Application Server"
);
my %stopManagedServer = (
    label => "WebLogic - Stop Managed Server",
    procedure => "StopManagedServer",
    description => "Stops a WebLogic Managed Server",
    category => "Application Server"
);
my %checkPageStatus = (
    label => "WebLogic - Check Page Status",
    procedure => "CheckPageStatus",
    description => "Checks the status of the given page URL",
    category => "Application Server"
);
my %startNodeManager = (
    label => "WebLogic - Start Node Manager",
    procedure => "StartNodeManager",
    description => "Starts the WebLogic Node Manager",
    category => "Application Server"
);
my %stopNodeManager = (
    label => "WebLogic - Stop Node Manager",
    procedure => "StopNodeManager",
    description => "Stops the WebLogic Node Manager",
    category => "Application Server"
);
# Datasource procedures (the plain Create variant is deprecated in favor
# of CreateOrUpdateDatasource below).
my %createDatasource = (
    label => "WebLogic - Create Datasource",
    procedure => "CreateDatasource",
    description => "Creates a Datasource (DEPRECATED)",
    category => "Application Server"
);
my %deleteDatasource = (
    label => "WebLogic - Delete Datasource",
    procedure => "DeleteDatasource",
    description => "Deletes a Datasource",
    category => "Application Server"
);
my %suspendServer = (
    label => "WebLogic - Suspend Server",
    procedure => "SuspendServer",
    description => "Suspends a server",
    category => "Application Server"
);
my %resumeServer = (
    label => "WebLogic - Resume Server",
    procedure => "ResumeServer",
    description => "Resumes a server",
    category => "Application Server"
);
# Security realm user/group management procedures.
my %createUser = (
    label => "WebLogic - Create User",
    procedure => "CreateUser",
    description => "Create new user",
    category => "Application Server"
);
my %createGroup = (
    label => "WebLogic - Create Group",
    procedure => "CreateGroup",
    description => "Create new group",
    category => "Application Server"
);
my %deleteUser = (
    label => "WebLogic - Delete User",
    procedure => "DeleteUser",
    description => "Delete user",
    category => "Application Server"
);
my %deleteGroup = (
    label => "WebLogic - Delete Group",
    procedure => "DeleteGroup",
    description => "Delete Group",
    category => "Application Server"
);
my %addUserToGroup = (
    label => "WebLogic - Add User To Group",
    procedure => "AddUserToGroup",
    description => "Add User To Group",
    category => "Application Server"
);
my %removeUserFromGroup = (
    label => "WebLogic - Remove User From Group",
    procedure => "RemoveUserFromGroup",
    description => "Remove User From Group",
    category => "Application Server"
);
my %changeUserPassword = (
    label => "WebLogic - Change User Password",
    procedure => "ChangeUserPassword",
    description => "Change User Password",
    category => "Application Server"
);
my %unlockUserAccount = (
    label => "WebLogic - Unlock User Account",
    procedure => "UnlockUserAccount",
    description => "Unlock User Account",
    category => "Application Server"
);
my %updateApp = (
    label => "WebLogic - Update Application (DEPRECATED)",
    procedure => "UpdateApp",
    description => "Update Application",
    category => "Application Server"
);
# Domain, template and cluster topology procedures.
my %createDomain = (
    label => "WebLogic - Create Domain",
    procedure => "CreateDomain",
    description => "Create a new domain from template",
    category => "Application Server"
);
my %createTemplate = (
    label => "WebLogic - Create Template",
    procedure => "CreateTemplate",
    description => "Create a domain template from an existing domain",
    category => "Application Server"
);
my %createCluster = (
    label => "WebLogic - Create Cluster",
    procedure => "CreateCluster",
    description => "Create a new cluster",
    category => "Application Server"
);
my %deleteCluster = (
    label => "WebLogic - Delete Cluster",
    procedure => "DeleteCluster",
    description => "Delete a cluster",
    category => "Application Server"
);
# NOTE: these two procedure names start lower-case, unlike the rest —
# they must match the procedure names as declared in the plugin.
my %createManagedServer = (
    label => "WebLogic - Create Managed Server",
    procedure => "createManagedServer",
    description => "Create a new managed server",
    category => "Application Server"
);
my %deleteManagedServer = (
    label => "WebLogic - Delete Managed Server",
    procedure => "deleteManagedServer",
    description => "Delete a managed server",
    category => "Application Server"
);
my %addServerToCluster = (
    label => "WebLogic - Add Server To Cluster",
    procedure => "AddServerToCluster",
    description => "Add server to cluster",
    category => "Application Server"
);
my %configureUserLockoutManager = (
    label => "WebLogic - Configure User Lockout Manager",
    procedure => "ConfigureUserLockoutManager",
    description => "Configure User Lockout Manager",
    category => "Application Server"
);
my %startCluster = (
    label => "WebLogic - Start Cluster",
    procedure => "StartCluster",
    description => "StartCluster",
    category => "Application Server"
);
my %stopCluster = (
    label => "WebLogic - Stop Cluster",
    procedure => "StopCluster",
    description => "StopCluster",
    category => "Application Server"
);
my %updateAppConfig = (
    label => "WebLogic - Update Application Config",
    procedure => "UpdateAppConfig",
    description => "Updates Application Config",
    category => "Application Server"
);
my %checkClusterStatus = (
    label => "WebLogic - Check Cluster Status",
    procedure => "CheckClusterStatus",
    description => "Check Cluster Status",
    category => "Application Server"
);
# JMS and JDBC resource procedures.
my %createOrUpdateJMSModule = (
    label => "WebLogic - Create Or Update JMS Module",
    procedure => "CreateOrUpdateJMSModule",
    description => "Creates or updates JMS module",
    category => "Application Server"
);
my %createOrUpdateDatasource = (
    label => "WebLogic - Create Or Update Datasource",
    procedure => "CreateOrUpdateDatasource",
    description => "This procedure creates a new generic JDBC Data Source or updates an existing one based on the update action.",
    category => "Application Server"
);
my %createOrUpdateConnectionFactory = (
    label => "WebLogic - Create Or Update Connection Factory",
    procedure => "CreateOrUpdateConnectionFactory",
    description => "Creates or updates Connection Factory",
    category => "Application Server"
);
my %deleteConnectionFactory = (
    label => "WebLogic - Delete Connection Factory",
    procedure => "DeleteConnectionFactory",
    description => "Deletes Connection Factory",
    category => "Application Server"
);
my %createOrUpdateJMSQueue = (
    label => "WebLogic - Create Or Update JMS Queue",
    procedure => "CreateOrUpdateJMSQueue",
    description => "Creates or updates JMS Queue",
    category => "Application Server"
);
my %deleteJMSQueue = (
    label => "WebLogic - Delete JMS Queue",
    procedure => "DeleteJMSQueue",
    description => "Deletes JMS Queue",
    category => "Application Server"
);
my %createOrUpdateJMSTopic = (
    label => "WebLogic - Create Or Update JMS Topic",
    procedure => "CreateOrUpdateJMSTopic",
    description => "Creates or updates JMS Topic",
    category => "Application Server"
);
my %deleteJMSTopic = (
    label => "WebLogic - Delete JMS Topic",
    procedure => "DeleteJMSTopic",
    description => "Deletes JMS Topic",
    category => "Application Server"
);
my %deleteJMSModule = (
    label => "WebLogic - Delete JMS Module",
    procedure => "DeleteJMSModule",
    description => "Deletes JMS module",
    category => "Application Server"
);
my %createOrUpdateJMSModuleSubdeployment = (
    label => "WebLogic - Create Or Update JMS Module Subdeployment",
    procedure => "CreateOrUpdateJMSModuleSubdeployment",
    description => "Creates or updates JMS module Subdeployment",
    category => "Application Server"
);
my %deleteJMSModuleSubdeployment = (
    label => "WebLogic - Delete JMS Module Subdeployment",
    procedure => "DeleteJMSModuleSubdeployment",
    description => "Deletes JMS module Subdeployment",
    category => "Application Server"
);
my %createOrUpdateJMSServer = (
    label => "WebLogic - Create Or Update JMS Server",
    procedure => "CreateOrUpdateJMSServer",
    description => "Creates or updates JMS Server",
    category => "Application Server"
);
my %deleteJMSServer = (
    label => "WebLogic - Delete JMS Server",
    procedure => "DeleteJMSServer",
    description => "Deletes JMS Server",
    category => "Application Server"
);
# Purge all previously-registered picker entries — both the old
# "EC-WebLogic - *" names and every historical "WebLogic - *" spelling —
# before re-registering the current set via @::createStepPickerSteps below.
$batch->deleteProperty("/server/ec_customEditors/pickerStep/EC-WebLogic - Start App");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/EC-WebLogic - Stop App");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/EC-WebLogic - Check Server Status");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/EC-WebLogic - Deploy App");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/EC-WebLogic - Run Deployer");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/EC-WebLogic - Undeploy App");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/EC-WebLogic - Run WLST");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/EC-WebLogic - Start Admin Server");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/EC-WebLogic - Stop Admin Server");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/EC-WebLogic - Start Managed Server");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/EC-WebLogic - Stop Managed Server");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/EC-WebLogic - Check Page Status");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/EC-WebLogic - Start Node Manager");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/EC-WebLogic - Stop Node Manager");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Start App");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Stop App");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Check Server Status");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Deploy App");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Run Deployer");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Undeploy App");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Run WLST");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Start Admin Server");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Stop Admin Server");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Start Managed Server");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Stop Managed Server");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Check Page Status");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Start Node Manager");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Stop Node Manager");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Start Application");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Stop Application");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Deploy Application");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Undeploy Application");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Create Datasource");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Delete Datasource");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Create Data Source");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Delete Data Source");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Suspend Server");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Resume Server");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Start Cluster");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Stop Cluster");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Update Application");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Update Application (DEPRECATED)");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Update Application Config");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Check Cluster Status");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Create Or JMS Resource");
# $batch->deleteProperty(
#   "/server/ec_customEditors/pickerStep/WebLogic - Create Or Update Datasource");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Create Or Update Connection Factory");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Delete Connection Factory");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Create Or Update JMS Queue");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Delete JMS Queue");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Create Or Update JMS Topic");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Delete JMS Topic");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Create Or Update JMS Module");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Delete JMS Module");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Create Or Update JMS Module Subdeployment");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Delete JMS Module Subdeployment");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Create Or Update JMS Server");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Delete JMS Server");
$batch->deleteProperty("/server/ec_customEditors/pickerStep/WebLogic - Create or Update Datasource");
# The plugin framework reads this package variable to (re)create the
# step-picker entries defined in the hashes above.
@::createStepPickerSteps = (
    \%startApp, \%stopApp,
    \%checkServerStatus, \%deployApp,
    \%runDeployer, \%undeployApp,
    \%runWLST, \%startAdminServer,
    \%stopAdminServer, \%startManagedServer,
    \%stopManagedServer, \%checkPageStatus,
    \%startNodeManager, \%stopNodeManager,
    \%createDatasource, \%deleteDatasource,
    \%suspendServer, \%resumeServer,
    \%createUser, \%createGroup,
    \%deleteUser, \%deleteGroup,
    \%addUserToGroup, \%removeUserFromGroup,
    \%changeUserPassword, \%unlockUserAccount,
    \%updateApp, \%createDomain,
    \%createTemplate, \%createCluster,
    \%addServerToCluster, \%configureUserLockoutManager,
    \%deleteCluster, \%createManagedServer,
    \%deleteManagedServer, \%startCluster,
    \%stopCluster, \%updateAppConfig,
    \%checkClusterStatus, \%createOrUpdateConnectionFactory,
    \%deleteConnectionFactory, \%createOrUpdateJMSQueue,
    \%createOrUpdateJMSTopic, \%deleteJMSTopic,
    \%createOrUpdateJMSModule, \%deleteJMSModule,
    \%createOrUpdateJMSModuleSubdeployment, \%deleteJMSModuleSubdeployment,
    \%createOrUpdateJMSServer, \%deleteJMSServer, \%deleteJMSQueue,
    \%createOrUpdateDatasource
);
# On plugin upgrade, migrate configurations/properties from the previously
# installed plugin version and re-attach its externally stored credentials
# to the steps that need them.
if ($upgradeAction eq 'upgrade') {
    migrateConfigurations($otherPluginName);
    migrateProperties($otherPluginName);
    debug "Migrated properties";
    reattachExternalCredentials($otherPluginName);
}
# Legacy upgrade path, temporarily disabled via the `0 &&` guard.  Kept for
# reference: it cloned the old plugin's config sheet and credentials, fixed
# up ACLs, and attached the cloned credentials to the credential-bearing
# steps.
if (0 && ($upgradeAction eq "upgrade")) {
    patch_configs("/plugins/$otherPluginName/project/weblogic_cfgs");
    my $query = $commander->newBatch();
    my $newcfg = $query->getProperty("/plugins/$pluginName/project/weblogic_cfgs");
    my $oldcfgs = $query->getProperty("/plugins/$otherPluginName/project/weblogic_cfgs");
    my $creds = $query->getCredentials("\$[/plugins/$otherPluginName]");
    # Tolerate NoSuchProperty responses instead of aborting the batch.
    local $self->{abortOnError} = 0;
    $query->submit();
    # if new plugin does not already have cfgs
    if ($query->findvalue($newcfg, "code") eq "NoSuchProperty") {
        # if old cfg has some cfgs to copy
        if ($query->findvalue($oldcfgs, "code") ne "NoSuchProperty") {
            $batch->clone({
                path => "/plugins/$otherPluginName/project/weblogic_cfgs",
                cloneName => "/plugins/$pluginName/project/weblogic_cfgs"
            }
            );
        }
    }
    # Copy configuration credentials and attach them to the appropriate steps
    my $nodes = $query->find($creds);
    if ($nodes) {
        my @nodes = $nodes->findnodes('credential/credentialName');
        for (@nodes) {
            my $cred = $_->string_value;
            # Clone the credential
            $batch->clone({
                path => "/plugins/$otherPluginName/project/credentials/$cred",
                cloneName => "/plugins/$pluginName/project/credentials/$cred"
            }
            );
            # Make sure the credential has an ACL entry for the new project principal
            my $xpath = $commander->getAclEntry(
                "user",
                "project: $pluginName",
                {
                    projectName => $otherPluginName,
                    credentialName => $cred
                }
            );
            if ($xpath->findvalue("//code") eq "NoSuchAclEntry") {
                # Drop the stale entry owned by the old plugin project and
                # grant the new plugin project full access to the credential.
                $batch->deleteAclEntry(
                    "user",
                    "project: $otherPluginName",
                    {
                        projectName => $pluginName,
                        credentialName => $cred
                    }
                );
                $batch->createAclEntry(
                    "user",
                    "project: $pluginName",
                    {
                        projectName => $pluginName,
                        credentialName => $cred,
                        readPrivilege => 'allow',
                        modifyPrivilege => 'allow',
                        executePrivilege => 'allow',
                        changePermissionsPrivilege => 'allow'
                    }
                );
            }
            for my $step (@$stepsWithCredentials) {
                # Attach the credential to the appropriate steps
                $batch->attachCredential(
                    "\$[/plugins/$pluginName/project]",
                    $cred, {
                        procedureName => $step->{procedureName},
                        stepName => $step->{stepName}
                    }
                );
            }
        }
    }
    reattachExternalCredentials($otherPluginName);
}
# Ensures every configuration under $config_path has a numeric debug_level
# property, defaulting it to 1 where missing.  All server round-trips are
# wrapped in eval so a missing sheet or malformed config is skipped rather
# than fatal.
sub patch_configs {
    my ($config_path) = @_;
    # Resolve the property-sheet id holding the individual configurations.
    my $configs = '';
    eval {
        my $res = $commander->getProperty($config_path);
        $configs = $res->findvalue('//propertySheetId')->string_value();
    };
    unless ($configs) {
        # No configuration sheet at all — nothing to patch.
        return;
    }
    # Normalize the config list to an arrayref (XML::Simple returns a plain
    # hash for a single entry, an arrayref for several).
    my $cfg_list = undef;
    eval {
        my $t = $commander->getProperties({propertySheetId => $configs});
        my $cfg_data = XMLin($t->{_xml});
        $cfg_list = $cfg_data->{response}->{propertySheet}->{property};
        if (ref $cfg_list eq 'HASH') {
            $cfg_list = [$cfg_list];
        }
        if (ref $cfg_list ne 'ARRAY') {
            $cfg_list = [];
        }
    };
    for my $c (@$cfg_list) {
        my $debug_level = undef;
        # Look for an existing, purely numeric debug_level in this config.
        eval {
            my $prop = $commander->getProperty($config_path . '/' . $c->{propertyName});
            eval {
                my $sheet = $commander->getProperties({propertySheetId => $c->{propertySheetId}});
                $sheet = XMLin($sheet->{_xml});
                for my $p (@{$sheet->{response}->{propertySheet}->{property}}) {
                    if ($p->{propertyName} eq 'debug_level') {
                        if (!ref $p->{value} && $p->{value} =~ m/^\d+$/s) {
                            $debug_level = $p->{value};
                        }
                    }
                }
            };
            1;    # force a true return so the `or do` skip only fires on die
        } or do {
            next;
        };
        # Already set — leave this configuration untouched.
        defined $debug_level and next;
        # Missing — default the debug level to 1.
        $debug_level = 1;
        $commander->setProperty($config_path . '/' . $c->{propertyName} . '/debug_level' => $debug_level);
    }
    return 1;
}
# Every procedure below may leave the server in a "restart required" state,
# so each is given a WebLogicServerRestartRequired formal output parameter
# (created on promote, see checkAndSetOutputParameters).
my $restartFlagName = 'WebLogicServerRestartRequired';
my @proceduresWithPossibleRestart = qw(
    AddServerToCluster
    AddUserToGroup
    ChangeUserPassword
    CheckClusterStatus
    ConfigureUserLockoutManager
    CreateCluster
    CreateDataSource
    CreateDomain
    CreateGroup
    CreateManagedServer
    CreateOrUpdateConnectionFactory
    CreateOrUpdateDatasource
    CreateOrUpdateJMSModule
    CreateOrUpdateJMSModuleSubdeployment
    CreateOrUpdateJMSQueue
    CreateOrUpdateJMSServer
    CreateOrUpdateJMSTopic
    CreateTemplate
    CreateUser
    DeleteCluster
    DeleteConnectionFactory
    DeleteDatasource
    DeleteGroup
    DeleteJMSModule
    DeleteJMSModuleSubdeployment
    DeleteJMSQueue
    DeleteJMSServer
    DeleteJMSTopic
    DeleteManagedServer
    DeleteUser
    DeployApp
    RemoveUserFromGroup
    ResumeServer
    StartApp
    StartCluster
    StopApp
    StopCluster
    StopNodeManager
    SuspendServer
    UndeployApp
    UnlockUserAccount
    UpdateApp
    UpdateAppConfig
);
# One {formalOutputParameterName, procedureName} spec per procedure above.
my @formalOutputParameters = map {{formalOutputParameterName => $restartFlagName, procedureName => $_}} @proceduresWithPossibleRestart;
# On plugin promotion, re-attach external configurations and — if both the
# agent API and the server version support formal output parameters
# (server 8.3+) — ensure the restart-required output parameter exists on
# every relevant procedure.
if ($promoteAction eq 'promote') {
    reattachExternalConfigurations($otherPluginName);
    ## Only proceed if the local agent's API knows getFormalOutputParameters.
    if (exists $ElectricCommander::Arguments{getFormalOutputParameters}) {
        my $versions = $commander->getVersions();
        if (my $version = $versions->findvalue('//version')) {
            # compareMinor is loaded lazily so older agents without it
            # never attempt the import.
            require ElectricCommander::Util;
            ElectricCommander::Util->import('compareMinor');
            if (compareMinor($version, '8.3') >= 0) {
                checkAndSetOutputParameters(@formalOutputParameters);
            }
        }
    }
}
# Queries the server (in one batch) for the formal output parameters already
# defined on each distinct procedure referenced by @parameters, then creates
# any that are missing.  Each element of @parameters is a hashref with
# formalOutputParameterName and procedureName keys.
sub checkAndSetOutputParameters {
    my (@parameters) = @_;
    # Form flatten unique list of procedureNames
    # and get all parameters for defined procedures
    my $query = $commander->newBatch();
    my %subs = ();
    foreach my $param (@parameters) {
        my $proc_name = $param->{procedureName};
        $subs{$proc_name} = 1;
    }
    # Queue one getFormalOutputParameters request per unique procedure;
    # the batch call's return value (stored in %subs) is its request handle.
    foreach (keys %subs) {
        $subs{$_} = $query->getFormalOutputParameters($otherPluginName, {procedureName => $_});
    }
    $query->submit();
    my @params_to_create = ();
    foreach my $proc_name (keys %subs) {
        # NOTE(review): find() is given the bare procedure name here, while
        # the request handles returned by the batch calls were stored in
        # $subs{$proc_name}.  If find() expects the request handle, this
        # lookup returns nothing and every parameter is treated as missing —
        # confirm against the ElectricCommander batch API and consider
        # $query->find($subs{$proc_name}).
        my $response_for_params = $query->find($proc_name);
        push @params_to_create, checkMissingOutputParameters(\@parameters, $response_for_params);
    }
    createMissingOutputParameters(@params_to_create);
}
# Compares the wanted output parameters against the server's response and
# returns, for each one that does not yet exist, the argument list for a
# createFormalOutputParameter call.
sub checkMissingOutputParameters {
    my ($wanted, $response) = @_;

    # A parameter is identified by the (name, procedure) pair.
    my @id_fields = ('formalOutputParameterName', 'procedureName');

    # Index the parameters the server already knows about.
    my %existing;
    if ($response) {
        for my $node ($response->findnodes('formalOutputParameter')) {
            my $id = join '_', map { $node->find($_)->string_value() } @id_fields;
            $existing{$id} = 1;
        }
    }

    # Collect createFormalOutputParameter argument lists for the missing ones.
    my @missing;
    for my $param (@{$wanted}) {
        my $id = join '_', map { $param->{$_} || '' } @id_fields;
        next if exists $existing{$id};
        push @missing, [
            $pluginName,
            $param->{formalOutputParameterName},
            {procedureName => $param->{procedureName}},
        ];
    }
    return @missing;
}
# Create the given formal output parameters in one batch request.  Each
# element is an argument array for createFormalOutputParameter.  Responses
# are not inspected; always returns 1.
sub createMissingOutputParameters {
    my (@params_to_create) = @_;
    # Nothing to do when every parameter already exists.
    if (@params_to_create) {
        my $create_batch = $commander->newBatch();
        $create_batch->createFormalOutputParameter(@$_) for @params_to_create;
        $create_batch->submit();
    }
    return 1;
}
# Walk every configuration sheet of the other plugin version and, for each
# property that looks like a credential *reference* (name ends in
# "credential" and value is an absolute path), queue attachCredential calls
# so the referenced credential is attached to all credential-using steps of
# this plugin.  Calls are queued on the shared $batch; the caller submits.
sub reattachExternalCredentials {
    my ($otherPluginName) = @_;
    my $configName = getConfigLocation($otherPluginName);
    my $configsPath = "/plugins/$otherPluginName/project/$configName";
    my $xp = $commander->getProperty($configsPath);
    my $id = $xp->findvalue('//propertySheetId')->string_value();
    my $props = $commander->getProperties({propertySheetId => $id});
    # Each child property sheet is one named configuration.
    for my $node ($props->findnodes('//property/propertySheetId')) {
        my $configPropertySheetId = $node->string_value();
        my $config = $commander->getProperties({propertySheetId => $configPropertySheetId});
        # iterate through props to get credentials.
        for my $configRow ($config->findnodes('//property')) {
            my $propName = $configRow->findvalue('propertyName')->string_value();
            my $propValue = $configRow->findvalue('value')->string_value();
            # Only "*credential" entries whose value starts with "/" are
            # references to externally stored credentials.
            if ($propName =~ m/credential$/s && $propValue =~ m|^\/|s) {
                for my $step (@$stepsWithCredentials) {
                    $batch->attachCredential({
                            projectName    => $pluginName,
                            procedureName  => $step->{procedureName},
                            stepName       => $step->{stepName},
                            credentialName => $propValue,
                        }
                    );
                }
                print "Reattaching $propName with val: $propValue\n";
            }
        }
    } ## end for my $node ($props->findnodes...)
} ## end sub reattachExternalCredentials
# Return the name of the property sheet holding the plugin's
# configurations.  The plugin advertises it in ec_configPropertySheet;
# fall back to the historical default when the property is absent.
sub getConfigLocation {
    my ($otherPluginName) = @_;
    my $location = eval {
        $commander->getProperty("/plugins/$otherPluginName/project/ec_configPropertySheet")
            ->findvalue('//value')->string_value;
    };
    return $location || 'weblogic_cfgs';
}
# Fetch the JSON-encoded list of {procedureName, stepName} records that
# describes which steps carry attached credentials.  Returns an empty
# arrayref when the property is missing or unparsable.
sub getStepsWithCredentials {
    my $steps = [];
    eval {
        my $pluginName = '@PLUGIN_NAME@';    # substituted at build time
        my $json = $commander->getProperty(
            "/projects/$pluginName/procedures/CreateConfiguration/ec_stepsWithAttachedCredentials"
        )->findvalue('//value')->string_value;
        $steps = decode_json($json);
    };
    return $steps;
}
# Attach the credentials referenced by existing plugin configurations to
# this plugin version's steps.  Prefers the getPluginConfigurations API
# (only available on newer agents); when that call dies, falls back to
# scraping the credentials already attached to each step of the old plugin.
sub reattachExternalConfigurations {
    my ($otherPluginName) = @_;
    my %migrated = ();
    # For the configurations that exists while the plugin was deleted.
    # The api is new so it requires the upgraded version of the agent.
    eval {
        my $cfgs = $commander->getPluginConfigurations({
                pluginKey => '@PLUGIN_KEY@',
            }
        );
        # Gather every credential referenced by any configuration mapping.
        my @creds = ();
        for my $cfg ($cfgs->findnodes('//pluginConfiguration/credentialMappings/parameterDetail')) {
            my $value = $cfg->findvalue('parameterValue')->string_value();
            push @creds, $value;
        }
        for my $cred (@creds) {
            next if $migrated{$cred};
            for my $stepWithCreds (@$stepsWithCredentials) {
                $commander->attachCredential({
                        projectName    => "/plugins/$pluginName/project",
                        credentialName => $cred,
                        procedureName  => $stepWithCreds->{procedureName},
                        stepName       => $stepWithCreds->{stepName}
                    }
                );
            }
            $migrated{$cred} = 1;
            debug "Migrated $cred";
        }
        1;    # make the eval return true so the fallback is skipped
    } or do {
        debug "getPluginConfiguration API is not supported on the promoting agent, falling back";
        # Copy each credential attachment found on the old plugin's steps.
        for my $stepWithCreds (@$stepsWithCredentials) {
            my $step = $commander->getStep({
                    projectName   => "/plugins/$otherPluginName/project",
                    procedureName => $stepWithCreds->{procedureName},
                    stepName      => $stepWithCreds->{stepName},
                }
            );
            for my $attachedCred ($step->findnodes('//attachedCredentials/credentialName')) {
                my $credName = $attachedCred->string_value();
                $commander->attachCredential({
                        projectName    => "/plugins/$pluginName/project",
                        credentialName => $credName,
                        procedureName  => $stepWithCreds->{procedureName},
                        stepName       => $stepWithCreds->{stepName}
                    }
                );
                $migrated{$credName} = 1;
                debug "Migrated credential $credName to $stepWithCreds->{procedureName}";
            }
        } ## end for my $stepWithCreds (...)
    };
} ## end sub reattachExternalConfigurations
# Copy the whole configuration sheet from the old plugin version into this
# one, then clone every credential and rewrite its ACLs so the new plugin
# project fully owns it, and finally attach each cloned credential to all
# credential-using steps.  All mutating calls are queued on the shared
# $batch; the caller submits.
sub migrateConfigurations {
    my ($otherPluginName) = @_;
    my $configName = getConfigLocation($otherPluginName);
    $commander->clone({
            path      => "/plugins/$otherPluginName/project/$configName",
            cloneName => "/plugins/$pluginName/project/$configName"
        }
    );
    my $xpath = $commander->getCredentials("/plugins/$otherPluginName/project");
    for my $credential ($xpath->findnodes('//credential')) {
        my $credName = $credential->findvalue('credentialName')->string_value;
        # NOTE: a credential name starting with "/" is a reference to a
        # credential stored elsewhere; the guard that used to skip those is
        # currently disabled, so every credential is cloned.
        debug "Migrating old configuration $credName";
        $batch->clone({
                path      => "/plugins/$otherPluginName/project/credentials/$credName",
                cloneName => "/plugins/$pluginName/project/credentials/$credName"
            }
        );
        # Drop ACL entries for both the old and the new plugin project
        # before re-creating one full-privilege entry for the new project.
        $batch->deleteAclEntry({
                principalName  => "project: $otherPluginName",
                projectName    => $pluginName,
                credentialName => $credName,
                principalType  => 'user'
            }
        );
        $batch->deleteAclEntry({
                principalType  => 'user',
                principalName  => "project: $pluginName",
                credentialName => $credName,
                projectName    => $pluginName,
            }
        );
        $batch->createAclEntry({
                principalType              => 'user',
                principalName              => "project: $pluginName",
                projectName                => $pluginName,
                credentialName             => $credName,
                objectType                 => 'credential',
                readPrivilege              => 'allow',
                modifyPrivilege            => 'allow',
                executePrivilege           => 'allow',
                changePermissionsPrivilege => 'allow'
            }
        );
        # Attach the migrated credential to every step that needs one.
        for my $step (@$stepsWithCredentials) {
            $batch->attachCredential({
                    projectName    => $pluginName,
                    procedureName  => $step->{procedureName},
                    stepName       => $step->{stepName},
                    credentialName => $credName,
                }
            );
            debug "Attached credential to $step->{stepName}";
        }
    } ## end for my $credential ($xpath...)
} ## end sub migrateConfigurations
# Clone the property sheets listed (as JSON) in the old plugin's
# ec_clonedProperties property into this plugin's project.  Does nothing
# when the property is missing or cannot be decoded.
sub migrateProperties {
    my ($otherPluginName) = @_;
    my $sheets = eval {
        decode_json(
            $commander->getProperty("/plugins/$otherPluginName/project/ec_clonedProperties")
                ->findvalue('//value')->string_value
        );
    };
    if (!$sheets) {
        debug "No properties to migrate";
        return;
    }
    for my $sheet (@$sheets) {
        $commander->clone({
                path      => "/plugins/$otherPluginName/project/$sheet",
                cloneName => "/plugins/$pluginName/project/$sheet"
            }
        );
        debug "Cloned $sheet";
    }
}
| electric-cloud/EC-WebLogic | src/main/resources/project/ec_setup.pl | Perl | apache-2.0 | 37,003 |
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
# Read a variation config module and generate some SQL to populate the attrib,
# attrib_type and attrib_set tables in the variation database
use strict;
use warnings;
use DBI;
use Getopt::Long;
# Command-line options.
my $config;      # config module supplying the attrib definitions
my $no_model;    # skip reading existing ids from a live database
my $host;        # database connection details (required unless --no_model)
my $port;
my $user;
my $pass;
my $db;
my $help;
GetOptions(
    "config=s" => \$config,
    "no_model" => \$no_model,
    "host=s" => \$host,
    "port=i" => \$port,
    "user=s" => \$user,
    "pass=s" => \$pass,
    "db=s" => \$db,
    "help|h" => \$help,
);
# Connection details are mandatory unless --no_model was requested.
unless ($no_model || ($host && $user && $db)) {
    print "Missing required parameter...\n" unless $help;
    $help = 1;
}
if ($help) {
    print "Usage: $0 --config <module> --host <host> --port <port> --user <user> --pass <pass> --db <database> --no_model --help > attrib_entries.sql\n";
    exit(0);
}
# pull in our config module
$config ||= 'Bio::EnsEMBL::Variation::Utils::Config';
eval qq{require $config};
die "Failed to require config module '$config':\n$@" if $@;
# and import the variables we need
our $MAX_ATTRIB_CODE_LENGTH;
our @ATTRIB_TYPES;
our @ATTRIB_SETS;
our %ATTRIBS;
eval {
$config->import(qw(
$MAX_ATTRIB_CODE_LENGTH
@ATTRIB_TYPES
@ATTRIB_SETS
%ATTRIBS
));
};
die "Failed to import required data structures from config module '$config':\n$@" if $@;
# format strings for inserting into our 3 tables
my $attrib_type_fmt =
q{INSERT IGNORE INTO attrib_type (attrib_type_id, code, name, description) VALUES (%d, %s, %s, %s);};
my $attrib_fmt =
q{INSERT IGNORE INTO attrib (attrib_id, attrib_type_id, value) VALUES (%d, %d, '%s');};
my $set_fmt =
q{INSERT IGNORE INTO attrib_set (attrib_set_id, attrib_id) VALUES (%d, %d);};
# these hashes store mappings to our attrib and attrib_type IDs
my %attrib_ids;
my $attrib_type_ids;
# these variables store the current highest used ID for each table
my $last_attrib_type_id = 0;
my $last_attrib_id = 0;
my $last_attrib_set_id = 0;
# these hashes store the existing IDs from the database, if --no_model
# is used then these will just be empty and new IDs will be generated
my $existing_attrib_type;
my $existing_attrib;
my $existing_set;
unless ($no_model) {
# prefetch existing IDs from the database
my $dbh = DBI->connect(
"DBI:mysql:database=$db;host=$host;port=$port",
$user,
$pass,
);
my $get_types_sth = $dbh->prepare(qq{
SELECT code, attrib_type_id FROM attrib_type
});
$get_types_sth->execute;
while (my ($code, $id) = $get_types_sth->fetchrow_array) {
$existing_attrib_type->{$code} = $id;
$last_attrib_type_id = $id if $id > $last_attrib_type_id;
}
my $get_attribs_sth = $dbh->prepare(qq{
SELECT attrib_type_id, value, attrib_id FROM attrib
});
$get_attribs_sth->execute;
while (my ($type_id, $value, $id) = $get_attribs_sth->fetchrow_array) {
$existing_attrib->{$type_id}->{$value} = $id;
$last_attrib_id = $id if $id > $last_attrib_id;
}
my $get_sets_sth = $dbh->prepare(qq{
SELECT attrib_set_id, attrib_id FROM attrib_set
});
$get_sets_sth->execute;
while (my ($set_id, $attrib_id) = $get_sets_sth->fetchrow_array) {
$existing_set->{$set_id}->{$attrib_id} = 1;
$last_attrib_set_id = $set_id if $set_id > $last_attrib_set_id;
}
}
# the following subroutines are used to get the corresponding IDs for
# our tables, they will return existing IDs where possible and generate
# new ones when required
# Return the attrib_type_id for a code, reusing the id loaded from the
# database when present and allocating the next free id otherwise.
sub get_attrib_type_id {
    my ($code) = @_;

    # The attrib_type.code column has a fixed width; a longer code means
    # the schema has to change as well.
    warn "$code is > $MAX_ATTRIB_CODE_LENGTH characters, have you changed the schema to match?"
        if length($code) > $MAX_ATTRIB_CODE_LENGTH;

    unless (defined $existing_attrib_type->{$code}) {
        $existing_attrib_type->{$code} = ++$last_attrib_type_id;
    }
    return $existing_attrib_type->{$code};
}
# Return the attrib_id for a (type_id, value) pair, reusing the id loaded
# from the database when present and allocating the next free id otherwise.
sub get_attrib_id {
    my ($type_id, $value) = @_;
    unless (defined $existing_attrib->{$type_id}->{$value}) {
        $existing_attrib->{$type_id}->{$value} = ++$last_attrib_id;
    }
    return $existing_attrib->{$type_id}->{$value};
}
# Return an attrib_set_id for the given list of attrib_ids.  An existing
# set id is reused when the new set is a subset or superset of that set
# (i.e. it merely adds or removes members); otherwise a fresh id is
# allocated and its members recorded.
sub get_attrib_set_id {
    my %new_set = map { $_ => 1 } @_;

    # True when every member of $inner is also present in $outer.
    my $contains = sub {
        my ($outer, $inner) = @_;
        for my $member (keys %$inner) {
            return 0 unless $outer->{$member};
        }
        return 1;
    };

    for my $set_id (keys %$existing_set) {
        my $members = $existing_set->{$set_id};
        return $set_id
            if $contains->($members, \%new_set) || $contains->(\%new_set, $members);
    }

    # No related set found: allocate a new id and record its members.
    $last_attrib_set_id++;
    $existing_set->{$last_attrib_set_id}->{$_} = 1 for keys %new_set;
    return $last_attrib_set_id;
}
# the SQL string we are building
my $SQL;
# first define the attrib type entries
for my $attrib_type (@ATTRIB_TYPES) {
    # delete the known keys so anything left over is flagged as a typo
    my $code = delete $attrib_type->{code} or die "code required for attrib_type";
    my $name = delete $attrib_type->{name};
    my $description = delete $attrib_type->{description};
    my $attrib_type_id = get_attrib_type_id($code);
    die "Unexpected entries in attrib_type definition: ".(join ',', keys %$attrib_type)
        if keys %$attrib_type;
    $SQL .= sprintf($attrib_type_fmt,
        $attrib_type_id,
        "'$code'",
        ($name ? "'$name'" : "''"),
        ($description ? "'$description'" : 'NULL'),
    )."\n";
    $attrib_type_ids->{$code} = $attrib_type_id;
}
# second, take the entries from the ATTRIBS and add them as single-element hashes to the @ATTRIB_SETS array
while (my ($type,$values) = each(%ATTRIBS)) {
    map {push(@ATTRIB_SETS,{$type => $_})} @{$values};
}
# third, loop over the ATTRIB_SETS array and add attribs and assign them to attrib_sets as necessary
for my $set (@ATTRIB_SETS) {
    # Keep the attrib_ids
    my @attr_ids;
    # Iterate over the type => value entries in the set
    while (my ($type,$value) = each(%{$set})) {
        # Lookup the attrib_type; silently skip types with no attrib_type entry
        my $attrib_type_id = $attrib_type_ids->{$type} or next;
        # insert a new attrib if we haven't seen it before
        my $attrib_id = $attrib_ids{$type . "_" . $value};
        unless (defined($attrib_id)) {
            $attrib_id = get_attrib_id($attrib_type_id, $value);
            $SQL .= sprintf($attrib_fmt, $attrib_id, $attrib_type_id, $value)."\n";
            $attrib_ids{$type . "_" . $value} = $attrib_id;
        }
        push(@attr_ids,$attrib_id);
    }
    # If the set had more than one attribute, group them into a set
    if (scalar(@attr_ids) > 1) {
        my $attrib_set_id = get_attrib_set_id(@attr_ids);
        map {$SQL .= sprintf($set_fmt, $attrib_set_id, $_)."\n"} @attr_ids;
    }
}
# print out our SQL
print $SQL . "\n" if $SQL;
| dbolser/ensembl-variation | scripts/misc/create_attrib_sql.pl | Perl | apache-2.0 | 8,002 |
=head1 LICENSE
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2022] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
Bio::EnsEMBL::Analysis::Runnable::ExonerateCloneEnds -
=head1 SYNOPSIS
my $runnable =
Bio::EnsEMBL::Analysis::Runnable::ExonerateCloneEnds->new(
-query_seqs => \@q_seqs,
-query_type => 'dna',
-target_seqs => \@t_seqs,
-options => $options,
);
$runnable->run; #create and fill Bio::Seq object
my @results = $runnable->output;
=head1 DESCRIPTION
This module handles a specific use of the Exonerate (G. Slater) program,
to align clone sequences with genomic sequences.
=head1 METHODS
=cut
package Bio::EnsEMBL::Analysis::Runnable::ExonerateCloneEnds;
use warnings ;
use vars qw(@ISA);
use strict;
use Bio::EnsEMBL::Analysis::Runnable;
use Bio::EnsEMBL::Analysis::Runnable::BaseExonerate;
use Bio::EnsEMBL::DnaDnaAlignFeature;
use Bio::EnsEMBL::Feature;
use Bio::EnsEMBL::FeaturePair;
use Bio::EnsEMBL::Utils::Exception qw(throw warning);
use Bio::EnsEMBL::Utils::Argument qw( rearrange );
@ISA = qw(Bio::EnsEMBL::Analysis::Runnable::BaseExonerate);
sub new {
  my $class = shift;

  # All construction is handled by BaseExonerate; this subclass adds no
  # state of its own.
  my $self = $class->SUPER::new(@_);
  return $self;
}
#
# Implementation of method in abstract superclass
#
# Parse exonerate output lines ("RESULT: ..." records) from the given
# filehandle into an arrayref of DnaDnaAlignFeature objects.  Each record
# carries the alignment coordinates followed by "vulgar" triplets that are
# converted into an Ensembl-style cigar string.
sub parse_results {
  my ( $self, $fh ) = @_;
  my @features;
  while (<$fh>){
    next unless /^RESULT:/;
    chomp;
    my (
      $tag, $q_id, $q_start, $q_end, $q_strand,
      $t_id, $t_start, $t_end, $t_strand, $score,
      $perc_id, $q_length, $t_length, $gene_orientation,
      @vulgar_blocks
    ) = split;
    # Walk the vulgar blocks three items (type, query-len, target-len) at
    # a time, appending one cigar element per block.
    my $cigar_string='';
    while (@vulgar_blocks){
      throw("Something funny has happened to the input vulgar string." .
            " Expecting components in multiples of three, but only have [" .
            scalar @vulgar_blocks . "] items left to process.")
        unless scalar @vulgar_blocks >= 3;
      my $match_type = shift @vulgar_blocks;
      my $query_match_length = shift @vulgar_blocks;
      my $target_match_length = shift @vulgar_blocks;
      # A vulgar gap ("G") with zero query length is a deletion relative to
      # the query (cigar "D", sized by the target length); with zero target
      # length it is an insertion ("I").
      if ($match_type eq "G"){
        if ($query_match_length == 0){
          $match_type="D";
          $query_match_length = $target_match_length;
        }elsif ($target_match_length == 0){
          $match_type="I";
        }
      }
      $cigar_string .= $query_match_length.$match_type;
    }
    my $feature =
      $self->make_feature(
        $q_id, $q_start, $q_end, $q_strand,
        $t_id, $t_start, $t_end, $t_strand, $score,
        $perc_id, $q_length, $cigar_string
      );
    if($feature){
      push @features, $feature;
    }else{
      warn "Clone end feature from probe :$q_id doesnt match well enough\n";
    }
  }
  return \@features;
}
#
# Create dna align feature objects:
#
# Build a DnaDnaAlignFeature from one parsed exonerate result.
#
# Arguments (positional, as passed by parse_results):
#   $q_id/$q_start/$q_end/$q_strand  query (clone end) coordinates
#   $t_id/$t_start/$t_end/$t_strand  target (genomic) coordinates
#   $score, $perc_id                 alignment score and percent identity
#   $q_length                        query length (currently unused)
#   $cigar_string                    cigar derived from the vulgar blocks
#
# Throws on an unrecognised strand symbol.  Returns the new feature.
#
# Note: the previous version unpacked @_ twice, silently binding $self to
# an unused "$tag" slot; the unpacking below is equivalent but explicit.
sub make_feature{
  my ($self,
      $q_id, $q_start, $q_end, $q_strand,
      $t_id, $t_start, $t_end, $t_strand, $score,
      $perc_id, $q_length, $cigar_string
  ) = @_;

  # Map exonerate's '+'/'-' strand symbols onto Ensembl's 1/-1.
  if ($q_strand eq '+') {
    $q_strand = 1;
  } elsif ($q_strand eq '-') {
    $q_strand = -1;
  } else {
    throw "unrecognised query strand symbol: $q_strand\n";
  }
  if ($t_strand eq '+') {
    $t_strand = 1;
  } elsif ($t_strand eq '-') {
    $t_strand = -1;
  } else {
    throw "unrecognised target strand symbol: $t_strand\n";
  }

  # Exonerate reports query start -1 so in some cases we get alignments
  # with hit start 0; shift both starts to 1-based coordinates.
  $q_start += 1;
  $t_start += 1;

  # For reverse strand matches, exonerate reports end => start.
  ($q_start, $q_end) = ($q_end, $q_start) if $q_start > $q_end;
  ($t_start, $t_end) = ($t_end, $t_start) if $t_start > $t_end;

  my $feature = Bio::EnsEMBL::DnaDnaAlignFeature->new(
    -seqname      => $t_id,
    -start        => $t_start,
    -end          => $t_end,
    -strand       => $t_strand,
    -hseqname     => $q_id,
    -hstart       => $q_start,
    -hend         => $q_end,
    -hstrand     => $q_strand,
    -score        => $score,
    -percent_id   => $perc_id,
    -cigar_string => $cigar_string,
  );
  return $feature;
}
1;
| Ensembl/ensembl-analysis | modules/Bio/EnsEMBL/Analysis/Runnable/ExonerateCloneEnds.pm | Perl | apache-2.0 | 5,222 |
=head1 LICENSE
Copyright [1999-2014] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
package EnsEMBL::Web::Configuration::Account;
use strict;
use warnings;
use previous qw(SECURE_PAGES populate_tree);
sub SECURE_PAGES {
  ## Pages that must be served over HTTPS: everything the parent plugin
  ## lists, plus the OpenID pages added here.
  my @pages = __PACKAGE__->PREV::SECURE_PAGES;
  return (@pages, 'OpenID');
}
## Extends the core Account page tree with OpenID support: adds an
## "Add Login" page for logged-in users, replaces the Login/Register pages
## for anonymous users with OpenID-aware variants, and registers the
## OpenID request/response command nodes.
sub populate_tree {
  my $self = shift;
  $self->PREV::populate_tree(@_);
  if ($self->hub->user) {
    # page modified from openid buttons component to allow a logged in user to select another openid provider as an alternative login option
    $self->get_node('Preferences')->append($self->create_node('Details/AddLogin', 'Add Login', [
      'edit_details' => 'EnsEMBL::OpenID::Component::Account::Buttons'
    ], { 'no_menu_entry' => 1 }));
  } else {
    # modify login form for openid login options
    $self->delete_node('Login');
    $self->create_node('Login', 'Login', [
      'message' => 'EnsEMBL::Users::Component::Account::Message',
      'login' => 'EnsEMBL::Users::Component::Account::Login',
      'openid' => 'EnsEMBL::OpenID::Component::Account::Buttons'
    ], { 'availability' => 1 });
    # modify registration form for openid login options
    $self->delete_node('Register');
    $self->create_node('Register', 'Register', [
      'message' => 'EnsEMBL::Users::Component::Account::Message',
      'register' => 'EnsEMBL::Users::Component::Account::Register',
      'openid' => 'EnsEMBL::OpenID::Component::Account::Buttons'
    ], { 'availability' => 1 });
    # page displayed when user logs in to the site for the first time via openid to ask him some extra registration info
    $self->create_node('OpenID/Register', '', [
      'message' => 'EnsEMBL::Users::Component::Account::Message',
      'register' => 'EnsEMBL::OpenID::Component::Account::Register'
    ], { 'no_menu_entry' => 1 });
    # page displayed when user logs in to the site for the first time via openid to ask provide email address if he already has an account on ensembl
    $self->create_node('OpenID/LinkExisting', '', [
      'message' => 'EnsEMBL::Users::Component::Account::Message',
      'register' => 'EnsEMBL::OpenID::Component::Account::LinkExisting'
    ], { 'no_menu_entry' => 1 });
    # page displayed to ask the user to choose a way to authenticate his account when user logs in to the site for the first time via openid to asks email to link existing account
    $self->create_node('OpenID/Authenticate', '', [
      'message' => 'EnsEMBL::Users::Component::Account::Message',
      'register' => 'EnsEMBL::OpenID::Component::Account::Authenticate'
    ], { 'no_menu_entry' => 1 });
    # OpenID related commands - command to make request to openid provider, command to handle response from the provider, command to add a new openid user
    $self->create_node( "OpenID/$_", '', [], { 'no_menu_entry' => 1, 'command' => 'EnsEMBL::OpenID::Command::Account::Add' }) for qw(Add Link);
  }
  # Openid resuest and response commands work both ways - user logged in or not (if your is logged it, its a request to add login)
  $self->create_node( "OpenID/$_", '', [], { 'no_menu_entry' => 1, 'command' => "EnsEMBL::OpenID::Command::Account::$_" }) for qw(Request Response);
}
1;
| andrewyatz/public-plugins | openid/modules/EnsEMBL/Web/Configuration/Account.pm | Perl | apache-2.0 | 3,813 |
package VMOMI::ArrayOfAnyURI;
use parent 'VMOMI::ComplexType';

use strict;
use warnings;

# No ancestors beyond ComplexType itself for this wrapper type.
our @class_ancestors = ( );

# Single member: a repeated 'anyURI' string element.
our @class_members = (
    ['anyURI', undef, 1, 1],
);

sub get_class_ancestors {
    return @class_ancestors;
}

sub get_class_members {
    my $class = shift;
    # Inherited members come first, then the ones declared on this class.
    return ($class->SUPER::get_class_members(), @class_members);
}

1;
| stumpr/p5-vmomi | lib/VMOMI/ArrayOfAnyURI.pm | Perl | apache-2.0 | 393 |
package Mojo::Webqq::Recent::Discuss;
use strict;
use Mojo::Webqq::Base 'Mojo::Webqq::Model::Base';

# Attributes of a "recent discuss-group" entry.
has [qw(
    did
    type
)];

# Copy matching keys from $hash onto $self; keys not already present on
# $self are ignored.  Returns $self for chaining.
sub update {
    my ($self, $hash) = @_;
    for my $field (keys %$self) {
        next unless exists $hash->{$field};
        $self->{$field} = $hash->{$field};
    }
    $self;
}

1;
| sjdy521/Mojo-Webqq | lib/Mojo/Webqq/Recent/Discuss.pm | Perl | bsd-2-clause | 289 |
#!/usr/bin/perl
use strict;
use warnings;

# Roller-coaster capitalisation: alternate the case of alphabetic
# characters (starting with upper case); all other characters are printed
# unchanged and do not advance the alternation.
#
# Usage: roller_coaster.pl <input-file>

main(@ARGV) if @ARGV;

# Apply alternating capitalisation to one chomped line and return it.
sub roller_case {
    my ($text) = @_;
    my $upper = 1;    # flips on every alphabetic character; first letter ends up upper-cased
    my @out = map {
        if (/^[[:alpha:]]/) {
            $upper = !$upper;
            $upper ? lc $_ : uc $_;
        }
        else {
            $_;
        }
    } split //, $text;
    return join '', @out;
}

sub main {
    my ($path) = @_;
    open my $in, '<', $path
        or die "Cannot open file $path for reading: $!";
    while ( my $line = <$in> ) {
        # Preserved quirk of the original: only lines consisting of a
        # single whitespace character are skipped.
        next if $line =~ m/^\s$/;
        chomp $line;
        printf "%s\n", roller_case($line);
    }
    close $in;
}
#!/usr/bin/perl
use Getopt::Long;
use File::Basename;
my $DIR="/opt/convey/examples/cnymemcached/mcd-benchmark";
my $MCHAMMR="";
my $MEMCACHED="/opt/convey/cnymemcached/bin/memcached";
my $CTRL="";
my $DEBUG=1;
my $server_mem=16384;
my $server_conns=4096;
my $server_threads=16;
my $delay_max=225000;
my $conns_per_thread=32;
#total number of connections to use from all clients. This works out to
#4000 connections at a full 16 threads and scales down appropriately as
#we drop the number of server threads
my $client_conns=$conns_per_thread*$server_threads;
#hosts used to provide the benchmark load
my @bhosts=();
#host used to probe latency
my $phost="grizzly6";
#run time for each test point
my $time=30;
#number of gets in a multiget.
my $mget=32;
#number of client threads to run per client
my $threads=8;
#memcached server IP
my $host="10.2.0.128";
#This is the name/ip used to ssh into the memcached server. May not match the server
#IP is the test network is on a private network.
my $host_ssh="coconino";
#port to use for memcached
my $port=11211;
my $port_count=1;
#this is our latency cutoff. We will stop the test run when latency rises above this
#point (is microseconds)
my $lat_cut=1000;
#data size to SETs and GETs
my $data_size=32;
my $key_size=8;
my $delay_delta_base=.7;
#my $key_count=65536;
my $op="MGET";
my $op_count=2048;
my $keys_per_op=32;
my $out=1;
my $delay=1000;
my $start_server_flag=0;
my $stop_server_flag=0;
my $server_start_delay=60;
my $mcast="224.0.0.37";
GetOptions("dir=s" => \$DIR,
"mchammr=s" => \$MCHAMMR,
"memcached=s" => \$MEMCACHED,
"conns=i" => \$client_conns,
"server_threads=i" => \$server_threads,
"client_threads=i" => \$threads,
"server_name=s" => \$host_ssh,
"probe_host=s" => \$phost,
"load_host=s" => \@bhosts,
"max_latency=i" => \$lat_cut,
"data_size=i" => \$data_size,
"key_size=i" => \$key_size,
"server_ip=s" => \$host,
"base_port=i" => \$port,
"port_count=i" => \$port_count,
"op_count=i" => \$op_count,
"keys_per_op=i" => \$keys_per_op,
"delta_base=i" => \$delay_delta_base,
"out=i" => \$out,
"delay=i" => \$delay,
"time=i" => \$time,
"stop_server+" => \$stop_server_flag,
"start_server+" => \$start_server_flag,
"server_start_delay=i" => \$server_start_delay,
"mcast_net=s" => \$mcast
);
$MCHAMMR="$DIR/mc-hammr/mc-hammr" if($MCHAMMR eq "");
$CTRL="$DIR/ctrl/mcast_msg" if ($CTRL eq "");
$threads=int($threads/$port_count);
$threads=1 if($threads<1);
#calculate number of connections per thread to make the total connections add up
#to what we want
my $conns=int($client_conns/scalar(@bhosts)/$threads/$port_count);
#config file names for various functions
my $load_conf="/tmp/load.conf";
my $bench_conf="/tmp/bench.conf";
my $probe_conf="/tmp/probe.conf";
my $gps;
my $maxgps;
my $lastgps=0;
my $lat;
my $delay_delta;
my %results=();
my $lat_repeat=0;
my $gps_repeat=0;
my $fork=0;
if($port_count>1) {
$fork=1;
}
if($keys_per_op==1) {
$op="GET";
}
#Turn on autoflush for STDOUT
$|=1;
#make the config file we will use to load the freshly started server with some
#data for benchmarking
print "making \"load\" config file...\n";
if(!make_conf_file($load_conf,
{host=>$host,
port=>$port,
port_count=>$port_count,
send=>"SET",
key_len=>$key_size,
value_size=>$data_size,
out=>1,
threads=>1,
conns=>1,
loop=>1,
wait=>0,
op_count=>($op_count*$keys_per_op),
fork=>$fork,
})) {
die("error making config file: $load_conf");
}
#make the config file we will use to probe the latency of the server under load
print "making \"probe\" config file...\n";
if(!make_conf_file($probe_conf,
{host=>$host,
port=>$port,
port_count=>$port_count,
send=>"GET",
delay=>0,
time=>$time,
threads=>($port_count<4 ? 4 : $port_count),
key_len=>$key_size,
value_size=>$data_size,
op_count=>($op_count*$keys_per_op),
out=>1,
conns=>1,
fork=>$fork,
mcast=>$mcast,
wait=>1})) {
die("error making config file: $probe_conf");
}
#start up the server
$host_port_string="";
for($p=$port; $p<$port+$port_count; $p++) {
$host_port_string .= "-l $host:$p ";
}
if($start_server_flag) {
if($port_count==1) {
print "starting server on $host:$port...\n";
} else {
print "starting server on $host:$port-" . ($port + $port_count - 1) . "...\n";
}
if(!start_server($host_ssh, "-m $server_mem -c $server_conns $host_port_string", $server_threads)) {
die("error starting memcached server");
}
if($server_start_delay) {
print "waiting on server to stabilize ($server_start_delay sec)\n";
sleep($server_start_delay);
}
}
#put the "load" config file on the client system we will use to load the data
print "loading the \"load\" config on $phost...\n";
if(!load_conf($load_conf, $phost)) {
die("error loading config file: $load_conf");
}
#now populate the cache
print "populating cache...\n";
if(populate_cache($phost)==0) {
if($stop_server_flag) {
stop_server($host_ssh);
}
die("error populating cache");
}
#copy the probe config over to the probe host to ready it for measurement duty
print "loading the probe config on $phost...\n";
if(!load_conf($probe_conf, $phost)) {
die("error loading config file: $probe_conf");
}
print "determining max throughput....\n";
#create a "max throughput" config file for the benchmark machines
if(!make_conf_file($bench_conf,
{host=>$host,
port=>$port,
port_count=>$port_count,
send=>$op,
threads=>$threads,
conns=>$conns,
op_count=>$op_count,
keys_per_op=>$keys_per_op,
delay=>$delay,
time=>$time,
out=>$out,
value_size=>$data_size,
key_len=>$key_size,
fork=>$fork,
mcast=>$mcast,
wait=>1})) {
die("error making config file: $bench_conf");
}
#and get it loaded
print "load benchmark config file on hosts: " . join(", ", @bhosts) . "\n";
if(!load_conf($bench_conf, @bhosts)) {
die("error loading config file: $bench_conf");
}
#This will get us somewhere near the max we will measure. In fact, it may get us above the max as
#we cut of the run when we cross the max latency threshold.
print "running benchmark...\n";
($maxgps, $lat)=bench($time+120, $port_count, $phost, @bhosts);
print "\ngps=$maxgps, lat=$lat\n\n";
if($stop_server_flag) {
#being done we need to clean stuff up and stop the server
print "shutting down server\n";
stop_server($host_ssh);
print "\n\n";
}
exit(0);
# Run the previously copied "load" config on the given client via ssh so
# mc-hammr SETs every key once (later GET benchmarks always hit).
# Returns 1 on success, 0 when the remote command exits non-zero.
sub populate_cache {
    my ($client) = @_;
    my $output = `ssh $client $MCHAMMR /tmp/mc-hammr.conf`;
    if ($? >> 8) {
        print $output;
        return 0;
    }
    print $output if $DEBUG;
    return 1;
}
# Run one benchmark pass: launch mc-hammr on the probe host and every load
# host via pdsh, wait until all listeners report "bound", release them
# simultaneously with a multicast "go", then aggregate throughput across
# hosts and take the latency reported by the probe host.
# Returns ($total_gets_per_sec, $probe_latency).
sub bench {
    my $timeout=shift;
    my $port_count=shift;
    my $phost=shift;
    # Remaining args are the load hosts; each host binds $port_count
    # listeners, and the probe host adds $port_count more.
    my $ready_count=(scalar(@_)+1)*$port_count;
    my $line="";
    my $lat=undef;
    my $gps=0;
    my $client;
    my $host;
    print "pdsh -R ssh -u $timeout -w $phost,". join(",", @_) . " $MCHAMMR /tmp/mc-hammr.conf 2>/dev/null \n";
    open(RUN, "pdsh -R ssh -u $timeout -w $phost,". join(",", @_) . " $MCHAMMR /tmp/mc-hammr.conf 2>/dev/null |");
    printf("waiting on benchmark clients to be ready: $ready_count");
    while($ready_count) {
        # Each client prints "bound" once per listener when it is ready.
        $line=<RUN>;
        if($line=~/bound/) {
            $ready_count--;
            print " $ready_count";
        }
    }
    sleep(2);
    print " go\n";
    # Multicast start signal so every client begins at the same instant.
    `$CTRL go`;
    while(<RUN>) {
        next unless(/bound/ || /cumulative: /);
        print;
        # e.g. "host: cumulative ... rate: 1234.56, ... lat: 78.90"
        m/^(.*):\s*cumulative.*rate: (\d+\.\d\d), .*lat: (\d+\.\d\d)/;
        $host=$1;
        $gps+=$2;
        $lat=$3 if($host eq $phost)
    }
    close(RUN);
    clean_client("$phost,". join(",", @_));
    return($gps, $lat);
}
# Kill any leftover mc-hammr processes on the comma-separated host list.
sub clean_client {
    my ($hosts) = @_;
    system "pdsh -R ssh -u 10 -w $hosts pkill " . basename($MCHAMMR) . " 2>/dev/null";
}
# Launch memcached in the background (ssh -f) on the server host with the
# given extra arguments and thread count.  Returns 1 on success, 0 when
# the ssh command exits non-zero.
sub start_server {
    my ($ssh_host, $args, $threads) = @_;
    print "ssh -f $ssh_host $MEMCACHED -t $threads $args 2>&1\n";
    my $rc = system("ssh -f $ssh_host $MEMCACHED -t $threads $args 2>&1");
    return ($rc >> 8) ? 0 : 1;
}
# Kill memcached on the server host and give connections time to drain.
sub stop_server {
    my ($ssh_host) = @_;
    system "ssh -q $ssh_host pkill " . basename($MEMCACHED);
    sleep(15);
}
# Copy the generated config file to /tmp/mc-hammr.conf on every host.
# Returns 1 when every scp succeeds, 0 (after printing the scp output) on
# the first failure.
sub load_conf {
    my ($conf, @hosts) = @_;
    for my $host (@hosts) {
        my $output = `scp -B -q $conf $host:/tmp/mc-hammr.conf`;
        if ($? >> 8 != 0) {
            print $output;
            return 0;
        }
        print $output if $DEBUG;
    }
    return 1;
}
# Write an mc-hammr config file.  One line is emitted per port (base port
# plus port_count-1 successors) so multi-port servers get an identical
# workload on every listener.
#
# Arguments: filename, hashref of settings.  Required settings: send,
# host, port, op_count.  Optional settings either carry a default
# (threads=1, conns=1, key_prefix="0:", value_size=32, key_len=8, fork=0,
# ops_per_conn=0, out=1, keys_per_op=1) or are omitted from the output
# when absent (loop, time, delay, wait, mcast).
#
# Returns 1 on success, 0 on a missing required entry or an I/O failure.
#
# BUGFIX: the value_size default used to be selected by testing the
# non-existent 'size' key, so a caller-supplied value_size was always
# ignored and 32 written instead.
sub make_conf_file {
    my ($fname, $cfg_ref) = @_;
    my %cfg = %{$cfg_ref};

    my @required = qw(send host port op_count);
    for my $key (@required) {
        unless (exists $cfg{$key}) {
            print "missing required config entry: $key\n";
            return 0;
        }
    }
    $cfg{port_count} = 1 unless exists $cfg{port_count};

    open my $conf_fh, '>', $fname or do {
        print "cannot open $fname: $!\n";
        return 0;
    };

    # Emit ",name=value" for a setting, falling back to $default when the
    # key is absent; an undef default means "omit the field entirely".
    my $field = sub {
        my ($name, $key, $default) = @_;
        return ",$name=$cfg{$key}" if exists $cfg{$key};
        return defined $default ? ",$name=$default" : '';
    };

    for (my $port = $cfg{port}; $port < $cfg{port} + $cfg{port_count}; $port++) {
        print {$conf_fh} 'send=' . $cfg{send},
            ',recv=async',
            $field->('threads',          'threads',      1),
            $field->('conns_per_thread', 'conns',        1),
            $field->('key_prefix',       'key_prefix',   '0:'),
            $field->('value_size',       'value_size',   32),
            $field->('key_len',          'key_len',      8),
            ",host=$cfg{host}",
            ",port=$port",
            $field->('loop',         'loop',         undef),
            $field->('time',         'time',         undef),
            $field->('delay',        'delay',        undef),
            $field->('fork',         'fork',         0),
            $field->('ops_per_conn', 'ops_per_conn', 0),
            $field->('out',          'out',          1),
            $field->('mcast_wait',   'wait',         undef),
            $field->('op_count',     'op_count',     undef),
            $field->('mcast_net',    'mcast',        undef),
            $field->('ops_per_frame','keys_per_op',  1),
            "\n";
    }

    unless (close $conf_fh) {
        print "error writing $fname: $!\n";
        return 0;
    }
    return 1;
}
| TonyBrewer/OpenHT | apps/memcached/contrib/mcd-benchmark/benchmark_scripts/bench.pl | Perl | bsd-3-clause | 10,706 |
#!/usr/bin/env perl
use strict;
use warnings;

use JEvent;
use Config::IniFiles;

# Publish a single canned XMPP pubsub event and exit.  The JEvent
# configuration is read from $JEVENTINI or /etc/jevent.ini.

my $ini = Config::IniFiles->new(-file => $ENV{JEVENTINI} || '/etc/jevent.ini')
    or die "Unable to read JEvent configuration";
my $je = JEvent->new(Config => $ini)
    or die "Unable to create JEvent interface";
$je->Connect();

# Target pubsub node and the XML payload to publish there.
my $node = "/some/random/node";
my $content = "<some random=\"1\"><data/></some>\n";

my $msg = $je->Publish(Node => $node, Content => $content);
#!/usr/bin/perl
# CGI script: echoes back the comma-separated numbers submitted in the
# "number" form field, then prints the square root of each one.
use strict;
use warnings;

# Legacy form parser; expected to define Parse_Form() and to populate
# %formdata in package main -- TODO confirm against subparseform.lib.
require "subparseform.lib";

our %formdata;    # filled in by Parse_Form()
Parse_Form();     # plain call; the legacy &Parse_Form form is unnecessary

my @numbers = split( /,/, $formdata{'number'} );

print "Content-type: text/html\n\n";
print "The numbers you entered were:";
foreach my $number (@numbers) {
    print "<LI>$number";
}

# The loop variable aliases the array elements, so this squares-roots
# @numbers in place.
# NOTE(review): sqrt() dies on negative input; this assumes the form
# only supplies non-negative numbers -- confirm upstream validation.
foreach my $number (@numbers) {
    $number = sqrt($number);
}

print "<P>The square roots of those numbers are: ";
foreach my $number (@numbers) {
    print "<LI>$number";
}
#!/usr/bin/perl
# mem.pl -- builds the serialized "mem" (memory) service definition for
# perfstat.  ServiceConfig.pl supplies the Service/Metric/Graph/
# GraphMetric classes used below (and, presumably, the GAUGE and U
# barewords and the $perfhome variable -- TODO confirm; this file does
# not run under strict, so undefined barewords silently become strings).
require "/perfstat/build/serialize/create/ServiceConfig.pl";
# Create the service container: 300-second (5-minute) RRD step with
# four averaging RRAs (1-, 7-, 30-, and 365-step consolidation,
# 288 rows each).
$service = Service->new( RRA => "RRA:AVERAGE:0.5:1:288 RRA:AVERAGE:0.5:7:288 RRA:AVERAGE:0.5:30:288 RRA:AVERAGE:0.5:365:288",
rrdStep => "300",
serviceName => "mem",
);
# Metric 0: memory utilization as a percentage (warn 90%, crit 95%).
# rrdDST/rrdMax use the GAUGE and U barewords noted above.
$obj = Metric->new( rrdIndex => 0,
metricName => "memUsedPct",
friendlyName => "Memory Utilization",
status => "nostatus",
rrdDST => GAUGE,
rrdHeartbeat => 600,
rrdMin => 0,
rrdMax => U,
hasEvents => 1,
metricValue => "null",
warnThreshold => 90,
critThreshold => 95,
thresholdUnit => "Percent",
lowThreshold => "0",
highThreshold => "100",
);
$service->addMetric($obj);
# Metric 1: swap utilization as a percentage (warn 80%, crit 90%).
$obj = Metric->new( rrdIndex => 1,
metricName => "swapUsedPct",
friendlyName => "Swap Utilization",
status => "nostatus",
rrdDST => GAUGE,
rrdHeartbeat => 600,
rrdMin => 0,
rrdMax => U,
hasEvents => 1,
metricValue => "null",
warnThreshold => 80,
critThreshold => 90,
thresholdUnit => "Percent",
lowThreshold => "0",
highThreshold => "100",
);
$service->addMetric($obj);
# Metric 2: page-in rate in KB/sec (warn 1000, crit 5000).
$obj = Metric->new( rrdIndex => 2,
metricName => "pageInKB",
friendlyName => "Pages In",
status => "nostatus",
rrdDST => GAUGE,
rrdHeartbeat => 600,
rrdMin => 0,
rrdMax => U,
hasEvents => 1,
metricValue => "null",
warnThreshold => 1000,
critThreshold => 5000,
thresholdUnit => "KB/Sec",
lowThreshold => "0",
highThreshold => "10000",
);
$service->addMetric($obj);
# Metric 3: page-out rate in KB/sec (warn 500, crit 1000).
$obj = Metric->new( rrdIndex => 3,
metricName => "pageOutKB",
friendlyName => "Pages Out",
status => "nostatus",
rrdDST => GAUGE,
rrdHeartbeat => 600,
rrdMin => 0,
rrdMax => U,
hasEvents => 1,
metricValue => "null",
warnThreshold => 500,
critThreshold => 1000,
thresholdUnit => "KB/Sec",
lowThreshold => "0",
highThreshold => "10000",
);
$service->addMetric($obj);
# Graph 0: stacked memory-utilization graph (0-100%, rigid scale) --
# swap usage drawn as a yellow AREA with memory usage STACKed on top
# in blue.
$obj = Graph->new( name => "memUtilization",
title => "Memory Utilization",
comment => "",
imageFormat => "png",
width => "500",
height => "120",
verticalLabel => "Percent",
upperLimit => "100",
lowerLimit => "0",
rigid => "Y",
base => "1000",
unitsExponent => "0",
noMinorGrids => "",
stepValue => "",
gprintFormat => "%8.2lf",
metricIndexHash => {},
metricArray => [],
);
$obj2 = GraphMetric->new( name => "swapUsedPct",
color => "#FFFF00",
lineType => "AREA",
gprintArray => [qw{AVERAGE LAST}],
cDefinition => "",
);
$obj->addGraphMetric("swapUsedPct", $obj2);
$obj2 = GraphMetric->new( name => "memUsedPct",
color => "#0000FF",
lineType => "STACK",
gprintArray => [qw{AVERAGE LAST}],
cDefinition => "",
);
$obj->addGraphMetric("memUsedPct", $obj2);
$service->addGraph("memUtilization", $obj);
# Graph 1: paging-activity graph (KB/sec, base 1024, auto upper
# limit) -- page-in as a red LINE2 and page-out as a yellow LINE2.
$obj = Graph->new( name => "memPaging",
title => "Memory Paging",
comment => "",
imageFormat => "png",
width => "500",
height => "120",
verticalLabel => "KB/sec",
upperLimit => "",
lowerLimit => "0",
rigid => "",
base => "1024",
unitsExponent => "0",
noMinorGrids => "",
stepValue => "",
gprintFormat => "%8.2lf",
metricIndexHash => {},
metricArray => [],
);
$obj2 = GraphMetric->new( name => "pageInKB",
color => "#FF0000",
lineType => "LINE2",
gprintArray => [qw{AVERAGE LAST}],
cDefinition => "",
);
$obj->addGraphMetric("pageInKB", $obj2);
$obj2 = GraphMetric->new( name => "pageOutKB",
color => "#FFFF00",
lineType => "LINE2",
gprintArray => [qw{AVERAGE LAST}],
cDefinition => "",
);
$obj->addGraphMetric("pageOutKB", $obj2);
$service->addGraph("memPaging", $obj);
# --- Dump the assembled service definition to stdout for inspection ---
# BUG FIX: ref() was originally written *inside* the double-quoted
# string ("Ref: ref($service)\n"); Perl does not interpolate function
# calls in strings, so the literal text "ref(" and ")" was printed
# around the stringified object.  Concatenate the ref() result instead.
print ("Ref: " . ref($service) . "\n");
$serviceName = $service->getServiceName();
$RRA = $service->getRRA();
$rrdStep = $service->getRRDStep();
$lastUpdate = $service->getLastUpdate();
print ("serviceName: $serviceName\n");
print ("RRA: $RRA\n");
print ("rrdStep: $rrdStep\n");
print ("Last Update: $lastUpdate\n");
# Walk the metric array via accessors and echo every field of each
# metric.  Note the direct hash access into $service->{metricArray},
# which bypasses the accessor layer -- kept as-is for compatibility.
$arrayLength = $service->getMetricArrayLength();
print ("metric Array Length = $arrayLength\n\n");
for ($counter=0; $counter < $arrayLength; $counter++)
{
$metricObject = $service->{metricArray}->[$counter];
$rrdIndex = $metricObject->getRRDIndex();
$rrdDST = $metricObject->getRRDDST();
$rrdHeartbeat = $metricObject->getRRDHeartbeat();
$rrdMin = $metricObject->getRRDMin();
$rrdMax = $metricObject->getRRDMax();
$metricName = $metricObject->getMetricName();
$friendlyName = $metricObject->getFriendlyName();
$status = $metricObject->getStatus();
$hasEvents = $metricObject->getHasEvents();
$metricValue = $metricObject->getMetricValue();
$warnThreshold = $metricObject->getWarnThreshold();
$critThreshold = $metricObject->getCritThreshold();
$thresholdUnit = $metricObject->getThresholdUnit();
$lowThreshold = $metricObject->getLowThreshold();
$highThreshold = $metricObject->getHighThreshold();
print ("rrdIndex: $rrdIndex\n");
print ("rrdDST: $rrdDST\n");
print ("rrdHeartbeat: $rrdHeartbeat\n");
print ("rrdMin: $rrdMin\n");
print ("rrdMax: $rrdMax\n");
print ("metricName: $metricName\n");
print ("friendlyName: $friendlyName\n");
print ("status: $status\n");
print ("hasEvents: $hasEvents\n");
print ("metricValue: $metricValue\n");
print ("warnThreshold: $warnThreshold\n");
print ("critThreshold: $critThreshold\n");
print ("threshUnit: $thresholdUnit\n");
print ("lowThreshold: $lowThreshold\n");
print ("highThreshold: $highThreshold\n\n");
}
# Walk the graph hash and echo every field of each graph, then each
# graph's per-metric drawing settings.
$graph = $service->{graphHash};
foreach my $key (keys %{$graph})
{
$graphObject = $service->{graphHash}->{$key};
$name = $graphObject->getName();
$title = $graphObject->getTitle();
$comment = $graphObject->getComment();
$imageFormat = $graphObject->getImageFormat();
$width = $graphObject->getWidth();
$height = $graphObject->getHeight();
$verticalLabel = $graphObject->getVerticalLabel();
$upperLimit = $graphObject->getUpperLimit();
$lowerLimit = $graphObject->getLowerLimit();
$rigid = $graphObject->getRigid();
$base = $graphObject->getBase();
$unitsExponent = $graphObject->getUnitsExponent();
$noMinorGrids = $graphObject->getNoMinorGrids();
$stepValue = $graphObject->getStepValue();
$gprintFormat = $graphObject->getGprintFormat();
print ("name: $name\n");
print ("title: $title\n");
print ("comment: $comment\n");
print ("image format: $imageFormat\n");
print ("width: $width\n");
print ("height: $height\n");
print ("vertical label: $verticalLabel\n");
print ("upper limit: $upperLimit\n");
print ("lower limit: $lowerLimit\n");
print ("rigid: $rigid\n");
print ("base: $base\n");
print ("units exponent: $unitsExponent\n");
print ("no minor grids: $noMinorGrids\n");
print ("step value: $stepValue\n");
print ("gprint format: $gprintFormat\n");
print "\n";
# Inner loop: each $key here is a GraphMetric object from the graph's
# metricArray; it shadows the outer graph-name $key (both are "my"
# lexicals, so this is safe but easy to misread).
foreach my $key (@{$graphObject->{metricArray}}) {
my $metricName = $key->getName();
my $color = $key->{color};
my $lineType = $key->getLineType();
my $cDefinition = $key->getCdefinition();
print "Graph Metric Name: $metricName\n";
print "Color: $color\n";
print "Line Type: $lineType\n";
print "GPRINT : @{$key->{gprintArray}}\n";
print "CDEF: $cDefinition\n";
print "\n";
}
}
# Serialize the fully-populated service definition to disk so the
# collector can load it later.
# NOTE(review): $perfhome is never assigned anywhere in this file --
# presumably it is set by ServiceConfig.pl; if it is unset/empty the
# target path collapses to "/etc/configs/Linux/mem.ser".  Confirm.
# The "$service->{serviceName}" hash-element interpolation inside the
# strings is valid Perl and yields "mem" here.
$service->store("$perfhome/etc/configs/Linux/$service->{serviceName}.ser") or die("can't store $service->{serviceName}.ser?\n");
| ktenzer/perfstat | misc/serialize/create/Linux/120304/mem.pl | Perl | apache-2.0 | 10,545 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.