code stringlengths 2 1.05M | repo_name stringlengths 5 101 | path stringlengths 4 991 | language stringclasses 3 values | license stringclasses 5 values | size int64 2 1.05M |
|---|---|---|---|---|---|
package App::Netdisco::SSHCollector::Platform::VOSS;
=head1 NAME
App::Netdisco::SSHCollector::Platform::VOSS
=head1 DESCRIPTION
Collect ARP entries from Extreme VSP devices running the VOSS operating system.
This is useful if running multiple VRFs as the built-in SNMP ARP collection will only fetch from the default GlobalRouter VRF.
By default this module gets ARP entries from all VRFs (0-512). To specify only certain VRFs in the config:
device_auth:
- tag: sshvsp
driver: cli
platform: VOSS
only:
- 10.1.1.1
- 192.168.0.1
username: oliver
password: letmein
vrfs: 1,5,100
The VRFs can be specified in any format that the "show ip arp vrfids" command will take. For example:
1,2,3,4,5,10
1-5,10
1-100
99
=cut
use strict;
use warnings;
use Dancer ':script';
use Expect;
use Moo;
=head1 PUBLIC METHODS
=over 4
=item B<arpnip($host, $ssh)>
Retrieve ARP entries from device. C<$host> is the hostname or IP address
of the device. C<$ssh> is a Net::OpenSSH connection to the device.
Returns a list of hashrefs in the format C<{ mac =E<gt> MACADDR, ip =E<gt> IPADDR }>.
=back
=cut
sub arpnip {
    my ($self, $hostlabel, $ssh, $args) = @_;
    debug "$hostlabel $$ arpnip()";

    # Default to the entire range of VRF IDs supported on VOSS.
    my $vrflist = "0-512";

    # If specified in the config, only get ARP from certain VRFs.  The value
    # is passed verbatim to "show ip arp vrfids", so restrict it to digits,
    # commas and dashes to avoid sending arbitrary CLI text to the device.
    if ($args->{vrfs}) {
        if ($args->{vrfs} =~ m/^[0-9,\-]+$/) {
            $vrflist = $args->{vrfs};
        }
    }

    my ($pty, $pid) = $ssh->open2pty;
    unless ($pty) {
        debug "unable to run remote command [$hostlabel] " . $ssh->error;
        return ();
    }

    my $expect = Expect->init($pty);
    my ($pos, $error, $match, $before, $after);
    my $prompt = qr/>/;

    ($pos, $error, $match, $before, $after) = $expect->expect(10, -re, $prompt);

    # Disable paging so the full ARP table arrives without --More-- prompts.
    $expect->send("terminal more disable\n");
    ($pos, $error, $match, $before, $after) = $expect->expect(5, -re, $prompt);
    if ($before =~ m/% Invalid input detected/) {
        debug "invalid command [$hostlabel]";
        return ();
    }

    $expect->send("show ip arp vrfids $vrflist\n");
    ($pos, $error, $match, $before, $after) = $expect->expect(60, -re, $prompt);
    my @lines = split(m/\n/, $before);

    if ($before =~ m/% Invalid input detected/) {
        debug "invalid command [$hostlabel]";
        return ();
    }
    if ($before =~ m/Error : ([^\n]+)/) {
        my $errormsg = $1;
        if ($errormsg =~ m/Invalid VRF ID/ || $errormsg =~ m/vrfId should be/) {
            debug "incorrect VRF specified [$hostlabel] : $vrflist : $errormsg";
            return ();
        }
        else {
            debug "general error fetching ARP [$hostlabel] : $errormsg";
            return ();
        }
    }

    my @arpentries;
    my $ipregex = '(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)';
    # Accept both colon-separated (aa:bb:cc:dd:ee:ff) and dotted
    # (0024.b269.867d) MAC formats.  The sample output below uses the dotted
    # form, which the previous colon-only pattern could never match; some
    # firmware may print colons instead, so both are allowed.
    my $macregex = '(?:(?:[[:xdigit:]]{2}:){5}[[:xdigit:]]{2}|(?:[[:xdigit:]]{4}\.){2}[[:xdigit:]]{4})';

    # IP Address       MAC Address     VLAN  Port  Type     TTL  Tunnel
    # 172.16.20.15     0024.b269.867d  100   1/1   DYNAMIC  999  device-name
    foreach my $line (@lines) {
        next unless $line =~ m/^\s*$ipregex\s+$macregex/;
        my @fields = split m/\s+/, $line;
        debug "[$hostlabel] arpnip - mac $fields[1] ip $fields[0]";
        push @arpentries, { mac => $fields[1], ip => $fields[0] };
    }

    $expect->send("exit\n");
    $expect->soft_close();

    return @arpentries;
}
1;
| netdisco/netdisco | lib/App/Netdisco/SSHCollector/Platform/VOSS.pm | Perl | bsd-3-clause | 3,514 |
# SNMP::Info::Layer1::Asante
# $Id$
#
# Copyright (c) 2008 Max Baker changes from version 0.8 and beyond.
#
# Copyright (c) 2002,2003 Regents of the University of California
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California, Santa Cruz nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
package SNMP::Info::Layer1::Asante;
use strict;
use Exporter;
use SNMP::Info::Layer1;
@SNMP::Info::Layer1::Asante::ISA = qw/SNMP::Info::Layer1 Exporter/;
@SNMP::Info::Layer1::Asante::EXPORT_OK = qw//;
# NOTE(review): "use vars" is obsolete in modern Perl ("our" is preferred),
# but is kept here to match the style of the wider SNMP::Info distribution.
use vars qw/$VERSION %FUNCS %GLOBALS %MIBS %MUNGE/;
$VERSION = '3.34';
# Set for No CDP
%GLOBALS = ( %SNMP::Info::Layer1::GLOBALS, );
# Table methods: map SNMP::Info accessor names to leaf names from the
# ASANTE-AH1012-MIB, on top of everything inherited from Layer1.
%FUNCS = (
%SNMP::Info::Layer1::FUNCS,
'asante_port' => 'ePortIndex',
'asante_group' => 'ePortGrpIndex',
'i_type' => 'ePortGrpIndex',
'asante_up' => 'ePortStateLinkStatus',
);
%MIBS = ( %SNMP::Info::Layer1::MIBS, 'ASANTE-AH1012-MIB' => 'asante' );
%MUNGE = ( %SNMP::Info::Layer1::MUNGE, );
sub interfaces {
    my ($asante, $partial) = @_;

    # Map each repeater-port IID to itself: on these hubs the IID doubles
    # as the physical port identifier.
    my $ports = $asante->rptr_port($partial) || {};
    my %iid_map = map { $_ => $_ } keys %$ports;

    return \%iid_map;
}
sub os {
    # These hubs run Asante's own firmware.
    return 'asante';
}
sub os_ver {
    my $asante = shift;

    # Extract the firmware revision (e.g. "5.1") from the sysDescr string.
    # Guard against an undefined description so the regex match does not
    # operate on undef.
    my $descr = $asante->description();
    return unless defined $descr;

    if ( $descr =~ /software v(\d+\.\d+)/ ) {
        return $1;
    }

    return;    # version string not present in the description
}
sub vendor {
    # Hardware vendor is always Asante for this subclass.
    return 'asante';
}
sub model {
    my $asante = shift;

    # Translate the device's sysObjectID into its MIB leaf name, which
    # serves as the model string for these hubs.
    return &SNMP::translateObj( $asante->id() );
}
sub i_up {
    my ($asante, $partial) = @_;

    my $link_led = $asante->asante_up($partial) || {};
    my %status;

    # Map the ePortStateLinkStatus LED value to up/down: a value matching
    # /on/ is reported as 'down' and a value matching /off/ as 'up'
    # (both regexes are applied, in that order, as in the MIB data).
    for my $iid ( keys %$link_led ) {
        my $led = $link_led->{$iid};
        $status{$iid} = 'down' if $led =~ /on/;
        $status{$iid} = 'up'   if $led =~ /off/;
    }

    return \%status;
}
sub i_speed {
    my ($asante, $partial) = @_;

    # The hub exposes a single interface entry; re-key it as "1.2" to match
    # the IID scheme used by the other table methods in this class.
    my $orig = $asante->orig_i_speed($partial) || {};
    return { '1.2' => $orig->{1} };
}
sub i_mac {
    my ($asante, $partial) = @_;

    # Single interface entry, re-keyed as "1.2" to match the IID scheme
    # used by the other table methods in this class.
    my $orig = $asante->orig_i_mac($partial) || {};
    return { '1.2' => $orig->{1} };
}
sub i_description {
    # These hubs provide no per-interface description.
    return;
}
sub i_name {
    my ($asante, $partial) = @_;

    # Use the interface description as the name; single entry, re-keyed as
    # "1.2" to match the IID scheme used by the other table methods.
    my $orig = $asante->orig_i_descr($partial) || {};
    return { '1.2' => $orig->{1} };
}
1;
__END__
=head1 NAME
SNMP::Info::Layer1::Asante - SNMP Interface to old Asante 1012 Hubs
=head1 AUTHOR
Max Baker
=head1 SYNOPSIS
# Let SNMP::Info determine the correct subclass for you.
my $asante = new SNMP::Info(
AutoSpecify => 1,
Debug => 1,
DestHost => 'myswitch',
Community => 'public',
Version => 2
)
or die "Can't connect to DestHost.\n";
my $class = $asante->class();
print "SNMP::Info determined this device to fall under subclass : $class\n";
=head1 DESCRIPTION
Provides abstraction to the configuration information obtainable from a
Asante device through SNMP.
=head2 Inherited Classes
=over
=item SNMP::Info::Layer1
=back
=head2 Required MIBs
=over
=item F<ASANTE-AH1012-MIB>
=back
=head2 Inherited MIBs
See L<SNMP::Info::Layer1/"Required MIBs"> for its MIB requirements.
=head1 GLOBALS
=head2 Overrides
=over
=item $asante->os()
Returns 'asante'
=item $asante->os_ver()
Culls software version from description()
=item $asante->vendor()
Returns 'asante' :)
=item $asante->model()
Cross references $asante->id() to the F<ASANTE-AH1012-MIB> and returns
the results.
=back
=head2 Global Methods imported from SNMP::Info::Layer1
See L<SNMP::Info::Layer1/"GLOBALS"> for details.
=head1 TABLE METHODS
=head2 Overrides
=over
=item $asante->interfaces()
Returns reference to the map between IID and physical Port.
=item $asante->i_description()
Description of the interface.
=item $asante->i_mac()
MAC address of the interface. Note this is just the MAC of the port, not
anything connected to it.
=item $asante->i_name()
Returns reference to map of IIDs to human-set port name.
=item $asante->i_up()
Returns reference to map of IIDs to link status. Changes
the values of asante_up() to 'up' and 'down'.
=item $asante->i_speed()
Speed of the link, human format.
=back
=head2 Asante MIB
=over
=item $asante->ati_p_name()
(C<portName>)
=item $asante->ati_up()
(C<linkTestLED>)
=back
=head2 Table Methods imported from SNMP::Info::Layer1
See L<SNMP::Info::Layer1/"TABLE METHODS"> for details.
=cut
| 42wim/snmp-info | Info/Layer1/Asante.pm | Perl | bsd-3-clause | 6,232 |
#!/usr/bin/perl -CSD
#
# Simple command script for executing ParsCit in an
# offline mode (direct API call instead of going through
# the web service).
#
# Isaac Councill, 08/23/07
#
use strict;
use FindBin;
use lib "$FindBin::Bin/../lib";
use ParsCit::Controller;
# Input text file is required; output file is optional (defaults to STDOUT).
my $textFile = $ARGV[0];
my $outFile = $ARGV[1];

if (!defined $textFile) {
    print "Usage: $0 textfile [outfile]\n";
    exit;
}

# extractCitations returns a reference to a scalar holding the XML result.
my $rXML = ParsCit::Controller::extractCitations($textFile);

if (defined $outFile) {
    # Three-arg open with a lexical filehandle: avoids mode injection via
    # the filename and keeps the handle scoped (the old two-arg bareword
    # form "open(OUT, \">$outFile\")" had both problems).
    open my $out_fh, '>', $outFile
        or die "Could not open $outFile for writing: $!";
    print {$out_fh} $$rXML;
    # Buffered write errors only surface at close, so check it too.
    close $out_fh or die "Could not close $outFile: $!";
} else {
    print "$$rXML";
}
| WebSciences/citeseer-ucl | src/perl/ParsCit/bin/citeExtract.pl | Perl | apache-2.0 | 631 |
#!perl
##########################################################################
# FILE mk_distro.pl
# PURPOSE General Interface for making a Windows distribution
# ====================================================================
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ====================================================================
##########################################################################
# INCLUDED LIBRARY FILES
use strict;
#use File::Find;
use Cwd;
#use Win32;
require 'cmn.pl';
##########################################################################
# FUNCTION DECLARATIONS
sub Main;
sub MakeSetup;
##########################################################################
# CONSTANTS AND GLOBAL VARIABLES
my $g_AutoRun='';      # "y" when started with -a (non-interactive autorun)
my $g_MakeVersion='';  # "y" => run set_version.pl
my $g_MakeDocs='';     # "y" => run mk_svndoc.pl
my $g_MakeSetup='';    # "y" => compile the final Inno Setup installer
##########################################################################
# PROGRAM ENTRANCE
Main;
##########################################################################
# FUNCTION DEFINITIONS
#-------------------------------------------------------------------------
# FUNCTION Main
# DOES This is the program's main function
sub Main
{
    # Treat a leading "-a" argument as autorun (non-interactive build).
    # Guard with defined() so an empty @ARGV does not compare undef with eq.
    $g_AutoRun="y" if (defined $ARGV[0] && $ARGV[0] eq "-a");

    my $Input='';

    if ($g_AutoRun)
    {
        # Non-interactive: stamp version info, build the XML docs, compile.
        system ("perl set_version.pl -a");
        system ("perl mk_svndoc.pl");
        &MakeSetup;
    }
    else
    {
        print "mk_distro\n\n",
            " You can make a complete or partial part of your setup from here. Bear in mind\n",
            " that all the files needed to make and include in the setup must be in place.\n",
            "\n MENU:\n",
            " -----\n",
            " v) Set version info on files related to the setup\n",
            " d) Make documentation of the XML files in your working Subversion repository\n",
            " s) Make a final setup of what you have made of v and d\n",
            " e) Make everything\n\n",
            " q) quit\n\n",
            " Please, select one item [v/d/s/e/q]: ";

        $Input = <STDIN>;
        $Input = '' unless defined $Input;   # EOF on STDIN -> treat as empty
        chomp $Input;

        exit if ($Input eq "q");

        $g_MakeVersion='y' if ($Input eq "v" || $Input eq "e");
        $g_MakeDocs='y' if ($Input eq "d" || $Input eq "e");
        $g_MakeSetup='y' if ($Input eq "s" || $Input eq "e");

        if (! $g_MakeVersion && ! $g_MakeDocs && ! $g_MakeSetup)
        {
            print "\nUh, you did not give me a v,d,s,e or q, please try again\n";
            sleep (2);
            &Main;
            # The recursive call above handles the retried selection
            # completely; returning here prevents the tasks from running a
            # second time in this frame once the globals have been set.
            return;
        }

        system ("perl set_version.pl") if ($g_MakeVersion);
        system ("perl mk_svndoc.pl")if ($g_MakeDocs);
        &MakeSetup if ($g_MakeSetup);
    }
}
#-------------------------------------------------------------------------
# FUNCTION MakeSetup
# DOES Making the Setup file
sub MakeSetup
{
# NOTE(review): $SetupOut is never used below.  &PathSetupOut is defined in
# cmn.pl (not visible here) and may validate or compute the output path as
# a side effect -- confirm before removing this call.
my $SetupOut=&PathSetupOut;
my $PathISExe=&PathISExe;
my $RetVal=0;
# The Inno Setup script svn.iss lives one directory up from this tool dir.
chdir '..';
if (! $g_AutoRun)
{
# (The doubled semicolon below is harmless: the second is an empty
# statement.)
print "Compiling the setup (take a nap, this will take some time)...\n";;
}
# Run the Inno Setup command-line compiler; its output is captured into
# $RetVal but not inspected afterwards.
$RetVal=`"$PathISExe" svn.iss`;
}
#-------------------------------------------------------------------------------
# FUNCTION PathISExe
# DOES Finding and returning the current svn.exe path as of
# ..\svn_iss_dyn.iss
sub PathISExe
{
    # Resolve the Inno Setup install directory from the configured
    # 'path_is' value, then verify the compiler binary actually exists.
    my $is_dir = &cmn_ValuePathfile('path_is');

    if ( ! -e "$is_dir/ISCC.exe")
    {
        die "ERROR: Could not find path to ISCC.exe in svn_dynamics.iss\n";
    }

    # Return the full Windows-style path to the ISCC.exe compiler.
    return "$is_dir\\ISCC.exe";
}
| jmckaskill/subversion | packages/windows-innosetup/tools/mk_distro.pl | Perl | apache-2.0 | 4,314 |
#------------------------------------------------------------------------------
# File: cz.pm
#
# Description: ExifTool language-specific tag information
#
# Notes: This file generated automatically by Image::ExifTool::TagInfoXML
#------------------------------------------------------------------------------
package Image::ExifTool::Lang::cz;
%Image::ExifTool::Lang::cz::Translate = (
'AEMeteringMode' => {
PrintConv => {
'Multi-segment' => 'Multi segment',
},
},
'AEProgramMode' => {
PrintConv => {
'Landscape' => 'Krajina',
'Macro' => 'Makro',
'Portrait' => 'Portrét',
},
},
'AFPoint' => {
PrintConv => {
'None' => 'Žádná',
},
},
'AFPointBrightness' => {
PrintConv => {
'Normal' => 'Normální',
},
},
'AFPointSelectionMethod' => {
PrintConv => {
'Normal' => 'Normální',
},
},
'AFPointsInFocus' => {
PrintConv => {
'None' => 'Žádná',
},
},
'APEVersion' => 'APE verze',
'ActiveD-Lighting' => {
PrintConv => {
'Low' => 'Méně',
'Normal' => 'Normální',
},
},
'ActiveD-LightingMode' => {
PrintConv => {
'Low' => 'Méně',
'Normal' => 'Normální',
},
},
'AdultContentWarning' => {
PrintConv => {
'Unknown' => 'Neznámý',
},
},
'Annotations' => 'Poznámky Photoshop',
'Aperture' => 'F hodnota',
'ApertureValue' => 'Clona',
'Artist' => 'Autor',
'AssistButtonFunction' => {
PrintConv => {
'Normal' => 'Normální',
},
},
'AutoBracketRelease' => {
PrintConv => {
'None' => 'Žádná',
},
},
'AutoLightingOptimizer' => {
PrintConv => {
'Low' => 'Méně',
},
},
'AutoRotate' => {
PrintConv => {
'None' => 'Žádná',
'Rotate 180' => '180° (dolů/vpravo)',
'Rotate 270 CW' => '90° po směru HR (vlevo/dolů)',
'Rotate 90 CW' => '90° ptoti směru HR (vpravo/nahoru)',
'Unknown' => 'Neznámý',
},
},
'BadFaxLines' => 'Špatné faxové řádky',
'BannerImageType' => {
PrintConv => {
'None' => 'Žádná',
},
},
'BatteryLevel' => 'Stav baterie',
'BitsPerSample' => 'Počet bitů na složku',
'BlurWarning' => {
PrintConv => {
'None' => 'Žádná',
},
},
'BrightnessValue' => 'Jas',
'CFAPattern' => 'CFA matrice',
'CFAPattern2' => 'CFA matice 2',
'CFARepeatPatternDim' => 'Velikost berevné matice CFA',
'CPUType' => {
PrintConv => {
'None' => 'Žádná',
},
},
'CalibrationIlluminant1' => {
PrintConv => {
'Cloudy' => 'Zataženo',
'Cool White Fluorescent' => 'Studená zářivka',
'Day White Fluorescent' => 'Denní zářivka',
'Daylight' => 'Denní světlo',
'Daylight Fluorescent' => 'Denní světlo',
'Fine Weather' => 'Slunečno',
'Flash' => 'Blesk',
'Fluorescent' => 'Žárovka',
'ISO Studio Tungsten' => 'Studiová světla',
'Other' => 'jiné osvětlení',
'Shade' => 'Stíny',
'Standard Light A' => 'Standardní světlo A',
'Standard Light B' => 'Standardní světlo B',
'Standard Light C' => 'Standardní světlo C',
'Tungsten' => 'Zářivka',
'Unknown' => 'Neznámý',
'White Fluorescent' => 'Bílá zářivka',
},
},
'CalibrationIlluminant2' => {
PrintConv => {
'Cloudy' => 'Zataženo',
'Cool White Fluorescent' => 'Studená zářivka',
'Day White Fluorescent' => 'Denní zářivka',
'Daylight' => 'Denní světlo',
'Daylight Fluorescent' => 'Denní světlo',
'Fine Weather' => 'Slunečno',
'Flash' => 'Blesk',
'Fluorescent' => 'Žárovka',
'ISO Studio Tungsten' => 'Studiová světla',
'Other' => 'jiné osvětlení',
'Shade' => 'Stíny',
'Standard Light A' => 'Standardní světlo A',
'Standard Light B' => 'Standardní světlo B',
'Standard Light C' => 'Standardní světlo C',
'Tungsten' => 'Zářivka',
'Unknown' => 'Neznámý',
'White Fluorescent' => 'Bílá zářivka',
},
},
'CameraOrientation' => {
PrintConv => {
'Horizontal (normal)' => '0° (nahoru/vlevo)',
'Rotate 270 CW' => '90° po směru HR (vlevo/dolů)',
'Rotate 90 CW' => '90° ptoti směru HR (vpravo/nahoru)',
},
},
'CanonExposureMode' => {
PrintConv => {
'Aperture-priority AE' => 'Priorita clony',
'Manual' => 'Manuální',
'Shutter speed priority AE' => 'Priorita času',
},
},
'CaptureXResolutionUnit' => {
PrintConv => {
'um' => 'µm (mikrometr)',
},
},
'CaptureYResolutionUnit' => {
PrintConv => {
'um' => 'µm (mikrometr)',
},
},
'CellLength' => 'Délka buňky',
'CellWidth' => 'Šířka buňky',
'CenterWeightedAreaSize' => {
PrintConv => {
'Average' => 'Průměr',
},
},
'ChrominanceNR_TIFF_JPEG' => {
PrintConv => {
'Low' => 'Méně',
},
},
'ChrominanceNoiseReduction' => {
PrintConv => {
'Low' => 'Méně',
},
},
'CleanFaxData' => 'Čistá fax data',
'ColorMap' => 'Barevná mapa',
'ColorMode' => {
PrintConv => {
'Landscape' => 'Krajina',
'Normal' => 'Normální',
'Portrait' => 'Portrét',
},
},
'ColorMoireReductionMode' => {
PrintConv => {
'Low' => 'Méně',
},
},
'ColorResponseUnit' => 'Odpovídající barevná jednotka',
'ColorSpace' => {
Description => 'Barevný prostor',
PrintConv => {
'Uncalibrated' => 'Nekalibrován',
},
},
'CommanderGroupAMode' => {
PrintConv => {
'Manual' => 'Manuální',
},
},
'CommanderGroupBMode' => {
PrintConv => {
'Manual' => 'Manuální',
},
},
'CommanderInternalFlash' => {
PrintConv => {
'Manual' => 'Manuální',
},
},
'Comment' => 'Komentář',
'ComponentsConfiguration' => 'Určení složek',
'CompressedBitsPerPixel' => 'Komprimační mod',
'Compression' => {
Description => 'Kompresní algoritmus',
PrintConv => {
'Epson ERF Compressed' => 'Epson ERF komprese',
'JPEG (old-style)' => 'JPEG (pův. verze)',
'Kodak DCR Compressed' => 'Kodak DCR komprese',
'Kodak KDC Compressed' => 'Kodak KDC komprese',
'Next' => 'kódování NeXT 2-bit',
'Nikon NEF Compressed' => 'Nikon NEF komprese',
'None' => 'Žádná',
'Pentax PEF Compressed' => 'Pentax PEF komprese',
'SGILog' => 'kódování SGI 32-bit Log Luminance',
'SGILog24' => 'kódování SGI 24-bit Log Luminance',
'Sony ARW Compressed' => 'Sony ARW komprese',
'Thunderscan' => 'kódování ThunderScan 4-bit',
'Uncompressed' => 'bez komprese',
},
},
'CompressionType' => {
PrintConv => {
'None' => 'Žádná',
},
},
'ConsecutiveBadFaxLines' => 'Sekvence vadných faxových řádků',
'ContentType' => {
PrintConv => {
'Normal' => 'Normální',
},
},
'Contrast' => {
Description => 'Kontrast',
PrintConv => {
'High' => 'Více',
'Low' => 'Méně',
'Normal' => 'Normální',
},
},
'ConversionLens' => {
PrintConv => {
'Macro' => 'Makro',
},
},
'Copyright' => 'Držitel práv',
'CopyrightStatus' => {
PrintConv => {
'Unknown' => 'Neznámý',
},
},
'CreateDate' => 'Datum a čas generování digitálních dat',
'CropUnit' => {
PrintConv => {
'inches' => 'palce',
},
},
'CropUnits' => {
PrintConv => {
'inches' => 'palce',
},
},
'CustomRendered' => {
Description => 'Zpracování obrazu',
PrintConv => {
'Custom' => 'Uživatelské zpracování',
'Normal' => 'Normální proces',
},
},
'DataImprint' => {
PrintConv => {
'None' => 'Žádná',
},
},
'DateTime' => 'Datum a čas změny souboru',
'DateTimeOriginal' => 'Datum a čas vzniku originálních dat',
'Description' => 'Popis',
'DeviceSettingDescription' => 'Popis nastavení zařízení',
'DialDirectionTvAv' => {
PrintConv => {
'Normal' => 'Normální',
},
},
'DigitalZoom' => {
PrintConv => {
'None' => 'Žádná',
},
},
'DigitalZoomRatio' => 'Digitální zoom',
'DisplaySize' => {
PrintConv => {
'Normal' => 'Normální',
},
},
'DisplayUnits' => {
PrintConv => {
'inches' => 'palce',
},
},
'DisplayXResolutionUnit' => {
PrintConv => {
'um' => 'µm (mikrometr)',
},
},
'DisplayYResolutionUnit' => {
PrintConv => {
'um' => 'µm (mikrometr)',
},
},
'DisplayedUnitsX' => {
PrintConv => {
'inches' => 'palce',
},
},
'DisplayedUnitsY' => {
PrintConv => {
'inches' => 'palce',
},
},
'DjVuVersion' => 'DjVu verze',
'DocumentName' => 'Jméno dokumentu',
'DotRange' => 'Bodová rozteč',
'ETTLII' => {
PrintConv => {
'Average' => 'Průměr',
},
},
'EasyMode' => {
PrintConv => {
'Landscape' => 'Krajina',
'Macro' => 'Makro',
'Manual' => 'Manuální',
'Night' => 'Noční foto',
'Portrait' => 'Portrét',
},
},
'Emphasis' => {
PrintConv => {
'None' => 'Žádná',
},
},
'ExifImageHeight' => 'Výška',
'ExifImageWidth' => 'Šířka',
'ExifToolVersion' => 'ExifTool verze',
'ExifVersion' => 'Exif verze',
'ExpandFilm' => 'Ext. film',
'ExpandFilterLens' => 'Ext. filtr objektivu',
'ExpandFlashLamp' => 'Ext. světlo blesku',
'ExpandLens' => 'Ext. objektiv',
'ExpandScanner' => 'Ext. skener',
'ExpandSoftware' => 'Ext. Software',
'ExposureCompensation' => 'Korekce expozice',
'ExposureIndex' => 'Index expozice',
'ExposureMode' => {
Description => 'Mód expozice',
PrintConv => {
'Aperture Priority' => 'Priorita clony',
'Aperture-priority AE' => 'Priorita clony',
'Auto' => 'Automatická expozice',
'Auto bracket' => 'Auto braketing',
'Landscape' => 'Krajina',
'Manual' => 'Manuální expozice',
'Portrait' => 'Portrét',
'Shutter Priority' => 'Priorita času',
'Shutter speed priority AE' => 'Priorita času',
},
},
'ExposureModeInManual' => {
PrintConv => {
'Center-weighted average' => 'Zvýrazněný střed',
'Partial metering' => 'Blokové',
'Spot metering' => 'Středový bod',
},
},
'ExposureProgram' => {
Description => 'Expoziční mod',
PrintConv => {
'Action (High speed)' => 'Akční program (ovlivněný čas závěrky)',
'Aperture Priority' => 'Priorita clony',
'Aperture-priority AE' => 'Priorita clony',
'Creative (Slow speed)' => 'Kreativní program (ovlivněná hloubka ostrosti)',
'Landscape' => 'Krajina',
'Manual' => 'Manuální',
'Not Defined' => 'Nedefinovaný',
'Portrait' => 'Portrét',
'Program AE' => 'Normální program',
'Shutter Priority' => 'Priorita času',
'Shutter speed priority AE' => 'Priorita času',
},
},
'ExposureTime' => 'Expoziční čas',
'ExposureTime2' => 'Expoziční čas 2',
'FNumber' => 'F hodnota',
'FaxProfile' => {
PrintConv => {
'Unknown' => 'Neznámý',
},
},
'FaxRecvParams' => 'Parametry příjemce faxu',
'FaxRecvTime' => 'Čas příjmu faxu',
'FaxSubAddress' => 'Sub adresa faxu',
'FileSource' => {
Description => 'Zdroj dat',
PrintConv => {
'Digital Camera' => 'Digitální fotoaparát',
'Film Scanner' => 'Filmový skener',
'Reflection Print Scanner' => 'Skener',
},
},
'FillOrder' => {
Description => 'Pořadí výplně',
PrintConv => {
'Normal' => 'Normální',
},
},
'FilterEffect' => {
PrintConv => {
'None' => 'Žádná',
},
},
'FilterEffectMonochrome' => {
PrintConv => {
'None' => 'Žádná',
},
},
'Flash' => {
Description => 'Blesk',
PrintConv => {
'Auto, Did not fire' => 'Blesk nepoužit, auto mod',
'Auto, Did not fire, Red-eye reduction' => 'Auto mod, nepoužit, redukce červených očí',
'Auto, Fired' => 'Blesk použit, auto mod',
'Auto, Fired, Red-eye reduction' => 'Blesk použit, auto mod, redukce červených očí',
'Auto, Fired, Red-eye reduction, Return detected' => 'Blesk použit, auto mod, redukce červených očí, odraz detekován',
'Auto, Fired, Red-eye reduction, Return not detected' => 'Blesk použit, auto mod, redukce červených očí, odraz nezjištěn',
'Auto, Fired, Return detected' => 'Blesk použit, auto mod, odraz detekován',
'Auto, Fired, Return not detected' => 'Blesk použit, auto mod, odraz nedetekován',
'Did not fire' => 'Blesk ne',
'Fired' => 'Blesk ano',
'Fired, Red-eye reduction' => 'Blesk použit, redukce červených očí',
'Fired, Red-eye reduction, Return detected' => 'Blesk použit, redukce červených očí, odraz detekován',
'Fired, Red-eye reduction, Return not detected' => 'Blesk použit, redukce červených očí, odraz nezjištěn',
'Fired, Return detected' => 'Odraz strobozáblesků detekován',
'Fired, Return not detected' => 'Odraz strobozáblesků nezjištěn',
'No Flash' => 'Blesk ne',
'No flash function' => 'Blesk nezjištěn',
'Off' => 'Blesk nepoužit, vynucený mod',
'Off, Did not fire' => 'Blesk nepoužit, vynucený mod',
'Off, Did not fire, Return not detected' => 'Blesk vypnut, bez záblesku, odraz nezachycen',
'Off, No flash function' => 'Neaktivní, bez funkce blesku',
'Off, Red-eye reduction' => 'Neaktivní, redukce červených očí',
'On' => 'Blesk použit, vynucený mod',
'On, Did not fire' => 'Blesk zapnut, nepoužit',
'On, Fired' => 'Blesk použit, vynucený mod',
'On, Red-eye reduction' => 'Blesk použit, vynucený mod, redukce červených očí',
'On, Red-eye reduction, Return detected' => 'Blesk použit, vynucený mod, redukce červených očí, odraz detekován',
'On, Red-eye reduction, Return not detected' => 'Blesk použit, vynucený mod, redukce červených očí, odraz nezjištěn',
'On, Return detected' => 'Blesk použit, vynucený mod, odraz detekován',
'On, Return not detected' => 'Blesk použit, vynucený mod, odraz nezjištěn',
},
},
'FlashControlMode' => {
PrintConv => {
'Manual' => 'Manuální',
},
},
'FlashDevice' => {
PrintConv => {
'None' => 'Žádná',
},
},
'FlashEnergy' => 'Síla záblesku',
'FlashGroupAControlMode' => {
PrintConv => {
'Manual' => 'Manuální',
},
},
'FlashGroupBControlMode' => {
PrintConv => {
'Manual' => 'Manuální',
},
},
'FlashGroupCControlMode' => {
PrintConv => {
'Manual' => 'Manuální',
},
},
'FlashIntensity' => {
PrintConv => {
'Normal' => 'Normální',
},
},
'FlashMode' => {
PrintConv => {
'Normal' => 'Normální',
'Unknown' => 'Neznámý',
},
},
'FlashModel' => {
PrintConv => {
'None' => 'Žádná',
},
},
'FlashOn' => 'Blesk',
'FlashOptions' => {
PrintConv => {
'Normal' => 'Normální',
},
},
'FlashOptions2' => {
PrintConv => {
'Normal' => 'Normální',
},
},
'FlashType' => {
PrintConv => {
'None' => 'Žádná',
},
},
'FlashpixVersion' => 'Podporovaná verze Flashpix',
'FocalLength' => 'Ohnisková vzdálenost',
'FocalLength35efl' => 'Ohnisková vzdálenost',
'FocalLengthIn35mmFormat' => 'Přepočtená ohnisková vzdálenost (35mm)',
'FocalPlaneResolutionUnit' => {
Description => 'Jednotka rozlišení senzoru',
PrintConv => {
'None' => 'Žádná',
'inches' => 'palce',
'um' => 'µm (mikrometr)',
},
},
'FocalPlaneXResolution' => 'Horizontální rozlišení senzoru',
'FocalPlaneYResolution' => 'Vertikální rozlišení senzoru',
'Focus' => {
PrintConv => {
'Manual' => 'Manuální',
},
},
'FocusContinuous' => {
PrintConv => {
'Manual' => 'Manuální',
},
},
'FocusMode' => {
PrintConv => {
'Macro' => 'Makro',
'Manual' => 'Manuální',
'Normal' => 'Normální',
},
},
'FocusMode2' => {
PrintConv => {
'Manual' => 'Manuální',
},
},
'FocusModeSetting' => {
PrintConv => {
'Manual' => 'Manuální',
},
},
'FocusRange' => {
PrintConv => {
'Macro' => 'Makro',
'Manual' => 'Manuální',
'Normal' => 'Normální',
},
},
'FocusTrackingLockOn' => {
PrintConv => {
'Normal' => 'Normální',
},
},
'FreeByteCounts' => 'Počet volných bytů',
'FreeOffsets' => 'Volná datová pozice',
'GIFVersion' => 'GIF verze',
'GPSAltitude' => 'Nadmořská výška',
'GPSAltitudeRef' => {
Description => 'Nadmořská výška',
PrintConv => {
'Above Sea Level' => 'Nadmořská výška',
'Below Sea Level' => 'Nadmořská výška (záporná hodnota)',
},
},
'GPSAreaInformation' => 'Název GPS oblasti',
'GPSDOP' => 'Přesnost měření',
'GPSDateStamp' => 'GPS Datum',
'GPSDateTime' => 'GPS čas (atomový čas)',
'GPSDestBearing' => 'Azimut cíle',
'GPSDestBearingRef' => {
Description => 'Reference azimutu cíle.',
PrintConv => {
'Magnetic North' => 'Magnetický směr',
'True North' => 'Geografický směr',
},
},
'GPSDestDistance' => 'Vzdálenost k cíli',
'GPSDestDistanceRef' => {
Description => 'Reference vzdálenosti cíle',
PrintConv => {
'Kilometers' => 'Kilometry',
'Miles' => 'Míle',
'Nautical Miles' => 'Uzle',
},
},
'GPSDestLatitude' => 'Zeměpisná šířka cíle',
'GPSDestLatitudeRef' => {
Description => 'Reference pro zeměpisnou šířku cíle',
PrintConv => {
'North' => 'Severní šířka',
'South' => 'Jižní šířka',
},
},
'GPSDestLongitude' => 'Zeměpisná délka cíle',
'GPSDestLongitudeRef' => {
Description => 'Reference pro zeměpisnou délku cíle',
PrintConv => {
'East' => 'Východní délka',
'West' => 'Západní délka',
},
},
'GPSDifferential' => {
Description => 'GPS rozdílová korekce',
PrintConv => {
'Differential Corrected' => 'Započítaná rozdílová korekce',
'No Correction' => 'Měření bez korekce',
},
},
'GPSImgDirection' => 'Orientace obrázku',
'GPSImgDirectionRef' => {
Description => 'Reference k orientaci obrázku',
PrintConv => {
'Magnetic North' => 'Magnetický směr',
'True North' => 'Geografický směr',
},
},
'GPSLatitude' => 'Zeměpisná šířka',
'GPSLatitudeRef' => {
Description => 'Severní nebo Jižní šířka',
PrintConv => {
'North' => 'Severní šířka',
'South' => 'Jižní šířka',
},
},
'GPSLongitude' => 'Zeměpisná délka',
'GPSLongitudeRef' => {
Description => 'Východní nebo západní délka',
PrintConv => {
'East' => 'Východní délka',
'West' => 'Západní délka',
},
},
'GPSMapDatum' => 'Geodetická data',
'GPSMeasureMode' => {
Description => 'Mod GPS',
PrintConv => {
'2-D' => '2-dimenzionální měření',
'2-Dimensional' => '2-dimenzionální měření',
'2-Dimensional Measurement' => '2-dimenzionální měření',
'3-D' => '3-dimenzionální měření',
'3-Dimensional' => '3-dimenzionální měření',
'3-Dimensional Measurement' => '3-dimenzionální měření',
},
},
'GPSProcessingMethod' => 'Název procesní metody GPS',
'GPSSatellites' => 'GPS satelity využité při měření',
'GPSSpeed' => 'Rychlost GPS přijímače',
'GPSSpeedRef' => {
Description => 'Jednotka rychlosti',
PrintConv => {
'km/h' => 'Kilometry za hodinu',
'knots' => 'Uzle',
'mph' => 'Míle za hodinu',
},
},
'GPSStatus' => {
Description => 'Stav GPS přijímače',
PrintConv => {
'Measurement Active' => 'Probíhá měření',
},
},
'GPSTimeStamp' => 'GPS čas (atomový čas)',
'GPSTrack' => 'Směr pohybu',
'GPSTrackRef' => {
Description => 'Reference pro směr pohybu',
PrintConv => {
'Magnetic North' => 'Magnetický směr',
'True North' => 'Geografický směr',
},
},
'GPSVersionID' => 'Verze GPS TAGu',
'GainControl' => {
Description => 'Míra jasu',
PrintConv => {
'High gain down' => 'Silné zeslabení',
'High gain up' => 'Silné zesílení',
'Low gain down' => 'Slabé zeslabení',
'Low gain up' => 'Slabé zesílení',
'None' => 'Žádná',
},
},
'GrayResponseCurve' => 'Šedá referenční křivka',
'GrayResponseUnit' => {
Description => 'Jednotka odezvy šedé',
PrintConv => {
'0.0001' => 'Číslo udávající tisíce jednotek',
'0.001' => 'Číslo udávající stovky jednotek',
'0.1' => 'Číslo udávající desítky jednotek',
'1e-05' => 'Číslo udávající desetitisíce jednotek',
'1e-06' => 'Číslo udávající statisíce jednotek',
},
},
'HalftoneHints' => 'Půltóny',
'HighISONoiseReduction' => {
PrintConv => {
'Low' => 'Méně',
'Normal' => 'Normální',
},
},
'HostComputer' => 'Host',
'IPTC-NAA' => 'IPTC-NAA metadata',
'ISO' => 'Citlivost ISO',
'ISOSetting' => {
PrintConv => {
'Manual' => 'Manuální',
},
},
'ImageDescription' => 'Popis obrázku',
'ImageHeight' => 'Výška',
'ImageHistory' => 'Historie obrázku',
'ImageNumber' => 'Číslo obrázku',
'ImageOrientation' => {
PrintConv => {
'Portrait' => 'Portrét',
},
},
'ImageQuality' => {
PrintConv => {
'Normal' => 'Normální',
},
},
'ImageSourceData' => 'Zdrojová data obrázku',
'ImageTone' => {
PrintConv => {
'Landscape' => 'Krajina',
'Portrait' => 'Portrét',
},
},
'ImageUniqueID' => 'Jedinečné ID obrázku',
'ImageWidth' => 'Šířka',
'InkNames' => 'Název náplně',
'InkSet' => 'Inkoustová sada',
'InternalFlash' => {
PrintConv => {
'Fired' => 'Blesk ano',
'Manual' => 'Manuální',
'No' => 'Blesk ne',
},
},
'InteropIndex' => {
Description => 'Identifikace',
PrintConv => {
'R03 - DCF option file (Adobe RGB)' => 'R03: DCF option file (Adobe RGB)',
'R98 - DCF basic file (sRGB)' => 'R98: DCF basic file (sRGB)',
'THM - DCF thumbnail file' => 'THM: DCF thumbnail file',
},
},
'InteropVersion' => 'Verze kompatibility',
'JFIFVersion' => 'JFIF verze',
'LicenseType' => {
PrintConv => {
'Unknown' => 'Neznámý',
},
},
'LightSource' => {
Description => 'Zdroj světla',
PrintConv => {
'Cloudy' => 'Zataženo',
'Cool White Fluorescent' => 'Studená zářivka',
'Day White Fluorescent' => 'Denní zářivka',
'Daylight' => 'Denní světlo',
'Daylight Fluorescent' => 'Denní světlo',
'Fine Weather' => 'Slunečno',
'Flash' => 'Blesk',
'Fluorescent' => 'Žárovka',
'ISO Studio Tungsten' => 'Studiová světla',
'Other' => 'jiné osvětlení',
'Shade' => 'Stíny',
'Standard Light A' => 'Standardní světlo A',
'Standard Light B' => 'Standardní světlo B',
'Standard Light C' => 'Standardní světlo C',
'Tungsten' => 'Zářivka',
'Unknown' => 'Neznámý',
'White Fluorescent' => 'Bílá zářivka',
},
},
'LoopStyle' => {
PrintConv => {
'Normal' => 'Normální',
},
},
'LuminanceNoiseReduction' => {
PrintConv => {
'Low' => 'Méně',
},
},
'MIEVersion' => 'MIE verze',
'Macro' => {
PrintConv => {
'Macro' => 'Makro',
'Manual' => 'Manuální',
'Normal' => 'Normální',
},
},
'MacroMode' => {
PrintConv => {
'Macro' => 'Makro',
'Normal' => 'Normální',
},
},
'Make' => 'Výrobce',
'ManualFlashOutput' => {
PrintConv => {
'Low' => 'Méně',
},
},
'MaxApertureValue' => 'Max clona objektivu',
'MaxSampleValue' => 'Max. hodnota vzorku',
'Metering' => {
PrintConv => {
'Spot' => 'Středový bod',
},
},
'MeteringMode' => {
Description => 'Režim měření expozice',
PrintConv => {
'Average' => 'Průměr',
'Center-weighted average' => 'Zvýrazněný střed',
'Multi-segment' => 'Multi segment',
'Other' => 'Jiné',
'Partial' => 'Blokové',
'Spot' => 'Středový bod',
'Unknown' => 'Neznámý',
},
},
'MeteringMode2' => {
PrintConv => {
'Multi-segment' => 'Multi segment',
},
},
'MeteringMode3' => {
PrintConv => {
'Multi-segment' => 'Multi segment',
},
},
'MinSampleValue' => 'Min. hodnota vzorku',
'MinoltaQuality' => {
PrintConv => {
'Normal' => 'Normální',
},
},
'Model' => 'Typ fotoaparátu',
'Model2' => 'Typ zařízení (2)',
'ModifiedPictureStyle' => {
PrintConv => {
'Landscape' => 'Krajina',
'None' => 'Žádná',
'Portrait' => 'Portrét',
},
},
'ModifiedSharpnessFreq' => {
PrintConv => {
'Low' => 'Méně',
},
},
'ModifiedToneCurve' => {
PrintConv => {
'Manual' => 'Manuální',
},
},
'ModifiedWhiteBalance' => {
PrintConv => {
'Cloudy' => 'Zataženo',
'Daylight' => 'Denní světlo',
'Daylight Fluorescent' => 'Denní světlo',
'Flash' => 'Blesk',
'Fluorescent' => 'Žárovka',
'Shade' => 'Stíny',
'Tungsten' => 'Zářivka',
},
},
'ModifyDate' => 'Datum a čas změny souboru',
'MonochromeFilterEffect' => {
PrintConv => {
'None' => 'Žádná',
},
},
'MonochromeToningEffect' => {
PrintConv => {
'None' => 'Žádná',
},
},
'NEFCompression' => {
PrintConv => {
'Uncompressed' => 'bez komprese',
},
},
'Noise' => 'Šum',
'NoiseFilter' => {
PrintConv => {
'Low' => 'Méně',
},
},
'NoiseReduction' => {
PrintConv => {
'Normal' => 'Normální',
},
},
'NumberofInks' => 'Číslo náplně',
'ObjectFileType' => {
PrintConv => {
'None' => 'Žádná',
'Unknown' => 'Neznámý',
},
},
'OldSubfileType' => 'Typ podsekce',
'Opto-ElectricConvFactor' => 'Optoel. konverzní faktor (OECF)',
'Orientation' => {
Description => 'Orientace',
PrintConv => {
'Horizontal (normal)' => '0° (nahoru/vlevo)',
'Mirror horizontal' => '0° (nahoru/vpravo)',
'Mirror horizontal and rotate 270 CW' => '90° po směru HR (vlevo/nahoru)',
'Mirror horizontal and rotate 90 CW' => '90° ptoti směru HR (vpravo/dolů)',
'Mirror vertical' => '180° (dolů/vlevo)',
'Rotate 180' => '180° (dolů/vpravo)',
'Rotate 270 CW' => '90° po směru HR (vlevo/dolů)',
'Rotate 90 CW' => '90° ptoti směru HR (vpravo/nahoru)',
},
},
'PEFVersion' => 'PEF verze',
'Padding' => 'Náhradní znaky',
'PageName' => 'Jméno stránky',
'PageNumber' => 'Číslo stránky',
'PhotoEffectsType' => {
PrintConv => {
'None' => 'Žádná',
},
},
'PhotometricInterpretation' => {
Description => 'Pixelové schéma',
PrintConv => {
'BlackIsZero' => 'Černá je nula',
'Color Filter Array' => 'CFA (Color Filter Matrix)',
'Pixar LogL' => 'CIE Log2(L) (Log luminance)',
'Pixar LogLuv' => 'CIE Log2(L)(u\',v\') (Log luminance and chrominance)',
'RGB Palette' => 'Barevné schema',
'Transparency Mask' => 'Průhlednost',
'WhiteIsZero' => 'Bílá je nula',
},
},
'PictureFinish' => {
PrintConv => {
'Portrait' => 'Portrét',
},
},
'PictureMode' => {
PrintConv => {
'Aperture-priority AE' => 'Priorita clony',
'Landscape' => 'Krajina',
'Macro' => 'Makro',
'Manual' => 'Manuální',
'Portrait' => 'Portrét',
'Shutter speed priority AE' => 'Priorita času',
},
},
'PictureMode2' => {
PrintConv => {
'Aperture Priority' => 'Priorita clony',
'Manual' => 'Manuální',
'Shutter Speed Priority' => 'Priorita času',
},
},
'PictureStyle' => {
PrintConv => {
'Landscape' => 'Krajina',
'None' => 'Žádná',
'Portrait' => 'Portrét',
},
},
'PixelUnits' => {
PrintConv => {
'Unknown' => 'Neznámý',
},
},
'PlanarConfiguration' => {
Description => 'Uspořádání obrazových dat',
PrintConv => {
'Chunky' => 'Chunky Format (prokládaný)',
'Planar' => 'Planární (dvojrozměrný)',
},
},
'Predictor' => {
Description => 'Predikce',
PrintConv => {
'Horizontal differencing' => 'Horizontální diferenciace',
'None' => 'Bez predikce',
},
},
'PreviewColorSpace' => {
PrintConv => {
'Unknown' => 'Neznámý',
},
},
'PreviewImage' => 'Náhled',
'PreviewQuality' => {
PrintConv => {
'Normal' => 'Normální',
},
},
'PrimaryChromaticities' => 'Chromatičnost primárních barev',
'ProgramLine' => {
PrintConv => {
'Normal' => 'Normální',
},
},
'ProgramMode' => {
PrintConv => {
'None' => 'Žádná',
'Portrait' => 'Portrét',
},
},
'Quality' => {
PrintConv => {
'Low' => 'Méně',
'Normal' => 'Normální',
},
},
'QualityMode' => {
PrintConv => {
'Normal' => 'Normální',
},
},
'RAFVersion' => 'RAF verze',
'Rating' => 'Hodnocení',
'RatingPercent' => 'Hodnocení v procentech',
'RawJpgQuality' => {
PrintConv => {
'Normal' => 'Normální',
},
},
'RecordMode' => {
PrintConv => {
'Aperture Priority' => 'Priorita clony',
'Manual' => 'Manuální',
'Shutter Priority' => 'Priorita času',
},
},
'RecordingMode' => {
PrintConv => {
'Landscape' => 'Krajina',
'Manual' => 'Manuální',
'Portrait' => 'Portrét',
},
},
'ReferenceBlackWhite' => 'Černý a bílý referenční bod',
'RelatedImageFileFormat' => 'Obrazový formát',
'RelatedImageHeight' => 'Výška obrázku',
'RelatedImageWidth' => 'Šířka obrázku',
'RelatedSoundFile' => 'Audio soubor',
'ResampleParamsQuality' => {
PrintConv => {
'Low' => 'Méně',
},
},
'ResolutionUnit' => {
Description => 'Jednotka X a Y rozlišení',
PrintConv => {
'None' => 'Žádná',
'inches' => 'palce',
},
},
'Rotation' => {
PrintConv => {
'Horizontal' => '0° (nahoru/vlevo)',
'Horizontal (Normal)' => '0° (nahoru/vlevo)',
'Horizontal (normal)' => '0° (nahoru/vlevo)',
'Rotate 180' => '180° (dolů/vpravo)',
'Rotate 270 CW' => '90° po směru HR (vlevo/dolů)',
'Rotate 90 CW' => '90° ptoti směru HR (vpravo/nahoru)',
'Rotated 180' => '180° (dolů/vpravo)',
'Rotated 270 CW' => '90° po směru HR (vlevo/dolů)',
'Rotated 90 CW' => '90° ptoti směru HR (vpravo/nahoru)',
},
},
'RowsPerStrip' => 'Počet řádek v části',
'SPIFFVersion' => 'SPIFF verze',
'SVGVersion' => 'SVG verze',
'SampleFormat' => 'Formát vzorku',
'SamplesPerPixel' => 'Počet složek',
'Saturation' => {
Description => 'Saturace',
PrintConv => {
'High' => 'Vysoká',
'Low' => 'Nízká',
'Normal' => 'Normální',
},
},
'SceneCaptureType' => {
Description => 'Typ scény',
PrintConv => {
'Landscape' => 'Krajina',
'Night' => 'Noční foto',
'Portrait' => 'Portrét',
},
},
'SceneMode' => {
PrintConv => {
'Aperture Priority' => 'Priorita clony',
'Landscape' => 'Krajina',
'Macro' => 'Makro',
'Manual' => 'Manuální',
'Normal' => 'Normální',
'Portrait' => 'Portrét',
'Shutter Priority' => 'Priorita času',
'Spot' => 'Středový bod',
},
},
'SceneModeUsed' => {
PrintConv => {
'Aperture Priority' => 'Priorita clony',
'Landscape' => 'Krajina',
'Macro' => 'Makro',
'Manual' => 'Manuální',
'Portrait' => 'Portrét',
'Shutter Priority' => 'Priorita času',
},
},
'SceneSelect' => {
PrintConv => {
'Night' => 'Noční foto',
},
},
'SceneType' => {
Description => 'Typ scény',
PrintConv => {
'Directly photographed' => 'Přímo pořízený snímek',
},
},
'SecurityClassification' => {
Description => 'Bezpečnostní klasifikace',
PrintConv => {
'Confidential' => 'Důvěrný',
'Restricted' => 'Vyhrazený',
'Secret' => 'Tajný',
'Top Secret' => 'Velmi tajný',
'Unclassified' => 'Neurčeno',
},
},
'SelfTimerMode' => 'Samospoušť',
'SensingMethod' => {
Description => 'Metoda měření',
PrintConv => {
'Color sequential area' => 'Barevný sekvenční plošný sensor',
'Color sequential linear' => 'Barevný sekvenčné-lineární senzor',
'Monochrome area' => 'Monochromatický senzor',
'Monochrome linear' => 'Monochromatický lineární senzor',
'Not defined' => 'Nedefinovaný',
'One-chip color area' => 'Jednočipový barevný senzor',
'Three-chip color area' => 'Tříčipový barevný senzor',
'Trilinear' => 'Třílineární senzor',
'Two-chip color area' => 'Dvoučipový barevný senzor',
},
},
'SequentialShot' => {
PrintConv => {
'None' => 'Žádná',
},
},
'SetButtonCrossKeysFunc' => {
PrintConv => {
'Normal' => 'Normální',
},
},
'Sharpness' => {
Description => 'Doostření',
PrintConv => {
'Hard' => 'Silné',
'Normal' => 'Normální',
'Soft' => 'Lehké',
},
},
'SharpnessFrequency' => {
PrintConv => {
'Low' => 'Méně',
},
},
'ShootingMode' => {
PrintConv => {
'Aperture Priority' => 'Priorita clony',
'Macro' => 'Makro',
'Manual' => 'Manuální',
'Normal' => 'Normální',
'Portrait' => 'Portrét',
'Shutter Priority' => 'Priorita času',
'Spot' => 'Středový bod',
},
},
'ShutterMode' => {
PrintConv => {
'Aperture Priority' => 'Priorita clony',
},
},
'ShutterSpeed' => 'Expoziční čas',
'ShutterSpeedValue' => 'Čas závěrky',
'SlowShutter' => {
PrintConv => {
'None' => 'Žádná',
},
},
'SpatialFrequencyResponse' => 'Spatial frequency response',
'SpecialEffectsOpticalFilter' => {
PrintConv => {
'None' => 'Žádná',
},
},
'SpectralSensitivity' => 'Spektrální citlivost',
'StripByteCounts' => 'Bytů na komprimovanou část',
'StripOffsets' => 'Pozice obrazových dat',
'SubSecCreateDate' => 'Datum a čas generování digitálních dat',
'SubSecDateTimeOriginal' => 'Datum a čas vzniku originálních dat',
'SubSecModifyDate' => 'Datum a čas změny souboru',
'SubSecTime' => 'DateTime 1/100 sekundy',
'SubSecTimeDigitized' => 'DateTimeDigitized 1/100 sekund',
'SubSecTimeOriginal' => 'DateTimeOriginal 1/100 sekund',
'SubfileType' => 'Nový typ podsekce',
'SubjectDistance' => 'Vzdálenost objektu',
'SubjectDistanceRange' => {
Description => 'Rozsah vzdálenosti objektu',
PrintConv => {
'Close' => 'Blízký',
'Distant' => 'Vzdálený',
'Macro' => 'Makro',
'Unknown' => 'Neznámý',
},
},
'SubjectLocation' => 'Pozice hlavního objektu',
'SubjectProgram' => {
PrintConv => {
'None' => 'Žádná',
'Portrait' => 'Portrét',
},
},
'Subsystem' => {
PrintConv => {
'Unknown' => 'Neznámý',
},
},
'T4Options' => 'Plné bity',
'T6Options' => 'Volby T6',
'TargetPrinter' => 'Cílová tiskárna',
'Teleconverter' => {
PrintConv => {
'None' => 'Žádná',
},
},
'Thresholding' => 'Práh',
'ThumbnailImage' => 'Náhled',
'TileByteCounts' => 'Počet bytů prvku',
'TileLength' => 'Délka prvku',
'TileOffsets' => 'Offset prvku',
'TileWidth' => 'Šířka prvku',
'TimeScaleParamsQuality' => {
PrintConv => {
'Low' => 'Méně',
},
},
'ToneCurve' => {
PrintConv => {
'Manual' => 'Manuální',
},
},
'ToningEffect' => {
PrintConv => {
'None' => 'Žádná',
},
},
'ToningEffectMonochrome' => {
PrintConv => {
'None' => 'Žádná',
},
},
'TransferFunction' => 'Transfer funkce',
'Transformation' => {
PrintConv => {
'Horizontal (normal)' => '0° (nahoru/vlevo)',
'Mirror horizontal' => '0° (nahoru/vpravo)',
'Mirror horizontal and rotate 270 CW' => '90° po směru HR (vlevo/nahoru)',
'Mirror horizontal and rotate 90 CW' => '90° ptoti směru HR (vpravo/dolů)',
'Mirror vertical' => '180° (dolů/vlevo)',
'Rotate 180' => '180° (dolů/vpravo)',
'Rotate 270 CW' => '90° po směru HR (vlevo/dolů)',
'Rotate 90 CW' => '90° ptoti směru HR (vpravo/nahoru)',
},
},
'Trapped' => {
PrintConv => {
'Unknown' => 'Neznámý',
},
},
'UserComment' => 'Komentář',
'UserDef1PictureStyle' => {
PrintConv => {
'Landscape' => 'Krajina',
'Portrait' => 'Portrét',
},
},
'UserDef2PictureStyle' => {
PrintConv => {
'Landscape' => 'Krajina',
'Portrait' => 'Portrét',
},
},
'UserDef3PictureStyle' => {
PrintConv => {
'Landscape' => 'Krajina',
'Portrait' => 'Portrét',
},
},
'VRDVersion' => 'VRD verze',
'Version' => 'Verze',
'VignetteControl' => {
PrintConv => {
'Normal' => 'Normální',
},
},
'WBAdjLighting' => {
PrintConv => {
'Daylight' => 'Denní světlo',
'Flash' => 'Blesk',
'None' => 'Žádná',
},
},
'WhiteBalance' => {
Description => 'Vyvážení bílé',
PrintConv => {
'Cloudy' => 'Zataženo',
'Day White Fluorescent' => 'Denní zářivka',
'Daylight' => 'Denní světlo',
'Daylight Fluorescent' => 'Denní světlo',
'DaylightFluorescent' => 'Denní světlo',
'Flash' => 'Blesk',
'Fluorescent' => 'Žárovka',
'Manual' => 'Manuální',
'Shade' => 'Stíny',
'Shadow' => 'Stíny',
'Tungsten' => 'Zářivka',
'Unknown' => 'Neznámý',
'White Fluorescent' => 'Bílá zářivka',
'WhiteFluorescent' => 'Bílá zářivka',
},
},
'WhiteBalanceAdj' => {
PrintConv => {
'Cloudy' => 'Zataženo',
'Daylight' => 'Denní světlo',
'Flash' => 'Blesk',
'Fluorescent' => 'Žárovka',
'Shade' => 'Stíny',
'Tungsten' => 'Zářivka',
},
},
'WhiteBalanceMode' => {
PrintConv => {
'Unknown' => 'Neznámý',
},
},
'WhiteBalanceSet' => {
PrintConv => {
'Cloudy' => 'Zataženo',
'Daylight' => 'Denní světlo',
'DaylightFluorescent' => 'Denní světlo',
'Flash' => 'Blesk',
'Manual' => 'Manuální',
'Shade' => 'Stíny',
'Tungsten' => 'Zářivka',
'WhiteFluorescent' => 'Bílá zářivka',
},
},
'WhitePoint' => 'Chromatičnost bílého bodu',
'XPAuthor' => 'Autor',
'XPComment' => 'Komentář',
'XPKeywords' => 'Klíčová slova',
'XPSubject' => 'Popis',
'XPTitle' => 'Název',
'XPosition' => 'X-pozice',
'XResolution' => 'Rozlišení obrázku na šířku',
'YCbCrCoefficients' => 'Koeficienty transformační YCbCr matrice',
'YCbCrPositioning' => {
Description => 'Y a C pozice',
PrintConv => {
'Centered' => 'Centrované',
'Co-sited' => 'Po stranách',
},
},
'YCbCrSubSampling' => 'Vzorkovací poměr Y k C',
'YPosition' => 'Y-pozice',
'YResolution' => 'Rozlišení obrázku na výšku',
);
1; # end
__END__
=head1 NAME
Image::ExifTool::Lang::cz.pm - ExifTool language-specific tag information
=head1 DESCRIPTION
This file is used by Image::ExifTool to generate localized tag descriptions
and values.
=head1 AUTHOR
Copyright 2003-2009, Phil Harvey (phil at owl.phy.queensu.ca)
This library is free software; you can redistribute it and/or modify it
under the same terms as Perl itself.
=head1 ACKNOWLEDGEMENTS
Thanks to Jens Duttke and Petr Michálek for providing this
translation.
=head1 SEE ALSO
L<Image::ExifTool(3pm)|Image::ExifTool>,
L<Image::ExifTool::TagInfoXML(3pm)|Image::ExifTool::TagInfoXML>
=cut
| opf-attic/ref | tools/fits/0.5.0/tools/exiftool/perl/lib/Image/ExifTool/Lang/cz.pm | Perl | apache-2.0 | 42,797 |
package #
Date::Manip::TZ::asthim00;
# NOTE: the 'package' keyword is split across two lines above — a standard
# idiom so PAUSE/CPAN indexers do not index this internal, generated module.
# Copyright (c) 2008-2014 Sullivan Beck. All rights reserved.
# This program is free software; you can redistribute it and/or modify it
# under the same terms as Perl itself.
# This file was automatically generated. Any changes to this file will
# be lost the next time 'tzdata' is run.
# Generated on: Fri Nov 21 10:41:41 EST 2014
# Data version: tzdata2014j
# Code version: tzcode2014j
# This module contains data from the zoneinfo time zone database. The original
# data was obtained from the URL:
# ftp://ftp.iana.org/tz
use strict;
use warnings;
require 5.010000;
our (%Dates,%LastRule);
# Release the package-level data tables at interpreter shutdown so global
# destruction stays clean (pattern shared by all generated TZ modules).
END {
undef %Dates;
undef %LastRule;
}
our ($VERSION);
$VERSION='6.48';
END { undef $VERSION; }
# %Dates: time-zone transition table, keyed by the year a transition occurs.
# Each entry in the per-year list is:
#   [ [UTC start datetime], [local start datetime], offset string,
#     [offset h,m,s], zone abbreviation, is-DST flag,
#     [UTC end datetime], [local end datetime],
#     then the same four datetimes as packed 'YYYYMMDDHH:MM:SS' strings ]
# Visible history: LMT +05:58:36 until 1947, IST +05:30:00 until 1987,
# then BTT +06:00:00 onward — presumably Asia/Thimphu, Bhutan (inferred
# from the module name 'asthim'; confirm against the tzdata source).
%Dates = (
1 =>
[
[ [1,1,2,0,0,0],[1,1,2,5,58,36],'+05:58:36',[5,58,36],
'LMT',0,[1947,8,14,18,1,23],[1947,8,14,23,59,59],
'0001010200:00:00','0001010205:58:36','1947081418:01:23','1947081423:59:59' ],
],
1947 =>
[
[ [1947,8,14,18,1,24],[1947,8,14,23,31,24],'+05:30:00',[5,30,0],
'IST',0,[1987,9,30,18,29,59],[1987,9,30,23,59,59],
'1947081418:01:24','1947081423:31:24','1987093018:29:59','1987093023:59:59' ],
],
1987 =>
[
[ [1987,9,30,18,30,0],[1987,10,1,0,30,0],'+06:00:00',[6,0,0],
'BTT',0,[9999,12,31,0,0,0],[9999,12,31,6,0,0],
'1987093018:30:00','1987100100:30:00','9999123100:00:00','9999123106:00:00' ],
],
);
# %LastRule: ongoing DST rules for computing dates past the table above;
# empty here because the zone's current offset is fixed.
%LastRule = (
);
1;
| nriley/Pester | Source/Manip/TZ/asthim00.pm | Perl | bsd-2-clause | 1,531 |
#!/usr/bin/perl
# Build and serialize the "conn" (connectivity) service configuration:
# a single Service object holding one TCP-port metric (web server, port 80),
# written to disk for the perfstat tooling to load later.
use strict;
use warnings;

require "/perfstat/build/serialize/create/ServiceConfig.pl";

# $perfhome is a package global expected to be set by ServiceConfig.pl;
# it is interpolated into the output path below.
our $perfhome;

# Create the new service container.
my $service = Service->new( serviceName => "conn" );

# Metric 0: port-80 reachability check for the web server.
my $metric = Metric->new(
    rrdIndex     => 0,
    metricName   => "http",
    friendlyName => "Web Server",
    status       => "nostatus",
    servicePort  => "80",
    hasEvents    => 1,
);
$service->addMetric($metric);

# Print a summary of this service.
# BUG FIX: the original wrote print("Ref: ref($service)\n"), which
# interpolated $service inside the literal text "ref(...)" instead of
# calling ref(); call ref() for real and print its result.
print "Ref: ", ref($service), "\n";
print "serviceName: ", $service->getServiceName(), "\n";
print "Last Update: ", $service->getLastUpdate(), "\n";

# Print every metric attached to the service.
my $metric_count = $service->getMetricArrayLength();
print "metric Array Length = $metric_count\n\n";
for my $i ( 0 .. $metric_count - 1 ) {
    # NOTE(review): reaches into the object's internals as the original did;
    # prefer a getMetric($i) accessor if ServiceConfig.pl provides one.
    my $m = $service->{metricArray}->[$i];
    print "metricName: ",   $m->getMetricName(),   "\n";
    print "friendlyName: ", $m->getFriendlyName(), "\n";
    print "status: ",       $m->getStatus(),       "\n";
    print "hasEvents: ",    $m->getHasEvents(),    "\n";
    print "servicePort: ",  $m->getServicePort(),  "\n";
}

# Serialize the service to disk.
# BUG FIX: the original die message interpolated "$service->conn.port.ser?"
# (object ref plus literal text); report the actual output path instead.
my $out_path = "$perfhome/etc/configs/SunOs/conn.port.ser";
$service->store($out_path)
    or die "can't store service to $out_path\n";
| ktenzer/perfstat | misc/serialize/create/SunOs/120304/conn.port.pl | Perl | apache-2.0 | 1,440 |
=pod
=head1 NAME
SSL_SESSION_get0_id_context,
SSL_SESSION_set1_id_context
- get and set the SSL ID context associated with a session
=head1 SYNOPSIS
#include <openssl/ssl.h>
const unsigned char *SSL_SESSION_get0_id_context(const SSL_SESSION *s,
unsigned int *len);
int SSL_SESSION_set1_id_context(SSL_SESSION *s, const unsigned char *sid_ctx,
unsigned int sid_ctx_len);
=head1 DESCRIPTION
See L<SSL_CTX_set_session_id_context(3)> for further details on session ID
contexts.
SSL_SESSION_get0_id_context() returns the ID context associated with
the SSL/TLS session B<s>. The length of the ID context is written to
B<*len> if B<len> is not NULL.
The value returned is a pointer to an object maintained within B<s> and
should not be released.
SSL_SESSION_set1_id_context() takes a copy of the provided ID context given in
B<sid_ctx> and associates it with the session B<s>. The length of the ID context
is given by B<sid_ctx_len> which must not exceed SSL_MAX_SID_CTX_LENGTH bytes.
=head1 RETURN VALUES
SSL_SESSION_set1_id_context() returns 1 on success or 0 on error.
=head1 SEE ALSO
L<ssl(7)>,
L<SSL_set_session_id_context(3)>
=head1 HISTORY
The SSL_SESSION_get0_id_context() function was added in OpenSSL 1.1.0.
=head1 COPYRIGHT
Copyright 2015-2020 The OpenSSL Project Authors. All Rights Reserved.
Licensed under the Apache License 2.0 (the "License"). You may not use
this file except in compliance with the License. You can obtain a copy
in the file LICENSE in the source distribution or at
L<https://www.openssl.org/source/license.html>.
=cut
| jens-maus/amissl | openssl/doc/man3/SSL_SESSION_get0_id_context.pod | Perl | bsd-3-clause | 1,653 |
#! /usr/bin/env perl
# Copyright 2012-2020 The OpenSSL Project Authors. All Rights Reserved.
#
# Licensed under the Apache License 2.0 (the "License"). You may not use
# this file except in compliance with the License. You can obtain a copy
# in the file LICENSE in the source distribution or at
# https://www.openssl.org/source/license.html
#
# ====================================================================
# Written by Andy Polyakov <appro@openssl.org> for the OpenSSL
# project. The module is, however, dual licensed under OpenSSL and
# CRYPTOGAMS licenses depending on where you obtain it. For further
# details see http://www.openssl.org/~appro/cryptogams/.
# ====================================================================
#
# [Endian-neutral] AES for C64x+.
#
# Even though SPLOOPs are scheduled for 13 cycles, and thus expected
# performance is ~8.5 cycles per byte processed with 128-bit key,
# measured performance turned to be ~10 cycles per byte. Discrepancy
# must be caused by limitations of L1D memory banking(*), see SPRU871
# TI publication for further details. If any consolation it's still
# ~20% faster than TI's linear assembly module anyway... Compared to
# aes_core.c compiled with cl6x 6.0 with -mv6400+ -o2 options this
# code is 3.75x faster and almost 3x smaller (tables included).
#
# (*) This means that there might be subtle correlation between data
# and timing and one can wonder if it can be ... attacked:-(
# On the other hand this also means that *if* one chooses to
# implement *4* T-tables variant [instead of 1 T-table as in
# this implementation, or in addition to], then one ought to
# *interleave* them. Even though it complicates addressing,
# references to interleaved tables would be guaranteed not to
# clash. I reckon that it should be possible to break 8 cycles
# per byte "barrier," i.e. improve by ~20%, naturally at the
# cost of 8x increased pressure on L1D. 8x because you'd have
# to interleave both Te and Td tables...
# The output file (if any) is the last command-line argument; the generated
# assembly is printed to STDOUT, redirected into that file when given.
# NOTE(review): two-arg open on a build-supplied filename is the long-standing
# cryptogams idiom; the path is not untrusted input here.
$output = pop and open STDOUT,">$output";
# C64x+ register assignments used by the assembly template below.
($TEA,$TEB)=("A5","B5");      # base pointers to the Te/Td lookup table (A- and B-side)
($KPA,$KPB)=("A3","B1");      # key-schedule pointers, advanced by 2 words per load
@K=("A6","B6","A7","B7");     # current round-key words
@s=("A8","B8","A9","B9");     # AES state words s0..s3
# T-table lookup temporaries; the encrypt (Te*) and decrypt (Td*) paths
# reuse the same physical registers.
@Te0=@Td0=("A16","B16","A17","B17");
@Te1=@Td1=("A18","B18","A19","B19");
@Te2=@Td2=("A20","B20","A21","B21");
@Te3=@Td3=("A22","B22","A23","B23");
$code=<<___;
.text
.if .ASSEMBLER_VERSION<7000000
.asg 0,__TI_EABI__
.endif
.if __TI_EABI__
.nocmp
.asg AES_encrypt,_AES_encrypt
.asg AES_decrypt,_AES_decrypt
.asg AES_set_encrypt_key,_AES_set_encrypt_key
.asg AES_set_decrypt_key,_AES_set_decrypt_key
.asg AES_ctr32_encrypt,_AES_ctr32_encrypt
.endif
.asg B3,RA
.asg A4,INP
.asg B4,OUT
.asg A6,KEY
.asg A4,RET
.asg B15,SP
.eval 24,EXT0
.eval 16,EXT1
.eval 8,EXT2
.eval 0,EXT3
.eval 8,TBL1
.eval 16,TBL2
.eval 24,TBL3
.if .BIG_ENDIAN
.eval 24-EXT0,EXT0
.eval 24-EXT1,EXT1
.eval 24-EXT2,EXT2
.eval 24-EXT3,EXT3
.eval 32-TBL1,TBL1
.eval 32-TBL2,TBL2
.eval 32-TBL3,TBL3
.endif
.global _AES_encrypt
_AES_encrypt:
.asmfunc
MVK 1,B2
__encrypt:
.if __TI_EABI__
[B2] LDNDW *INP++,A9:A8 ; load input
|| MVKL \$PCR_OFFSET(AES_Te,__encrypt),$TEA
|| ADDKPC __encrypt,B0
[B2] LDNDW *INP++,B9:B8
|| MVKH \$PCR_OFFSET(AES_Te,__encrypt),$TEA
|| ADD 0,KEY,$KPA
|| ADD 4,KEY,$KPB
.else
[B2] LDNDW *INP++,A9:A8 ; load input
|| MVKL (AES_Te-__encrypt),$TEA
|| ADDKPC __encrypt,B0
[B2] LDNDW *INP++,B9:B8
|| MVKH (AES_Te-__encrypt),$TEA
|| ADD 0,KEY,$KPA
|| ADD 4,KEY,$KPB
.endif
LDW *$KPA++[2],$Te0[0] ; zero round key
|| LDW *$KPB++[2],$Te0[1]
|| MVK 60,A0
|| ADD B0,$TEA,$TEA ; AES_Te
LDW *KEY[A0],B0 ; rounds
|| MVK 1024,A0 ; sizeof(AES_Te)
LDW *$KPA++[2],$Te0[2]
|| LDW *$KPB++[2],$Te0[3]
|| MV $TEA,$TEB
NOP
.if .BIG_ENDIAN
MV A9,$s[0]
|| MV A8,$s[1]
|| MV B9,$s[2]
|| MV B8,$s[3]
.else
MV A8,$s[0]
|| MV A9,$s[1]
|| MV B8,$s[2]
|| MV B9,$s[3]
.endif
XOR $Te0[0],$s[0],$s[0]
|| XOR $Te0[1],$s[1],$s[1]
|| LDW *$KPA++[2],$K[0] ; 1st round key
|| LDW *$KPB++[2],$K[1]
SUB B0,2,B0
SPLOOPD 13
|| MVC B0,ILC
|| LDW *$KPA++[2],$K[2]
|| LDW *$KPB++[2],$K[3]
;;====================================================================
EXTU $s[1],EXT1,24,$Te1[1]
|| EXTU $s[0],EXT3,24,$Te3[0]
LDW *${TEB}[$Te1[1]],$Te1[1] ; Te1[s1>>8], t0
|| LDW *${TEA}[$Te3[0]],$Te3[0] ; Te3[s0>>24], t1
|| XOR $s[2],$Te0[2],$s[2] ; modulo-scheduled
|| XOR $s[3],$Te0[3],$s[3] ; modulo-scheduled
|| EXTU $s[1],EXT3,24,$Te3[1]
|| EXTU $s[0],EXT1,24,$Te1[0]
LDW *${TEB}[$Te3[1]],$Te3[1] ; Te3[s1>>24], t2
|| LDW *${TEA}[$Te1[0]],$Te1[0] ; Te1[s0>>8], t3
|| EXTU $s[2],EXT2,24,$Te2[2]
|| EXTU $s[3],EXT2,24,$Te2[3]
LDW *${TEA}[$Te2[2]],$Te2[2] ; Te2[s2>>16], t0
|| LDW *${TEB}[$Te2[3]],$Te2[3] ; Te2[s3>>16], t1
|| EXTU $s[3],EXT3,24,$Te3[3]
|| EXTU $s[2],EXT1,24,$Te1[2]
LDW *${TEB}[$Te3[3]],$Te3[3] ; Te3[s3>>24], t0
|| LDW *${TEA}[$Te1[2]],$Te1[2] ; Te1[s2>>8], t1
|| EXTU $s[0],EXT2,24,$Te2[0]
|| EXTU $s[1],EXT2,24,$Te2[1]
LDW *${TEA}[$Te2[0]],$Te2[0] ; Te2[s0>>16], t2
|| LDW *${TEB}[$Te2[1]],$Te2[1] ; Te2[s1>>16], t3
|| EXTU $s[3],EXT1,24,$Te1[3]
|| EXTU $s[2],EXT3,24,$Te3[2]
LDW *${TEB}[$Te1[3]],$Te1[3] ; Te1[s3>>8], t2
|| LDW *${TEA}[$Te3[2]],$Te3[2] ; Te3[s2>>24], t3
|| ROTL $Te1[1],TBL1,$Te3[0] ; t0
|| ROTL $Te3[0],TBL3,$Te1[1] ; t1
|| EXTU $s[0],EXT0,24,$Te0[0]
|| EXTU $s[1],EXT0,24,$Te0[1]
LDW *${TEA}[$Te0[0]],$Te0[0] ; Te0[s0], t0
|| LDW *${TEB}[$Te0[1]],$Te0[1] ; Te0[s1], t1
|| ROTL $Te3[1],TBL3,$Te1[0] ; t2
|| ROTL $Te1[0],TBL1,$Te3[1] ; t3
|| EXTU $s[2],EXT0,24,$Te0[2]
|| EXTU $s[3],EXT0,24,$Te0[3]
LDW *${TEA}[$Te0[2]],$Te0[2] ; Te0[s2], t2
|| LDW *${TEB}[$Te0[3]],$Te0[3] ; Te0[s3], t3
|| ROTL $Te2[2],TBL2,$Te2[2] ; t0
|| ROTL $Te2[3],TBL2,$Te2[3] ; t1
|| XOR $K[0],$Te3[0],$s[0]
|| XOR $K[1],$Te1[1],$s[1]
ROTL $Te3[3],TBL3,$Te1[2] ; t0
|| ROTL $Te1[2],TBL1,$Te3[3] ; t1
|| XOR $K[2],$Te1[0],$s[2]
|| XOR $K[3],$Te3[1],$s[3]
|| LDW *$KPA++[2],$K[0] ; next round key
|| LDW *$KPB++[2],$K[1]
ROTL $Te2[0],TBL2,$Te2[0] ; t2
|| ROTL $Te2[1],TBL2,$Te2[1] ; t3
|| XOR $s[0],$Te2[2],$s[0]
|| XOR $s[1],$Te2[3],$s[1]
|| LDW *$KPA++[2],$K[2]
|| LDW *$KPB++[2],$K[3]
ROTL $Te1[3],TBL1,$Te3[2] ; t2
|| ROTL $Te3[2],TBL3,$Te1[3] ; t3
|| XOR $s[0],$Te1[2],$s[0]
|| XOR $s[1],$Te3[3],$s[1]
XOR $s[2],$Te2[0],$s[2]
|| XOR $s[3],$Te2[1],$s[3]
|| XOR $s[0],$Te0[0],$s[0]
|| XOR $s[1],$Te0[1],$s[1]
SPKERNEL
|| XOR.L $s[2],$Te3[2],$s[2]
|| XOR.L $s[3],$Te1[3],$s[3]
;;====================================================================
ADD.D ${TEA},A0,${TEA} ; point to Te4
|| ADD.D ${TEB},A0,${TEB}
|| EXTU $s[1],EXT1,24,$Te1[1]
|| EXTU $s[0],EXT3,24,$Te3[0]
LDBU *${TEB}[$Te1[1]],$Te1[1] ; Te1[s1>>8], t0
|| LDBU *${TEA}[$Te3[0]],$Te3[0] ; Te3[s0>>24], t1
|| XOR $s[2],$Te0[2],$s[2] ; modulo-scheduled
|| XOR $s[3],$Te0[3],$s[3] ; modulo-scheduled
|| EXTU $s[0],EXT0,24,$Te0[0]
|| EXTU $s[1],EXT0,24,$Te0[1]
LDBU *${TEA}[$Te0[0]],$Te0[0] ; Te0[s0], t0
|| LDBU *${TEB}[$Te0[1]],$Te0[1] ; Te0[s1], t1
|| EXTU $s[3],EXT3,24,$Te3[3]
|| EXTU $s[2],EXT1,24,$Te1[2]
LDBU *${TEB}[$Te3[3]],$Te3[3] ; Te3[s3>>24], t0
|| LDBU *${TEA}[$Te1[2]],$Te1[2] ; Te1[s2>>8], t1
|| EXTU $s[2],EXT2,24,$Te2[2]
|| EXTU $s[3],EXT2,24,$Te2[3]
LDBU *${TEA}[$Te2[2]],$Te2[2] ; Te2[s2>>16], t0
|| LDBU *${TEB}[$Te2[3]],$Te2[3] ; Te2[s3>>16], t1
|| EXTU $s[1],EXT3,24,$Te3[1]
|| EXTU $s[0],EXT1,24,$Te1[0]
LDBU *${TEB}[$Te3[1]],$Te3[1] ; Te3[s1>>24], t2
|| LDBU *${TEA}[$Te1[0]],$Te1[0] ; Te1[s0>>8], t3
|| EXTU $s[3],EXT1,24,$Te1[3]
|| EXTU $s[2],EXT3,24,$Te3[2]
LDBU *${TEB}[$Te1[3]],$Te1[3] ; Te1[s3>>8], t2
|| LDBU *${TEA}[$Te3[2]],$Te3[2] ; Te3[s2>>24], t3
|| EXTU $s[2],EXT0,24,$Te0[2]
|| EXTU $s[3],EXT0,24,$Te0[3]
LDBU *${TEA}[$Te0[2]],$Te0[2] ; Te0[s2], t2
|| LDBU *${TEB}[$Te0[3]],$Te0[3] ; Te0[s3], t3
|| EXTU $s[0],EXT2,24,$Te2[0]
|| EXTU $s[1],EXT2,24,$Te2[1]
LDBU *${TEA}[$Te2[0]],$Te2[0] ; Te2[s0>>16], t2
|| LDBU *${TEB}[$Te2[1]],$Te2[1] ; Te2[s1>>16], t3
.if .BIG_ENDIAN
PACK2 $Te0[0],$Te1[1],$Te0[0]
|| PACK2 $Te0[1],$Te1[2],$Te0[1]
PACK2 $Te2[2],$Te3[3],$Te2[2]
|| PACK2 $Te2[3],$Te3[0],$Te2[3]
PACKL4 $Te0[0],$Te2[2],$Te0[0]
|| PACKL4 $Te0[1],$Te2[3],$Te0[1]
XOR $K[0],$Te0[0],$Te0[0] ; s[0]
|| XOR $K[1],$Te0[1],$Te0[1] ; s[1]
PACK2 $Te0[2],$Te1[3],$Te0[2]
|| PACK2 $Te0[3],$Te1[0],$Te0[3]
PACK2 $Te2[0],$Te3[1],$Te2[0]
|| PACK2 $Te2[1],$Te3[2],$Te2[1]
|| BNOP RA
PACKL4 $Te0[2],$Te2[0],$Te0[2]
|| PACKL4 $Te0[3],$Te2[1],$Te0[3]
XOR $K[2],$Te0[2],$Te0[2] ; s[2]
|| XOR $K[3],$Te0[3],$Te0[3] ; s[3]
MV $Te0[0],A9
|| MV $Te0[1],A8
MV $Te0[2],B9
|| MV $Te0[3],B8
|| [B2] STNDW A9:A8,*OUT++
[B2] STNDW B9:B8,*OUT++
.else
PACK2 $Te1[1],$Te0[0],$Te1[1]
|| PACK2 $Te1[2],$Te0[1],$Te1[2]
PACK2 $Te3[3],$Te2[2],$Te3[3]
|| PACK2 $Te3[0],$Te2[3],$Te3[0]
PACKL4 $Te3[3],$Te1[1],$Te1[1]
|| PACKL4 $Te3[0],$Te1[2],$Te1[2]
XOR $K[0],$Te1[1],$Te1[1] ; s[0]
|| XOR $K[1],$Te1[2],$Te1[2] ; s[1]
PACK2 $Te1[3],$Te0[2],$Te1[3]
|| PACK2 $Te1[0],$Te0[3],$Te1[0]
PACK2 $Te3[1],$Te2[0],$Te3[1]
|| PACK2 $Te3[2],$Te2[1],$Te3[2]
|| BNOP RA
PACKL4 $Te3[1],$Te1[3],$Te1[3]
|| PACKL4 $Te3[2],$Te1[0],$Te1[0]
XOR $K[2],$Te1[3],$Te1[3] ; s[2]
|| XOR $K[3],$Te1[0],$Te1[0] ; s[3]
MV $Te1[1],A8
|| MV $Te1[2],A9
MV $Te1[3],B8
|| MV $Te1[0],B9
|| [B2] STNDW A9:A8,*OUT++
[B2] STNDW B9:B8,*OUT++
.endif
.endasmfunc
.global _AES_decrypt
_AES_decrypt:
.asmfunc
MVK 1,B2
__decrypt:
.if __TI_EABI__
[B2] LDNDW *INP++,A9:A8 ; load input
|| MVKL \$PCR_OFFSET(AES_Td,__decrypt),$TEA
|| ADDKPC __decrypt,B0
[B2] LDNDW *INP++,B9:B8
|| MVKH \$PCR_OFFSET(AES_Td,__decrypt),$TEA
|| ADD 0,KEY,$KPA
|| ADD 4,KEY,$KPB
.else
[B2] LDNDW *INP++,A9:A8 ; load input
|| MVKL (AES_Td-__decrypt),$TEA
|| ADDKPC __decrypt,B0
[B2] LDNDW *INP++,B9:B8
|| MVKH (AES_Td-__decrypt),$TEA
|| ADD 0,KEY,$KPA
|| ADD 4,KEY,$KPB
.endif
LDW *$KPA++[2],$Td0[0] ; zero round key
|| LDW *$KPB++[2],$Td0[1]
|| MVK 60,A0
|| ADD B0,$TEA,$TEA ; AES_Td
LDW *KEY[A0],B0 ; rounds
|| MVK 1024,A0 ; sizeof(AES_Td)
LDW *$KPA++[2],$Td0[2]
|| LDW *$KPB++[2],$Td0[3]
|| MV $TEA,$TEB
NOP
.if .BIG_ENDIAN
MV A9,$s[0]
|| MV A8,$s[1]
|| MV B9,$s[2]
|| MV B8,$s[3]
.else
MV A8,$s[0]
|| MV A9,$s[1]
|| MV B8,$s[2]
|| MV B9,$s[3]
.endif
XOR $Td0[0],$s[0],$s[0]
|| XOR $Td0[1],$s[1],$s[1]
|| LDW *$KPA++[2],$K[0] ; 1st round key
|| LDW *$KPB++[2],$K[1]
SUB B0,2,B0
SPLOOPD 13
|| MVC B0,ILC
|| LDW *$KPA++[2],$K[2]
|| LDW *$KPB++[2],$K[3]
;;====================================================================
EXTU $s[1],EXT3,24,$Td3[1]
|| EXTU $s[0],EXT1,24,$Td1[0]
LDW *${TEB}[$Td3[1]],$Td3[1] ; Td3[s1>>24], t0
|| LDW *${TEA}[$Td1[0]],$Td1[0] ; Td1[s0>>8], t1
|| XOR $s[2],$Td0[2],$s[2] ; modulo-scheduled
|| XOR $s[3],$Td0[3],$s[3] ; modulo-scheduled
|| EXTU $s[1],EXT1,24,$Td1[1]
|| EXTU $s[0],EXT3,24,$Td3[0]
LDW *${TEB}[$Td1[1]],$Td1[1] ; Td1[s1>>8], t2
|| LDW *${TEA}[$Td3[0]],$Td3[0] ; Td3[s0>>24], t3
|| EXTU $s[2],EXT2,24,$Td2[2]
|| EXTU $s[3],EXT2,24,$Td2[3]
LDW *${TEA}[$Td2[2]],$Td2[2] ; Td2[s2>>16], t0
|| LDW *${TEB}[$Td2[3]],$Td2[3] ; Td2[s3>>16], t1
|| EXTU $s[3],EXT1,24,$Td1[3]
|| EXTU $s[2],EXT3,24,$Td3[2]
LDW *${TEB}[$Td1[3]],$Td1[3] ; Td1[s3>>8], t0
|| LDW *${TEA}[$Td3[2]],$Td3[2] ; Td3[s2>>24], t1
|| EXTU $s[0],EXT2,24,$Td2[0]
|| EXTU $s[1],EXT2,24,$Td2[1]
LDW *${TEA}[$Td2[0]],$Td2[0] ; Td2[s0>>16], t2
|| LDW *${TEB}[$Td2[1]],$Td2[1] ; Td2[s1>>16], t3
|| EXTU $s[3],EXT3,24,$Td3[3]
|| EXTU $s[2],EXT1,24,$Td1[2]
LDW *${TEB}[$Td3[3]],$Td3[3] ; Td3[s3>>24], t2
|| LDW *${TEA}[$Td1[2]],$Td1[2] ; Td1[s2>>8], t3
|| ROTL $Td3[1],TBL3,$Td1[0] ; t0
|| ROTL $Td1[0],TBL1,$Td3[1] ; t1
|| EXTU $s[0],EXT0,24,$Td0[0]
|| EXTU $s[1],EXT0,24,$Td0[1]
LDW *${TEA}[$Td0[0]],$Td0[0] ; Td0[s0], t0
|| LDW *${TEB}[$Td0[1]],$Td0[1] ; Td0[s1], t1
|| ROTL $Td1[1],TBL1,$Td3[0] ; t2
|| ROTL $Td3[0],TBL3,$Td1[1] ; t3
|| EXTU $s[2],EXT0,24,$Td0[2]
|| EXTU $s[3],EXT0,24,$Td0[3]
LDW *${TEA}[$Td0[2]],$Td0[2] ; Td0[s2], t2
|| LDW *${TEB}[$Td0[3]],$Td0[3] ; Td0[s3], t3
|| ROTL $Td2[2],TBL2,$Td2[2] ; t0
|| ROTL $Td2[3],TBL2,$Td2[3] ; t1
|| XOR $K[0],$Td1[0],$s[0]
|| XOR $K[1],$Td3[1],$s[1]
ROTL $Td1[3],TBL1,$Td3[2] ; t0
|| ROTL $Td3[2],TBL3,$Td1[3] ; t1
|| XOR $K[2],$Td3[0],$s[2]
|| XOR $K[3],$Td1[1],$s[3]
|| LDW *$KPA++[2],$K[0] ; next round key
|| LDW *$KPB++[2],$K[1]
ROTL $Td2[0],TBL2,$Td2[0] ; t2
|| ROTL $Td2[1],TBL2,$Td2[1] ; t3
|| XOR $s[0],$Td2[2],$s[0]
|| XOR $s[1],$Td2[3],$s[1]
|| LDW *$KPA++[2],$K[2]
|| LDW *$KPB++[2],$K[3]
ROTL $Td3[3],TBL3,$Td1[2] ; t2
|| ROTL $Td1[2],TBL1,$Td3[3] ; t3
|| XOR $s[0],$Td3[2],$s[0]
|| XOR $s[1],$Td1[3],$s[1]
XOR $s[2],$Td2[0],$s[2]
|| XOR $s[3],$Td2[1],$s[3]
|| XOR $s[0],$Td0[0],$s[0]
|| XOR $s[1],$Td0[1],$s[1]
SPKERNEL
|| XOR.L $s[2],$Td1[2],$s[2]
|| XOR.L $s[3],$Td3[3],$s[3]
;;====================================================================
ADD.D ${TEA},A0,${TEA} ; point to Td4
|| ADD.D ${TEB},A0,${TEB}
|| EXTU $s[1],EXT3,24,$Td3[1]
|| EXTU $s[0],EXT1,24,$Td1[0]
LDBU *${TEB}[$Td3[1]],$Td3[1] ; Td3[s1>>24], t0
|| LDBU *${TEA}[$Td1[0]],$Td1[0] ; Td1[s0>>8], t1
|| XOR $s[2],$Td0[2],$s[2] ; modulo-scheduled
|| XOR $s[3],$Td0[3],$s[3] ; modulo-scheduled
|| EXTU $s[0],EXT0,24,$Td0[0]
|| EXTU $s[1],EXT0,24,$Td0[1]
LDBU *${TEA}[$Td0[0]],$Td0[0] ; Td0[s0], t0
|| LDBU *${TEB}[$Td0[1]],$Td0[1] ; Td0[s1], t1
|| EXTU $s[2],EXT2,24,$Td2[2]
|| EXTU $s[3],EXT2,24,$Td2[3]
LDBU *${TEA}[$Td2[2]],$Td2[2] ; Td2[s2>>16], t0
|| LDBU *${TEB}[$Td2[3]],$Td2[3] ; Td2[s3>>16], t1
|| EXTU $s[3],EXT1,24,$Td1[3]
|| EXTU $s[2],EXT3,24,$Td3[2]
LDBU *${TEB}[$Td1[3]],$Td1[3] ; Td1[s3>>8], t0
|| LDBU *${TEA}[$Td3[2]],$Td3[2] ; Td3[s2>>24], t1
|| EXTU $s[1],EXT1,24,$Td1[1]
|| EXTU $s[0],EXT3,24,$Td3[0]
LDBU *${TEB}[$Td1[1]],$Td1[1] ; Td1[s1>>8], t2
|| LDBU *${TEA}[$Td3[0]],$Td3[0] ; Td3[s0>>24], t3
|| EXTU $s[0],EXT2,24,$Td2[0]
|| EXTU $s[1],EXT2,24,$Td2[1]
LDBU *${TEA}[$Td2[0]],$Td2[0] ; Td2[s0>>16], t2
|| LDBU *${TEB}[$Td2[1]],$Td2[1] ; Td2[s1>>16], t3
|| EXTU $s[3],EXT3,24,$Td3[3]
|| EXTU $s[2],EXT1,24,$Td1[2]
LDBU *${TEB}[$Td3[3]],$Td3[3] ; Td3[s3>>24], t2
|| LDBU *${TEA}[$Td1[2]],$Td1[2] ; Td1[s2>>8], t3
|| EXTU $s[2],EXT0,24,$Td0[2]
|| EXTU $s[3],EXT0,24,$Td0[3]
LDBU *${TEA}[$Td0[2]],$Td0[2] ; Td0[s2], t2
|| LDBU *${TEB}[$Td0[3]],$Td0[3] ; Td0[s3], t3
.if .BIG_ENDIAN
PACK2 $Td0[0],$Td1[3],$Td0[0]
|| PACK2 $Td0[1],$Td1[0],$Td0[1]
PACK2 $Td2[2],$Td3[1],$Td2[2]
|| PACK2 $Td2[3],$Td3[2],$Td2[3]
PACKL4 $Td0[0],$Td2[2],$Td0[0]
|| PACKL4 $Td0[1],$Td2[3],$Td0[1]
XOR $K[0],$Td0[0],$Td0[0] ; s[0]
|| XOR $K[1],$Td0[1],$Td0[1] ; s[1]
PACK2 $Td0[2],$Td1[1],$Td0[2]
|| PACK2 $Td0[3],$Td1[2],$Td0[3]
PACK2 $Td2[0],$Td3[3],$Td2[0]
|| PACK2 $Td2[1],$Td3[0],$Td2[1]
|| BNOP RA
PACKL4 $Td0[2],$Td2[0],$Td0[2]
|| PACKL4 $Td0[3],$Td2[1],$Td0[3]
XOR $K[2],$Td0[2],$Td0[2] ; s[2]
|| XOR $K[3],$Td0[3],$Td0[3] ; s[3]
MV $Td0[0],A9
|| MV $Td0[1],A8
MV $Td0[2],B9
|| MV $Td0[3],B8
|| [B2] STNDW A9:A8,*OUT++
[B2] STNDW B9:B8,*OUT++
.else
PACK2 $Td1[3],$Td0[0],$Td1[3]
|| PACK2 $Td1[0],$Td0[1],$Td1[0]
PACK2 $Td3[1],$Td2[2],$Td3[1]
|| PACK2 $Td3[2],$Td2[3],$Td3[2]
PACKL4 $Td3[1],$Td1[3],$Td1[3]
|| PACKL4 $Td3[2],$Td1[0],$Td1[0]
XOR $K[0],$Td1[3],$Td1[3] ; s[0]
|| XOR $K[1],$Td1[0],$Td1[0] ; s[1]
PACK2 $Td1[1],$Td0[2],$Td1[1]
|| PACK2 $Td1[2],$Td0[3],$Td1[2]
PACK2 $Td3[3],$Td2[0],$Td3[3]
|| PACK2 $Td3[0],$Td2[1],$Td3[0]
|| BNOP RA
PACKL4 $Td3[3],$Td1[1],$Td1[1]
|| PACKL4 $Td3[0],$Td1[2],$Td1[2]
XOR $K[2],$Td1[1],$Td1[1] ; s[2]
|| XOR $K[3],$Td1[2],$Td1[2] ; s[3]
MV $Td1[3],A8
|| MV $Td1[0],A9
MV $Td1[1],B8
|| MV $Td1[2],B9
|| [B2] STNDW A9:A8,*OUT++
[B2] STNDW B9:B8,*OUT++
.endif
.endasmfunc
___
{
# ------------------------------------------------------------------
# Key-schedule generators: _AES_set_encrypt_key / _AES_set_decrypt_key.
# The Perl arrays below hold C64x+ register *names* (plain strings) and
# simply re-alias the register sets declared earlier in the file for the
# cipher rounds; they are interpolated into the unquoted here-doc that
# follows, so $K[..], $Te4[..], ${TEA} etc. expand at generation time.
# ------------------------------------------------------------------
my @K=(@K,@s); # extended key
my @Te4=map("B$_",(16..19));
# Scratch registers for the decrypt key schedule's InvMixColumns pass:
# round-key words multiplied by 0x09/0x0B/0x0D/0x0E in GF(2^8).
my @Kx9=@Te0; # used in AES_set_decrypt_key
my @KxB=@Te1;
my @KxD=@Te2;
my @KxE=@Te3;
$code.=<<___;
.asg OUT,BITS
.global _AES_set_encrypt_key
_AES_set_encrypt_key:
__set_encrypt_key:
.asmfunc
MV INP,A0
|| SHRU BITS,5,BITS ; 128-192-256 -> 4-6-8
|| MV KEY,A1
[!A0] B RA
||[!A0] MVK -1,RET
||[!A0] MVK 1,A1 ; only one B RA
[!A1] B RA
||[!A1] MVK -1,RET
||[!A1] MVK 0,A0
|| MVK 0,B0
|| MVK 0,A1
[A0] LDNDW *INP++,A9:A8
|| [A0] CMPEQ 4,BITS,B0
|| [A0] CMPLT 3,BITS,A1
[B0] B key128?
|| [A1] LDNDW *INP++,B9:B8
|| [A0] CMPEQ 6,BITS,B0
|| [A0] CMPLT 5,BITS,A1
[B0] B key192?
|| [A1] LDNDW *INP++,B17:B16
|| [A0] CMPEQ 8,BITS,B0
|| [A0] CMPLT 7,BITS,A1
[B0] B key256?
|| [A1] LDNDW *INP++,B19:B18
.if __TI_EABI__
[A0] ADD 0,KEY,$KPA
|| [A0] ADD 4,KEY,$KPB
|| [A0] MVKL \$PCR_OFFSET(AES_Te4,__set_encrypt_key),$TEA
|| [A0] ADDKPC __set_encrypt_key,B6
[A0] MVKH \$PCR_OFFSET(AES_Te4,__set_encrypt_key),$TEA
[A0] ADD B6,$TEA,$TEA ; AES_Te4
.else
[A0] ADD 0,KEY,$KPA
|| [A0] ADD 4,KEY,$KPB
|| [A0] MVKL (AES_Te4-__set_encrypt_key),$TEA
|| [A0] ADDKPC __set_encrypt_key,B6
[A0] MVKH (AES_Te4-__set_encrypt_key),$TEA
[A0] ADD B6,$TEA,$TEA ; AES_Te4
.endif
NOP
NOP
BNOP RA,5
|| MVK -2,RET ; unknown bit length
|| MVK 0,B0 ; redundant
;;====================================================================
;;====================================================================
key128?:
.if .BIG_ENDIAN
MV A9,$K[0]
|| MV A8,$K[1]
|| MV B9,$Te4[2]
|| MV B8,$K[3]
.else
MV A8,$K[0]
|| MV A9,$K[1]
|| MV B8,$Te4[2]
|| MV B9,$K[3]
.endif
MVK 256,A0
|| MVK 9,B0
SPLOOPD 14
|| MVC B0,ILC
|| MV $TEA,$TEB
|| ADD $TEA,A0,A30 ; rcon
;;====================================================================
LDW *A30++[1],A31 ; rcon[i]
|| MV $Te4[2],$K[2]
|| EXTU $K[3],EXT1,24,$Te4[0]
LDBU *${TEB}[$Te4[0]],$Te4[0]
|| MV $K[3],A0
|| EXTU $K[3],EXT2,24,$Te4[1]
LDBU *${TEB}[$Te4[1]],$Te4[1]
|| EXTU A0,EXT3,24,A0
|| EXTU $K[3],EXT0,24,$Te4[3]
.if .BIG_ENDIAN
LDBU *${TEA}[A0],$Te4[3]
|| LDBU *${TEB}[$Te4[3]],A0
.else
LDBU *${TEA}[A0],A0
|| LDBU *${TEB}[$Te4[3]],$Te4[3]
.endif
STW $K[0],*$KPA++[2]
|| STW $K[1],*$KPB++[2]
STW $K[2],*$KPA++[2]
|| STW $K[3],*$KPB++[2]
XOR A31,$K[0],$K[0] ; ^=rcon[i]
.if .BIG_ENDIAN
PACK2 $Te4[0],$Te4[1],$Te4[1]
PACK2 $Te4[3],A0,$Te4[3]
PACKL4 $Te4[1],$Te4[3],$Te4[3]
.else
PACK2 $Te4[1],$Te4[0],$Te4[1]
PACK2 $Te4[3],A0,$Te4[3]
PACKL4 $Te4[3],$Te4[1],$Te4[3]
.endif
XOR $Te4[3],$K[0],$Te4[0] ; K[0]
XOR $Te4[0],$K[1],$K[1] ; K[1]
MV $Te4[0],$K[0]
|| XOR $K[1],$K[2],$Te4[2] ; K[2]
XOR $Te4[2],$K[3],$K[3] ; K[3]
SPKERNEL
;;====================================================================
BNOP RA
MV $Te4[2],$K[2]
|| STW $K[0],*$KPA++[2]
|| STW $K[1],*$KPB++[2]
STW $K[2],*$KPA++[2]
|| STW $K[3],*$KPB++[2]
MVK 10,B0 ; rounds
STW B0,*++${KPB}[15]
MVK 0,RET
;;====================================================================
;;====================================================================
key192?:
.if .BIG_ENDIAN
MV A9,$K[0]
|| MV A8,$K[1]
|| MV B9,$K[2]
|| MV B8,$K[3]
MV B17,$Te4[2]
|| MV B16,$K[5]
.else
MV A8,$K[0]
|| MV A9,$K[1]
|| MV B8,$K[2]
|| MV B9,$K[3]
MV B16,$Te4[2]
|| MV B17,$K[5]
.endif
MVK 256,A0
|| MVK 6,B0
MV $TEA,$TEB
|| ADD $TEA,A0,A30 ; rcon
;;====================================================================
loop192?:
LDW *A30++[1],A31 ; rcon[i]
|| MV $Te4[2],$K[4]
|| EXTU $K[5],EXT1,24,$Te4[0]
LDBU *${TEB}[$Te4[0]],$Te4[0]
|| MV $K[5],A0
|| EXTU $K[5],EXT2,24,$Te4[1]
LDBU *${TEB}[$Te4[1]],$Te4[1]
|| EXTU A0,EXT3,24,A0
|| EXTU $K[5],EXT0,24,$Te4[3]
.if .BIG_ENDIAN
LDBU *${TEA}[A0],$Te4[3]
|| LDBU *${TEB}[$Te4[3]],A0
.else
LDBU *${TEA}[A0],A0
|| LDBU *${TEB}[$Te4[3]],$Te4[3]
.endif
STW $K[0],*$KPA++[2]
|| STW $K[1],*$KPB++[2]
STW $K[2],*$KPA++[2]
|| STW $K[3],*$KPB++[2]
STW $K[4],*$KPA++[2]
|| STW $K[5],*$KPB++[2]
XOR A31,$K[0],$K[0] ; ^=rcon[i]
.if .BIG_ENDIAN
PACK2 $Te4[0],$Te4[1],$Te4[1]
|| PACK2 $Te4[3],A0,$Te4[3]
PACKL4 $Te4[1],$Te4[3],$Te4[3]
.else
PACK2 $Te4[1],$Te4[0],$Te4[1]
|| PACK2 $Te4[3],A0,$Te4[3]
PACKL4 $Te4[3],$Te4[1],$Te4[3]
.endif
BDEC loop192?,B0
|| XOR $Te4[3],$K[0],$Te4[0] ; K[0]
XOR $Te4[0],$K[1],$K[1] ; K[1]
MV $Te4[0],$K[0]
|| XOR $K[1],$K[2],$Te4[2] ; K[2]
XOR $Te4[2],$K[3],$K[3] ; K[3]
MV $Te4[2],$K[2]
|| XOR $K[3],$K[4],$Te4[2] ; K[4]
XOR $Te4[2],$K[5],$K[5] ; K[5]
;;====================================================================
BNOP RA
STW $K[0],*$KPA++[2]
|| STW $K[1],*$KPB++[2]
STW $K[2],*$KPA++[2]
|| STW $K[3],*$KPB++[2]
MVK 12,B0 ; rounds
STW B0,*++${KPB}[7]
MVK 0,RET
;;====================================================================
;;====================================================================
key256?:
.if .BIG_ENDIAN
MV A9,$K[0]
|| MV A8,$K[1]
|| MV B9,$K[2]
|| MV B8,$K[3]
MV B17,$K[4]
|| MV B16,$K[5]
|| MV B19,$Te4[2]
|| MV B18,$K[7]
.else
MV A8,$K[0]
|| MV A9,$K[1]
|| MV B8,$K[2]
|| MV B9,$K[3]
MV B16,$K[4]
|| MV B17,$K[5]
|| MV B18,$Te4[2]
|| MV B19,$K[7]
.endif
MVK 256,A0
|| MVK 6,B0
MV $TEA,$TEB
|| ADD $TEA,A0,A30 ; rcon
;;====================================================================
loop256?:
LDW *A30++[1],A31 ; rcon[i]
|| MV $Te4[2],$K[6]
|| EXTU $K[7],EXT1,24,$Te4[0]
LDBU *${TEB}[$Te4[0]],$Te4[0]
|| MV $K[7],A0
|| EXTU $K[7],EXT2,24,$Te4[1]
LDBU *${TEB}[$Te4[1]],$Te4[1]
|| EXTU A0,EXT3,24,A0
|| EXTU $K[7],EXT0,24,$Te4[3]
.if .BIG_ENDIAN
LDBU *${TEA}[A0],$Te4[3]
|| LDBU *${TEB}[$Te4[3]],A0
.else
LDBU *${TEA}[A0],A0
|| LDBU *${TEB}[$Te4[3]],$Te4[3]
.endif
STW $K[0],*$KPA++[2]
|| STW $K[1],*$KPB++[2]
STW $K[2],*$KPA++[2]
|| STW $K[3],*$KPB++[2]
STW $K[4],*$KPA++[2]
|| STW $K[5],*$KPB++[2]
STW $K[6],*$KPA++[2]
|| STW $K[7],*$KPB++[2]
|| XOR A31,$K[0],$K[0] ; ^=rcon[i]
.if .BIG_ENDIAN
PACK2 $Te4[0],$Te4[1],$Te4[1]
|| PACK2 $Te4[3],A0,$Te4[3]
PACKL4 $Te4[1],$Te4[3],$Te4[3]
||[!B0] B done256?
.else
PACK2 $Te4[1],$Te4[0],$Te4[1]
|| PACK2 $Te4[3],A0,$Te4[3]
PACKL4 $Te4[3],$Te4[1],$Te4[3]
||[!B0] B done256?
.endif
XOR $Te4[3],$K[0],$Te4[0] ; K[0]
XOR $Te4[0],$K[1],$K[1] ; K[1]
MV $Te4[0],$K[0]
|| XOR $K[1],$K[2],$Te4[2] ; K[2]
XOR $Te4[2],$K[3],$K[3] ; K[3]
MV $Te4[2],$K[2]
|| [B0] EXTU $K[3],EXT0,24,$Te4[0]
|| [B0] SUB B0,1,B0
LDBU *${TEB}[$Te4[0]],$Te4[0]
|| MV $K[3],A0
|| EXTU $K[3],EXT1,24,$Te4[1]
LDBU *${TEB}[$Te4[1]],$Te4[1]
|| EXTU A0,EXT2,24,A0
|| EXTU $K[3],EXT3,24,$Te4[3]
.if .BIG_ENDIAN
LDBU *${TEA}[A0],$Te4[3]
|| LDBU *${TEB}[$Te4[3]],A0
NOP 3
PACK2 $Te4[0],$Te4[1],$Te4[1]
PACK2 $Te4[3],A0,$Te4[3]
|| B loop256?
PACKL4 $Te4[1],$Te4[3],$Te4[3]
.else
LDBU *${TEA}[A0],A0
|| LDBU *${TEB}[$Te4[3]],$Te4[3]
NOP 3
PACK2 $Te4[1],$Te4[0],$Te4[1]
PACK2 $Te4[3],A0,$Te4[3]
|| B loop256?
PACKL4 $Te4[3],$Te4[1],$Te4[3]
.endif
XOR $Te4[3],$K[4],$Te4[0] ; K[4]
XOR $Te4[0],$K[5],$K[5] ; K[5]
MV $Te4[0],$K[4]
|| XOR $K[5],$K[6],$Te4[2] ; K[6]
XOR $Te4[2],$K[7],$K[7] ; K[7]
;;====================================================================
done256?:
BNOP RA
STW $K[0],*$KPA++[2]
|| STW $K[1],*$KPB++[2]
STW $K[2],*$KPA++[2]
|| STW $K[3],*$KPB++[2]
MVK 14,B0 ; rounds
STW B0,*--${KPB}[1]
MVK 0,RET
.endasmfunc
.global _AES_set_decrypt_key
_AES_set_decrypt_key:
.asmfunc
B __set_encrypt_key ; guarantee local call
MV KEY,B30 ; B30 is not modified
MV RA, B31 ; B31 is not modified
ADDKPC ret?,RA,2
ret?: ; B0 holds rounds or zero
[!B0] BNOP B31 ; return if zero
[B0] SHL B0,4,A0 ; offset to last round key
[B0] SHRU B0,1,B1
[B0] SUB B1,1,B1
[B0] MVK 0x0000001B,B3 ; AES polynomial
[B0] MVKH 0x07000000,B3
SPLOOPD 9 ; flip round keys
|| MVC B1,ILC
|| MV B30,$KPA
|| ADD B30,A0,$KPB
|| MVK 16,A0 ; sizeof(round key)
;;====================================================================
LDW *${KPA}[0],A16
|| LDW *${KPB}[0],B16
LDW *${KPA}[1],A17
|| LDW *${KPB}[1],B17
LDW *${KPA}[2],A18
|| LDW *${KPB}[2],B18
LDW *${KPA}[3],A19
|| ADD $KPA,A0,$KPA
|| LDW *${KPB}[3],B19
|| SUB $KPB,A0,$KPB
NOP
STW B16,*${KPA}[-4]
|| STW A16,*${KPB}[4]
STW B17,*${KPA}[-3]
|| STW A17,*${KPB}[5]
STW B18,*${KPA}[-2]
|| STW A18,*${KPB}[6]
STW B19,*${KPA}[-1]
|| STW A19,*${KPB}[7]
SPKERNEL
;;====================================================================
SUB B0,1,B0 ; skip last round
|| ADD B30,A0,$KPA ; skip first round
|| ADD B30,A0,$KPB
|| MVC GFPGFR,B30 ; save GFPGFR
LDW *${KPA}[0],$K[0]
|| LDW *${KPB}[1],$K[1]
|| MVC B3,GFPGFR
LDW *${KPA}[2],$K[2]
|| LDW *${KPB}[3],$K[3]
MVK 0x00000909,A24
|| MVK 0x00000B0B,B24
MVKH 0x09090000,A24
|| MVKH 0x0B0B0000,B24
MVC B0,ILC
|| SUB B0,1,B0
GMPY4 $K[0],A24,$Kx9[0] ; ·0x09
|| GMPY4 $K[1],A24,$Kx9[1]
|| MVK 0x00000D0D,A25
|| MVK 0x00000E0E,B25
GMPY4 $K[2],A24,$Kx9[2]
|| GMPY4 $K[3],A24,$Kx9[3]
|| MVKH 0x0D0D0000,A25
|| MVKH 0x0E0E0000,B25
GMPY4 $K[0],B24,$KxB[0] ; ·0x0B
|| GMPY4 $K[1],B24,$KxB[1]
GMPY4 $K[2],B24,$KxB[2]
|| GMPY4 $K[3],B24,$KxB[3]
SPLOOP 11 ; InvMixColumns
;;====================================================================
GMPY4 $K[0],A25,$KxD[0] ; ·0x0D
|| GMPY4 $K[1],A25,$KxD[1]
|| SWAP2 $Kx9[0],$Kx9[0] ; rotate by 16
|| SWAP2 $Kx9[1],$Kx9[1]
|| MV $K[0],$s[0] ; this or DINT
|| MV $K[1],$s[1]
|| [B0] LDW *${KPA}[4],$K[0]
|| [B0] LDW *${KPB}[5],$K[1]
GMPY4 $K[2],A25,$KxD[2]
|| GMPY4 $K[3],A25,$KxD[3]
|| SWAP2 $Kx9[2],$Kx9[2]
|| SWAP2 $Kx9[3],$Kx9[3]
|| MV $K[2],$s[2]
|| MV $K[3],$s[3]
|| [B0] LDW *${KPA}[6],$K[2]
|| [B0] LDW *${KPB}[7],$K[3]
GMPY4 $s[0],B25,$KxE[0] ; ·0x0E
|| GMPY4 $s[1],B25,$KxE[1]
|| XOR $Kx9[0],$KxB[0],$KxB[0]
|| XOR $Kx9[1],$KxB[1],$KxB[1]
GMPY4 $s[2],B25,$KxE[2]
|| GMPY4 $s[3],B25,$KxE[3]
|| XOR $Kx9[2],$KxB[2],$KxB[2]
|| XOR $Kx9[3],$KxB[3],$KxB[3]
ROTL $KxB[0],TBL3,$KxB[0]
|| ROTL $KxB[1],TBL3,$KxB[1]
|| SWAP2 $KxD[0],$KxD[0] ; rotate by 16
|| SWAP2 $KxD[1],$KxD[1]
ROTL $KxB[2],TBL3,$KxB[2]
|| ROTL $KxB[3],TBL3,$KxB[3]
|| SWAP2 $KxD[2],$KxD[2]
|| SWAP2 $KxD[3],$KxD[3]
XOR $KxE[0],$KxD[0],$KxE[0]
|| XOR $KxE[1],$KxD[1],$KxE[1]
|| [B0] GMPY4 $K[0],A24,$Kx9[0] ; ·0x09
|| [B0] GMPY4 $K[1],A24,$Kx9[1]
|| ADDAW $KPA,4,$KPA
XOR $KxE[2],$KxD[2],$KxE[2]
|| XOR $KxE[3],$KxD[3],$KxE[3]
|| [B0] GMPY4 $K[2],A24,$Kx9[2]
|| [B0] GMPY4 $K[3],A24,$Kx9[3]
|| ADDAW $KPB,4,$KPB
XOR $KxB[0],$KxE[0],$KxE[0]
|| XOR $KxB[1],$KxE[1],$KxE[1]
|| [B0] GMPY4 $K[0],B24,$KxB[0] ; ·0x0B
|| [B0] GMPY4 $K[1],B24,$KxB[1]
XOR $KxB[2],$KxE[2],$KxE[2]
|| XOR $KxB[3],$KxE[3],$KxE[3]
|| [B0] GMPY4 $K[2],B24,$KxB[2]
|| [B0] GMPY4 $K[3],B24,$KxB[3]
|| STW $KxE[0],*${KPA}[-4]
|| STW $KxE[1],*${KPB}[-3]
STW $KxE[2],*${KPA}[-2]
|| STW $KxE[3],*${KPB}[-1]
|| [B0] SUB B0,1,B0
SPKERNEL
;;====================================================================
BNOP B31,3
MVC B30,GFPGFR ; restore GFPGFR(*)
MVK 0,RET
.endasmfunc
___
# (*) Even though ABI doesn't specify GFPGFR as non-volatile, there
# are code samples out there that *assume* its default value.
}
{
# CTR-mode wrapper: _AES_ctr32_encrypt(inp,out,blocks,key,ivp) drives the
# __encrypt round function once per 16-byte block, XORing its output with
# the input stream.  The Perl scalars below are the register names the five
# C arguments arrive in (presumably the C6x calling convention's argument
# registers — matches the INP/OUT/KEY aliases used elsewhere in this file);
# they are interpolated into the unquoted here-doc at generation time.
my ($inp,$out,$blocks,$key,$ivp)=("A4","B4","A6","B6","A8");
$code.=<<___;
.global _AES_ctr32_encrypt
_AES_ctr32_encrypt:
.asmfunc
LDNDW *${ivp}[0],A31:A30 ; load counter value
|| MV $blocks,A2 ; reassign $blocks
|| DMV RA,$key,B27:B26 ; reassign RA and $key
LDNDW *${ivp}[1],B31:B30
|| MVK 0,B2 ; don't let __encrypt load input
|| MVK 0,A1 ; and postpone writing output
.if .BIG_ENDIAN
NOP
.else
NOP 4
SWAP2 B31,B31 ; keep least significant 32 bits
SWAP4 B31,B31 ; in host byte order
.endif
ctr32_loop?:
[A2] BNOP __encrypt
|| [A1] XOR A29,A9,A9 ; input^Ek(counter)
|| [A1] XOR A28,A8,A8
|| [A2] LDNDW *INP++,A29:A28 ; load input
[!A2] BNOP B27 ; return
|| [A1] XOR B29,B9,B9
|| [A1] XOR B28,B8,B8
|| [A2] LDNDW *INP++,B29:B28
.if .BIG_ENDIAN
[A1] STNDW A9:A8,*OUT++ ; save output
|| [A2] DMV A31,A30,A9:A8 ; pass counter value to __encrypt
[A1] STNDW B9:B8,*OUT++
|| [A2] DMV B31,B30,B9:B8
|| [A2] ADD B30,1,B30 ; counter++
.else
[A1] STNDW A9:A8,*OUT++ ; save output
|| [A2] DMV A31,A30,A9:A8
|| [A2] SWAP2 B31,B0
|| [A2] ADD B31,1,B31 ; counter++
[A1] STNDW B9:B8,*OUT++
|| [A2] MV B30,B8
|| [A2] SWAP4 B0,B9
.endif
[A2] ADDKPC ctr32_loop?,RA ; return to ctr32_loop?
|| [A2] MV B26,KEY ; pass $key
|| [A2] SUB A2,1,A2 ; $blocks--
||[!A1] MVK 1,A1
NOP
NOP
.endasmfunc
___
}
# Tables are kept in endian-neutral manner
# Emits the AES lookup-table data: AES_Te (encryption T-table), AES_Te4
# (S-box), rcon (round constants), AES_Td (decryption T-table) and AES_Td4
# (inverse S-box), each laid out as individual bytes so the same source
# assembles correctly for either endianness.  The here-doc interpolates,
# but the table text contains no sigils — only the escaped \@ in the
# .cstring line — so the bytes below pass through verbatim.
$code.=<<___;
.if __TI_EABI__
.sect ".text:aes_asm.const"
.else
.sect ".const:aes_asm"
.endif
.align 128
AES_Te:
.byte 0xc6,0x63,0x63,0xa5, 0xf8,0x7c,0x7c,0x84
.byte 0xee,0x77,0x77,0x99, 0xf6,0x7b,0x7b,0x8d
.byte 0xff,0xf2,0xf2,0x0d, 0xd6,0x6b,0x6b,0xbd
.byte 0xde,0x6f,0x6f,0xb1, 0x91,0xc5,0xc5,0x54
.byte 0x60,0x30,0x30,0x50, 0x02,0x01,0x01,0x03
.byte 0xce,0x67,0x67,0xa9, 0x56,0x2b,0x2b,0x7d
.byte 0xe7,0xfe,0xfe,0x19, 0xb5,0xd7,0xd7,0x62
.byte 0x4d,0xab,0xab,0xe6, 0xec,0x76,0x76,0x9a
.byte 0x8f,0xca,0xca,0x45, 0x1f,0x82,0x82,0x9d
.byte 0x89,0xc9,0xc9,0x40, 0xfa,0x7d,0x7d,0x87
.byte 0xef,0xfa,0xfa,0x15, 0xb2,0x59,0x59,0xeb
.byte 0x8e,0x47,0x47,0xc9, 0xfb,0xf0,0xf0,0x0b
.byte 0x41,0xad,0xad,0xec, 0xb3,0xd4,0xd4,0x67
.byte 0x5f,0xa2,0xa2,0xfd, 0x45,0xaf,0xaf,0xea
.byte 0x23,0x9c,0x9c,0xbf, 0x53,0xa4,0xa4,0xf7
.byte 0xe4,0x72,0x72,0x96, 0x9b,0xc0,0xc0,0x5b
.byte 0x75,0xb7,0xb7,0xc2, 0xe1,0xfd,0xfd,0x1c
.byte 0x3d,0x93,0x93,0xae, 0x4c,0x26,0x26,0x6a
.byte 0x6c,0x36,0x36,0x5a, 0x7e,0x3f,0x3f,0x41
.byte 0xf5,0xf7,0xf7,0x02, 0x83,0xcc,0xcc,0x4f
.byte 0x68,0x34,0x34,0x5c, 0x51,0xa5,0xa5,0xf4
.byte 0xd1,0xe5,0xe5,0x34, 0xf9,0xf1,0xf1,0x08
.byte 0xe2,0x71,0x71,0x93, 0xab,0xd8,0xd8,0x73
.byte 0x62,0x31,0x31,0x53, 0x2a,0x15,0x15,0x3f
.byte 0x08,0x04,0x04,0x0c, 0x95,0xc7,0xc7,0x52
.byte 0x46,0x23,0x23,0x65, 0x9d,0xc3,0xc3,0x5e
.byte 0x30,0x18,0x18,0x28, 0x37,0x96,0x96,0xa1
.byte 0x0a,0x05,0x05,0x0f, 0x2f,0x9a,0x9a,0xb5
.byte 0x0e,0x07,0x07,0x09, 0x24,0x12,0x12,0x36
.byte 0x1b,0x80,0x80,0x9b, 0xdf,0xe2,0xe2,0x3d
.byte 0xcd,0xeb,0xeb,0x26, 0x4e,0x27,0x27,0x69
.byte 0x7f,0xb2,0xb2,0xcd, 0xea,0x75,0x75,0x9f
.byte 0x12,0x09,0x09,0x1b, 0x1d,0x83,0x83,0x9e
.byte 0x58,0x2c,0x2c,0x74, 0x34,0x1a,0x1a,0x2e
.byte 0x36,0x1b,0x1b,0x2d, 0xdc,0x6e,0x6e,0xb2
.byte 0xb4,0x5a,0x5a,0xee, 0x5b,0xa0,0xa0,0xfb
.byte 0xa4,0x52,0x52,0xf6, 0x76,0x3b,0x3b,0x4d
.byte 0xb7,0xd6,0xd6,0x61, 0x7d,0xb3,0xb3,0xce
.byte 0x52,0x29,0x29,0x7b, 0xdd,0xe3,0xe3,0x3e
.byte 0x5e,0x2f,0x2f,0x71, 0x13,0x84,0x84,0x97
.byte 0xa6,0x53,0x53,0xf5, 0xb9,0xd1,0xd1,0x68
.byte 0x00,0x00,0x00,0x00, 0xc1,0xed,0xed,0x2c
.byte 0x40,0x20,0x20,0x60, 0xe3,0xfc,0xfc,0x1f
.byte 0x79,0xb1,0xb1,0xc8, 0xb6,0x5b,0x5b,0xed
.byte 0xd4,0x6a,0x6a,0xbe, 0x8d,0xcb,0xcb,0x46
.byte 0x67,0xbe,0xbe,0xd9, 0x72,0x39,0x39,0x4b
.byte 0x94,0x4a,0x4a,0xde, 0x98,0x4c,0x4c,0xd4
.byte 0xb0,0x58,0x58,0xe8, 0x85,0xcf,0xcf,0x4a
.byte 0xbb,0xd0,0xd0,0x6b, 0xc5,0xef,0xef,0x2a
.byte 0x4f,0xaa,0xaa,0xe5, 0xed,0xfb,0xfb,0x16
.byte 0x86,0x43,0x43,0xc5, 0x9a,0x4d,0x4d,0xd7
.byte 0x66,0x33,0x33,0x55, 0x11,0x85,0x85,0x94
.byte 0x8a,0x45,0x45,0xcf, 0xe9,0xf9,0xf9,0x10
.byte 0x04,0x02,0x02,0x06, 0xfe,0x7f,0x7f,0x81
.byte 0xa0,0x50,0x50,0xf0, 0x78,0x3c,0x3c,0x44
.byte 0x25,0x9f,0x9f,0xba, 0x4b,0xa8,0xa8,0xe3
.byte 0xa2,0x51,0x51,0xf3, 0x5d,0xa3,0xa3,0xfe
.byte 0x80,0x40,0x40,0xc0, 0x05,0x8f,0x8f,0x8a
.byte 0x3f,0x92,0x92,0xad, 0x21,0x9d,0x9d,0xbc
.byte 0x70,0x38,0x38,0x48, 0xf1,0xf5,0xf5,0x04
.byte 0x63,0xbc,0xbc,0xdf, 0x77,0xb6,0xb6,0xc1
.byte 0xaf,0xda,0xda,0x75, 0x42,0x21,0x21,0x63
.byte 0x20,0x10,0x10,0x30, 0xe5,0xff,0xff,0x1a
.byte 0xfd,0xf3,0xf3,0x0e, 0xbf,0xd2,0xd2,0x6d
.byte 0x81,0xcd,0xcd,0x4c, 0x18,0x0c,0x0c,0x14
.byte 0x26,0x13,0x13,0x35, 0xc3,0xec,0xec,0x2f
.byte 0xbe,0x5f,0x5f,0xe1, 0x35,0x97,0x97,0xa2
.byte 0x88,0x44,0x44,0xcc, 0x2e,0x17,0x17,0x39
.byte 0x93,0xc4,0xc4,0x57, 0x55,0xa7,0xa7,0xf2
.byte 0xfc,0x7e,0x7e,0x82, 0x7a,0x3d,0x3d,0x47
.byte 0xc8,0x64,0x64,0xac, 0xba,0x5d,0x5d,0xe7
.byte 0x32,0x19,0x19,0x2b, 0xe6,0x73,0x73,0x95
.byte 0xc0,0x60,0x60,0xa0, 0x19,0x81,0x81,0x98
.byte 0x9e,0x4f,0x4f,0xd1, 0xa3,0xdc,0xdc,0x7f
.byte 0x44,0x22,0x22,0x66, 0x54,0x2a,0x2a,0x7e
.byte 0x3b,0x90,0x90,0xab, 0x0b,0x88,0x88,0x83
.byte 0x8c,0x46,0x46,0xca, 0xc7,0xee,0xee,0x29
.byte 0x6b,0xb8,0xb8,0xd3, 0x28,0x14,0x14,0x3c
.byte 0xa7,0xde,0xde,0x79, 0xbc,0x5e,0x5e,0xe2
.byte 0x16,0x0b,0x0b,0x1d, 0xad,0xdb,0xdb,0x76
.byte 0xdb,0xe0,0xe0,0x3b, 0x64,0x32,0x32,0x56
.byte 0x74,0x3a,0x3a,0x4e, 0x14,0x0a,0x0a,0x1e
.byte 0x92,0x49,0x49,0xdb, 0x0c,0x06,0x06,0x0a
.byte 0x48,0x24,0x24,0x6c, 0xb8,0x5c,0x5c,0xe4
.byte 0x9f,0xc2,0xc2,0x5d, 0xbd,0xd3,0xd3,0x6e
.byte 0x43,0xac,0xac,0xef, 0xc4,0x62,0x62,0xa6
.byte 0x39,0x91,0x91,0xa8, 0x31,0x95,0x95,0xa4
.byte 0xd3,0xe4,0xe4,0x37, 0xf2,0x79,0x79,0x8b
.byte 0xd5,0xe7,0xe7,0x32, 0x8b,0xc8,0xc8,0x43
.byte 0x6e,0x37,0x37,0x59, 0xda,0x6d,0x6d,0xb7
.byte 0x01,0x8d,0x8d,0x8c, 0xb1,0xd5,0xd5,0x64
.byte 0x9c,0x4e,0x4e,0xd2, 0x49,0xa9,0xa9,0xe0
.byte 0xd8,0x6c,0x6c,0xb4, 0xac,0x56,0x56,0xfa
.byte 0xf3,0xf4,0xf4,0x07, 0xcf,0xea,0xea,0x25
.byte 0xca,0x65,0x65,0xaf, 0xf4,0x7a,0x7a,0x8e
.byte 0x47,0xae,0xae,0xe9, 0x10,0x08,0x08,0x18
.byte 0x6f,0xba,0xba,0xd5, 0xf0,0x78,0x78,0x88
.byte 0x4a,0x25,0x25,0x6f, 0x5c,0x2e,0x2e,0x72
.byte 0x38,0x1c,0x1c,0x24, 0x57,0xa6,0xa6,0xf1
.byte 0x73,0xb4,0xb4,0xc7, 0x97,0xc6,0xc6,0x51
.byte 0xcb,0xe8,0xe8,0x23, 0xa1,0xdd,0xdd,0x7c
.byte 0xe8,0x74,0x74,0x9c, 0x3e,0x1f,0x1f,0x21
.byte 0x96,0x4b,0x4b,0xdd, 0x61,0xbd,0xbd,0xdc
.byte 0x0d,0x8b,0x8b,0x86, 0x0f,0x8a,0x8a,0x85
.byte 0xe0,0x70,0x70,0x90, 0x7c,0x3e,0x3e,0x42
.byte 0x71,0xb5,0xb5,0xc4, 0xcc,0x66,0x66,0xaa
.byte 0x90,0x48,0x48,0xd8, 0x06,0x03,0x03,0x05
.byte 0xf7,0xf6,0xf6,0x01, 0x1c,0x0e,0x0e,0x12
.byte 0xc2,0x61,0x61,0xa3, 0x6a,0x35,0x35,0x5f
.byte 0xae,0x57,0x57,0xf9, 0x69,0xb9,0xb9,0xd0
.byte 0x17,0x86,0x86,0x91, 0x99,0xc1,0xc1,0x58
.byte 0x3a,0x1d,0x1d,0x27, 0x27,0x9e,0x9e,0xb9
.byte 0xd9,0xe1,0xe1,0x38, 0xeb,0xf8,0xf8,0x13
.byte 0x2b,0x98,0x98,0xb3, 0x22,0x11,0x11,0x33
.byte 0xd2,0x69,0x69,0xbb, 0xa9,0xd9,0xd9,0x70
.byte 0x07,0x8e,0x8e,0x89, 0x33,0x94,0x94,0xa7
.byte 0x2d,0x9b,0x9b,0xb6, 0x3c,0x1e,0x1e,0x22
.byte 0x15,0x87,0x87,0x92, 0xc9,0xe9,0xe9,0x20
.byte 0x87,0xce,0xce,0x49, 0xaa,0x55,0x55,0xff
.byte 0x50,0x28,0x28,0x78, 0xa5,0xdf,0xdf,0x7a
.byte 0x03,0x8c,0x8c,0x8f, 0x59,0xa1,0xa1,0xf8
.byte 0x09,0x89,0x89,0x80, 0x1a,0x0d,0x0d,0x17
.byte 0x65,0xbf,0xbf,0xda, 0xd7,0xe6,0xe6,0x31
.byte 0x84,0x42,0x42,0xc6, 0xd0,0x68,0x68,0xb8
.byte 0x82,0x41,0x41,0xc3, 0x29,0x99,0x99,0xb0
.byte 0x5a,0x2d,0x2d,0x77, 0x1e,0x0f,0x0f,0x11
.byte 0x7b,0xb0,0xb0,0xcb, 0xa8,0x54,0x54,0xfc
.byte 0x6d,0xbb,0xbb,0xd6, 0x2c,0x16,0x16,0x3a
AES_Te4:
.byte 0x63, 0x7c, 0x77, 0x7b, 0xf2, 0x6b, 0x6f, 0xc5
.byte 0x30, 0x01, 0x67, 0x2b, 0xfe, 0xd7, 0xab, 0x76
.byte 0xca, 0x82, 0xc9, 0x7d, 0xfa, 0x59, 0x47, 0xf0
.byte 0xad, 0xd4, 0xa2, 0xaf, 0x9c, 0xa4, 0x72, 0xc0
.byte 0xb7, 0xfd, 0x93, 0x26, 0x36, 0x3f, 0xf7, 0xcc
.byte 0x34, 0xa5, 0xe5, 0xf1, 0x71, 0xd8, 0x31, 0x15
.byte 0x04, 0xc7, 0x23, 0xc3, 0x18, 0x96, 0x05, 0x9a
.byte 0x07, 0x12, 0x80, 0xe2, 0xeb, 0x27, 0xb2, 0x75
.byte 0x09, 0x83, 0x2c, 0x1a, 0x1b, 0x6e, 0x5a, 0xa0
.byte 0x52, 0x3b, 0xd6, 0xb3, 0x29, 0xe3, 0x2f, 0x84
.byte 0x53, 0xd1, 0x00, 0xed, 0x20, 0xfc, 0xb1, 0x5b
.byte 0x6a, 0xcb, 0xbe, 0x39, 0x4a, 0x4c, 0x58, 0xcf
.byte 0xd0, 0xef, 0xaa, 0xfb, 0x43, 0x4d, 0x33, 0x85
.byte 0x45, 0xf9, 0x02, 0x7f, 0x50, 0x3c, 0x9f, 0xa8
.byte 0x51, 0xa3, 0x40, 0x8f, 0x92, 0x9d, 0x38, 0xf5
.byte 0xbc, 0xb6, 0xda, 0x21, 0x10, 0xff, 0xf3, 0xd2
.byte 0xcd, 0x0c, 0x13, 0xec, 0x5f, 0x97, 0x44, 0x17
.byte 0xc4, 0xa7, 0x7e, 0x3d, 0x64, 0x5d, 0x19, 0x73
.byte 0x60, 0x81, 0x4f, 0xdc, 0x22, 0x2a, 0x90, 0x88
.byte 0x46, 0xee, 0xb8, 0x14, 0xde, 0x5e, 0x0b, 0xdb
.byte 0xe0, 0x32, 0x3a, 0x0a, 0x49, 0x06, 0x24, 0x5c
.byte 0xc2, 0xd3, 0xac, 0x62, 0x91, 0x95, 0xe4, 0x79
.byte 0xe7, 0xc8, 0x37, 0x6d, 0x8d, 0xd5, 0x4e, 0xa9
.byte 0x6c, 0x56, 0xf4, 0xea, 0x65, 0x7a, 0xae, 0x08
.byte 0xba, 0x78, 0x25, 0x2e, 0x1c, 0xa6, 0xb4, 0xc6
.byte 0xe8, 0xdd, 0x74, 0x1f, 0x4b, 0xbd, 0x8b, 0x8a
.byte 0x70, 0x3e, 0xb5, 0x66, 0x48, 0x03, 0xf6, 0x0e
.byte 0x61, 0x35, 0x57, 0xb9, 0x86, 0xc1, 0x1d, 0x9e
.byte 0xe1, 0xf8, 0x98, 0x11, 0x69, 0xd9, 0x8e, 0x94
.byte 0x9b, 0x1e, 0x87, 0xe9, 0xce, 0x55, 0x28, 0xdf
.byte 0x8c, 0xa1, 0x89, 0x0d, 0xbf, 0xe6, 0x42, 0x68
.byte 0x41, 0x99, 0x2d, 0x0f, 0xb0, 0x54, 0xbb, 0x16
rcon:
.byte 0x01,0x00,0x00,0x00, 0x02,0x00,0x00,0x00
.byte 0x04,0x00,0x00,0x00, 0x08,0x00,0x00,0x00
.byte 0x10,0x00,0x00,0x00, 0x20,0x00,0x00,0x00
.byte 0x40,0x00,0x00,0x00, 0x80,0x00,0x00,0x00
.byte 0x1B,0x00,0x00,0x00, 0x36,0x00,0x00,0x00
.align 128
AES_Td:
.byte 0x51,0xf4,0xa7,0x50, 0x7e,0x41,0x65,0x53
.byte 0x1a,0x17,0xa4,0xc3, 0x3a,0x27,0x5e,0x96
.byte 0x3b,0xab,0x6b,0xcb, 0x1f,0x9d,0x45,0xf1
.byte 0xac,0xfa,0x58,0xab, 0x4b,0xe3,0x03,0x93
.byte 0x20,0x30,0xfa,0x55, 0xad,0x76,0x6d,0xf6
.byte 0x88,0xcc,0x76,0x91, 0xf5,0x02,0x4c,0x25
.byte 0x4f,0xe5,0xd7,0xfc, 0xc5,0x2a,0xcb,0xd7
.byte 0x26,0x35,0x44,0x80, 0xb5,0x62,0xa3,0x8f
.byte 0xde,0xb1,0x5a,0x49, 0x25,0xba,0x1b,0x67
.byte 0x45,0xea,0x0e,0x98, 0x5d,0xfe,0xc0,0xe1
.byte 0xc3,0x2f,0x75,0x02, 0x81,0x4c,0xf0,0x12
.byte 0x8d,0x46,0x97,0xa3, 0x6b,0xd3,0xf9,0xc6
.byte 0x03,0x8f,0x5f,0xe7, 0x15,0x92,0x9c,0x95
.byte 0xbf,0x6d,0x7a,0xeb, 0x95,0x52,0x59,0xda
.byte 0xd4,0xbe,0x83,0x2d, 0x58,0x74,0x21,0xd3
.byte 0x49,0xe0,0x69,0x29, 0x8e,0xc9,0xc8,0x44
.byte 0x75,0xc2,0x89,0x6a, 0xf4,0x8e,0x79,0x78
.byte 0x99,0x58,0x3e,0x6b, 0x27,0xb9,0x71,0xdd
.byte 0xbe,0xe1,0x4f,0xb6, 0xf0,0x88,0xad,0x17
.byte 0xc9,0x20,0xac,0x66, 0x7d,0xce,0x3a,0xb4
.byte 0x63,0xdf,0x4a,0x18, 0xe5,0x1a,0x31,0x82
.byte 0x97,0x51,0x33,0x60, 0x62,0x53,0x7f,0x45
.byte 0xb1,0x64,0x77,0xe0, 0xbb,0x6b,0xae,0x84
.byte 0xfe,0x81,0xa0,0x1c, 0xf9,0x08,0x2b,0x94
.byte 0x70,0x48,0x68,0x58, 0x8f,0x45,0xfd,0x19
.byte 0x94,0xde,0x6c,0x87, 0x52,0x7b,0xf8,0xb7
.byte 0xab,0x73,0xd3,0x23, 0x72,0x4b,0x02,0xe2
.byte 0xe3,0x1f,0x8f,0x57, 0x66,0x55,0xab,0x2a
.byte 0xb2,0xeb,0x28,0x07, 0x2f,0xb5,0xc2,0x03
.byte 0x86,0xc5,0x7b,0x9a, 0xd3,0x37,0x08,0xa5
.byte 0x30,0x28,0x87,0xf2, 0x23,0xbf,0xa5,0xb2
.byte 0x02,0x03,0x6a,0xba, 0xed,0x16,0x82,0x5c
.byte 0x8a,0xcf,0x1c,0x2b, 0xa7,0x79,0xb4,0x92
.byte 0xf3,0x07,0xf2,0xf0, 0x4e,0x69,0xe2,0xa1
.byte 0x65,0xda,0xf4,0xcd, 0x06,0x05,0xbe,0xd5
.byte 0xd1,0x34,0x62,0x1f, 0xc4,0xa6,0xfe,0x8a
.byte 0x34,0x2e,0x53,0x9d, 0xa2,0xf3,0x55,0xa0
.byte 0x05,0x8a,0xe1,0x32, 0xa4,0xf6,0xeb,0x75
.byte 0x0b,0x83,0xec,0x39, 0x40,0x60,0xef,0xaa
.byte 0x5e,0x71,0x9f,0x06, 0xbd,0x6e,0x10,0x51
.byte 0x3e,0x21,0x8a,0xf9, 0x96,0xdd,0x06,0x3d
.byte 0xdd,0x3e,0x05,0xae, 0x4d,0xe6,0xbd,0x46
.byte 0x91,0x54,0x8d,0xb5, 0x71,0xc4,0x5d,0x05
.byte 0x04,0x06,0xd4,0x6f, 0x60,0x50,0x15,0xff
.byte 0x19,0x98,0xfb,0x24, 0xd6,0xbd,0xe9,0x97
.byte 0x89,0x40,0x43,0xcc, 0x67,0xd9,0x9e,0x77
.byte 0xb0,0xe8,0x42,0xbd, 0x07,0x89,0x8b,0x88
.byte 0xe7,0x19,0x5b,0x38, 0x79,0xc8,0xee,0xdb
.byte 0xa1,0x7c,0x0a,0x47, 0x7c,0x42,0x0f,0xe9
.byte 0xf8,0x84,0x1e,0xc9, 0x00,0x00,0x00,0x00
.byte 0x09,0x80,0x86,0x83, 0x32,0x2b,0xed,0x48
.byte 0x1e,0x11,0x70,0xac, 0x6c,0x5a,0x72,0x4e
.byte 0xfd,0x0e,0xff,0xfb, 0x0f,0x85,0x38,0x56
.byte 0x3d,0xae,0xd5,0x1e, 0x36,0x2d,0x39,0x27
.byte 0x0a,0x0f,0xd9,0x64, 0x68,0x5c,0xa6,0x21
.byte 0x9b,0x5b,0x54,0xd1, 0x24,0x36,0x2e,0x3a
.byte 0x0c,0x0a,0x67,0xb1, 0x93,0x57,0xe7,0x0f
.byte 0xb4,0xee,0x96,0xd2, 0x1b,0x9b,0x91,0x9e
.byte 0x80,0xc0,0xc5,0x4f, 0x61,0xdc,0x20,0xa2
.byte 0x5a,0x77,0x4b,0x69, 0x1c,0x12,0x1a,0x16
.byte 0xe2,0x93,0xba,0x0a, 0xc0,0xa0,0x2a,0xe5
.byte 0x3c,0x22,0xe0,0x43, 0x12,0x1b,0x17,0x1d
.byte 0x0e,0x09,0x0d,0x0b, 0xf2,0x8b,0xc7,0xad
.byte 0x2d,0xb6,0xa8,0xb9, 0x14,0x1e,0xa9,0xc8
.byte 0x57,0xf1,0x19,0x85, 0xaf,0x75,0x07,0x4c
.byte 0xee,0x99,0xdd,0xbb, 0xa3,0x7f,0x60,0xfd
.byte 0xf7,0x01,0x26,0x9f, 0x5c,0x72,0xf5,0xbc
.byte 0x44,0x66,0x3b,0xc5, 0x5b,0xfb,0x7e,0x34
.byte 0x8b,0x43,0x29,0x76, 0xcb,0x23,0xc6,0xdc
.byte 0xb6,0xed,0xfc,0x68, 0xb8,0xe4,0xf1,0x63
.byte 0xd7,0x31,0xdc,0xca, 0x42,0x63,0x85,0x10
.byte 0x13,0x97,0x22,0x40, 0x84,0xc6,0x11,0x20
.byte 0x85,0x4a,0x24,0x7d, 0xd2,0xbb,0x3d,0xf8
.byte 0xae,0xf9,0x32,0x11, 0xc7,0x29,0xa1,0x6d
.byte 0x1d,0x9e,0x2f,0x4b, 0xdc,0xb2,0x30,0xf3
.byte 0x0d,0x86,0x52,0xec, 0x77,0xc1,0xe3,0xd0
.byte 0x2b,0xb3,0x16,0x6c, 0xa9,0x70,0xb9,0x99
.byte 0x11,0x94,0x48,0xfa, 0x47,0xe9,0x64,0x22
.byte 0xa8,0xfc,0x8c,0xc4, 0xa0,0xf0,0x3f,0x1a
.byte 0x56,0x7d,0x2c,0xd8, 0x22,0x33,0x90,0xef
.byte 0x87,0x49,0x4e,0xc7, 0xd9,0x38,0xd1,0xc1
.byte 0x8c,0xca,0xa2,0xfe, 0x98,0xd4,0x0b,0x36
.byte 0xa6,0xf5,0x81,0xcf, 0xa5,0x7a,0xde,0x28
.byte 0xda,0xb7,0x8e,0x26, 0x3f,0xad,0xbf,0xa4
.byte 0x2c,0x3a,0x9d,0xe4, 0x50,0x78,0x92,0x0d
.byte 0x6a,0x5f,0xcc,0x9b, 0x54,0x7e,0x46,0x62
.byte 0xf6,0x8d,0x13,0xc2, 0x90,0xd8,0xb8,0xe8
.byte 0x2e,0x39,0xf7,0x5e, 0x82,0xc3,0xaf,0xf5
.byte 0x9f,0x5d,0x80,0xbe, 0x69,0xd0,0x93,0x7c
.byte 0x6f,0xd5,0x2d,0xa9, 0xcf,0x25,0x12,0xb3
.byte 0xc8,0xac,0x99,0x3b, 0x10,0x18,0x7d,0xa7
.byte 0xe8,0x9c,0x63,0x6e, 0xdb,0x3b,0xbb,0x7b
.byte 0xcd,0x26,0x78,0x09, 0x6e,0x59,0x18,0xf4
.byte 0xec,0x9a,0xb7,0x01, 0x83,0x4f,0x9a,0xa8
.byte 0xe6,0x95,0x6e,0x65, 0xaa,0xff,0xe6,0x7e
.byte 0x21,0xbc,0xcf,0x08, 0xef,0x15,0xe8,0xe6
.byte 0xba,0xe7,0x9b,0xd9, 0x4a,0x6f,0x36,0xce
.byte 0xea,0x9f,0x09,0xd4, 0x29,0xb0,0x7c,0xd6
.byte 0x31,0xa4,0xb2,0xaf, 0x2a,0x3f,0x23,0x31
.byte 0xc6,0xa5,0x94,0x30, 0x35,0xa2,0x66,0xc0
.byte 0x74,0x4e,0xbc,0x37, 0xfc,0x82,0xca,0xa6
.byte 0xe0,0x90,0xd0,0xb0, 0x33,0xa7,0xd8,0x15
.byte 0xf1,0x04,0x98,0x4a, 0x41,0xec,0xda,0xf7
.byte 0x7f,0xcd,0x50,0x0e, 0x17,0x91,0xf6,0x2f
.byte 0x76,0x4d,0xd6,0x8d, 0x43,0xef,0xb0,0x4d
.byte 0xcc,0xaa,0x4d,0x54, 0xe4,0x96,0x04,0xdf
.byte 0x9e,0xd1,0xb5,0xe3, 0x4c,0x6a,0x88,0x1b
.byte 0xc1,0x2c,0x1f,0xb8, 0x46,0x65,0x51,0x7f
.byte 0x9d,0x5e,0xea,0x04, 0x01,0x8c,0x35,0x5d
.byte 0xfa,0x87,0x74,0x73, 0xfb,0x0b,0x41,0x2e
.byte 0xb3,0x67,0x1d,0x5a, 0x92,0xdb,0xd2,0x52
.byte 0xe9,0x10,0x56,0x33, 0x6d,0xd6,0x47,0x13
.byte 0x9a,0xd7,0x61,0x8c, 0x37,0xa1,0x0c,0x7a
.byte 0x59,0xf8,0x14,0x8e, 0xeb,0x13,0x3c,0x89
.byte 0xce,0xa9,0x27,0xee, 0xb7,0x61,0xc9,0x35
.byte 0xe1,0x1c,0xe5,0xed, 0x7a,0x47,0xb1,0x3c
.byte 0x9c,0xd2,0xdf,0x59, 0x55,0xf2,0x73,0x3f
.byte 0x18,0x14,0xce,0x79, 0x73,0xc7,0x37,0xbf
.byte 0x53,0xf7,0xcd,0xea, 0x5f,0xfd,0xaa,0x5b
.byte 0xdf,0x3d,0x6f,0x14, 0x78,0x44,0xdb,0x86
.byte 0xca,0xaf,0xf3,0x81, 0xb9,0x68,0xc4,0x3e
.byte 0x38,0x24,0x34,0x2c, 0xc2,0xa3,0x40,0x5f
.byte 0x16,0x1d,0xc3,0x72, 0xbc,0xe2,0x25,0x0c
.byte 0x28,0x3c,0x49,0x8b, 0xff,0x0d,0x95,0x41
.byte 0x39,0xa8,0x01,0x71, 0x08,0x0c,0xb3,0xde
.byte 0xd8,0xb4,0xe4,0x9c, 0x64,0x56,0xc1,0x90
.byte 0x7b,0xcb,0x84,0x61, 0xd5,0x32,0xb6,0x70
.byte 0x48,0x6c,0x5c,0x74, 0xd0,0xb8,0x57,0x42
AES_Td4:
.byte 0x52, 0x09, 0x6a, 0xd5, 0x30, 0x36, 0xa5, 0x38
.byte 0xbf, 0x40, 0xa3, 0x9e, 0x81, 0xf3, 0xd7, 0xfb
.byte 0x7c, 0xe3, 0x39, 0x82, 0x9b, 0x2f, 0xff, 0x87
.byte 0x34, 0x8e, 0x43, 0x44, 0xc4, 0xde, 0xe9, 0xcb
.byte 0x54, 0x7b, 0x94, 0x32, 0xa6, 0xc2, 0x23, 0x3d
.byte 0xee, 0x4c, 0x95, 0x0b, 0x42, 0xfa, 0xc3, 0x4e
.byte 0x08, 0x2e, 0xa1, 0x66, 0x28, 0xd9, 0x24, 0xb2
.byte 0x76, 0x5b, 0xa2, 0x49, 0x6d, 0x8b, 0xd1, 0x25
.byte 0x72, 0xf8, 0xf6, 0x64, 0x86, 0x68, 0x98, 0x16
.byte 0xd4, 0xa4, 0x5c, 0xcc, 0x5d, 0x65, 0xb6, 0x92
.byte 0x6c, 0x70, 0x48, 0x50, 0xfd, 0xed, 0xb9, 0xda
.byte 0x5e, 0x15, 0x46, 0x57, 0xa7, 0x8d, 0x9d, 0x84
.byte 0x90, 0xd8, 0xab, 0x00, 0x8c, 0xbc, 0xd3, 0x0a
.byte 0xf7, 0xe4, 0x58, 0x05, 0xb8, 0xb3, 0x45, 0x06
.byte 0xd0, 0x2c, 0x1e, 0x8f, 0xca, 0x3f, 0x0f, 0x02
.byte 0xc1, 0xaf, 0xbd, 0x03, 0x01, 0x13, 0x8a, 0x6b
.byte 0x3a, 0x91, 0x11, 0x41, 0x4f, 0x67, 0xdc, 0xea
.byte 0x97, 0xf2, 0xcf, 0xce, 0xf0, 0xb4, 0xe6, 0x73
.byte 0x96, 0xac, 0x74, 0x22, 0xe7, 0xad, 0x35, 0x85
.byte 0xe2, 0xf9, 0x37, 0xe8, 0x1c, 0x75, 0xdf, 0x6e
.byte 0x47, 0xf1, 0x1a, 0x71, 0x1d, 0x29, 0xc5, 0x89
.byte 0x6f, 0xb7, 0x62, 0x0e, 0xaa, 0x18, 0xbe, 0x1b
.byte 0xfc, 0x56, 0x3e, 0x4b, 0xc6, 0xd2, 0x79, 0x20
.byte 0x9a, 0xdb, 0xc0, 0xfe, 0x78, 0xcd, 0x5a, 0xf4
.byte 0x1f, 0xdd, 0xa8, 0x33, 0x88, 0x07, 0xc7, 0x31
.byte 0xb1, 0x12, 0x10, 0x59, 0x27, 0x80, 0xec, 0x5f
.byte 0x60, 0x51, 0x7f, 0xa9, 0x19, 0xb5, 0x4a, 0x0d
.byte 0x2d, 0xe5, 0x7a, 0x9f, 0x93, 0xc9, 0x9c, 0xef
.byte 0xa0, 0xe0, 0x3b, 0x4d, 0xae, 0x2a, 0xf5, 0xb0
.byte 0xc8, 0xeb, 0xbb, 0x3c, 0x83, 0x53, 0x99, 0x61
.byte 0x17, 0x2b, 0x04, 0x7e, 0xba, 0x77, 0xd6, 0x26
.byte 0xe1, 0x69, 0x14, 0x63, 0x55, 0x21, 0x0c, 0x7d
.cstring "AES for C64x+, CRYPTOGAMS by <appro\@openssl.org>"
.align 4
___
# Emit the accumulated assembly on STDOUT (the build redirects it into the
# generated .s file).  Checking close() catches buffered-write failures
# (e.g. a full disk) that an unchecked print alone would silently drop.
print $code;
close STDOUT or die "error closing STDOUT: $!";
| jens-maus/amissl | openssl/crypto/aes/asm/aes-c64xplus.pl | Perl | bsd-3-clause | 44,249 |
=pod
=head1 NAME
SSL_get_peer_signature_nid, SSL_get_peer_signature_type_nid,
SSL_get_signature_nid, SSL_get_signature_type_nid - get TLS message signing
types
=head1 SYNOPSIS
#include <openssl/ssl.h>
int SSL_get_peer_signature_nid(SSL *ssl, int *psig_nid);
int SSL_get_peer_signature_type_nid(const SSL *ssl, int *psigtype_nid);
int SSL_get_signature_nid(SSL *ssl, int *psig_nid);
int SSL_get_signature_type_nid(const SSL *ssl, int *psigtype_nid);
=head1 DESCRIPTION
SSL_get_peer_signature_nid() sets B<*psig_nid> to the NID of the digest used
by the peer to sign TLS messages. It is implemented as a macro.
SSL_get_peer_signature_type_nid() sets B<*psigtype_nid> to the signature
type used by the peer to sign TLS messages. Currently the signature type
is the NID of the public key type used for signing except for PSS signing
where it is B<EVP_PKEY_RSA_PSS>. To differentiate between
B<rsa_pss_rsae_*> and B<rsa_pss_pss_*> signatures, it's necessary to check
the type of public key in the peer's certificate.
SSL_get_signature_nid() and SSL_get_signature_type_nid() return the equivalent
information for the local end of the connection.
=head1 RETURN VALUES
These functions return 1 for success and 0 for failure. There are several
possible reasons for failure: the cipher suite has no signature (e.g. it
uses RSA key exchange or is anonymous), the TLS version is below 1.2 or
the functions were called too early, e.g. before the peer signed a message.
=head1 SEE ALSO
L<ssl(7)>, L<SSL_get_peer_certificate(3)>
=head1 COPYRIGHT
Copyright 2017-2018 The OpenSSL Project Authors. All Rights Reserved.
Licensed under the Apache License 2.0 (the "License"). You may not use
this file except in compliance with the License. You can obtain a copy
in the file LICENSE in the source distribution or at
L<https://www.openssl.org/source/license.html>.
=cut
| jens-maus/amissl | openssl/doc/man3/SSL_get_peer_signature_nid.pod | Perl | bsd-3-clause | 1,874 |
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
use 5.10.0;
use strict;
use warnings;
use Thrift;
use Thrift::Transport;
package Thrift::MemoryBuffer;

# A Thrift transport backed by an in-memory byte string rather than a
# socket or file.  Writes append to the buffer; reads consume from it.
# Useful for (de)serialising messages to and from a plain string.
use base('Thrift::Transport');
use version 0.77; our $VERSION = version->declare("$Thrift::VERSION");

# Constructor.
#   $bufferSize - nominal initial size hint (informational only; the
#                 underlying Perl string grows on demand).  Default 1024.
sub new
{
    my $classname  = shift;
    my $bufferSize = shift || 1024;

    my $self = {
        buffer     => '',           # backing byte string
        bufferSize => $bufferSize,  # nominal capacity (not enforced)
        wPos       => 0,            # write offset == bytes written so far
        rPos       => 0,            # read cursor into buffer
    };

    return bless($self, $classname);
}

# A memory buffer is always considered open.
sub isOpen
{
    return 1;
}

# No-op: there is no underlying resource to open.
sub open
{
}

# No-op: there is no underlying resource to close.
sub close
{
}

# True if any unread bytes remain.
sub peek
{
    my $self = shift;
    return ($self->{rPos} < $self->{wPos});
}

# Return the entire underlying buffer, including already-read bytes.
sub getBuffer
{
    my $self = shift;
    return $self->{buffer};
}

# Replace the buffer contents (default: empty) and reset both cursors.
sub resetBuffer
{
    my $self       = shift;
    my $new_buffer = shift || '';

    $self->{buffer}     = $new_buffer;
    $self->{bufferSize} = length($new_buffer);
    $self->{wPos}       = length($new_buffer);
    $self->{rPos}       = 0;
}

# Number of unread bytes currently in the buffer.
sub available
{
    my $self = shift;
    return ($self->{wPos} - $self->{rPos});
}

# Read up to $len bytes; returns '' when the buffer is exhausted.
sub read
{
    my $self = shift;
    my $len  = shift;

    my $avail = ($self->{wPos} - $self->{rPos});
    return '' if $avail == 0;

    # give at most what is available
    my $give = $len;
    $give = $avail if $avail < $len;

    my $ret = substr($self->{buffer}, $self->{rPos}, $give);
    $self->{rPos} += $give;
    return $ret;
}

# Read exactly $len bytes or throw a transport exception.
sub readAll
{
    my $self = shift;
    my $len  = shift;

    my $avail = ($self->{wPos} - $self->{rPos});
    if ($avail < $len) {
        # BUG FIX: the exception class must be fully qualified.  The bare
        # "TTransportException" package does not exist (note the qualified
        # constant on the next line), so the original die itself died with
        # "Can't locate object method new".
        die Thrift::TTransportException->new(
            "Attempt to readAll($len) found only $avail available",
            Thrift::TTransportException::END_OF_FILE);
    }

    my $data = '';
    my $got  = 0;
    while (($got = length($data)) < $len) {
        $data .= $self->read($len - $got);
    }
    return $data;
}

# Append bytes to the buffer and advance the write offset.
sub write
{
    my $self = shift;
    my $buf  = shift;

    $self->{buffer} .= $buf;
    $self->{wPos}   += length($buf);
}

# No-op: writes land directly in memory, nothing to flush.
sub flush
{
}

1;
| vasili-v/themis | vendor/github.com/apache/thrift/lib/perl/lib/Thrift/MemoryBuffer.pm | Perl | apache-2.0 | 2,725 |
#!/usr/bin/perl
#
# Create a dot(1) graph file from a directory hierarchy
#
# (C) Copyright 2001 Diomidis Spinellis.
#
# Permission to use, copy, and distribute this software and its
# documentation for any purpose and without fee is hereby granted,
# provided that the above copyright notice appear in all copies and that
# both that copyright notice and this permission notice appear in
# supporting documentation.
#
# THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED
# WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF
# MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
#
# $Id$
#
# BUG FIX: validate arguments BEFORE producing any output; the original
# printed the dot header first, so a usage error still emitted a partial file.
if ($#ARGV != 0) {
	print STDERR "$0: usage $0 directory\n";
	exit(1);
}

print "#!/usr/local/bin/dot
# Automatically generated file.
# Contains the directory representation of $ARGV[0] generated by $0
#
";

# A readable /dev/null is a cheap heuristic for "Unix-like system".
my $unix = (-r '/dev/null');
my $cmd;
if ($unix) {
	open(IN, $cmd = "find $ARGV[0] -type d -print|") || die "Unable to run $cmd: $!\n";
} else {
	# Hopefully Windows
	open(IN, $cmd = "dir /b/ad/s $ARGV[0]|") || die "Unable to run $cmd: $!\n";
}

# %node maps a sanitised dot node id to its display label (last component);
# %edge collects parent->child edges as preformatted dot statements.
my (%node, %edge);
while (<IN>) {
	# BUG FIX: chomp, not chop -- chop would eat a real character if the
	# final line of command output lacks a trailing newline.
	chomp;
	my @paths = $unix ? split(/\//, $_) : split(/\\/, $_);
	my ($op, $path);
	for my $p (@paths) {
		$path .= "/$p";
		# dot identifiers must be alphanumeric: mangle everything else to '_'
		(my $name = $path) =~ s/[^a-zA-Z0-9]/_/g;
		$node{$name} = $p;
		$edge{"$op->$name;"} = 1 if ($op);
		$op = $name;
	}
}
close(IN);

print 'digraph G {
nodesep=.1;
rankdir=LR;
node [height=.15,shape=box,fontname="Helvetica",fontsize=8];
edge [arrowhead=none,arrowtail=none];
'
;
for my $i (sort keys %node) {
	print "\t$i [label=\"$node{$i}\"];\n";
}
for my $i (sort keys %edge) {
	print "\t$i\n";
}
print "}\n";
| JasonGross/graphviz-packaging | reference-graphviz-2.39.20141222.0545/contrib/dirgraph/dirgraph.pl | Perl | mit | 1,734 |
###########################################################################
#
# This file is auto-generated by the Perl DateTime Suite locale
# generator (0.05). This code generator comes with the
# DateTime::Locale distribution in the tools/ directory, and is called
# generate-from-cldr.
#
# This file as generated from the CLDR XML locale data. See the
# LICENSE.cldr file included in this distribution for license details.
#
# This file was generated from the source file ss.xml
# The source file version number was 1.27, generated on
# 2009/05/05 23:06:40.
#
# Do not edit this file directly.
#
###########################################################################
package DateTime::Locale::ss;
# Machine-generated CLDR locale data for Swati ("ss").  Each bare block
# below closes over one immutable data structure and exposes it through a
# constant accessor; "narrow"/"stand-alone" variants alias their siblings.
use strict;
use warnings;
use utf8;
use base 'DateTime::Locale::root';
# Version of the CLDR data this module was generated from.
sub cldr_version { return "1\.7\.1" }
{
my $am_pm_abbreviated = [ "AM", "PM" ];
sub am_pm_abbreviated { return $am_pm_abbreviated }
}
# CLDR date format patterns (full / long / medium / short).
{
my $date_format_full = "EEEE\,\ y\ MMMM\ dd";
sub date_format_full { return $date_format_full }
}
{
my $date_format_long = "y\ MMMM\ d";
sub date_format_long { return $date_format_long }
}
{
my $date_format_medium = "y\ MMM\ d";
sub date_format_medium { return $date_format_medium }
}
{
my $date_format_short = "yy\/MM\/dd";
sub date_format_short { return $date_format_short }
}
# Day names, Monday first (CLDR order).
{
my $day_format_abbreviated = [ "Mso", "Bil", "Tsa", "Ne", "Hla", "Mgc", "Son" ];
sub day_format_abbreviated { return $day_format_abbreviated }
}
sub day_format_narrow { $_[0]->day_stand_alone_narrow() }
{
my $day_format_wide = [ "uMsombuluko", "Lesibili", "Lesitsatfu", "Lesine", "Lesihlanu", "uMgcibelo", "Lisontfo" ];
sub day_format_wide { return $day_format_wide }
}
sub day_stand_alone_abbreviated { $_[0]->day_format_abbreviated() }
{
my $day_stand_alone_narrow = [ "2", "3", "4", "5", "6", "7", "1" ];
sub day_stand_alone_narrow { return $day_stand_alone_narrow }
}
sub day_stand_alone_wide { $_[0]->day_format_wide() }
# Era names (inherited English forms in this locale's CLDR data).
{
my $era_abbreviated = [ "BC", "AD" ];
sub era_abbreviated { return $era_abbreviated }
}
sub era_narrow { $_[0]->era_abbreviated() }
{
my $era_wide = [ "BC", "AD" ];
sub era_wide { return $era_wide }
}
{
my $first_day_of_week = "1";
sub first_day_of_week { return $first_day_of_week }
}
# Month names.
{
my $month_format_abbreviated = [ "Bhi", "Van", "Vol", "Mab", "Nkh", "Nhl", "Kho", "Ngc", "Nyo", "Mph", "Lwe", "Ngo" ];
sub month_format_abbreviated { return $month_format_abbreviated }
}
sub month_format_narrow { $_[0]->month_stand_alone_narrow() }
{
my $month_format_wide = [ "Bhimbidvwane", "iNdlovana", "iNdlovu\-lenkhulu", "Mabasa", "iNkhwekhweti", "iNhlaba", "Kholwane", "iNgci", "iNyoni", "iMphala", "Lweti", "iNgongoni" ];
sub month_format_wide { return $month_format_wide }
}
sub month_stand_alone_abbreviated { $_[0]->month_format_abbreviated() }
{
my $month_stand_alone_narrow = [ "1", "2", "3", "4", "5", "6", "7", "8", "9", "10", "11", "12" ];
sub month_stand_alone_narrow { return $month_stand_alone_narrow }
}
sub month_stand_alone_wide { $_[0]->month_format_wide() }
# Quarter names.
{
my $quarter_format_abbreviated = [ "Q1", "Q2", "Q3", "Q4" ];
sub quarter_format_abbreviated { return $quarter_format_abbreviated }
}
{
my $quarter_format_wide = [ "Q1", "Q2", "Q3", "Q4" ];
sub quarter_format_wide { return $quarter_format_wide }
}
sub quarter_stand_alone_abbreviated { $_[0]->quarter_format_abbreviated() }
sub quarter_stand_alone_wide { $_[0]->quarter_format_wide() }
# Time format patterns (24-hour clock).
{
my $time_format_full = "HH\:mm\:ss\ zzzz";
sub time_format_full { return $time_format_full }
}
{
my $time_format_long = "HH\:mm\:ss\ z";
sub time_format_long { return $time_format_long }
}
{
my $time_format_medium = "HH\:mm\:ss";
sub time_format_medium { return $time_format_medium }
}
{
my $time_format_short = "HH\:mm";
sub time_format_short { return $time_format_short }
}
# Skeleton formats available through format_for().
{
my $_format_for_yyQ = "Q\ yy";
sub _format_for_yyQ { return $_format_for_yyQ }
}
{
my $_available_formats =
{
"yyQ" => "Q\ yy"
};
sub _available_formats { return $_available_formats }
}
1;
__END__
=pod
=encoding utf8
=head1 NAME
DateTime::Locale::ss
=head1 SYNOPSIS
use DateTime;
my $dt = DateTime->now( locale => 'ss' );
print $dt->month_name();
=head1 DESCRIPTION
This is the DateTime locale package for Swati.
=head1 DATA
This locale inherits from the L<DateTime::Locale::root> locale.
It contains the following data.
=head2 Days
=head3 Wide (format)
uMsombuluko
Lesibili
Lesitsatfu
Lesine
Lesihlanu
uMgcibelo
Lisontfo
=head3 Abbreviated (format)
Mso
Bil
Tsa
Ne
Hla
Mgc
Son
=head3 Narrow (format)
2
3
4
5
6
7
1
=head3 Wide (stand-alone)
uMsombuluko
Lesibili
Lesitsatfu
Lesine
Lesihlanu
uMgcibelo
Lisontfo
=head3 Abbreviated (stand-alone)
Mso
Bil
Tsa
Ne
Hla
Mgc
Son
=head3 Narrow (stand-alone)
2
3
4
5
6
7
1
=head2 Months
=head3 Wide (format)
Bhimbidvwane
iNdlovana
iNdlovu-lenkhulu
Mabasa
iNkhwekhweti
iNhlaba
Kholwane
iNgci
iNyoni
iMphala
Lweti
iNgongoni
=head3 Abbreviated (format)
Bhi
Van
Vol
Mab
Nkh
Nhl
Kho
Ngc
Nyo
Mph
Lwe
Ngo
=head3 Narrow (format)
1
2
3
4
5
6
7
8
9
10
11
12
=head3 Wide (stand-alone)
Bhimbidvwane
iNdlovana
iNdlovu-lenkhulu
Mabasa
iNkhwekhweti
iNhlaba
Kholwane
iNgci
iNyoni
iMphala
Lweti
iNgongoni
=head3 Abbreviated (stand-alone)
Bhi
Van
Vol
Mab
Nkh
Nhl
Kho
Ngc
Nyo
Mph
Lwe
Ngo
=head3 Narrow (stand-alone)
1
2
3
4
5
6
7
8
9
10
11
12
=head2 Quarters
=head3 Wide (format)
Q1
Q2
Q3
Q4
=head3 Abbreviated (format)
Q1
Q2
Q3
Q4
=head3 Narrow (format)
1
2
3
4
=head3 Wide (stand-alone)
Q1
Q2
Q3
Q4
=head3 Abbreviated (stand-alone)
Q1
Q2
Q3
Q4
=head3 Narrow (stand-alone)
1
2
3
4
=head2 Eras
=head3 Wide
BC
AD
=head3 Abbreviated
BC
AD
=head3 Narrow
BC
AD
=head2 Date Formats
=head3 Full
2008-02-05T18:30:30 = Lesibili, 2008 iNdlovana 05
1995-12-22T09:05:02 = Lesihlanu, 1995 iNgongoni 22
-0010-09-15T04:44:23 = uMgcibelo, -10 iNyoni 15
=head3 Long
2008-02-05T18:30:30 = 2008 iNdlovana 5
1995-12-22T09:05:02 = 1995 iNgongoni 22
-0010-09-15T04:44:23 = -10 iNyoni 15
=head3 Medium
2008-02-05T18:30:30 = 2008 Van 5
1995-12-22T09:05:02 = 1995 Ngo 22
-0010-09-15T04:44:23 = -10 Nyo 15
=head3 Short
2008-02-05T18:30:30 = 08/02/05
1995-12-22T09:05:02 = 95/12/22
-0010-09-15T04:44:23 = -10/09/15
=head3 Default
2008-02-05T18:30:30 = 2008 Van 5
1995-12-22T09:05:02 = 1995 Ngo 22
-0010-09-15T04:44:23 = -10 Nyo 15
=head2 Time Formats
=head3 Full
2008-02-05T18:30:30 = 18:30:30 UTC
1995-12-22T09:05:02 = 09:05:02 UTC
-0010-09-15T04:44:23 = 04:44:23 UTC
=head3 Long
2008-02-05T18:30:30 = 18:30:30 UTC
1995-12-22T09:05:02 = 09:05:02 UTC
-0010-09-15T04:44:23 = 04:44:23 UTC
=head3 Medium
2008-02-05T18:30:30 = 18:30:30
1995-12-22T09:05:02 = 09:05:02
-0010-09-15T04:44:23 = 04:44:23
=head3 Short
2008-02-05T18:30:30 = 18:30
1995-12-22T09:05:02 = 09:05
-0010-09-15T04:44:23 = 04:44
=head3 Default
2008-02-05T18:30:30 = 18:30:30
1995-12-22T09:05:02 = 09:05:02
-0010-09-15T04:44:23 = 04:44:23
=head2 Datetime Formats
=head3 Full
2008-02-05T18:30:30 = Lesibili, 2008 iNdlovana 05 18:30:30 UTC
1995-12-22T09:05:02 = Lesihlanu, 1995 iNgongoni 22 09:05:02 UTC
-0010-09-15T04:44:23 = uMgcibelo, -10 iNyoni 15 04:44:23 UTC
=head3 Long
2008-02-05T18:30:30 = 2008 iNdlovana 5 18:30:30 UTC
1995-12-22T09:05:02 = 1995 iNgongoni 22 09:05:02 UTC
-0010-09-15T04:44:23 = -10 iNyoni 15 04:44:23 UTC
=head3 Medium
2008-02-05T18:30:30 = 2008 Van 5 18:30:30
1995-12-22T09:05:02 = 1995 Ngo 22 09:05:02
-0010-09-15T04:44:23 = -10 Nyo 15 04:44:23
=head3 Short
2008-02-05T18:30:30 = 08/02/05 18:30
1995-12-22T09:05:02 = 95/12/22 09:05
-0010-09-15T04:44:23 = -10/09/15 04:44
=head3 Default
2008-02-05T18:30:30 = 2008 Van 5 18:30:30
1995-12-22T09:05:02 = 1995 Ngo 22 09:05:02
-0010-09-15T04:44:23 = -10 Nyo 15 04:44:23
=head2 Available Formats
=head3 d (d)
2008-02-05T18:30:30 = 5
1995-12-22T09:05:02 = 22
-0010-09-15T04:44:23 = 15
=head3 EEEd (d EEE)
2008-02-05T18:30:30 = 5 Bil
1995-12-22T09:05:02 = 22 Hla
-0010-09-15T04:44:23 = 15 Mgc
=head3 Hm (H:mm)
2008-02-05T18:30:30 = 18:30
1995-12-22T09:05:02 = 9:05
-0010-09-15T04:44:23 = 4:44
=head3 hm (h:mm a)
2008-02-05T18:30:30 = 6:30 PM
1995-12-22T09:05:02 = 9:05 AM
-0010-09-15T04:44:23 = 4:44 AM
=head3 Hms (H:mm:ss)
2008-02-05T18:30:30 = 18:30:30
1995-12-22T09:05:02 = 9:05:02
-0010-09-15T04:44:23 = 4:44:23
=head3 hms (h:mm:ss a)
2008-02-05T18:30:30 = 6:30:30 PM
1995-12-22T09:05:02 = 9:05:02 AM
-0010-09-15T04:44:23 = 4:44:23 AM
=head3 M (L)
2008-02-05T18:30:30 = 2
1995-12-22T09:05:02 = 12
-0010-09-15T04:44:23 = 9
=head3 Md (M-d)
2008-02-05T18:30:30 = 2-5
1995-12-22T09:05:02 = 12-22
-0010-09-15T04:44:23 = 9-15
=head3 MEd (E, M-d)
2008-02-05T18:30:30 = Bil, 2-5
1995-12-22T09:05:02 = Hla, 12-22
-0010-09-15T04:44:23 = Mgc, 9-15
=head3 MMM (LLL)
2008-02-05T18:30:30 = Van
1995-12-22T09:05:02 = Ngo
-0010-09-15T04:44:23 = Nyo
=head3 MMMd (MMM d)
2008-02-05T18:30:30 = Van 5
1995-12-22T09:05:02 = Ngo 22
-0010-09-15T04:44:23 = Nyo 15
=head3 MMMEd (E MMM d)
2008-02-05T18:30:30 = Bil Van 5
1995-12-22T09:05:02 = Hla Ngo 22
-0010-09-15T04:44:23 = Mgc Nyo 15
=head3 MMMMd (MMMM d)
2008-02-05T18:30:30 = iNdlovana 5
1995-12-22T09:05:02 = iNgongoni 22
-0010-09-15T04:44:23 = iNyoni 15
=head3 MMMMEd (E MMMM d)
2008-02-05T18:30:30 = Bil iNdlovana 5
1995-12-22T09:05:02 = Hla iNgongoni 22
-0010-09-15T04:44:23 = Mgc iNyoni 15
=head3 ms (mm:ss)
2008-02-05T18:30:30 = 30:30
1995-12-22T09:05:02 = 05:02
-0010-09-15T04:44:23 = 44:23
=head3 y (y)
2008-02-05T18:30:30 = 2008
1995-12-22T09:05:02 = 1995
-0010-09-15T04:44:23 = -10
=head3 yM (y-M)
2008-02-05T18:30:30 = 2008-2
1995-12-22T09:05:02 = 1995-12
-0010-09-15T04:44:23 = -10-9
=head3 yMEd (EEE, y-M-d)
2008-02-05T18:30:30 = Bil, 2008-2-5
1995-12-22T09:05:02 = Hla, 1995-12-22
-0010-09-15T04:44:23 = Mgc, -10-9-15
=head3 yMMM (y MMM)
2008-02-05T18:30:30 = 2008 Van
1995-12-22T09:05:02 = 1995 Ngo
-0010-09-15T04:44:23 = -10 Nyo
=head3 yMMMEd (EEE, y MMM d)
2008-02-05T18:30:30 = Bil, 2008 Van 5
1995-12-22T09:05:02 = Hla, 1995 Ngo 22
-0010-09-15T04:44:23 = Mgc, -10 Nyo 15
=head3 yMMMM (y MMMM)
2008-02-05T18:30:30 = 2008 iNdlovana
1995-12-22T09:05:02 = 1995 iNgongoni
-0010-09-15T04:44:23 = -10 iNyoni
=head3 yQ (y Q)
2008-02-05T18:30:30 = 2008 1
1995-12-22T09:05:02 = 1995 4
-0010-09-15T04:44:23 = -10 3
=head3 yQQQ (y QQQ)
2008-02-05T18:30:30 = 2008 Q1
1995-12-22T09:05:02 = 1995 Q4
-0010-09-15T04:44:23 = -10 Q3
=head3 yyQ (Q yy)
2008-02-05T18:30:30 = 1 08
1995-12-22T09:05:02 = 4 95
-0010-09-15T04:44:23 = 3 -10
=head2 Miscellaneous
=head3 Prefers 24 hour time?
Yes
=head3 Local first day of the week
uMsombuluko
=head1 SUPPORT
See L<DateTime::Locale>.
=head1 AUTHOR
Dave Rolsky <autarch@urth.org>
=head1 COPYRIGHT
Copyright (c) 2008 David Rolsky. All rights reserved. This program is
free software; you can redistribute it and/or modify it under the same
terms as Perl itself.
This module was generated from data provided by the CLDR project, see
the LICENSE.cldr in this distribution for details on the CLDR data's
license.
=cut
| Dokaponteam/ITF_Project | xampp/perl/vendor/lib/DateTime/Locale/ss.pm | Perl | mit | 11,769 |
#!/usr/bin/env perl
#
# Copyright 2015 Comcast Cable Communications Management, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
use Mojo::Base -strict;
use Schema;
use Test::IntegrationTestHelper;
use strict;
use warnings;
use Data::Dumper;
# Boot the TrafficOps app under Test::Mojo so its route table is populated.
my $t = Test::Mojo->new('TrafficOps');
#print "t: " . Dumper( $t->ua->server->app->routes->children->[0]->{pattern} );
# Walk the Mojolicious route tree and print one aligned line per fully
# defined route: "METHOD /path[.format]    Package::Controller->action".
foreach my $i ( $t->ua->server->app->routes->children ) {
foreach my $j (@$i) {
# Pull the route components out of the Mojolicious route objects.
# NOTE(review): this reaches into private hash internals ({via},
# {pattern}{defaults}, ...) -- fragile across Mojolicious versions.
my $method = $j->{via}->[0]; #GET/POST
my $path = $j->{pattern}{pattern}; #/url
my $package = $j->{pattern}{defaults}{namespace}; # UI/API
my $format = $j->{pattern}{constraints}{format}[0];
my $controller = $j->{pattern}{defaults}{controller};
my $action = $j->{pattern}{defaults}{action};
# Only print routes that have every component defined.
if ( defined($package) && defined($method) && defined($action) && defined($path) && defined($controller) ) {
#print "$method\t$path \t\t\t\t{:action =>$action, :package =>$package, :controller=>$controller} \n";
# Pad "METHOD /path" to a fixed width so the package column lines up.
my $max_length = 80;
my $method_and_path = sprintf( "%-6s %s", $method, $path );
if ( defined($format) ) {
$method_and_path = $method_and_path . "." . $format;
}
my $method_and_path_length = length($method_and_path);
my $spacing = ' ' x ( $max_length - $method_and_path_length );
my $fully_qualified_package = $package . "::" . $controller . "->" . $action;
my $line = sprintf( "%s %s %s\n", $method_and_path, $spacing, $fully_qualified_package );
print($line);
#printf( "%s\n", '-' x length($line) );
#printf( "%-5s %-40s {:action => %s, :package=> %s, :controller=> %s}\n", $method, $path, $action, $package, $controller );
}
#print "j: " . Dumper( $j->{pattern}{pattern} );
}
}
#print "t: " . Dumper( $t->ua->server->app->routes->children->[0]->pattern );
| petrocc/traffic_control | traffic_ops/app/bin/routes.pl | Perl | apache-2.0 | 2,378 |
use strict;
use DBI;
# Month tag selecting which SRAmetadb snapshot to query.
my $mon = "Nov_2018";
print STDERR "Using $mon for the SQLite database\n";
my $dbfile = "/home3/redwards/SRA/SRAdb/$mon/SRAmetadb.sqlite";
unless (-e $dbfile) {
	die "Can not find SQLite file $dbfile";
}
my $dbh = DBI->connect("dbi:SQLite:dbname=$dbfile","","");

# Query the database in batches of $n accessions to keep the IN () list small.
my $n=250;
my $inputfile = shift || die "List of SRR IDs, one per line";
my @SRR;
open(my $in, '<', $inputfile) || die "$! $inputfile";
my %aruns;
while (<$in>) {
	chomp;
	push @SRR, $_;
	# BUG FIX: the original tested "if ($#SRR = $n)" -- an assignment, not
	# a comparison -- which was always true AND resized @SRR to $n+1
	# elements, padding it with undefs on every input line.
	if (@SRR == $n) {
		my %runs = sql_extract(@SRR);
		push @{$aruns{$_}}, @{$runs{$_}} for keys %runs;
		undef @SRR;
	}
}
close($in);

# Flush the final, possibly partial, batch.
if (@SRR) {
	my %runs = sql_extract(@SRR);
	push @{$aruns{$_}}, @{$runs{$_}} for keys %runs;
}

foreach my $t (keys %aruns) {
	print "$t\t", join(",", @{$aruns{$t}}), "\n";
}

# Map a list of run accessions to study/experiment metadata.
# Returns a hash: "experiment\tstudy\ttitle\tabstract" => \@run_accessions.
# (The empty "()" prototype was removed: this sub takes a list.)
sub sql_extract {
	# NOTE(review): accessions are interpolated straight into the SQL; fine
	# for SRR-style ids, but bind placeholders would be safer in general.
	my $q = join('", "', @_);
	$q = '"' . $q . '"';
	my $exc = $dbh->prepare("select r.run_accession, e.experiment_accession, s.study_accession, s.study_title, s.study_abstract from study s inner join experiment e on s.study_accession = e.study_accession inner join run r on e.experiment_accession = r.experiment_accession where r.run_accession in ($q);");
	$exc->execute || die $dbh->errstr;
	my %runs;
	while (my @r = $exc->fetchrow_array()) {
		my $run = shift @r;
		my $t=join("\t", @r);
		push @{$runs{$t}}, $run;
	}
	return %runs;
}
| linsalrob/EdwardsLab | sra/runs_to_abstracts.pl | Perl | mit | 1,325 |
# This file is auto-generated by the Perl DateTime Suite time zone
# code generator (0.07) This code generator comes with the
# DateTime::TimeZone module distribution in the tools/ directory
#
# Generated from debian/tzdata/australasia. Olson data version 2008c
#
# Do not edit this file directly.
#
package DateTime::TimeZone::Pacific::Palau;
use strict;
use Class::Singleton;
use DateTime::TimeZone;
use DateTime::TimeZone::OlsonDB;
@DateTime::TimeZone::Pacific::Palau::ISA = ( 'Class::Singleton', 'DateTime::TimeZone' );
# Observance spans for Pacific/Palau, each of the form:
#   [ utc_start, utc_end, local_start, local_end, offset_seconds, is_dst, abbreviation ]
# There are only two: local mean time (LMT, +08:57:56) up to the 1901
# transition, then fixed +09:00 (PWT) forever after.
my $spans =
[
[
DateTime::TimeZone::NEG_INFINITY,
59958198124,
DateTime::TimeZone::NEG_INFINITY,
59958230400,
32276,
0,
'LMT'
],
[
59958198124,
DateTime::TimeZone::INFINITY,
59958230524,
DateTime::TimeZone::INFINITY,
32400,
0,
'PWT'
],
];
# Version of the Olson (tz) database this data was generated from.
sub olson_version { '2008c' }
# No DST transitions exist in this zone's data.
sub has_dst_changes { 0 }
sub _max_year { 2018 }
# Class::Singleton hook: build the singleton with the span table above.
sub _new_instance
{
return shift->_init( @_, spans => $spans );
}
1;
| carlgao/lenga | images/lenny64-peon/usr/share/perl5/DateTime/TimeZone/Pacific/Palau.pm | Perl | mit | 945 |
# toExMS(M)cli.pl
#
# Arranges information in Mascot-exported CVS files to that necessary for
# ExMS. Remember to export to .xlsx afterwards! Made to work with Mascot as run by the IBBR as of 2014-07-18.
# Mascot claims last update 2010-03-30.
#
###############################################################################
# Required Export options:
# (If it isn't listed, this doesn't care about it.)
#
# Protein Hit Information (*)
#
# Peptide Match Information (*)
# Experimental Charge (*)
# Calculated Mr (Da) (*)
# Mass error (Da) (*)
# Start (*)
# End (*)
# Score (*)
# Sequence (*)
# Query title (*)
#
# Sample line follows:
# prot_hit_num,prot_acc,pep_query,pep_rank,pep_isbold,pep_isunique,pep_exp_mz,pep_exp_z,pep_calc_mr,pep_delta,pep_start,pep_end,pep_score,pep_res_before,pep_seq,pep_res_after,pep_scan_title
# 1,NLGT,581,1,1,1,520.2807,1,519.2727,0.0007,130,134,31.09,A,SVVCL,L,File:06262014-Fab-MSMS-1.mzXML Scans:801 RT:4.6696min Charge:1+ Fragmentation:cid
###############################################################################
use strict;
use warnings;

# Diagnostic log.  BUG FIX: the original commented out this open() but kept
# printing to $DEBUG, which cannot even compile under "use strict".
open (my $DEBUG, '>', "log.txt") or die "Could not open log.txt: $!\n";

my $infilename = "";
my $outfilename = "";

# Prompt for the Mascot-exported CSV to convert.
print "Please enter the input file:\t";
$infilename = <STDIN>;
chomp $infilename;
print $DEBUG "Input file is $infilename.\n";

if (-e $infilename && -r $infilename)
{
	# BUG FIX: on failure the original merely logged and then read from the
	# unopened handle anyway; treat an unopenable input file as fatal.
	open (my $ifh, '<', $infilename) or die "Could not open input file $infilename.\n";

	# Ask for the output file, defaulting to out.csv.
	print "Enter desired output filename. If none is entered, file will be 'out.csv'.\t";
	$outfilename = <STDIN>;
	chomp $outfilename;
	if ($outfilename eq "") {$outfilename = "out.csv";}
	print $DEBUG "Output file will be $outfilename.\n";

	if (open (my $ofh, '>', $outfilename))
	{
		if (!-w $outfilename) {die "Output file $outfilename is not writable.\n";}

		my (@AoR, $RT, @row);
		my $mode = 0;	# 0: waiting for the header row, 1: inside the CSV body

		# Column indices resolved from the header row; -1 means "not seen".
		my $hit = -1;
		my $prot = -1;
		my $mz = -1;
		my $z = -1;
		my $mr = -1;
		my $delta = -1;
		my $score = -1;
		my $start = -1;
		my $end = -1;
		my $bef = -1;
		my $seq = -1;
		my $aft = -1;
		my $scan = -1;
		$RT = 0;

		while (<$ifh>)
		{
			if ($_ =~ /^prot_hit.*/ )
			{
				# Header row: locate each column we care about.
				$mode = 1;
				@row = split(/,/, $_);
				for (my $i = 0; $i < scalar @row; $i++)
				{
					if ($row[$i] eq "prot_hit_num") {$hit = $i;}
					elsif ($row[$i] eq "prot_acc") {$prot = $i;}
					elsif ($row[$i] eq "pep_exp_mz") {$mz = $i;}
					elsif ($row[$i] eq "pep_exp_z") {$z = $i;}
					elsif ($row[$i] eq "pep_calc_mr") {$mr = $i;}
					elsif ($row[$i] eq "pep_delta") {$delta = $i;}
					elsif ($row[$i] eq "pep_score") {$score = $i;}
					elsif ($row[$i] eq "pep_start") {$start = $i;}
					elsif ($row[$i] eq "pep_end") {$end = $i;}
					elsif ($row[$i] eq "pep_res_before") {$bef = $i;}
					elsif ($row[$i] eq "pep_seq") {$seq = $i;}
					elsif ($row[$i] eq "pep_res_after") {$aft = $i;}
					elsif ($row[$i] eq "pep_scan_title\n") {$scan = $i;}
					else {}
				}
				push @AoR, "prot_hit_num,prot_acc,pep_exp_mz,pep_exp_z,pep_calc_mr,pep_delta,pep_score,retention time,pep_res_before,pep_seq,pep_res_after,pep_scan_title\n";
			}
			elsif ($_ =~ /^\d.*/ && $mode == 1)
			{
				# Data row: pull the retention time out of the scan title
				# and emit the fields rearranged into ExMS column order.
				@row = split(/,/, $_);
				if ($row[$scan] =~ /.*RT:(\d+(?:\.\d+)).*/) {$RT = $1;}
				chomp($row[$scan]);
				push @AoR, join ("", (join (',', ($row[$hit], $row[$prot], $row[$mz], "", $row[$z], $row[$mr], $row[$delta], $row[$score], $RT, $row[$bef], $row[$seq], $row[$aft], $row[$scan], " ", " ", " ", " " . $row[$start] . "-" . $row[$end])), "\n"));
			}
			else
			{
				# Anything else ends the body section.
				$mode = 0;
			}
		}
		close $ifh;

		# BUG FIX: the original loop condition "$i < $#AoR" silently dropped
		# the final row from the output file; iterate over every element.
		foreach my $line (@AoR)
		{
			print $ofh $line;
		}
		close $ofh;
	}
	else {print $DEBUG "Could not open output file $outfilename.\n";}
}
else
{
	# BUG FIX: these tests were inverted (-e / -r instead of !-e / !-r),
	# so an existing file was reported as "does not exist".
	if (!-e $infilename) {die "Input file $infilename does not exist.\n";}
	elsif (!-r $infilename) {die "Input file $infilename is not readable.\n";}
	else {die "This should be unreachable.\n"}
	# (the closing brace of this else block is on the next original line)
} | dweber3/MascotCVSconversion | toExMS(M)cli.pl | Perl | mit | 4,384 |
#!/usr/bin/env perl
use warnings;
use strict;
#use utf8;
#use open qw/:std :utf8/;
#use Encode;
# Turn a two-digit hex escape ("%41" or bare "41") into its single byte.
sub binize
{
	my ($hex) = @_;
	$hex =~ s/\A%//;	# tolerate an optional leading percent sign
	return pack 'C', hex $hex;
}
# Decode every %XX escape in each command-line argument, then print the
# decoded name followed by a newline.
for my $arg (@ARGV) {
	$arg =~ s/%([[:xdigit:]]{2})/binize($1)/ge;
	print "$arg\n";
}
__DATA__
1%E5%88%86%E9%92%9F%E6%91%84%E5%BD%B1%E8%AF%8A%E6%89%80%EF%BC%9A%E5%BF%AB%E9%80%9F%E6%8E%8C%E6%8F%A1%E6%95%B0%E7%A0%81%E6%91%84%E5%BD%B1%E7%9A%84%E6%9B%9D%E5%85%89%E7%A7%98%E7%AC%88-%E5%B0%9A%E5%93%81%E5%B7%A5%E4%BD%9C%E5%AE%A4.epub
1分钟摄影诊所:皇冠网店必学商品拍摄技法-尚品工作室.epub
%E4%B8%89%E5%9B%BD%E5%BF%97-%28%E8%A5%BF%E6%99%8B%29%E9%99%88%E5%AF%BF.epub
%E5%B0%8FS%E4%B9%8B%E6%80%80%E5%AD%95%E6%97%A5%E8%AE%B0-%E5%BE%90%E7%86%99%E5%A8%A3.epub
%E7%BB%A3%E5%83%8F%E7%89%88%E5%8F%A4%E5%85%B8%E5%90%8D%E8%91%97%E4%B8%9B%E4%B9%A6%EF%BC%9A%E4%B8%89%E5%9B%BD%E6%BC%94%E4%B9%89-%28%E6%98%8E%29%E7%BD%97%E8%B4%AF%E4%B8%AD.epub
%E7%BB%A3%E5%83%8F%E7%89%88%E5%8F%A4%E5%85%B8%E5%90%8D%E8%91%97%E4%B8%9B%E4%B9%A6%EF%BC%9A%E6%B0%B4%E6%B5%92%E4%BC%A0-%28%E6%98%8E%29%E6%96%BD%E8%80%90%E5%BA%B5.epub
%E7%BB%A3%E5%83%8F%E7%89%88%E5%8F%A4%E5%85%B8%E5%90%8D%E8%91%97%E4%B8%9B%E4%B9%A6%EF%BC%9A%E7%BA%A2%E6%A5%BC%E6%A2%A6-%28%E6%B8%85%29%E6%9B%B9%E9%9B%AA%E8%8A%B9%2C%28%E6%B8%85%29%E9%AB%98%E9%84%82.epub
%E7%BB%A3%E5%83%8F%E7%89%88%E5%8F%A4%E5%85%B8%E5%90%8D%E8%91%97%E4%B8%9B%E4%B9%A6%EF%BC%9A%E8%A5%BF%E6%B8%B8%E8%AE%B0-%28%E6%98%8E%29%E5%90%B4%E6%89%BF%E6%81%A9.epub
THEPRINCIPLESOFECONOMICS-ALFREDMARSHALL(1842-1924).epub
ThePrinciplesofPsychology-WilliamJames(1842-191).epub
中华姓氏大探源-李浩然.epub
傲慢与偏见-(英)夏洛蒂·勃朗特,段鸿欣.epub
唐诗三百首.epub
悟空传:完美纪念版-今何在.epub
悲惨世界-(法)雨果(Hugo.V.),李玉民.epub
藏地密码1-何马.epub
追随你的心-史蒂夫·乔布斯.epub
%5BComic%5D%5B封神演義%5D%5B藤崎竜%5D%5B東立%5D%5BC.C%5DVol_01.zip
| zeroxia/mystery | archive/2013/perl/unescape-html-entity.pl | Perl | mit | 1,926 |
package Tool;
use strict;
use warnings;
use FindBin '$Bin';
use Exporter;
use Exploits;
use Subs;
use Print;
use Banner;
use File::Basename;
## Copy@right Alisam Technology see License.txt
# BUG FIX: @ISA and @EXPORT_OK were declared with "my", making them lexical
# and therefore invisible to Exporter -- inheritance and exporting silently
# did nothing.  They must be package ("our") variables.
our @ISA = qw(Exporter);
our @EXPORT_OK = qw(tool);
my $scriptv = Subs::scriptv();
my @c = Print::colors();
my @OTHERS = Exploits::OTHERS();
my @TT = Exploits::TT;
my $version = Banner::version();

## INFO TOOL
# Print a banner describing the tool: name, version, script path, the Perl
# interpreter version and the host OS/architecture, followed by a divider.
sub tool {
	use Config;
	print $c[1]." [::] $TT[0]:: ".$c[3]."$TT[5]\n".$c[1]." [::] $TT[1]:: ".$c[3]."$TT[6] [V $version]\n".$c[1]." [::] $TT[2]:: ".$c[3].$Bin."/",basename($0)," \n";
	print $c[1]." [::] $TT[3]:: ".$c[3]."[$^V]\n".$c[1]." [::] $TT[4]:: ".$c[3]."[$Config{ osname} $Config{ archname}]\n".$c[1]." [::] MORE:: ".$c[3]."$OTHERS[21]\n";
	print $c[3]." $OTHERS[22]\n";
	my $sn3="-" x 80;
	print $c[1]."$sn3\n";
}
1;
1; | AlisamTechnology/ATSCAN | inc/lib/lib/Tool.pm | Perl | mit | 927 |
#!/usr/bin/perl -w
#
use strict;
use warnings;
# Round-trip check: replace ':' and '?' with placeholder tokens, printing
# each intermediate result, then restore the original characters again.
my $name = "Wachst:uchschmel?zen.odt";
for my $step ( [ qr/:/, 'xDpX' ], [ qr/\?/, 'xFzX' ], [ qr/xDpX/, ':' ], [ qr/xFzX/, '?' ] ) {
	my ($pattern, $replacement) = @$step;
	$name =~ s/$pattern/$replacement/g;
	print "$name\n";
}
| GuKKDevel/TestRepository | AnwendungenPerl/test1.pl | Perl | cc0-1.0 | 307 |
#!/usr/local/ensembl/bin/perl -w
use strict;
use Bio::EnsEMBL::Compara::DBSQL::DBAdaptor;
use Bio::EnsEMBL::Compara::GenomicAlign;
use Bio::EnsEMBL::Compara::DnaFrag;
use Bio::EnsEMBL::DnaDnaAlignFeature;
use Getopt::Long;
my $usage = "\nUsage: $0 [options] axtFile|STDIN
Insert into a compara database axt alignments
$0 -host ecs2d.internal.sanger.ac.uk -dbuser ensadmin -dbpass xxxx -port 3352 -dbname ensembl_compara_12_1 \
-conf_file /nfs/acari/abel/src/ensembl_main/ensembl-compara/modules/Bio/EnsEMBL/Compara/Compara.conf -alignment_type BLASTZ_NET -cs_genome_db_id 1 -qy_genome_db_id 2 -axt axt_file
Options:
-host host for compara database
-dbname compara database name
-dbuser username for connection to \"compara_dbname\"
-pass passwd for connection to \"compara_dbname\"
-port 3352
-cs_genome_db_id genome_db_id of the consensus species (e.g. 1 for Homo_sapiens)
-qy_genome_db_id genome_db_id of the query species (e.g. 2 for Mus_musculus)
-alignment_type type of alignment stored e.g. PHUSION_BLASTN_TIGHT(default: BLASTZ_NET_TIGHT)
-conf_file compara conf file
-min_score 300
-axt axt_file
\n";
my $help = 0;
my ($host, $dbname, $dbuser, $pass, $port);
my ($cs_genome_db_id, $qy_genome_db_id,$conf_file);
my $axt_file;
my $min_score = 0;
my $alignment_type = 'BLASTZ_NET_TIGHT';
GetOptions('h' => \$help,
'host=s' => \$host,
'dbname=s' => \$dbname,
'dbuser=s' => \$dbuser,
'pass=s' => \$pass,
'port=s' => \$port,
'cs_genome_db_id=s' => \$cs_genome_db_id,
'qy_genome_db_id=s' => \$qy_genome_db_id,
'alignment_type=s' => \$alignment_type,
'min_score=i' => \$min_score,
'conf_file=s' => \$conf_file,
'axt=s' => \$axt_file);
if ($help) {
print $usage;
exit 0;
}
$| = 1;
unless (defined $host &&
defined $dbname &&
defined $dbuser &&
defined $pass &&
defined $cs_genome_db_id &&
defined $qy_genome_db_id &&
defined $conf_file &&
defined $axt_file) {
print "
!!! IMPORTANT : All following parameters should be defined !!!
host
dbname
dbuser
pass
cs_genome_db_id
qy_genome_db_id
conf_file
axt
";
print $usage;
exit 0;
}
# --- Database connections -------------------------------------------------
# Connect to the compara database described by $conf_file.
my $db = new Bio::EnsEMBL::Compara::DBSQL::DBAdaptor ('-conf_file' => $conf_file,
'-host' => $host,
'-user' => $dbuser,
'-dbname' => $dbname,
'-port' => $port,
'-pass' => $pass);
# List the genomes known to this compara configuration (informational only).
foreach my $key (keys %{$db->{'genomes'}}) {
print $key,"\n";
}
# Remember any previously stored max_alignment_length so the meta table is
# only updated at the end if the new alignments exceed it.
my $stored_max_alignment_length;
my $values = $db->get_MetaContainer->list_value_by_key("max_alignment_length");#metacontainer=trick to speed up positn in chr
if(@$values) {
$stored_max_alignment_length = $values->[0];
}
my $gdb_adaptor = $db->get_GenomeDBAdaptor;
# Consensus (reference) and query genome descriptors.
my $cs_genome_db = $gdb_adaptor->fetch_by_dbID($cs_genome_db_id);
my $qy_genome_db= $gdb_adaptor->fetch_by_dbID($qy_genome_db_id);
my @genomicaligns;
my $dnafrag_adaptor = $db->get_DnaFragAdaptor;
my $galn_adaptor = $db->get_GenomicAlignAdaptor;
# Index the consensus species' toplevel slices by seq-region name so that
# chromosome names from the axt file can be validated and their lengths
# looked up quickly.
my $cs_dbadaptor= $db->get_db_adaptor($cs_genome_db->name,$cs_genome_db->assembly);
my @cs_chromosomes = @{$cs_dbadaptor->get_SliceAdaptor->fetch_all('toplevel')};
my %cs_chromosomes;
foreach my $chr (@cs_chromosomes) {
$cs_chromosomes{$chr->seq_region_name} = $chr;
}
# Same lookup table for the query species.
my $qy_dbadaptor= $db->get_db_adaptor($qy_genome_db->name,$qy_genome_db->assembly);
my @qy_chromosomes = @{$qy_dbadaptor->get_SliceAdaptor->fetch_all('toplevel')};
my %qy_chromosomes;
foreach my $chr (@qy_chromosomes) {
$qy_chromosomes{$chr->seq_region_name} = $chr;
#print STDERR $chr->seq_region_name."\n";
}
# Updating method_link_species if needed (maybe put that in GenomicAlignAdaptor store method)
# Resolve the alignment type to its method_link_id; it must already exist.
my $sth_method_link = $db->prepare("SELECT method_link_id FROM method_link WHERE type = ?");
$sth_method_link->execute($alignment_type);
my ($method_link_id) = $sth_method_link->fetchrow_array();
unless (defined $method_link_id) {
warn "There is no type $alignment_type in the method_link table of compara db.
EXIT 1";
exit 1;
}
# Check whether a method_link_species pair for these two genomes and this
# method_link already shares a species_set.
my $sth_method_link_species = $db->prepare("
SELECT ml.method_link_id
FROM method_link_species mls1, method_link_species mls2, method_link ml
WHERE mls1.method_link_id = ml.method_link_id AND
mls2.method_link_id = ml.method_link_id AND
mls1.genome_db_id = ? AND
mls2.genome_db_id = ? AND
mls1.species_set = mls2.species_set AND
ml.method_link_id = ?");
$sth_method_link_species->execute($cs_genome_db_id,$qy_genome_db_id,$method_link_id);
my ($already_stored) = $sth_method_link_species->fetchrow_array();
unless (defined $already_stored) {
# Allocate a fresh species_set id (max + 1) and insert both genomes.
$sth_method_link_species = $db->prepare("SELECT max(species_set) FROM method_link_species where method_link_id = ?");
$sth_method_link_species->execute($method_link_id);
my ($max_species_set) = $sth_method_link_species->fetchrow_array();
$max_species_set = 0 unless (defined $max_species_set);
$sth_method_link_species = $db->prepare("INSERT INTO method_link_species (method_link_id,species_set,genome_db_id) VALUES (?,?,?)");
$sth_method_link_species->execute($method_link_id,$max_species_set + 1,$cs_genome_db_id);
$sth_method_link_species->execute($method_link_id,$max_species_set + 1,$qy_genome_db_id);
}
# Updating genomic_align_genome if needed (maybe put that in GenomicAlignAdaptor store method)
my $sth_genomic_align_genome = $db->prepare("SELECT method_link_id FROM genomic_align_genome WHERE consensus_genome_db_id = ? AND query_genome_db_id = ? AND method_link_id = ?");
$sth_genomic_align_genome->execute($cs_genome_db_id,$qy_genome_db_id,$method_link_id);
($already_stored) = $sth_genomic_align_genome->fetchrow_array();
unless (defined $already_stored) {
$sth_genomic_align_genome = $db->prepare("INSERT INTO genomic_align_genome (consensus_genome_db_id,query_genome_db_id,method_link_id) VALUES (?,?,?)");
$sth_genomic_align_genome->execute($cs_genome_db_id,$qy_genome_db_id,$method_link_id);
}
# State shared by the axt parsing loop below.
my $max_alignment_length = 0;
my ($axt_number,$ref_chr,$ref_start,$ref_end,$qy_chr,$qy_start,$qy_end,$qy_strand,$score);
my ($ref_seq,$qy_seq);
my @DnaDnaAlignFeatures;
my %repeated_alignment;
# Open the axt file, decompressing on the fly if it is gzipped.
# BUGFIX: the original used '|| die', but '||' binds to the filename
# string (always true), so a failed open was never detected. The
# low-precedence 'or' correctly checks open()'s return value. The
# extension test is also anchored so only names *ending* in .gz are
# piped through gunzip.
if ($axt_file =~ /\.gz$/) {
open AXT, "gunzip -c $axt_file|"
or die "Can not open $axt_file: $!";
} else {
open AXT, $axt_file
or die "Can not open $axt_file: $!";
}
print STDERR "Reading axt alignments in progress...\n";
# Each axt record consists of: a header line, the reference sequence line,
# the query sequence line, and a blank separator line. The header regex
# captures: alignment number, ref chr, ref start/end, query chr, query
# start/end, query strand (+/-) and score.
while (my $line = <AXT>) {
if ($line =~ /^(\d+)\s+(\S+)\s+(\d+)\s+(\d+)\s+(\S+)\s+(\d+)\s+(\d+)\s+([\+\-])\s+(\-?\d+)$/) {
($axt_number,$ref_chr,$ref_start,$ref_end,$qy_chr,$qy_start,$qy_end,$qy_strand,$score) = ($1,$2,$3,$4,$5,$6,$7,$8,$9);
# Score below threshold: consume the rest of this record (up to the
# blank separator line) and move on to the next record.
if ($score < $min_score) {
print STDERR "Score $score below min score $min_score: $line";
while ($line =<AXT>) {
last if ($line =~ /^$/);
}
next;
}
# Normalise UCSC-style names ('chr1' -> '1') to match Ensembl names.
$ref_chr =~ s/chr//;
$qy_chr =~ s/chr//;
# Skip records on chromosomes unknown to either core database,
# consuming the remainder of the record each time.
unless (defined $cs_chromosomes{$ref_chr}) {
print STDERR "ref $ref_chr chromosome not in core: $line";
while ($line =<AXT>) {
last if ($line =~ /^$/);
}
next;
}
unless (defined $qy_chromosomes{$qy_chr}) {
print STDERR "qy $qy_chr chromosome not in core: $line";
while ($line =<AXT>) {
last if ($line =~ /^$/);
}
next;
}
# Drop exact duplicates of coordinate pairs already seen.
if (defined $repeated_alignment{$ref_chr."_".$ref_start."_".$ref_end."_".$qy_chr."_".$qy_start."_".$qy_end}) {
print STDERR "Repeated alignment: $line";
while ($line =<AXT>) {
last if ($line =~ /^$/);
}
next;
}
$repeated_alignment{$ref_chr."_".$ref_start."_".$ref_end."_".$qy_chr."_".$qy_start."_".$qy_end} = 1;
if ($qy_strand eq "+") {
$qy_strand = 1;
}
# Minus-strand axt coordinates refer to the reverse complement; map them
# back to forward-strand coordinates using the chromosome length.
if ($qy_strand eq "-") {
$qy_strand = -1;
my $length = $qy_end - $qy_start;
$qy_start = $qy_chromosomes{$qy_chr}->length - $qy_end + 1;
$qy_end = $qy_start + $length;
}
}
# Sequence lines: the first one after a header is the reference, the
# second is the query ($ref_seq stays undef until the first is seen).
if ($line =~ /^[a-zA-Z-]+$/ && defined $ref_seq) {
chomp $line;
$qy_seq = $line;
unless ($qy_seq =~ /^[acgtnACGTN-]+$/) {
warn "qy_seq not acgtn only in axt_number $axt_number\n";
}
} elsif ($line =~ /^[a-zA-Z-]+$/) {
chomp $line;
$ref_seq = $line;
unless ($ref_seq =~ /^[acgtnACGTN-]+$/) {
warn "ref_seq not acgtn only in axt_number $axt_number\n";
}
}
# A blank line terminates the record: build the alignment feature from
# the accumulated state and reset the sequence buffers.
if ($line =~ /^$/) {
my $identity = identity($ref_seq,$qy_seq);
my $cigar_string = cigar_gen($ref_seq,$qy_seq);
my $f = new Bio::EnsEMBL::DnaDnaAlignFeature(-cigar_string => $cigar_string);
$f->seqname($ref_chr);
$f->start($ref_start);
$f->end($ref_end);
$f->strand(1);
$f->hseqname($qy_chr);
$f->hstart($qy_start);
$f->hend($qy_end);
$f->hstrand($qy_strand);
$f->score($score);
$f->percent_id($identity);
push @DnaDnaAlignFeatures,$f ;
undef $ref_seq;
undef $qy_seq;
}
}
close AXT;
print STDERR "Preparing data for storage for ". scalar @DnaDnaAlignFeatures . " features...\n";
foreach my $f (@DnaDnaAlignFeatures) {
my ($cs_chr,$cs_start,$cs_end,$qy_chr,$qy_start,$qy_end,$qy_strand,$score,$percid,$cigar) = ($f->seqname,$f->start,$f->end,$f->hseqname,$f->hstart,$f->hend,$f->hstrand,$f->score,$f->percent_id,$f->cigar_string);
my $cs_max_alignment_length = $cs_end - $cs_start + 1;
$max_alignment_length = $cs_max_alignment_length if ($max_alignment_length < $cs_max_alignment_length);
my $qy_max_alignment_length = $qy_end - $qy_start + 1;
$max_alignment_length = $qy_max_alignment_length if ($max_alignment_length < $qy_max_alignment_length);
my $cs_dnafrag = new Bio::EnsEMBL::Compara::DnaFrag;
$cs_dnafrag->name($cs_chr);
$cs_dnafrag->genomedb($cs_genome_db);
$cs_dnafrag->type($cs_chromosomes{$cs_chr}->coord_system->name());
$cs_dnafrag->start(1);
$cs_dnafrag->end($cs_chromosomes{$cs_chr}->length);
$dnafrag_adaptor->store_if_needed($cs_dnafrag);
my $qy_dnafrag = new Bio::EnsEMBL::Compara::DnaFrag;
$qy_dnafrag->name($qy_chr);
$qy_dnafrag->genomedb($qy_genome_db);
$qy_dnafrag->type($qy_chromosomes{$qy_chr}->coord_system->name());
$qy_dnafrag->start(1);
$qy_dnafrag->end($qy_chromosomes{$qy_chr}->length);
$dnafrag_adaptor->store_if_needed($qy_dnafrag);
my $genomic_align = new Bio::EnsEMBL::Compara::GenomicAlign;
$genomic_align->consensus_dnafrag($cs_dnafrag);
$genomic_align->consensus_start($cs_start);
$genomic_align->consensus_end($cs_end);
$genomic_align->query_dnafrag($qy_dnafrag);
$genomic_align->query_start($qy_start);
$genomic_align->query_end($qy_end);
$genomic_align->query_strand($qy_strand);
$genomic_align->alignment_type($alignment_type);
$genomic_align->score($score);
$percid = 0 unless (defined $percid);
$genomic_align->perc_id($percid);
$genomic_align->cigar_line($cigar);
$genomic_align->group_id(0);
$genomic_align->level_id(0);
$genomic_align->strands_reversed(0);
$galn_adaptor->store([$genomic_align]);#from genomic align adaptor
# think here to revert cigar_string if strand==-1 !!
}
if (! defined $stored_max_alignment_length) {
$db->get_MetaContainer->store_key_value("max_alignment_length",$max_alignment_length + 1);
} elsif ($stored_max_alignment_length < $max_alignment_length + 1) {
$db->get_MetaContainer->update_key_value("max_alignment_length",$max_alignment_length + 1);
}
print STDERR "Done\n";
# identity($ref_seq, $qy_seq)
#
# Returns the integer percent identity (0-100) between two aligned,
# gapped sequence strings of equal length. Comparison is column by
# column and case-insensitive; a '-' aligned to a '-' counts as a match
# (same as the original behaviour). Exits the program if the lengths
# differ. Returns 0 for empty input (the original divided by zero).
sub identity {
my ($ref_seq,$qy_seq) = @_;
my $length = length($ref_seq);
unless (length($qy_seq) == $length) {
# BUGFIX: the original warn text had "exit 1" accidentally embedded
# inside the message string.
warn "reference sequence length ($length bp) and query sequence length (".length($qy_seq)." bp) should be identical\n";
exit 1;
}
# Guard against division by zero on empty sequences.
return 0 if $length == 0;
my @ref_seq_array = split //, $ref_seq;
my @qy_seq_array = split //, $qy_seq;
my $number_identity = 0;
for (my $i=0;$i<$length;$i++) {
if (lc $ref_seq_array[$i] eq lc $qy_seq_array[$i]) {
$number_identity++;
}
}
return int($number_identity/$length*100);
}
# cigar_gen($query_seq, $subject_seq)
#
# Builds a CIGAR-style run-length string from two aligned, gapped
# sequences: 'D' where the query has a gap, 'I' where the subject has a
# gap, 'M' otherwise. Consecutive identical operations are collapsed,
# with counts of 1 omitted (e.g. "2MIM" rather than "2M1I1M").
# Returns the empty string for empty input (the original emitted an
# uninitialised-value warning and produced garbage in that case).
sub cigar_gen {
my ($q,$s) = @_;
my @q = split //,$q;
my @s = split //,$s;
# Per-column operation list.
my @ops;
for my $i (0 .. $#q) {
if ($q[$i] eq "-") {
push @ops,"D";
} elsif ($s[$i] eq "-") {
push @ops,"I";
} else {
push @ops,"M";
}
}
# Run-length encode the operation list.
my $cigar = "";
my $i = 0;
while ($i <= $#ops) {
my $j = $i;
$j++ while ($j < $#ops && $ops[$j + 1] eq $ops[$i]);
my $run_length = $j - $i + 1;
$cigar .= ($run_length > 1 ? $run_length : "") . $ops[$i];
$i = $j + 1;
}
return $cigar;
}
| adamsardar/perl-libs-custom | EnsemblAPI/ensembl-compara/scripts/dumps/LoadAxtAlignments.pl | Perl | apache-2.0 | 12,569 |
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package hardware::devices::cisco::ces::restapi::mode::certificates;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
use centreon::plugins::misc;
# Human-readable status text for the certificate-expire counter, using
# the pre-formatted remaining-time string computed in manage_selection().
sub custom_validity_output {
    my ($self, %options) = @_;

    my $remaining = $self->{result_values}->{generation_time};
    return "expires in $remaining";
}
sub set_counters {
    my ($self, %options) = @_;

    # One instanced counter group ('type => 1'): one instance per
    # certificate found on the device; -10 marks values that may be
    # missing and should be skipped silently.
    $self->{maps_counters_type} = [
        { name => 'certificates', type => 1, cb_prefix_output => 'prefix_certificate_output', message_multiple => 'All certificates are ok', skipped_code => { -10 => 1 } }
    ];
    # certificate-expire: seconds until notAfter (validity_time), with
    # generation_time carrying the same delta pre-formatted for humans.
    $self->{maps_counters}->{certificates} = [
        { label => 'certificate-expire', nlabel => 'system.certificate.expire.seconds', set => {
                key_values => [ { name => 'validity_time' }, { name => 'generation_time' } ],
                closure_custom_output => $self->can('custom_validity_output'),
                perfdatas => [
                    { template => '%d', min => 0, unit => 's' }
                ]
            }
        }
    ];
}
# Prefix printed before each per-certificate status line, naming the
# certificate by its subject (stored in the instance's 'display' field).
sub prefix_certificate_output {
    my ($self, %options) = @_;

    return sprintf("Certificate '%s' ", $options{instance_value}->{display});
}
sub new {
    my ($class, %options) = @_;
    # force_new_perfdata enables the new-style perfdata naming (nlabel).
    my $self = $class->SUPER::new(package => __PACKAGE__, %options, force_new_perfdata => 1);
    bless $self, $class;
    $self->{version} = '1.0';
    # No mode-specific options beyond the inherited threshold options.
    $options{options}->add_options(arguments => {
    });
    # Date::Parse is loaded at runtime so the plugin can emit a clean
    # error message if the module is not installed.
    centreon::plugins::misc::mymodule_load(
        output => $self->{output},
        module => 'Date::Parse',
        error_msg => "Cannot load module 'Date::Parse'."
    );
    return $self;
}
sub manage_selection {
    my ($self, %options) = @_;

    # Ask the codec for its service certificates via the XML command API.
    # ForceArray guarantees 'Details' is an array ref even when only one
    # certificate is returned.
    my $result = $options{custom}->request_api(
        method => 'POST',
        url_path => '/putxml',
        query_form_post => '<Command><Security><Certificates><Services><Show/></Services></Certificates></Security></Command>',
        ForceArray => ['Details']
    );
    $self->{certificates} = {};
    if (defined($result->{ServicesShowResult}->{Details})) {
        foreach (@{$result->{ServicesShowResult}->{Details}}) {
            # Convert the notAfter timestamp to epoch seconds; report (but
            # do not abort on) unparseable dates.
            my $end_date = Date::Parse::str2time($_->{notAfter});
            if (!defined($end_date)) {
                $self->{output}->output_add(
                    severity => 'UNKNOWN',
                    short_msg => "can't parse date '" . $_->{notAfter} . "'"
                );
                next;
            }
            # validity_time: seconds until expiry (negative if expired);
            # generation_time: same value, formatted for display.
            $self->{certificates}->{$_->{item}} = {
                display => $_->{SubjectName},
                validity_time => $end_date - time(),
                generation_time => centreon::plugins::misc::change_seconds(value => $end_date - time())
            };
        }
    }
    # An empty result set is treated as a configuration/usage error.
    if (scalar(keys %{$self->{certificates}}) <= 0) {
        $self->{output}->add_option_msg(short_msg => 'No certificate found.');
        $self->{output}->option_exit();
    }
}
1;
__END__
=head1 MODE
Check certificate validity (since CE 9.2)
=over 8
=item B<--warning-*> B<--critical-*>
Thresholds.
Can be: 'certificate-expire'.
=back
=cut
| centreon/centreon-plugins | hardware/devices/cisco/ces/restapi/mode/certificates.pm | Perl | apache-2.0 | 3,878 |
=head1 NAME
mod_perl 1.0 Win32 Installation Instructions
=head1 Description
This document discusses how to install mod_perl 1.0 under Win32, both
in building from sources and in installing pre-compiled binary
packages.
=head1 Synopsis
Unless you are using an all-in-one package, you should
first install Perl and Apache, either from the sources or as binaries.
The Perl sources are available from L<http://www.cpan.org/src/>,
with directions for building contained in F<README.win32>.
ActiveState also makes the sources available for their binary
builds at L<ftp://ftp.activestate.com/ActivePerl/src/>,
which may contain, in particular, Win32-specific fixes not in the
CPAN Perl sources. As a binary, at present, an ActivePerl-compatible
Perl, compiled with Visual C++, is the most common one used in the Win32
mod_perl/Apache environment; you can obtain such a prebuilt Perl
binary from L<http://www.activestate.com/>.
mod_perl 1 builds and tests successfully with either
an ActivePerl Perl in the 6xx series, based on perl-5.6.1,
or with an ActivePerl Perl in the 8xx series, based on perl-5.8.0
(for the latter, this requires mod_perl-1.29 or later).
If you are using perl-5.8,
you may want to consider mod_perl 2.0, which although still
in a development phase offers several significant performance
improvements for Win32 - see
L<modperl-2 in Win32|docs::2.0::os::win32::install>
for details.
The Apache sources and binaries are available at
L<http://httpd.apache.org/>.
When installing Perl or other related binaries, subtleties may arise
in using path names that have spaces in them - you may, for example,
have to specify F<C:\Program Files\> by the DOS 8.3 path name
F<C:\Progra~1\> in certain Apache directives. If you want to avoid
this, install, if possible, these packages to locations without spaces
in their names (eg, F<C:\Perl> for Perl and F<C:\Apache> for Apache).
In the following, it may be necessary to invoke certain commands
through a DOS prompt. A DOS window may be opened either through
a I<Command Prompt> option of the I<Start> menu, or by choosing
to run, from the Start menu, C<command> or C<cmd>, as appropriate.
=head1 Building from sources
You will need
=over
=item *
patience - mod_perl is considered alpha under Win32.
=item *
MSVC++ 5.0+, Apache version 1.3-dev or higher and Perl 5.004_02 or higher.
=item *
As of version 1.24_01, mod_perl will build on Win32 ActivePerls
based on Perl-5.6.x (builds 6xx). For ActivePerl builds 8xx,
you will need mod_perl-1.29 or later.
For binary compatibility you should use the same compiler in
building mod_perl that was used to compile your Perl binary;
for ActivePerl, this means using VC++ 6.
=back
First obtain the
L<mod_perl 1.0 sources|download::source/Stable_Source_Distribution>
as a C<tar.gz> file -
when unpacked, using Winzip or similar tools, a subdirectory
F<mod_perl-1.xx> will be created.
There are two ways to build mod_perl - with MS Developer Studio,
and through command-line arguments to 'perl Makefile.PL'. In both
cases Apache should previously have been built and installed - if
you are using a binary build of Apache, make sure that you obtain
a binary build that includes the Apache libraries and header files.
If you're building Apache yourself from sources, make sure to
obtain the I<win32-src.zip> archive, which has the necessary
VC++ makefiles.
=head2 Building with MS Developer Studio
=over 3
=item Setup the Perl side
Run, from a DOS window in the top-level directory of the
mod_perl sources,
C:\modperl_src> perl Makefile.PL
C:\modperl_src> nmake
This will set up the Perl side of mod_perl for the library build.
=item Build mod_perl.so
Using MS developer studio,
select "File -> Open Workspace ...",
select "Files of type [Projects (*.dsp)]"
open mod_perl-x.xx/src/modules/win32/mod_perl.dsp
=item Settings
select "Tools -> Options -> [Directories]"
select "Show directories for: [Include files]", and add
C:\Apache\include
. (should expand to C:\...\mod_perl-x.xx\src\modules\perl)
C:\Perl\lib\Core
select "Project -> Add to Project -> Files", adding:
perl.lib (or perl56.lib) (e.g. C:\perl\lib\Core\perl.lib)
ApacheCore.lib (e.g. C:\Apache\ApacheCore.lib)
select "Build -> Set Active Configuration -> [mod_perl - Win32 Release]"
select "Build -> Build mod_perl.so"
You may see some harmless warnings, which can be reduced (along with
the size of the DLL), by setting:
"Project -> Settings -> [C/C++] -> Category: [Code Generation] ->
Use runtime library: [Multithreaded DLL]"
As well, if you are using a mod_ssl enabled Apache, you
should add I<EAPI> to the list of preprocessor definitions under
"Project -> Settings -> [C/C++]"
=item Testing
Once mod_perl.so is built you may test mod_perl with:
C:\modperl_src> nmake test
after which, assuming the tests are OK,
C:\modperl_src> nmake install
will install the Perl side of mod_perl. The mod_perl.so file
built under F<mod_perl-1.xx/src/modules/win32/Release> should
be copied to your Apache modules directory (eg, F<C:\Apache\modules>).
=back
=head2 Building with Makefile.PL arguments
Generating the Makefile as, for example,
C:\modperl_src> perl Makefile.PL APACHE_SRC=\Apache
will build mod_perl (including mod_perl.so) entirely from
the command line. The arguments accepted include
=over 3
=item APACHE_SRC
This can be one of two values: either the path to the Apache build
directory (eg, F<..\apache_1.3.xx>), or to the installed Apache location
(eg, F<\Apache>). This is used to set the locations of ApacheCore.lib
and the Apache header files.
=item INSTALL_DLL
This gives the location of where to install mod_perl.so
(eg, F<\Apache\modules>). No default is assumed - if this argument
is not given, mod_perl.so must be copied manually (in
mod_perl-1.29 or later, INSTALL_DLL, if not supplied,
will assume a default of F<APACHE_SRC/modules>, if this directory
exists).
=item INSTALL_LIB
This option, which is available only in mod_perl-1.29
or later, gives the location of where to install
mod_perl.lib (eg, F<\Apache\libexec>). This library is needed for
building certain 3rd party Apache modules. If this is not supplied,
a default of F<APACHE_SRC/libexec> will be assumed, if this directory
exists.
=item DEBUG
If true (DEBUG=1), a Debug version will be built (this assumes
that a Debug Apache has been built). If false, or not given,
a Release version will be built.
=item EAPI
If true (EAPI=1), EAPI (Extended API) will be defined when
compiling. This is useful when building mod_perl against mod_ssl
patched Apache sources. If false, or not given, EAPI will
not be defined.
=back
After this, running
C:\modperl_src> nmake
C:\modperl_src> nmake test
C:\modperl_src> nmake install
will complete the installation.
This latter method of building mod_perl will also install the
Apache and mod_perl header files, which can then be accessed
through the Apache::src module.
If this build fails, you may want to try the sources obtained
from svn - see the discussion on the
L<Development Source Distribution|download::source/Development_Source_Distribution>
for details. Be aware, though,
that as well as providing bug fixes, there may be new features being
added and tested in the svn versions, so at any given time there are
no guarantees that these packages will build and test successfully.
=head1 Binaries
There are two major types of binary packages
available for Win32 mod_perl - all-in-one Perl/Apache/mod_perl
binaries, and mod_perl ppm (Perl Package Manager) packages.
=head2 All-in-one packages
There are a number of binary packages for Win32 that contain the
necessary Perl and Apache binaries:
=over
=item *
IndigoPerl from
L<http://www.indigostar.com/>,
=item *
XAMPP for Windows from
L<http://www.apachefriends.org/en/xampp-windows.html>
=item *
DeveloperSide.NET for Windows at
L<http://www.devside.net/web/server/free/software>
=item *
zangweb from
L<http://www.arbingersys.com/hostsites/zangweb/>
=back
As well, there is a package
F<perl-win32-bin.exe> from
L<http://www.apache.org/dyn/closer.cgi/perl/win32-bin/> - see the file
F<perl-win32-bin.readme> for a description. If you have trouble
fetching the whole file at once, the directory
L<http://www.apache.org/dyn/closer.cgi/perl/win32-bin/perl-win32-bin/>
contains this distribution split across multiple files -
see F<README.join> for instructions on how to join them.
Alternatively, if you have Perl already, you can get the script
F<distinstall> which, when invoked as
C<perl distinstall>, will fetch and join
the files for you.
As well as including a number of non-core modules, both of these
packages contain mod_perl. See the documentation on the web sites and
that included with the packages for installation instructions. Both of
these also include an ActiveState-compatible C<ppm> (Perl Package
Manager) utility for adding and upgrading modules.
For the adventuresome who want a taste of things to come,
or for those who want to avoid the
L<multithreading limitations of mod_perl
1.0|docs::1.0::os::win32::multithread>, a
mod_perl-2.0/Apache-2.0 binary distribution
is available - see the discussion of
L<modperl-2 on Win32|docs::2.0::os::win32::install>
for details. Be aware though that mod_perl 2.0 is
still in a development phase, and that a minimum Perl version
of 5.8 (ActivePerl 8xx) is required.
=head2 PPM Packages
For ActivePerl users (or compatible), there are also C<PPM>
mod_perl packages available. For this, if you
don't already have it, get and install the latest Win32 Apache binary
from L<http://httpd.apache.org/>.
Both ActivePerl and Apache binaries are available as C<MSI> files for
use by the Microsoft Installer - as discussed on the ActiveState site,
users of Windows 95 and 98 may need to obtain this. In installing
these packages, you may find it convenient when transcribing any
Unix-oriented documentation to choose installation directories that do
not have spaces in their names (eg, F<C:\Perl> and F<C:\Apache>).
After installing Perl and Apache, you can then install mod_perl via
the PPM utility. ActiveState does not maintain mod_perl in their ppm
repository, so you must get it from a different location other than
ActiveState's site. A quick way to do this is to download
the script F<mpinstall> and save it as, for example, I<mpinstall>.
Invoking this as C<perl mpinstall> on a command line
will take you through a dialogue, based on your configuration,
which will determine and install, via C<ppm>, the desired
mod_perl ppm package.
The direct way to install mod_perl via ppm is simply as (broken over
two lines for readability)
C:\> ppm install
http://theoryx5.uwinnipeg.ca/ppmpackages/mod_perl.ppd
for Activeperl 6xx builds, and as
C:\> ppm install
http://theoryx5.uwinnipeg.ca/ppms/mod_perl-1.ppd
for 8xx builds. Another way, which will be useful if you plan
on installing additional Apache modules, is to add the repository
where the mod_perl package is kept to the C<ppm> shell utility.
For C<ppm2> this may be done with the C<set repository alias location>
command, while for C<ppm3> (the default with ActivePerl 8xx) the
appropriate command is C<repository add alias location>; see the
help utility within the C<ppm> shell for details. For 6xx builds,
the appropriate location is
http://theoryx5.uwinnipeg.ca/cgi-bin/ppmserver?urn:/PPMServer
while for 8xx builds it is
http://theoryx5.uwinnipeg.ca/cgi-bin/ppmserver?urn:/PPMServer58
After this, you can, within the ppm shell, use the C<install>
command to either install C<mod_perl>, for 6xx, or
C<mod_perl-1>, for 8xx. For C<ppm2>, use the C<set save> command to save
the C<theoryx5> repository to your PPM configuration file,
so that future PPM sessions will search this
repository, as well as ActiveState's, for requested packages.
If you are running mod_ssl under Apache, then you should
obtain the C<mod_perl-eapi> package for 6xx or the
C<mod_perl-eapi-1> package for 8xx instead.
Note that, because of binary incompatibilities, one should
I<not> install packages for ActivePerl 8xx from a repository
containing packages for ActivePerl 6xx, and vice-versa, particularly
if these packages contain XS-based modules.
The mod_perl PPM package also includes the necessary Apache DLL
C<mod_perl.so>; a post-installation script should be run which will
offer to copy this file to your Apache modules directory (eg,
I<C:\Apache\modules>). If this fails, you can grab
the appropriate dll and install it manually. For 6xx builds,
this is at L<http://theoryx5.uwinnipeg.ca/ppmpackages/x86/>,
for which the relevant file is either C<mod_perl.so> or,
for EAPI support, C<mod_perl-eapi.so>. For 8xx builds,
the location is L<http://theoryx5.uwinnipeg.ca/ppms/x86/>,
for which the relevant file is either C<mod_perl-1.so> or,
for EAPI support, C<mod_perl-eapi-1.so>. You should then
copy this file to your Apache modules directory and rename
it as C<mod_perl.so>, if necessary.
The mod_perl package available from this site will always
use the latest mod_perl sources compiled against the latest official
Apache release; depending on changes made in Apache, you may or may
not be able to use an earlier Apache binary. However, in the Apache
Win32 world it is particularly a good idea to use the latest version,
for bug and security fixes. If you encounter problems in loading
F<mod_perl.so>, ensure that the mod_perl version you are using matches
that of Apache, make certain C<Perl> is in your C<PATH> environment
variable, or try adding the Apache directive
LoadFile "C:/Path/to/your/Perl/bin/perlxx.dll"
before loading F<mod_perl.so>. If all else fails, a reboot may help.
If the I<theoryx5.uwinnipeg.ca> repository is down, you can
access these packages at
L<http://www.apache.org/dyn/closer.cgi/perl/win32-bin/ppms/>,
for builds 8xx, and
L<http://www.apache.org/dyn/closer.cgi/perl/win32-bin/ppmpackages/>,
for builds 6xx.
=head1 See Also
The directions for L<configuring mod_perl 1.0 on
Win32|docs::1.0::os::win32::config>, the L<mod_perl
documentation|docs::index>, and the
L<FAQs for mod_perl on Win32|docs::general::os::win32::faq>.
Help is also available through the archives of and subscribing to
the L<mod_perl mailing list|maillist::modperl>.
=head1 Maintainers
Maintainer is the person(s) you should contact with updates,
corrections and patches.
=over
=item *
Randy Kobes E<lt>randy@theoryx5.uwinnipeg.caE<gt>
=back
=head1 Authors
=over
=item *
Randy Kobes E<lt>randy@theoryx5.uwinnipeg.caE<gt>
=back
Only the major authors are listed above. For contributors see the
Changes file.
=cut
| Distrotech/mod_perl | docs/src/docs/1.0/os/win32/install.pod | Perl | apache-2.0 | 14,677 |
package WebHooker;
use Mojo::Base 'Mojolicious';
use Mojo::JSON qw(decode_json);
use Mojo::URL;
use Config::GitLike;
use Git::Sub qw(push);
# This method will run once at server start
sub startup {
  my $self = shift;
  # Load the app config; repo_dir falls back to the usual gitolite home.
  $self->plugin('Config', default => {
  repo_dir => '/home/git/repositories',
  }
  );
  # Router
  my $r = $self->routes;
  # Normal route to controller
  # GitLab posts its web-hook payload here; :user/*repo name the GitHub
  # repository this GitLab repo should be mirrored to.
  $r->post('/:user/*repo' => sub {
  my $c = shift;
  # Decode the JSON hook payload from the raw request body.
  my $msg = decode_json($c->req->content->asset->slurp);
  my $repository = $msg->{repository}->{name};
  # Optional whitelist: when an 'authorized' map is configured, the
  # GitLab project id must map to exactly this user/repo pair.
  if (defined($c->config->{authorized}) && $c->config->{authorized}->{$msg->{project_id}} ne $c->param('user').'/'.$c->param('repo')) {
  $c->app->log->info('Not authorized');
  return $c->render(
  text => $repository.' not authorized to mirror to github/'.$c->param('user').'/'.$c->param('repo'),
  status => 200
  );
  }
  # Go to the right directory
  # Derive the on-disk repository path from the hook's repository URL.
  my $sub_dir = Mojo::URL->new($msg->{repository}->{url})->path;
  my $repo_dir = $c->config->{repo_dir};
  $repo_dir =~ s#/$##; # strip trailing slash before joining paths
  my $dir = $repo_dir.'/'.$sub_dir.'/';
  return $c->app->log->info($dir.' does not exists or is not a directory. Mirroring for '.$repository.' aborted.') unless (-d $dir);
  chdir $dir;
  # Check configuration
  # Read the repo's git config and add/refresh a mirroring 'github'
  # remote (URL embeds the configured GitHub credentials) when it is
  # missing or still uses a non-https URL.
  my $data = Config::GitLike->load_file('config');
  my $writer = Config::GitLike->new(confname => 'config');
  my $github_url = 'https://'.$c->config->{github}->{user}.':'.$c->config->{github}->{passwd}.'@github.com/';
  my $old_config = (defined($data->{'remote.github.url'}) && $data->{'remote.github.url'} !~ m/https/);
  unless (defined($data->{'remote.github.url'}) && defined($data->{'remote.github.mirror'}) && !($old_config)) {
  if (defined($c->param('user')) && defined($c->param('repo'))) {
  $c->app->log->info('git config does not contain github informations or need updating, doing configuration');
  $writer->set(
  key => 'remote.github.url',
  value => $github_url.$c->param('user').'/'.$c->param('repo'),
  filename => 'config'
  ) unless (defined($data->{'remote.github.url'}) && !($old_config));
  $writer->set(
  key => 'remote.github.mirror',
  value => 'true',
  filename => 'config'
  ) unless (defined($data->{'remote.github.mirror'}));
  } else {
  $c->app->log->info('Error in repository '.$sub_dir.': git config does not contain (enough?) github informations and neither does WebHooker');
  return $c->app->log->info('Aborting push for repository '.$sub_dir);
  }
  }
  # Mirror-push to the github remote (Git::Sub's git::push wrapper).
  $c->app->log->info(git::push qw(--quiet github));
  $c->app->log->info($repository.' mirrored to github because of '.$msg->{user_name}.' (or at least tryed to be mirrored)');
  $c->render(
  text => $repository.' mirrored to github (or at least tryed to be mirrored)',
  status => 200
  );
  });
}
1;
| ldidry/webhooker | lib/WebHooker.pm | Perl | apache-2.0 | 3,250 |
#!/usr/bin/env perl
#Author Boris Sadkhin
#Date Created Oct 31, 2014
#Summary : Wrapper for Dan's script for blast reduce
use strict;
use File::Basename;
use Time::HiRes qw( time );
# Record wall-clock start time (Time::HiRes) for the timing file below.
my $start = time();
# First argument: a pre-existing unit directory containing fasta/ and 1out/.
my $dir = shift;
if(!$dir || not -s $dir){
die "$dir does not exist";
}
my $unit = basename($dir);
my $fasta = "$dir/fasta/$unit.cdhit.fa";#You can use the cdhit fasta, cuz the db you are blasting against is cdhit reduced
my $blast = "$dir/1out/$unit.blastfinal.tab";
my $alphabetize = "$dir/1out/$unit.alphabetized";
# Refuse to run on missing inputs, or if the output already exists.
if(! -s $blast){
die "$blast does not exist!";
}
if( !-s $fasta){
die "$fasta does not exist!"
}
if(-s $alphabetize){
die "$alphabetize already exists!";
}
#Get Lengths
# Sum the sequence line lengths per fasta header into %seqlengths
# (handles multi-line records; the id is everything after '>').
open F, $fasta or die "Cannot open fasta file $fasta\n";
my %seqlengths;
my $header;
while(my $line =<F>){
chomp $line;
if(substr($line,0,1) eq ">"){
$header = substr($line,1);
}
else{
$seqlengths{$header} += length($line);
}
}
close F;
#Alphabetize the blast, and create a new 1out
# For each blast hit, ensure the lexically smaller sequence id comes
# first, append both sequence lengths, and write to a temp file which is
# then moved into place.
open F, $blast or die $! . " Cannot open blast[$blast] file \n";
open O, ">$alphabetize.tmp" or die $! . "Cannot print to $alphabetize \n";
while(my $line = <F>){
chomp $line;
# BUGFIX: the original did not check whether the line matched, so a
# malformed line silently reused stale $1/$2/$3 captures from the
# previous iteration. Skip unparseable lines instead. The unused
# '$mult' product has also been removed.
my ($query, $subject, $rest) = $line =~ /^(\w+)\t(\w+)\t(.*)$/
or next;
if($query lt $subject){
print O "$line\t$seqlengths{$query}\t$seqlengths{$subject}\n"; #Forward
}else{
print O "$subject\t$query\t$rest\t$seqlengths{$subject}\t$seqlengths{$query}\n"; #Reverse
}
}
close F;
print "Moving $alphabetize.tmp to $alphabetize\n";
system("mv $alphabetize.tmp $alphabetize");
#Print time
# Record elapsed wall-clock seconds for this step next to the output.
my $end = time();
open O, ">$dir/1out/alphabetize.time" or die $!;
printf O ("%.3f\n", $end - $start);
close O;
| EnzymeFunctionInitiative/est-precompute-bw | 5-alphabetize.pl | Perl | apache-2.0 | 1,635 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2021] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=pod
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=head1 NAME
Bio::EnsEMBL::Production::Pipeline::FASTA::BlastIndexer
=head1 DESCRIPTION
A stub blast indexer of the given GZipped file. The resulting index
is created under the parameter location I<base_path> in I<blast_dir> and then in a
directory defined by the type of dump. The type of dump also changes the file
name generated. Genomic dumps have their release number replaced with the
last repeat masked date.
Allowed parameters are:
=over 8
=item file - The file to index
=item program - The location of the xdformat program
=item molecule - The type of molecule to index. I<dna> and I<pep> are allowed
=item type - Type of index we are creating. I<genomic> and I<genes> are allowed
=item base_path - The base of the dumps
=item release - Required for correct DB naming
=item skip - Skip this iteration of the pipeline
=back
=cut
package Bio::EnsEMBL::Production::Pipeline::FASTA::BlastIndexer;
use strict;
use warnings;
use base qw/Bio::EnsEMBL::Production::Pipeline::FASTA::Indexer/;
use Bio::EnsEMBL::Utils::Exception qw/throw/;
use File::Copy qw/copy/;
use File::Spec;
use POSIX qw/strftime/;
sub param_defaults {
    my ($self) = @_;
    # Inherit the parent defaults unchanged. The commented-out keys below
    # document the parameters this module understands:
    #   program   => 'xdformat',        # program to use for indexing
    #   molecule  => 'pep',             # pep or dna
    #   type      => 'genes',           # genes or genomic
    #   blast_dir => 'blast_type_dir',  # sets the type of directory used for
    my %defaults = %{ $self->SUPER::param_defaults() };
    return { %defaults };
}
# Validate the job parameters before indexing: 'molecule' must be
# dna|pep, 'type' must be genomic|genes, and both the indexing program
# and gunzip must be executable. Throws on any violation; returns
# early (no checks) when ok_to_index_file() says this file is skipped.
sub fetch_input {
    my ($self) = @_;
    return if ! $self->ok_to_index_file();

    my $mol = $self->param('molecule');
    throw "No molecule param given" unless defined $mol;
    if($mol ne 'dna' && $mol ne 'pep') {
        # Bug fix: the original interpolated "${$mol}", which dereferences
        # the plain string $mol as a scalar ref and dies with
        # "Not a SCALAR reference" while building this error message.
        throw "param 'molecule' must be set to 'dna' or 'pep'. Value given was '$mol'";
    }

    my $type = $self->param('type');
    throw "No type param given" unless defined $type;
    if($type ne 'genomic' && $type ne 'genes') {
        # Same "${$type}" dereference bug fixed here.
        throw "param 'type' must be set to 'genomic' or 'genes'. Value given was '$type'";
    }

    $self->assert_executable($self->param('program'));
    $self->assert_executable('gunzip');
}
# Flow the indexing result (species, type, molecule, index_base) to the
# next analysis on branch 1. Does nothing when 'skip' is set.
sub write_output {
    my ($self) = @_;
    return if $self->param('skip');

    my %payload = map { $_ => $self->param($_) } qw(species type molecule index_base);
    $self->dataflow_output_id(\%payload, 1);
    return;
}
# Full path of the index target: target_dir() joined with the
# (possibly date-rewritten) target filename for $file.
sub target_file {
    my ($self, $file) = @_;
    return File::Spec->catfile(
        $self->target_dir(),
        $self->target_filename($file),
    );
}
# Produce a dir like /nfs/path/to/<blast_dir>/genes/XXX && /nfs/path/to/<blast_dir>/dna/XXX
sub target_dir {
    my ($self) = @_;
    my $blast_dir = $self->param('blast_dir');
    my $dump_type = $self->param('type');
    return $self->index_path($blast_dir, $dump_type);
}
# Database title for the index: the dump's basename with the release
# number component (e.g. "68.") stripped out.
sub db_title {
    my ($self, $source_file) = @_;
    my $basename = ( File::Spec->splitpath($source_file) )[2];
    my $release  = $self->param('release');
    ( my $title = $basename ) =~ s/$release\.//;
    return $title;
}
# Today's date in UTC, formatted day-month-year (e.g. "04-01-2011"),
# used as the index creation date.
sub db_date {
    my ($self) = @_;
    my @now_utc = gmtime();
    return strftime('%d-%m-%Y', @now_utc);
}
# Source like   Homo_sapiens.GRCh37.68.dna.toplevel.fa
# Filename like Homo_sapiens.GRCh37.20090401.dna.toplevel.fa
#
# For genomic dumps, inserts the last repeat-mask date before the final
# three dot-separated name components (<molecule>.<level>.fa); all other
# types keep the source basename unchanged.
# NOTE(review): the splice only *inserts* the date — the example above
# suggests the release number is replaced; confirm against callers/db_title.
sub target_filename {
    my ($self, $source_file) = @_;
    my ($vol, $dir, $file) = File::Spec->splitpath($source_file);
    if($self->param('type') eq 'genomic') {
        my @split = split(/\./, $file);
        my $rm_date = $self->repeat_mask_date();
        # Insert before the last three components of the name.
        splice @split, -3, 0, $rm_date;
        return join(q{.}, @split);
    }
    return $file;
}
1;
| Ensembl/ensembl-production | modules/Bio/EnsEMBL/Production/Pipeline/FASTA/BlastIndexer.pm | Perl | apache-2.0 | 4,474 |
#!/usr/bin/perl
package Alignment::Shrimp2;
use strict;
use warnings;
use File::Basename;
use CQS::PBS;
use CQS::ConfigUtils;
use CQS::SystemUtils;
use CQS::FileUtils;
use CQS::Task;
use CQS::NGSCommon;
use CQS::StringUtils;
our @ISA = qw(CQS::Task);
# Construct a Shrimp2 task: sets the task name to this package and the
# result-file suffix used by the pipeline.
sub new {
    my ($class) = @_;
    my $self = $class->SUPER::new();
    $self->{_name}   = __PACKAGE__;
    $self->{_suffix} = "_srp2";
    return bless($self, $class);
}
# Generate one PBS script per sample running the SHRiMP2 gmapper aligner,
# plus a master shell script that submits them all. When output_bam is
# set the SAM output is converted/sorted/indexed with samtools; otherwise
# the raw .shrimp output is kept.
sub perform {
    my ( $self, $config, $section ) = @_;

    my ( $task_name, $path_file, $pbs_desc, $target_dir, $log_dir, $pbs_dir, $result_dir, $option, $sh_direct, $cluster ) = $self->init_parameter( $config, $section );

    my $shrimp2_index = $config->{$section}{shrimp2_index} or die "define ${section}::shrimp2_index first";
    die "shrimp2_index ${shrimp2_index}.genome not exist" if ( !-e "${shrimp2_index}.genome" );

    my $is_mirna = $config->{$section}{is_mirna} or die "define ${section}::is_mirna first";
    # Bug fix: the original 'my $mirna = "-M mirna" if $is_mirna or "";'
    # used the unreliable "my EXPR if COND" construct and parsed as
    # 'if ($is_mirna or "")', leaving $mirna undefined in the false branch.
    # (In practice $is_mirna is always true here, since the line above dies
    # otherwise — NOTE(review): that die on a false value may itself be
    # unintended; confirm whether is_mirna => 0 should be allowed.)
    my $mirna = $is_mirna ? "-M mirna" : "";

    my $output_bam = $config->{$section}{output_bam} or die "define ${section}::output_bam first";

    my %raw_files = %{ get_raw_files( $config, $section, "source", ".fastq\$" ) };

    my $shfile = $self->get_task_filename( $pbs_dir, $task_name );
    open( my $sh, ">$shfile" ) or die "Cannot create $shfile";
    print $sh get_run_command($sh_direct);

    for my $sample_name ( sort keys %raw_files ) {
        my @sample_files = @{ $raw_files{$sample_name} };
        my $sampleFile   = $sample_files[0];

        my $shrimpFile = $sample_name . ".shrimp";
        my $sam_file   = $sample_name . ".sam";
        my $bam_file   = $sample_name . ".bam";

        my $pbs_file = $self->get_pbs_filename( $pbs_dir, $sample_name );
        my $pbs_name = basename($pbs_file);
        my $log      = $self->get_log_filename( $log_dir, $sample_name );

        print $sh "\$MYCMD ./$pbs_name \n";

        my $cur_dir  = create_directory_or_die( $result_dir . "/$sample_name" );
        my $log_desc = $cluster->get_log_description($log);

        my $final_file = $output_bam ? $bam_file : $shrimpFile;
        my $pbs = $self->open_pbs( $pbs_file, $pbs_desc, $log_desc, $path_file, $cur_dir, $final_file );

        if ($output_bam) {
            # Align, then sort/index the BAM and collect flagstat metrics.
            print $pbs "gmapper -L $shrimp2_index $sampleFile $mirna $option --extra-sam-fields > $sam_file
if [ -s $sam_file ]; then
samtools view -S -b $sam_file | samtools sort - $sample_name
samtools index $bam_file
samtools flagstat $bam_file > ${bam_file}.stat
fi
echo finished=`date`
";
        }
        else {
            print $pbs "gmapper -L $shrimp2_index $sampleFile $mirna $option --pretty >$shrimpFile
echo finished=`date`
";
        }
        $self->close_pbs( $pbs, $pbs_file );
    }
    close $sh;

    if ( is_linux() ) {
        chmod 0755, $shfile;
    }
    print "!!!shell file $shfile created, you can run this shell file to submit all tasks.\n";
}
# Describe the expected output files per sample: either <sample>.bam or
# <sample>.shrimp under the sample's result directory, filtered by the
# optional $pattern. Returns { sample_name => [ files ] }.
sub result {
    my ( $self, $config, $section, $pattern ) = @_;

    my ( $task_name, $path_file, $pbs_desc, $target_dir, $log_dir, $pbs_dir, $result_dir, $option, $sh_direct ) = $self->init_parameter( $config, $section, 0 );

    my $output_bam = $config->{$section}{output_bam} or die "define ${section}::output_bam first";
    my %raw_files = %{ get_raw_files( $config, $section, "source", ".fastq\$" ) };

    my $result = {};
    my $suffix = $output_bam ? ".bam" : ".shrimp";
    for my $sample_name ( sort keys %raw_files ) {
        my $sample_dir   = $result_dir . "/$sample_name/";
        my @result_files = ( $sample_dir . $sample_name . $suffix );
        $result->{$sample_name} = filter_array( \@result_files, $pattern );
    }
    return $result;
}
1;
| shengqh/ngsperl | lib/Alignment/Shrimp2.pm | Perl | apache-2.0 | 3,760 |
#!/usr/bin/perl
use warnings;
use strict;
use Carp;
use Core::Upc;
use Core::ProductInfo;
use Spreadsheet::XLSX;
package Parser::OrderParser;
# OrderParser
# Construct an empty OrderParser. All fields are initialised explicitly
# so the hash doubles as documentation of the object layout: the open
# workbook, its first sheet, the current row cursor, the parsed header
# row and the EOF flag.
sub new {
    my $class = shift;
    my $self = {
        excel  => undef,
        sheet  => undef,
        row    => undef,
        header => undef,
        at_eof => undef,
    };
    return bless $self, $class;
}
# OrderParser
#
# Advance the row cursor to the next spreadsheet row.
# Note: as the final expression, the post-increment returns the
# *previous* row index — callers should not rely on the return value.
sub next {
    my $self = shift;
    $self->{row}++;
}
# OrderParser
#
#
# Rewind the parser to the first data row (the row after the header),
# clear the EOF flag and re-read the header line.
sub reset {
    my $self = shift;
    $self->{at_eof} = 0;
    $self->{row}    = $self->{sheet}{MinRow} + 1;
    $self->read_header();
}
# OrderParser
#
#
# OrderParser
#
# Read the current spreadsheet row and build a Core::ProductInfo from it,
# dispatching each cell to a setter according to the header field name.
# Returns a hashref { pi => Core::ProductInfo, qty => quantity-or-undef }.
sub get_order_info {
    my $self = shift;
    my $excel = $self->{excel};
    my @sheets = @{$excel->{Worksheet}};
    my $sheet = $sheets[0];
    my $row = $self->{row};

    # Collect the raw cell values of the current row.
    my @line;
    foreach my $col ($sheet->{MinCol} .. $sheet->{MaxCol}) {
        my $cell = $sheet->{Cells}[$row][$col];
        push @line, $cell->{Val};
    }

    my @header = @{$self->{header}};
    my $i = 0;
    my $pi = Core::ProductInfo->new();
    my $qty = undef;
    foreach my $field (@header) {
        my $val = $line[$i];
        if ($field eq "Item #") {
            $pi->set_item_no($val);
        } elsif ($field eq "Quantity") {
            $qty = $val;
        } elsif ($field eq "Description") {
            # Decode the XML character entities left in the cell text.
            # (Bug fix: the previous substitutions were no-ops of the
            # form s/'/'/g, replacing a character with itself — the
            # intended entity names had been lost.)
            my $desc = $val;
            $desc =~ s/&apos;/'/g;
            $desc =~ s/&quot;/"/g;
            $desc =~ s/&amp;/&/g;
            $pi->set_description($desc);
        } elsif ($field eq "Unit") {
            if (!defined($val)) {
                warn $pi->get_description() . " is supposed to have unit";
            } else {
                $pi->set_unit($val);
            }
        } elsif ($field eq "Case Pack") {
            if (defined($val)) {
                $pi->set_case_pack($val);
            }
        } elsif ($field eq "Cost") {
            if (defined($val)) {
                $pi->set_cost($val);
            }
        } elsif ($field eq "Unit Cost") {
            if (defined($val)) {
                $pi->set_unit_cost($val);
            }
        } elsif ($field eq "Retail") {
            if (defined($val)) {
                $pi->set_retail($val);
            }
        } elsif ($field eq "Unit UPC") {
            # Only build a Upc when the cell contains at least one digit.
            if (defined($val) && ($val =~ /(\d)+/)) {
                $pi->set_upc(Core::Upc->new($val));
            }
        } elsif ($field eq "EOQ") {
            if (defined($val)) {
                $pi->set_eoq($val);
            }
        } elsif ($field eq "Dep#") {
            if (defined($val)) {
                $pi->set_department_no($val);
            }
        } elsif ($field eq "Category") {
            # Category is intentionally ignored.
        } elsif ($field eq "Par") {
            if (defined($val)) {
                $pi->set_par($val);
            }
        } elsif ($field eq "Loc") {
            if (defined($val)) {
                $pi->set_location($val);
            }
        }
        $i++;
    }

    my %order;
    $order{pi}  = $pi;
    $order{qty} = $qty;
    return \%order;
}
# OrderParser
#
# OrderParser
#
# Open the XLSX workbook at $fname, select its first worksheet, position
# the row cursor after the header row and parse the header. Re-throws
# any error raised while reading the workbook.
sub open {
    my ($self, $fname) = @_;
    eval {
        my $workbook = Spreadsheet::XLSX->new($fname);
        $self->{excel} = $workbook;

        my $first_sheet = $workbook->{Worksheet}[0];
        $self->{sheet} = $first_sheet;
        $self->{row}   = $first_sheet->{MinRow} + 1;

        $self->read_header();
    };
    die $@ if $@;
}
# OrderParser
#
#
# OrderParser
#
# True once the row cursor has moved past the sheet's last row.
# Prints diagnostics when the cursor or MaxRow is unexpectedly undefined.
sub at_eof {
    my $self = shift;
    my $sheet = $self->{sheet};

    print "row undefined!\n"    if !defined($self->{row});
    print "undefined MaxRow!\n" if !defined($sheet->{MaxRow});

    return $self->{row} > $sheet->{MaxRow};
}
# OrderParser
#
#
#
# OrderParser
#
# Parse the header row (the sheet's first row) into $self->{header} and
# position the row/column cursors on the first data cell.
sub read_header {
    my $self = shift;
    my $excel = $self->{excel};
    my @sheets = @{$excel->{Worksheet}};
    my $sheet = $sheets[0];
    my $header_row = $sheet->{MinRow};

    # (Removed a dead "my $cell = ..." assignment that was immediately
    # overwritten inside the loop, plus a commented-out debug dump.)
    my @header;
    for my $col ($sheet->{MinCol} .. $sheet->{MaxCol}) {
        my $cell = $sheet->{Cells}[$header_row][$col];
        push @header, $cell->{Val};
    }

    $self->{row}    = $header_row + 1;
    $self->{col}    = $sheet->{MinCol};
    $self->{header} = \@header;
}
# OrderParser
#
# Unimplemented stub; kept for interface completeness. Callers use
# next()/get_order_info() instead.
sub read_next_row {
}
1
| nrusin/inventory | Source/Parser/OrderParser.pm | Perl | apache-2.0 | 3,920 |
#
# Copyright 2015 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::fortinet::fortigate::plugin;
use strict;
use warnings;
use base qw(centreon::plugins::script_snmp);
# Build the Fortigate SNMP plugin object and register the table mapping
# each CLI mode name to the class that implements it.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;
    # %options carries the framework's option object under the 'options' key.

    $self->{version} = '1.0';

    my %mode_classes = (
        'cluster-status'      => 'centreon::common::fortinet::fortigate::mode::clusterstatus',
        'cpu'                 => 'centreon::common::fortinet::fortigate::mode::cpu',
        'disk'                => 'centreon::common::fortinet::fortigate::mode::disk',
        'hardware'            => 'centreon::common::fortinet::fortigate::mode::hardware',
        'interfaces'          => 'snmp_standard::mode::interfaces',
        'ips-stats'           => 'centreon::common::fortinet::fortigate::mode::ipsstats',
        'list-virtualdomains' => 'centreon::common::fortinet::fortigate::mode::listvirtualdomains',
        'memory'              => 'centreon::common::fortinet::fortigate::mode::memory',
        'sessions'            => 'centreon::common::fortinet::fortigate::mode::sessions',
        'virus'               => 'centreon::common::fortinet::fortigate::mode::virus',
    );
    %{$self->{modes}} = %mode_classes;

    return $self;
}
1;
__END__
=head1 PLUGIN DESCRIPTION
Check Fortinet Fortigate in SNMP.
=cut
| s-duret/centreon-plugins | network/fortinet/fortigate/plugin.pm | Perl | apache-2.0 | 2,305 |
package Sisimai::Lhost::IMailServer;
use parent 'Sisimai::Lhost';
use feature ':5.10';
use strict;
use warnings;
sub description { 'IPSWITCH IMail Server' }
sub make {
    # Detect an error from IMailServer
    # @param    [Hash] mhead    Message headers of a bounce email
    # @param    [String] mbody  Message body of a bounce email
    # @return   [Hash]          Bounce data list and message/rfc822 part
    # @return   [Undef]         failed to parse or the arguments are missing
    # @since v4.1.1
    my $class = shift;
    my $mhead = shift // return undef;
    my $mbody = shift // return undef;
    my $match = 0;

    # A bounce from IMail Server is recognised either by its fixed subject
    # or by an "X-Mailer: <SMTP32 v8.22>"-style header.
    $match ||= 1 if $mhead->{'subject'} =~ /\AUndeliverable Mail[ ]*\z/;
    $match ||= 1 if defined $mhead->{'x-mailer'} && index($mhead->{'x-mailer'}, '<SMTP32 v') == 0;
    return undef unless $match;

    # Marker line separating the error report from the quoted original message.
    state $rebackbone = qr|^Original[ ]message[ ]follows[.]|m;
    state $startingof = { 'error' => ['Body of message generated response:'] };
    # Patterns that reveal which SMTP command triggered the error.
    state $recommands = {
        'conn' => qr/(?:SMTP connection failed,|Unexpected connection response from server:)/,
        'ehlo' => qr|Unexpected response to EHLO/HELO:|,
        'mail' => qr|Server response to MAIL FROM:|,
        'rcpt' => qr|Additional RCPT TO generated following response:|,
        'data' => qr|DATA command generated response:|,
    };
    # Patterns mapping diagnostic text to a Sisimai bounce reason.
    state $refailures = {
        'hostunknown'   => qr/Unknown host/,
        'userunknown'   => qr/\A(?:Unknown user|Invalid final delivery userid)/,
        'mailboxfull'   => qr/\AUser mailbox exceeds allowed size/,
        'securityerror' => qr/\ARequested action not taken: virus detected/,
        'undefined'     => qr/\Aundeliverable to/,
        'expired'       => qr/\ADelivery failed \d+ attempts/,
    };

    my $dscontents = [__PACKAGE__->DELIVERYSTATUS];
    # fillet() splits the body into [error report, original message].
    my $emailsteak = Sisimai::RFC5322->fillet($mbody, $rebackbone);
    my $recipients = 0; # (Integer) The number of 'Final-Recipient' header
    my $v = undef;

    for my $e ( split("\n", $emailsteak->[0]) ) {
        # Read error messages and delivery status lines from the head of the email
        # to the previous line of the beginning of the original message.
        # Unknown user: kijitora@example.com
        #
        # Original message follows.
        $v = $dscontents->[-1];

        if( $e =~ /\A([^ ]+)[ ](.+)[:][ \t]*([^ ]+[@][^ ]+)/ ) {
            # "<error words>: <recipient address>", e.g.
            # Unknown user: kijitora@example.com
            if( $v->{'recipient'} ) {
                # There are multiple recipient addresses in the message body.
                push @$dscontents, __PACKAGE__->DELIVERYSTATUS;
                $v = $dscontents->[-1];
            }
            $v->{'diagnosis'} = $1.' '.$2;
            $v->{'recipient'} = $3;
            $recipients++;

        } elsif( $e =~ /\Aundeliverable[ ]+to[ ]+(.+)\z/ ) {
            # undeliverable to kijitora@example.com
            if( $v->{'recipient'} ) {
                # There are multiple recipient addresses in the message body.
                push @$dscontents, __PACKAGE__->DELIVERYSTATUS;
                $v = $dscontents->[-1];
            }
            $v->{'recipient'} = Sisimai::Address->s3s4($1);
            $recipients++;

        } else {
            # Other error message text: accumulate into "alterrors" so it can
            # be merged into the diagnosis later. A line containing the
            # "Body of message generated response:" marker restarts the buffer.
            $v->{'alterrors'} //= '';
            $v->{'alterrors'} .= ' '.$e if $v->{'alterrors'};
            $v->{'alterrors'} = $e if index($e, $startingof->{'error'}->[0]) > -1;
        }
    }
    return undef unless $recipients;

    for my $e ( @$dscontents ) {
        if( exists $e->{'alterrors'} && $e->{'alterrors'} ) {
            # Copy alternative error message
            $e->{'diagnosis'} = $e->{'alterrors'}.' '.$e->{'diagnosis'};
            $e->{'diagnosis'} = Sisimai::String->sweep($e->{'diagnosis'});
            delete $e->{'alterrors'};
        }
        $e->{'diagnosis'} = Sisimai::String->sweep($e->{'diagnosis'});

        COMMAND: for my $r ( keys %$recommands ) {
            # Detect SMTP command from the message
            next unless $e->{'diagnosis'} =~ $recommands->{ $r };
            $e->{'command'} = uc $r;
            last;
        }
        SESSION: for my $r ( keys %$refailures ) {
            # Verify each regular expression of session errors
            next unless $e->{'diagnosis'} =~ $refailures->{ $r };
            $e->{'reason'} = $r;
            last;
        }
    }
    return { 'ds' => $dscontents, 'rfc822' => $emailsteak->[1] };
}
1;
__END__
=encoding utf-8
=head1 NAME
Sisimai::Lhost::IMailServer - bounce mail parser class for C<IMail Server>.
=head1 SYNOPSIS
use Sisimai::Lhost::IMailServer;
=head1 DESCRIPTION
Sisimai::Lhost::IMailServer parses a bounce email which was created by
C<Ipswitch IMail Server>. Methods in the module are called only from
Sisimai::Message.
=head1 CLASS METHODS
=head2 C<B<description()>>
C<description()> returns description string of this module.
print Sisimai::Lhost::IMailServer->description;
=head2 C<B<make(I<header data>, I<reference to body string>)>>
C<make()> method parses a bounced email and returns results as an array reference.
See Sisimai::Message for more details.
=head1 AUTHOR
azumakuniyuki
=head1 COPYRIGHT
Copyright (C) 2014-2021 azumakuniyuki, All rights reserved.
=head1 LICENSE
This software is distributed under The BSD 2-Clause License.
=cut
| sisimai/p5-Sisimai | lib/Sisimai/Lhost/IMailServer.pm | Perl | bsd-2-clause | 5,377 |
#
# $Id: Screenshot.pm,v eff9afda3723 2015/01/04 12:34:23 gomor $
#
# xorg::screenshot Brik
#
package Metabrik::Xorg::Screenshot;
use strict;
use warnings;
use base qw(Metabrik::Shell::Command);
# Static Brik metadata (attributes, commands, required binaries) in the
# structure expected by the Metabrik framework.
sub brik_properties {
    my $properties = {
        revision => '$Revision: eff9afda3723 $',
        tags => [ qw(unstable screenshot) ],
        attributes => {
            output => [ qw(file) ],
        },
        attributes_default => {
            output => 'screenshot.png',
        },
        commands => {
            active_window => [ qw(output_file|OPTIONAL) ],
            full_screen => [ qw(output_file|OPTIONAL) ],
        },
        require_binaries => {
            'scrot' => [ ],
        },
    };
    return $properties;
}
# Internal helper shared by active_window() and full_screen(): resolve
# the output file (falling back to the 'output' attribute), log the
# destination, run scrot with the given extra flags and return the path.
sub _scrot {
    my ($self, $caller, $flags, $output) = @_;

    $output ||= $self->output;
    $self->log->verbose("$caller: saving to file [$output]");

    my $cmd = join(' ', 'scrot', @$flags, $output);
    $self->system($cmd);

    return $output;
}

# Capture only the currently focused window (scrot --focused --border).
# $output is optional and defaults to the 'output' attribute.
sub active_window {
    my $self = shift;
    my ($output) = @_;

    return $self->_scrot('active_window', [ '--focused', '--border' ], $output);
}

# Capture the whole screen. $output is optional and defaults to the
# 'output' attribute.
sub full_screen {
    my $self = shift;
    my ($output) = @_;

    return $self->_scrot('full_screen', [], $output);
}
1;
__END__
=head1 NAME
Metabrik::Xorg::Screenshot - xorg::screenshot Brik
=head1 COPYRIGHT AND LICENSE
Copyright (c) 2014-2015, Patrice E<lt>GomoRE<gt> Auffret
You may distribute this module under the terms of The BSD 3-Clause License.
See LICENSE file in the source distribution archive.
=head1 AUTHOR
Patrice E<lt>GomoRE<gt> Auffret
=cut
| gitpan/Metabrik-Repository | lib/Metabrik/Xorg/Screenshot.pm | Perl | bsd-3-clause | 1,515 |
# SNMP::Info::SONMP
#
# Copyright (c) 2012 Eric Miller
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the University of California, Santa Cruz nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
package SNMP::Info::SONMP;
use warnings;
use strict;
use Exporter;
use SNMP::Info;
@SNMP::Info::SONMP::ISA = qw/SNMP::Info Exporter/;
@SNMP::Info::SONMP::EXPORT_OK = qw//;
use vars qw/$VERSION %FUNCS %GLOBALS %MIBS %MUNGE/;
$VERSION = '3.29';
%MIBS = (
'SYNOPTICS-ROOT-MIB' => 'synoptics',
'S5-ETH-MULTISEG-TOPOLOGY-MIB' => 's5EnMsTop',
);
%GLOBALS = (
'sonmp_gid' => 's5EnMsTopIpAddr',
'sonmp_run' => 's5EnMsTopStatus',
);
%FUNCS = (
# From S5-ETH-MULTISEG-TOPOLOGY-MIB::TopNmmTable
'sonmp_topo_slot' => 's5EnMsTopNmmSlot',
'sonmp_topo_port' => 's5EnMsTopNmmPort',
'sonmp_topo_ip' => 's5EnMsTopNmmIpAddr',
'sonmp_topo_seg' => 's5EnMsTopNmmSegId',
'sonmp_topo_mac' => 's5EnMsTopNmmMacAddr',
'sonmp_topo_platform' => 's5EnMsTopNmmChassisType',
'sonmp_topo_localseg' => 's5EnMsTopNmmLocalSeg',
);
%MUNGE = ( 'sonmp_topo_mac' => \&SNMP::Info::munge_mac );
# Default layout of the flattened interface index space: each slot owns a
# bank of 32 indexes, slots are numbered from 1 and ports from 0.
# Device subclasses override these for different numbering schemes.
sub index_factor { return 32 }

sub slot_offset { return 1 }

sub port_offset { return 0 }
# True (1) when the SONMP topology status object is present on the
# device; returns nothing otherwise.
sub hasSONMP {
    my $sonmp = shift;
    my $status = $sonmp->sonmp_run();
    return 1 if defined $status;
    return;
}
# Map each topology-table entry to the local interface index (IID) of
# the port it was heard on, by flattening (slot, port) with the device's
# index_factor/slot_offset/port_offset scheme. Entries with port 0
# describe the local chassis and are skipped.
sub sonmp_if {
    my $sonmp = shift;
    my $partial = shift;
    my $sonmp_topo_port = $sonmp->sonmp_topo_port($partial) || {};
    my $sonmp_topo_slot = $sonmp->sonmp_topo_slot($partial) || {};
    # Numbering scheme; subclasses may override these accessors.
    my $index_factor = $sonmp->index_factor();
    my $slot_offset = $sonmp->slot_offset();
    my $port_offset = $sonmp->port_offset();
    my $model = $sonmp->model();
    my %sonmp_if;
    foreach my $entry ( keys %$sonmp_topo_port ) {
        my $port = $sonmp_topo_port->{$entry};
        next unless defined $port;
        next if $port == 0;
        my $slot = $sonmp_topo_slot->{$entry} || 0;
        if ( $model eq 'Baystack Hub' ) {
            # Baystack Hubs report a combined slot value: five raw values
            # map onto one physical slot. NOTE(review): logic inherited
            # as-is — values ending in 1/6 and 2/7 are folded onto fixed
            # ports 25/26; confirm against Baystack Hub documentation.
            my $comidx = $slot;
            if ( !( $comidx % 5 ) ) {
                $slot = ( $slot / 5 );
            }
            elsif ( $comidx =~ /[16]$/ ) {
                $slot = int( $slot / 5 );
                $port = 25;
            }
            elsif ( $comidx =~ /[27]$/ ) {
                $slot = int( $slot / 5 );
                $port = 26;
            }
        }
        # Flatten (slot, port) into a single index.
        my $index = ( ( $slot - $slot_offset ) * $index_factor )
            + ( $port - $port_offset );
        $sonmp_if{$entry} = $index;
    }
    return \%sonmp_if;
}
# Map each topology entry to the neighbor's advertised IPv4 address.
# Entries whose local port is 0 describe the device itself and are skipped.
sub sonmp_ip {
    my $sonmp   = shift;
    my $partial = shift;

    my $topo_port = $sonmp->sonmp_topo_port($partial) || {};
    my $topo_ip   = $sonmp->sonmp_topo_ip($partial)   || {};

    my %ip_of;
    for my $iid ( keys %$topo_ip ) {
        my $local_port = $topo_port->{$iid};
        next unless defined $local_port and $local_port != 0;
        $ip_of{$iid} = $topo_ip->{$iid};
    }
    return \%ip_of;
}
# Map each topology entry to the remote device's port, decoded from the
# SONMP segment id as "slot.port". Access points and BayHubs do not
# encode slot/port in the segment id and get 'dp0'/'unknown' instead.
sub sonmp_port {
    my $sonmp = shift;
    my $partial = shift;
    my $sonmp_topo_port = $sonmp->sonmp_topo_port($partial) || {};
    my $sonmp_topo_seg = $sonmp->sonmp_topo_seg($partial) || {};
    my $sonmp_topo_platform = $sonmp->sonmp_topo_platform($partial) || {};
    my %sonmp_port;
    foreach my $entry ( keys %$sonmp_topo_seg ) {
        my $port = $sonmp_topo_port->{$entry};
        next unless defined $port;
        # Port 0 entries describe the local chassis; skip them.
        next if $port == 0;
        my $seg = $sonmp_topo_seg->{$entry};
        my $platform = $sonmp_topo_platform->{$entry};
        # AP-222x Series does not adhere to port numbering
        if ( $platform =~ /AccessPoint/i ) {
            $sonmp_port{$entry} = 'dp0';
        }
        # BayHubs send the lower three bytes of the MAC not the slot/port
        elsif ( $seg > 4000 ) {
            $sonmp_port{$entry} = 'unknown';
        }
        else {
            # Segment id is (256 * remote slot_num) + (remote_port)
            my $remote_port = $seg % 256;
            my $remote_slot = int( $seg / 256 );
            $sonmp_port{$entry} = "$remote_slot.$remote_port";
        }
    }
    return \%sonmp_port;
}
# Map each topology entry to the remote device's chassis/platform type.
# Entries whose local port is 0 describe the device itself and are skipped.
sub sonmp_platform {
    my $sonmp   = shift;
    my $partial = shift;

    my $topo_port     = $sonmp->sonmp_topo_port($partial)     || {};
    my $topo_platform = $sonmp->sonmp_topo_platform($partial) || {};

    my %platform_of;
    for my $iid ( keys %$topo_platform ) {
        my $local_port = $topo_port->{$iid};
        next unless defined $local_port and $local_port != 0;
        $platform_of{$iid} = $topo_platform->{$iid};
    }
    return \%platform_of;
}
# Return the MAC address the device advertises via SONMP: the topology
# entry with port 0 refers to the local chassis, and its MAC field is
# the advertised address. Returns nothing when topology is disabled.
sub mac {
    my $sonmp = shift;
    my $sonmp_topo_port = $sonmp->sonmp_topo_port();
    my $sonmp_topo_mac = $sonmp->sonmp_topo_mac();
    foreach my $entry ( keys %$sonmp_topo_port ) {
        my $port = $sonmp_topo_port->{$entry};
        # Only the local-chassis entry (port 0) is of interest here.
        next unless $port == 0;
        my $mac = $sonmp_topo_mac->{$entry};
        return $mac;
    }
    # Topology turned off, not supported.
    return;
}
1;
__END__
=head1 NAME
SNMP::Info::SONMP - SNMP Interface to SynOptics Network Management Protocol
(SONMP)
=head1 AUTHOR
Eric Miller
=head1 SYNOPSIS
my $sonmp = new SNMP::Info (
AutoSpecify => 1,
Debug => 1,
DestHost => 'router',
Community => 'public',
Version => 2
);
my $class = $sonmp->class();
print " Using device sub class : $class\n";
$hassonmp = $sonmp->hasSONMP() ? 'yes' : 'no';
# Print out a map of device ports with CDP neighbors:
my $interfaces = $sonmp->interfaces();
my $sonmp_if = $sonmp->sonmp_if();
my $sonmp_ip = $sonmp->sonmp_ip();
my $sonmp_port = $sonmp->sonmp_port();
foreach my $sonmp_key (keys %$sonmp_ip){
my $iid = $sonmp_if->{$sonmp_key};
my $port = $interfaces->{$iid};
my $neighbor = $sonmp_ip->{$sonmp_key};
my $neighbor_port = $sonmp_port->{$sonmp_key};
print "Port : $port connected to $neighbor / $neighbor_port\n";
}
=head1 DESCRIPTION
SNMP::Info::SONMP is a subclass of SNMP::Info that provides an object oriented
interface to the SynOptics Network Management Protocol (SONMP) information
through SNMP.
SONMP is a Layer 2 protocol that supplies topology information of devices that
also speak SONMP, mostly switches and hubs. SONMP is implemented in
SynOptics, Bay, Nortel, and Avaya devices. SONMP has been rebranded by Bay
then Nortel and may be referred to by several different names, including Nortel
Discovery Protocol (NDP).
Create or use a device subclass that inherits this class. Do not use
directly.
Each device implements a subset of the global and cache entries.
Check the return value to see if that data is held by the device.
=head2 Inherited Classes
None.
=head2 Required MIBs
=over
=item F<SYNOPTICS-ROOT-MIB>
=item F<S5-ETH-MULTISEG-TOPOLOGY-MIB>
=back
=head1 GLOBAL METHODS
These are methods that return scalar values from SNMP
=over
=item $sonmp->index_factor()
Returns a number representing the number of ports reserved per slot or switch
within the device MIB. Defaults to 32.
=item $sonmp->slot_offset()
Returns the offset if slot numbering does not start at 0. Defaults to 1.
=item $sonmp->port_offset()
Returns the offset if port numbering does not start at 0. Defaults to 0.
=item $sonmp->hasSONMP()
Is SONMP is active in this device?
=item $sonmp->sonmp_gid()
Returns the IP that the device is sending out for its Nmm topology info.
(C<s5EnMsTopIpAddr>)
=item $sonmp->sonmp_run()
Returns true if SONMP is on for this device.
(C<s5EnMsTopStatus>)
=item $sonmp->mac()
Returns MAC of the advertised IP address of the device.
=back
=head1 TABLE METHODS
These are methods that return tables of information in the form of a reference
to a hash.
=head2 Layer2 Topology info (C<s5EnMsTopNmmTable>)
=over
=item $sonmp->sonmp_topo_slot()
Returns reference to hash. Key: Table entry, Value:slot number
(C<s5EnMsTopNmmSlot>)
=item $sonmp->sonmp_topo_port()
Returns reference to hash. Key: Table entry, Value:Port Number
(interface iid)
(C<s5EnMsTopNmmPort>)
=item $sonmp->sonmp_topo_ip()
Returns reference to hash. Key: Table entry, Value:Remote IP address of entry
(C<s5EnMsTopNmmIpAddr>)
=item $sonmp->sonmp_topo_seg()
Returns reference to hash. Key: Table entry, Value:Remote Segment ID
(C<s5EnMsTopNmmSegId>)
=item $sonmp->sonmp_topo_mac()
(C<s5EnMsTopNmmMacAddr>)
Returns reference to hash. Key: Table entry, Value:Remote MAC address
=item $sonmp->sonmp_topo_platform
Returns reference to hash. Key: Table entry, Value:Remote Device Type
(C<s5EnMsTopNmmChassisType>)
=item $sonmp->sonmp_topo_localseg
Returns reference to hash. Key: Table entry, Value: Boolean, if
bay_topo_seg() is local.
(C<s5EnMsTopNmmLocalSeg>)
=back
=head2 Common topology information
All entries with port=0 are local and ignored.
=over
=item $sonmp->sonmp_if()
Returns reference to hash. Key: IID, Value: Local port (interfaces)
=item $sonmp->sonmp_ip()
Returns reference to hash. Key: IID, Value: Remote IP address
If multiple entries exist with the same local port, sonmp_if(), with different
IPv4 addresses, sonmp_ip(), there is either a non SONMP device in between two or
more devices or multiple devices which are not directly connected.
Use the data from the Layer2 Topology Table below to dig deeper.
=item $sonmp->sonmp_port()
Returns reference to hash. Key: IID, Value: Remote port (interfaces)
=item $sonmp->sonmp_platform()
Returns reference to hash. Key: IID, Value: Remote device type
=back
=cut
| lucwillems/SNMP-INFO | Info/SONMP.pm | Perl | bsd-3-clause | 11,324 |
#!/usr/bin/perl
# pathfactor.pl
# allows you to replace paths specified as $BETAKITPATH$
# with the $TOP_DIR path
#
# CHANGELOG
#
# 04-JAN-2011 More configurability through
# command line arguments added.
# still, only to be called by
# TOP_DIR/Makefile
#
use strict;
#
# main( inputfile, path, leafdir, varname )
#
# Print <inputfile> to STDOUT with every "$<varname>$" replaced by the
# top-level directory, derived by stripping "/<leafdir>..." from <path>.
# <leafdir> defaults to "user", <varname> to "BETAKITPATH".
#
sub main
{
    my ($filename, $path, $leafdir, $varname) = @_;

    # Guard against a missing/empty input file (the original matched
    # undef against /^$/, which warns under "use warnings").
    if ( !defined($filename) || $filename eq '' )
    {
        printf("usage: \$ ./build/pathfactor.pl <input file> <current dir>\n");
        printf("\t\texiting!\n");
        exit(2);
    }
    $leafdir = "user"        if !defined($leafdir) || $leafdir eq '';
    $varname = "BETAKITPATH" if !defined($varname) || $varname eq '';

    # 3-arg open with a lexical filehandle (was a 2-arg open on the
    # bareword DXINFILE).
    open(my $infile, '<', $filename) or die("trying to open '$filename' failed.");

    # running from $TOPDIR/<leafdir>: strip the leaf dir and everything
    # after it. \Q...\E protects against regex metacharacters in the name.
    $path =~ s/\/\Q$leafdir\E(.*)$//;
    my $TOPDIR = $path;
    printf("newpath = (%s)\n", $path );

    while ( my $txt_line = <$infile> )
    {
        chomp( $txt_line );
        $txt_line =~ s/\$\Q$varname\E\$/$TOPDIR/;
        printf("%s\n", $txt_line );
    }
    close($infile);
    exit(0);
}
# Require the first two command-line arguments (input file, current dir);
# the third (leaf dir) is optional. Bug fix: the original used the
# one-element slices @ARGV[0]/@ARGV[1] (which warn under "use warnings")
# and matched possibly-undef values against /^$/.
if ( !defined($ARGV[0]) || $ARGV[0] eq '' || !defined($ARGV[1]) || $ARGV[1] eq '' )
{
    printf("usage: \$ ./build/pathfactor.pl <input file> <current dir> [<leaf_dir> <var_name>]\n");
    printf("\t\texiting!\n");
    exit(3);
}
main($ARGV[0], $ARGV[1], $ARGV[2]);
| betasam/betakit | user/build/pathfactor.pl | Perl | bsd-3-clause | 1,327 |
#!/usr/bin/perl
# Generates static HTML documentation for the Perun RPC API by parsing
# special "/*# ... */" javadoc blocks out of the Java RPC method sources.
use strict;
use warnings;
use Switch;   # NOTE(review): Switch.pm is a deprecated source filter — consider plain if/elsif
use Getopt::Long qw(:config no_ignore_case);
use Sort::Versions;
#constants
our $OUTPUT_DIR="./web-template";   # where the generated HTML files are written
our $SOURCE_DIR="./perun/perun-rpc/src/main/java/cz/metacentrum/perun/rpc/methods";   # Java sources to parse
our @allVersions;        # git tags (versions) to build; index 0 presumably the latest — see buildVersion
our $versionLimit = 20;  # maximum number of versions offered in the version <select>
#INFO ABOUT STRUCTURE
# %MANAGERS
# |
# {manager name}
# |
# @METHODS
# |
# {0..x}
# |
# %METHOD
# | |
# {"name"}{javadocs}
# | |
# $methodName @javadocs
# |
# {0..x}
# |
# %javadoc
# | | | |
# {"text","params","throws","return","deprecated","exampleResponse","exampleParams"}
# | | | | | | |
# @text @params @throws $return $deprecated $exampleResponse @exampleParams
# | | |
# {0..x} {0..x} {0..x}
# | | |
# $text $param $throw
#variables
my $managers = {};        # manager name -> array of parsed methods (filled by processFile)
our %objectExamples;      # Java return type (as written in javadoc) -> example JSON response string
my $listPrepend = "[ ";                  # wraps a single-object example into a List<...> example
my $listAppend = " , {...} , {...} ]";
# ---------------------------------------------------------------------------
# Example JSON responses keyed by the Java return type exactly as it appears
# in the javadoc (@return / @exampleResponse).  "List<X>" entries wrap the
# single-object example in $listPrepend/$listAppend; composite examples
# interpolate earlier entries, so definition order matters.
# (Cleanup: the original repeated a no-op self-assignment after almost every
# List<...> entry; those were removed.  Two real bugs were fixed — see the
# EnrichedHost and Paginated<RichMember> entries below.)
# ---------------------------------------------------------------------------
$objectExamples{"AttributeDefinition"} = "{ \"id\" : 2820 , \"friendlyName\" : \"createdAt\" , \"namespace\" : \"urn:perun:vo:attribute-def:core\" , \"type\" : \"java.lang.String\" , \"entity\" : \"vo\" , \"writable\" : true , \"baseFriendlyName\" : \"createdAt\" , \"friendlyNameParameter\" : \"\" , \"unique\" : false , \"displayName\" : \"VO created date\" , \"description\" : \"Date when VO was created.\" , \"beanName\" : \"AttributeDefinition\" }";
$objectExamples{"List<AttributeDefinition>"} = $listPrepend . $objectExamples{"AttributeDefinition"} . $listAppend;
$objectExamples{"Attribute"} = "{ \"id\" : 2820 , \"friendlyName\" : \"createdAt\" , \"namespace\" : \"urn:perun:vo:attribute-def:core\" , \"value\" : \"2011-05-17 00:50:06.3\" , \"type\" : \"java.lang.String\" , \"entity\" : \"vo\" , \"writable\" : true , \"baseFriendlyName\" : \"createdAt\" , \"friendlyNameParameter\" : \"\" , \"unique\" : false , \"displayName\" : \"VO created date\" , \"description\" : \"Date when VO was created.\" , \"beanName\" : \"Attribute\" }";
$objectExamples{"List<Attribute>"} = $listPrepend . $objectExamples{"Attribute"} . $listAppend;
$objectExamples{"Map<String,Attribute>"} = "{ \"key\" : " . $objectExamples{"Attribute"} . " , \"key2\" : {...} }";
# Alias for a mistyped type name ("<" instead of ">" at the end) appearing in
# some javadocs — presumably intentional; TODO confirm against the Java sources.
$objectExamples{"Map<String,Attribute<"} = $objectExamples{"Map<String,Attribute>"};
$objectExamples{"Vo"} = "{ \"id\" : 123 , \"name\" : \"My testing VO\" , \"shortName\" : \"test_vo\" , \"beanName\" : \"Vo\" }";
$objectExamples{"List<Vo>"} = $listPrepend . $objectExamples{"Vo"} . $listAppend;
$objectExamples{"Facility"} = "{ \"id\" : 24 , \"name\" : \"host.facility.cz\" , \"description\" : \"is optional\" , \"beanName\" : \"Facility\" }";
$objectExamples{"List<Facility>"} = $listPrepend . $objectExamples{"Facility"} . $listAppend;
$objectExamples{"RichFacility"} = "{ \"id\" : 24 , \"name\" : \"host.facility.cz\" , , \"description\" : \"is optional\" , \"facilityOwners\" : [ { \"id\" : 183 , \"name\" : \"Some Body\" , \"type\" : \"technical\" , \"contact\" : \"mail\@mail.com\" , \"beanName\" : \"Owner\" } ] , \"beanName\" : \"RichFacility\" }";
$objectExamples{"List<RichFacility>"} = $listPrepend . $objectExamples{"RichFacility"} . $listAppend;
$objectExamples{"Resource"} = "{ \"id\" : 493 , \"name\" : \"host1.host.cz\" , \"description\" : \"ROOT access to host1.host.cz\" , \"facilityId\" : 24 , \"voId\" : 21 , \"uuid\" : \"542d676f-99b2-4d1c-bc80-a46fd7f34e62\" , \"beanName\" : \"Resource\" }";
$objectExamples{"List<Resource>"} = $listPrepend . $objectExamples{"Resource"} . $listAppend;
$objectExamples{"ResourceTag"} = "{ \"id\" : 3 , \"tagName\" : \"comp_cluster\" , \"voId\" : 123 , \"beanName\" : \"ResourceTag\" }";
$objectExamples{"List<ResourceTag>"} = $listPrepend . $objectExamples{"ResourceTag"} . $listAppend;
$objectExamples{"RichResource"} = "{ \"id\" : 493 , \"name\" : \"host1.host.cz\" , \"description\" : \"ROOT access to host1.host.cz\" , \"facilityId\" : 24 , \"voId\" : 123 , \"uuid\" : \"542d676f-99b2-4d1c-bc80-a46fd7f34e62\" , \"beanName\" : \"RichResource\" , \"vo\" : ". $objectExamples{"Vo"} . ", \"facility\" : ". $objectExamples{"Facility"} . " , \"resourceTags\" : ". $objectExamples{"List<ResourceTag>"} . " }";
$objectExamples{"List<RichResource>"} = $listPrepend . $objectExamples{"RichResource"} . $listAppend;
$objectExamples{"Owner"} = "{ \"id\" : 183 , \"name\" : \"Some Body\" , \"type\" : \"administrative\" , \"contact\" : \"mail\@mail.com\" , \"beanName\" : \"Owner\" }";
$objectExamples{"List<Owner>"} = $listPrepend . $objectExamples{"Owner"} . $listAppend;
$objectExamples{"Group"} = "{ \"id\" : 1061 , \"name\" : \"My group\" , \"shortName\" : \"My group\" , \"description\" : \"My testing group\" , \"parentGroupId\" : null , \"voId\" : 201 , \"uuid\" : \"31e1014b-e994-4cb2-b238-e32aeef87670\" , \"beanName\" : \"Group\" }";
$objectExamples{"List<Group>"} = $listPrepend . $objectExamples{"Group"} . $listAppend;
$objectExamples{"RichGroup"} = "{ \"id\" : 1061 , \"name\" : \"My Group\" , \"shortName\" : \"My Group\" , \"description\" : \"My testing group\" , \"parentGroupId\" : null , \"voId\" : 201 , \"uuid\" : \"31e1014b-e994-4cb2-b238-e32aeef87670\" , \"beanName\" : \"RichGroup\" , \"attributes\" : [ { \"value\" : null , \"type\" : \"java.lang.String\" , \"entity\" : \"group\" , \"namespace\" : \"urn:perun:group:attribute-def:def\" , \"friendlyName\" : \"synchronizationEnabled\" , \"writable\" : true , \"baseFriendlyName\" : \"synchronizationEnabled\" , \"friendlyNameParameter\" : \"\" , \"unique\" : false , \"displayName\" : \"Synchronization enabled\" , \"description\" : \"Enables group synchronization from external source.\" , \"id\" : 103 , \"beanName\" : \"Attribute\" } ] }";
$objectExamples{"List<RichGroup>"} = $listPrepend . $objectExamples{"RichGroup"} . $listAppend;
$objectExamples{"Member"} = "{ \"id\" : 12 , \"userId\" : 34 , \"voId\" : 42 , \"sourceGroupId\" : null , \"membershipType\" : \"DIRECT\" , \"status\" : \"VALID\" , \"sponsored\" : false , \"beanName\" : \"Member\" }";
$objectExamples{"List<Member>"} = $listPrepend . $objectExamples{"Member"} . $listAppend;
$objectExamples{"User"} = "{ \"firstName\" : \"Some\" , \"lastName\" : \"Body\" , \"middleName\" : null , \"titleBefore\" : \"Mgr.\" , \"titleAfter\" : null , \"serviceUser\" : false , \"sponsoredUser\" : false , \"specificUser\" : false , \"majorSpecificType\" : \"NORMAL\" , \"id\" : 34 , \"uuid\" : \"5e5a02dd-f991-4706-a428-69c3ea6c5ce8\" , \"beanName\" : \"User\" }";
$objectExamples{"List<User>"} = $listPrepend . $objectExamples{"User"} . $listAppend;
$objectExamples{"ExtSource"} = "{ \"name\" : \"PERUNPEOPLE\" , \"type\" : \"cz.metacentrum.perun.core.impl.ExtSourceSql\" , \"attributes\" : {} , \"id\" : 2 , \"beanName\" : \"ExtSource\" }";
$objectExamples{"List<ExtSource>"} = $listPrepend . $objectExamples{"ExtSource"} . $listAppend;
$objectExamples{"UserExtSource"} = "{ \"userId\": 34 , \"loa\" : 0 , \"extSource\" : " . $objectExamples{"ExtSource"} . " , \"login\" : \"my_login\" , \"persistent\" : true , \"id\" : 312 , \"lastAccess\" : \"2019-06-10 14:07:42.2767\" , \"beanName\" : \"UserExtSource\" }";
$objectExamples{"List<UserExtSource>"} = $listPrepend . $objectExamples{"UserExtSource"} . $listAppend;
$objectExamples{"RichUser"} = "{ \"firstName\" : \"Some\" , \"lastName\" : \"Body\" , \"middleName\" : null , \"titleBefore\" : \"Mgr.\" , \"titleAfter\" : null , \"serviceUser\" : false , \"sponsoredUser\" : false , \"specificUser\" : false , \"majorSpecificType\" : \"NORMAL\" , \"id\" : 34 , \"uuid\" : \"5e5a02dd-f991-4706-a428-69c3ea6c5ce8\" , \"beanName\" : \"RichUser\" , \"userExtSources\" : " . $objectExamples{"List<UserExtSource>"} . ", \"userAttributes\" : [ { \"value\" : \"my_login\" , \"type\" : \"java.lang.String\" , \"entity\" : \"user\" , \"namespace\" : \"urn:perun:user:attribute-def:def\" , \"friendlyName\" : \"login-namespace:perun\" , \"writable\" : true , \"baseFriendlyName\" : \"login-namespace\" , \"friendlyNameParameter\" : \"perun\" , \"unique\" : false , \"displayName\" : \"Login in namespace: perun\" , \"description\" : \"Logname in namespace 'perun'.\" , \"id\" : 1905 , \"beanName\" : \"Attribute\" } ] }";
$objectExamples{"List<RichUser>"} = $listPrepend . $objectExamples{"RichUser"} . $listAppend;
$objectExamples{"RichMember"} = "{ \"id\" : 12 , \"userId\" : 34 , \"voId\" : 42 , \"sourceGroupId\" : null , \"membershipType\" : \"DIRECT\" , \"status\" : \"VALID\" , \"sponsored\" : false , \"beanName\" : \"RichMember\" , \"user\" : " . $objectExamples{"User"} . " , \"userExtSources\" : " . $objectExamples{"List<UserExtSource>"} . " , \"memberAttributes\" : [ ] , \"userAttributes\" : [ { \"value\" : \"my_login\" , \"type\" : \"java.lang.String\" , \"entity\" : \"user\" , \"namespace\" : \"urn:perun:user:attribute-def:def\" , \"friendlyName\" : \"login-namespace:perun\" , \"writable\" : true , \"baseFriendlyName\" : \"login-namespace\" , \"friendlyNameParameter\" : \"perun\" , \"unique\" : false , \"displayName\" : \"Login in namespace: perun\" , \"description\" : \"Logname in namespace 'perun'.\" , \"id\" : 1905 , \"beanName\" : \"Attribute\" } ] }";
$objectExamples{"List<RichMember>"} = $listPrepend . $objectExamples{"RichMember"} . $listAppend;
$objectExamples{"RTMessage"} = "{ \"ticketNumber\" : 32525 , \"memberPreferredEmail\" : \"mail\@mail.com\" }";
$objectExamples{"Service"} = "{ \"id\" : 290 , \"name\" : \"passwd\" , \"description\" : \"Provision /etc/passwd file.\" , \"delay\" : 10 , \"recurrence\" : 2 , \"enabled\" : true , \"script\" : \"./passwd\" }";
$objectExamples{"List<Service>"} = $listPrepend . $objectExamples{"Service"} . $listAppend;
$objectExamples{"ServicesPackage"} = "{ \"id\" : 50 , \"name\" : \"Unix account\" , \"description\" : \"Collection of services for managing unix accounts.\" }";
$objectExamples{"List<ServicesPackage>"} = $listPrepend . $objectExamples{"ServicesPackage"} . $listAppend;
$objectExamples{"Destination"} = "{ \"id\" : 99 , \"destination\" : \"host\@host.cz\" , \"type\" : \"HOST\" , \"propagationType\" : \"PARALLEL\" }";
$objectExamples{"List<Destination>"} = $listPrepend . $objectExamples{"Destination"} . $listAppend;
$objectExamples{"RichDestination"} = "{ \"id\" : 99 , \"destination\" : \"host\@host.cz\" , \"type\" : \"HOST\" , \"propagationType\" : \"PARALLEL\" , \"service\" : " . $objectExamples{"Service"} . " , \"facility\" : " . $objectExamples{"Facility"} . " }";
$objectExamples{"List<RichDestination>"} = $listPrepend . $objectExamples{"RichDestination"} . $listAppend;
$objectExamples{"Host"} = "{ \"id\" : 523 , \"hostname\" : \"host1.host.cz\" }";
$objectExamples{"List<Host>"} = $listPrepend . $objectExamples{"Host"} . $listAppend;
$objectExamples{"AuditMessage"} = "{ \"id\" : 249053 , \"msg\" : \"Something happened.\" , \"actor\" : \"actor\@hostname.cz\" , \"createdAt\" : \"2015-03-16 16:00:40.449221\" , \"createdByUid\" : \"34\" , \"fullMessage\" : \"249053 \\\"2015-03-16 16:00:40.449221\\\" \\\"actor\@hostname.cz\\\" Something happened.\" }";
$objectExamples{"List<AuditMessage>"} = $listPrepend . $objectExamples{"AuditMessage"} . $listAppend;
$objectExamples{"String"} = "\"text\"";
$objectExamples{"boolean"} = "true";
$objectExamples{"Candidate"} = "{ \"id\" : 0 , \"serviceUser\" : false , \"firstName\" : \"Random\" , \"lastName\" : \"Name\" , \"middleName\" : null , \"titleBefore\" : \"Dr.\" , \"titleAfter\" : null , userExtSource : " . $objectExamples{"UserExtSource"} . " , additionalUserExtSources : null , attributes : { \"urn:perun:member:attribute-def:def:organization\" : \"Masarykova univerzita\" , \"urn:perun:member:attribute-def:def:mail\" : \"random\@name.cz\" } }";
$objectExamples{"List<Candidate>"} = $listPrepend . $objectExamples{"Candidate"} . $listAppend;
$objectExamples{"MemberCandidate"} = "{ \"candidate\" : " . $objectExamples{"Candidate"} . " , \"member\" : " . $objectExamples{"Member"} . " , \"richUser\" : " . $objectExamples{"RichUser"} . " }";
$objectExamples{"List<MemberCandidate>"} = $listPrepend . $objectExamples{"MemberCandidate"} . $listAppend;
$objectExamples{"SecurityTeam"} = "{ \"id\" : 924 , \"name\" : \"CSIRT\" , \"description\" : \"My CSIRT\" }";
$objectExamples{"List<SecurityTeam>"} = $listPrepend . $objectExamples{"SecurityTeam"} . $listAppend;
$objectExamples{"Pair<User,String>"} = "{ \"left\" : " . $objectExamples{"User"} ." , \"right\" : \"Some reason\" }";
$objectExamples{"List<Pair<User,String>>"} = $listPrepend . $objectExamples{"Pair<User,String>"} . $listAppend;
$objectExamples{"AttributeRights"} = "{ \"attributeId\" : 5 , \"role\" : \"VOADMIN\", \"rights\" : [ \"READ\" , \"WRITE\"] }";
$objectExamples{"List<AttributeRights>"} = $listPrepend . $objectExamples{"AttributeRights"} . $listAppend;
$objectExamples{"BanOnFacility"} = "{ \"id\" : 3 , \"validityTo\" : 1533638919 , \"description\" : \"banned\" , \"userId\" : 2341 , \"facilityId\" : 233 , \"beanName\" : \"BanOnFacility\" }";
$objectExamples{"List<BanOnFacility>"} = $listPrepend . $objectExamples{"BanOnFacility"} . $listAppend;
$objectExamples{"BanOnResource"} = "{ \"id\" : 4 , \"validityTo\" : 1533638919 , \"description\" : \"banned\" , \"memberId\" : 13541 , \"resourceId\" : 2234 , \"beanName\" : \"BanOnResource\" }";
$objectExamples{"List<BanOnResource>"} = $listPrepend . $objectExamples{"BanOnResource"} . $listAppend;
$objectExamples{"BanOnVo"} = "{ \"id\" : 2 , \"validityTo\" : 1533638919 , \"description\" : \"banned\" , \"memberId\" : 13541 , \"voId\" : 12 , \"beanName\" : \"BanOnVo\" }";
$objectExamples{"List<BanOnVo>"} = $listPrepend . $objectExamples{"BanOnVo"} . $listAppend;
$objectExamples{"EnrichedHost"} = "{ \"host\" : " . $objectExamples{"Host"} . " , \"hostAttributes\" : " . $objectExamples{"List<Attribute>"} . " }";
# BUG FIX: the original immediately overwrote "List<EnrichedHost>" with
# $objectExamples{"List<MemberWithSponsors>"}, which is not defined until
# later in this block — so the example was clobbered to undef.
$objectExamples{"List<EnrichedHost>"} = $listPrepend . $objectExamples{"EnrichedHost"} . $listAppend;
$objectExamples{"EnrichedResource"} = "{ \"resource\" : " . $objectExamples{"Resource"} . " , \"attributes\" : " . $objectExamples{"List<Attribute>"} . " }";
$objectExamples{"List<EnrichedResource>"} = $listPrepend . $objectExamples{"EnrichedResource"} . $listAppend;
$objectExamples{"MemberWithSponsors"} = "{ \"member\" : " . $objectExamples{"RichMember"} . " , \"sponsors\" : " . $objectExamples{"List<RichUser>"} . " }";
$objectExamples{"List<MemberWithSponsors>"} = $listPrepend . $objectExamples{"MemberWithSponsors"} . $listAppend;
$objectExamples{"MembersPageQuery"} = "{ \"pageSize\" : 3 , \"offset\" : 0 , \"order\" : \"ASCENDING\" , \"sortColumn\" : \"ID\" , \"searchString\" : \"Doe\" , \"statuses\" : [\"VALID\" , \"EXPIRED\"] , \"groupId\" : 10 , \"groupStatuses\" : [\"VALID\"] }";
$objectExamples{"List<MembersPageQuery>"} = $listPrepend . $objectExamples{"MembersPageQuery"} . $listAppend;
$objectExamples{"Paginated<RichMember>"} = "{ \"offset\" : 0 , \"pageSize\" : 3 , \"totalCount\" : 1 , \"data\" : " . $objectExamples{"List<RichMember>"} . " }";
# BUG FIX: removed the original no-op self-assignment here, which also used
# the wrong (hash-slice) sigil: @objectExamples{"Paginated<RichMember>"}.
# SUB HELP
# help()
# Returns the usage/help text for this script (printed for --help | -h).
# Takes no arguments; the string literal is returned verbatim.
sub help {
	return qq{
Generate HTML javadoc for Perun RPC
----------------------------------------
Available options:
--version | -v tag (version) to build
--all-versions | -a builds all tags (versions)
--help | -h prints this help
};
}
# SUB PROCESSFILE
# processFile(file_name, dir_path)
#
# Parses one RPC manager source file and stores its methods (with their
# "/*# ... */" javadoc blocks) in the file-level $managers hash under the
# derived manager name.  Dies if the file cannot be opened or if the
# parser ends in the middle of a javadoc/method (phase != 0).
sub processFile {
	my $file_name = $_[0];
	my $dir_path = $_[1];
	my $fullPath = $dir_path . "/" . $file_name;

	# Derive the manager name: "VosManagerMethod.java" -> "VosManager";
	# otherwise strip the extension; otherwise use the file name as-is.
	my $managerName;
	if($file_name =~ m/^(.*)Method\.java/) {
		$managerName = "$1";
	} elsif ($file_name =~ m/^(.*)\..*/) {
		$managerName = "$1";
	} else {
		$managerName = $file_name;
	}

	# open file
	print "PROCESSING: " . $managerName . " ($fullPath)\n";
	# BUG FIX: three-argument open with an explicit read mode — the original
	# two-argument form would honour mode characters embedded in the path.
	open my $handler, '<', $fullPath or die "Could not open $fullPath";

	# phase of looking for method
	# 0 - looking for start of javadoc symbols /*# (if found -> 1)
	# 1 - looking for parts of one javadoc or end of this javadoc (if end found -> 2)
	# 2 - looking for another javadoc (if found -> 1) or name of method (if found -> 0)
	my $phase=0; #phase of looking in file
	my @methods = ();       # all parsed methods of this manager
	my $method = {};        # method currently being assembled
	my @params = ();        # accumulators for the javadoc currently being read:
	my @textLines = ();
	my @throws = ();
	my $deprecated = undef;
	my $exampleResponse;
	my @exampleParams = ();
	my $return;
	my @javadocs = (); #array with javadocs of one method
	while (my $line = <$handler>) {
		# skip every line which start // (one line comment)
		next if($line =~ m/^\s*\/\/.*/);
		# skip all comments which start /* without hash
		# !!! THIS IS NOT IMPLEMENTED, IF THERE IS SOME /* COMMENT ON IMPORTANT PLACE
		# IT CAN CREATE BAD DOCUMENTATION, NEED TO SOLVE OR DO NOT USE THIS TYPE OF COMMENTS
		switch ($phase) {
			case 0 {
				# waiting for the "/*#" opener of a documented javadoc
				if($line =~ m/^\s*\/\*\#/) { $phase=1; }
			}
			case 1 {
				# inside a javadoc block: collect tags until the "*/" closer
				if($line =~ m/^\s*[*]\s*\@param\s*(.*)/) {
					push @params, $1;
				} elsif($line =~ m/^\s*\*\s*[@]return\s*(.*)/) {
					$return="$1";
				} elsif($line =~ m/^\s*\*\s*[@]exampleResponse\s*(.*)/) {
					$exampleResponse="$1";
				} elsif($line =~ m/^\s*\*\s*[@]exampleParam\s*(.*)/) {
					push @exampleParams, $1;
				} elsif($line =~ m/^\s*\*\s*[@]deprecated\s*(.*)/) {
					$deprecated="$1";
				} elsif($line =~ m/^\s*\*\s*[@]throw\s*(.*)/) {
					# NOTE(review): matches "@throw"; for a "@throws" tag the
					# trailing "s" is captured into the text — confirm which
					# tag the Perun javadocs actually use.
					push @throws, $1;
				} elsif($line =~ m/^\s*\*\//) {
					$phase=2;
					# local variables for purpose of saving information
					# (copies, so resetting the accumulators below does not
					# mutate what was stored in the javadoc hash)
					my $javadoc={};
					my @localParams = @params;
					my @localThrows = @throws;
					my @localTextLines = @textLines;
					my @localExampleParams = @exampleParams;
					# save one javadoc
					$javadoc->{'params'} = \@localParams;
					$javadoc->{'throws'} = \@localThrows;
					$javadoc->{'return'} = $return;
					$javadoc->{'exampleResponse'} = $exampleResponse;
					$javadoc->{'exampleParams'} = \@localExampleParams;
					if (defined $deprecated) {
						$javadoc->{'deprecated'} = $deprecated;
					}
					$javadoc->{'text'} = \@localTextLines;
					push @javadocs, $javadoc;
					#reset all needed variables
					@params=();
					@textLines=();
					@throws=();
					@exampleParams=();
					undef $return;
					undef $exampleResponse;
					$deprecated=undef;
					$javadoc=();
				} elsif($line =~ m/^\s*\*\s*(.*)/) {
					# plain description line inside the javadoc
					push @textLines, $1;
				} else {
					#skip this line, it is probably space or something nasty, we dont need it
				}
			}
			case 2 {
				# after a javadoc: either another "/*#" block follows, or the
				# method name (first bareword before "{") closes the group
				if($line =~ m/^\s*\/[*]\#/) {
					$phase=1;
				} elsif($line =~ m/^\s*([a-zA-Z0-9]+)\s*\{.*/) {
					$phase=0;
					$method->{'name'}=$1;
					#local variable for saving all javadocs
					my @localJavadocs = @javadocs;
					$method->{'javadocs'}= \@localJavadocs;
					#local variable for saving one method
					my $localMethod = $method;
					push @methods, $localMethod;
					#reset all needed variables
					@javadocs = ();
					$method = {};
				} else {
					#skip this line, it is probably some code or empty line, we dont need it
				}
			}
		}
	}
	# A non-zero phase means EOF arrived inside a javadoc or between a
	# javadoc and its method — the source file is malformed for our parser.
	if($phase != 0) {
		die "Some phase was not ended correctly for file $file_name and phase $phase!";
	}
	#save all parsed methods
	$managers->{$managerName}=\@methods;
	close($handler);
}
sub buildVersion {
my $ver = $_[0];
my $latest = $_[1];
`git -C ./perun/ checkout $ver`;
my $idx = index($ver, "v3.");
my $printVer = $ver;
if ($idx > 0) {
$printVer = substr($ver,1);
}
my $importPathCss = "css";
my $importPathJs = "js";
my $importPathImg = "img";
#open input dir
opendir (DIR, $SOURCE_DIR) or die "Cannot open directory with files (with methods)!";
if ($latest) {
$OUTPUT_DIR = "./web-template/";
} else {
$OUTPUT_DIR = "./web-template/" . $printVer;
$importPathCss = "../css";
$importPathJs = "../js";
$importPathImg = "../img";
}
#create output dir if not exists yet
unless (-d $OUTPUT_DIR) {
mkdir $OUTPUT_DIR;
print $OUTPUT_DIR . " was created. \n";
}
#process all files in dir
while (my $file = readdir(DIR)) {
next if ($file =~ m/^\./);
processFile($file, $SOURCE_DIR)
}
# PRINT MAIN FILE
open FILE,">$OUTPUT_DIR/index.html" or die "Cannot open $OUTPUT_DIR/index.html: $! \n";
print FILE qq{
<!DOCTYPE html>
<html class=" js flexbox canvas canvastext webgl no-touch geolocation postmessage websqldatabase indexeddb hashchange history draganddrop websockets rgba hsla multiplebgs backgroundsize borderimage borderradius boxshadow textshadow opacity cssanimations csscolumns cssgradients cssreflections csstransforms csstransforms3d csstransitions fontface generatedcontent video audio localstorage sessionstorage webworkers applicationcache svg inlinesvg smil svgclippaths overthrow-enabled"><!--<![endif]--><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<meta charset="utf-8">
<!--[if IE]>
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<![endif]-->
<title>RPC API documentation $printVer| Perun - Identity and Access Management System</title>
<meta name="description" content="">
<meta name="viewport" content="width=device-width">
<link rel="stylesheet" href="$importPathCss/fonts.css" type="text/css">
<link rel="stylesheet" href="$importPathCss/bootstrap.css" type="text/css">
<link rel="stylesheet" href="$importPathCss/main.css" type="text/css">
<link rel="stylesheet" href="$importPathCss/style.css" type="text/css">
<link rel="shortcut icon" href="$importPathImg/favicons/favicon.ico">
<link rel="icon" sizes="16x16 32x32 64x64" href="$importPathImg/favicons/favicon.ico">
<link rel="icon" type="image/png" sizes="64x64" href="$importPathImg/favicons/favicon-64.png">
<link rel="icon" type="image/png" sizes="32x32" href="$importPathImg/favicons/favicon-32.png">
<link rel="apple-touch-icon" href="$importPathImg/favicons/favicon-57.png">
<link rel="apple-touch-icon" sizes="144x144" href="$importPathImg/favicons/favicon-144.png">
<meta name="msapplication-TileImage" content="$importPathImg/favicons/favicon-white-144.png">
<meta name="msapplication-TileColor" content="#00569c">
<script src="$importPathJs/jquery-1.10.2.min.js"></script>
<script src="$importPathJs/bootstrap.js" type="text/javascript"></script>
</head>
<body class="front-page">
<div id="wrap">
<div class="techspec content">
<div class="push-under-menu"></div>
<div class="container">
<h1>RPC API documentation $printVer</h1>
<div class="col-md-3 list-group">
<a style="color: #005b99; text-align: right;" class="list-group-item" href="/documentation/technical-documentation">Back to Documentation<i style="margin-top: 3px; vertical-align: baseline;" class="glyphicon glyphicon-chevron-left pull-left"></i></a>
<span class="list-group-item"><b>Version: </b><select id="versionSelect" style="width: 100%">
};
my $counter = 1;
for my $v (@allVersions) {
my $idx = index $v, "v3.";
my $pv = $v;
if ($idx > 0) {
$pv = substr($v, 1);
}
print FILE qq^<option value="$pv">$v</option>^;
$counter = $counter+1;
if ($counter > $versionLimit) {
last;
}
}
print FILE qq^
</select>
<script>
if (window.location.href.indexOf("$printVer")) {
\$('select#versionSelect').val("$printVer");
}
\$('select#versionSelect').on('change', function() {
var version = \$('select#versionSelect').children("option:selected").val();
^;
if ($latest) {
print FILE qq^ window.location.assign(version+"/"+window.location.href.split("/").pop()); ^;
} else {
print FILE qq^
if (("v"+version) == "$allVersions[0]") {
window.location.assign("../"+window.location.href.split("/").pop());
} else if (version == "$allVersions[0]") {
window.location.assign("../"+window.location.href.split("/").pop());
} else {
window.location.assign("../"+version+"/"+window.location.href.split("/").pop());
}
^;
}
print FILE qq^
});
</script>
</span>
<span class="list-group-item"><b><u>General</u></b></span>
<a style="color: #005b99;" class="list-group-item" href="index.html"><b>How to use Perun RPC</b></a>
<span class="list-group-item"><b><u>Managers</u></b></span>
^;
foreach my $manager (sort(keys %{$managers})) {
print FILE "<a class=\"list-group-item\" style=\"color: #005b99;\" href=\"rpc-javadoc-$manager.html\">$manager</a>"
}
print FILE "</div><div class=\"col-md-9 pull-right\">";
print FILE qq{
<h2>How to use Perun RPC</h2>
<p class="well warning">Perun RPC is <b>not</b> using traditional REST API, so please read properly, how are your requests handled and what are expected responses.</p>
<h3>Authentication</h3>
<p>Authentication of person / component making a request is done by Apache web server and depends on it’s current configuration. Perun can internally handle identity provided by Kerberos, Shibboleth IdP, Certificate or REMOTE_USER like Apache config. Identity info provided by Apache to Perun is used only to match identity to user object from Perun (if exists).</p>
<h3>Authorization</h3>
<p>Authorization is done on Perun side based on privileges associated with user, which are stored inside Perun. Few methods are accessible without authorization (e.g. in order to allow new users to register to Perun).</p>
<h3>Request type GET / POST</h3>
<p><strong>We recommend to use POST requests all the time.</strong> It’s most simple, since all parameters are transferred in a request body in JSON format and response is the same.</p>
<p>You can optionally use GET requests, but then parameters must be present in a request URL (as a query) and you can call only listing methods (get, list). Methods changing state (create, delete, update, add, remove,…) must be POST.</p>
<a id="url-structure"></a><h3>URL structure</h3>
<pre><code>http(s)://[server]/[authentication]/rpc/[format]/[manager]/[method]?[params]</code></pre>
<dl>
<dt>[server]</dt>
<dd>Is hostname of your Perun instance.</dd>
<dt>[authentication]</dt>
<dd>Is type of expected authentication which must be supported by Perun instance. Standard values are: <i>fed</i> (Shibboleth IDP), <i>krb</i> (Kerberos), <i>cert</i> (Certificate), <i>non</i> (without authorization).</dd>
<dt>[format]</dt>
<dd>Format of data for transfer. Possible values are: <i>json</i>, <i>jsonp</i> and <i>voot</i>.</dd>
<dt>[manager]</dt>
<dd>Name of manager to call method in (in a camel case).</dd>
<dt>[method]</dt>
<dd>Name of method to call from selected manager (in a camel case).</dd>
<dt>[params]</dt>
<dd>Query parameters passed in URL (for GET requests) in a following manner: <pre>?param1=value&param2=value&listParam[]=value1&listParam[]=value2</pre></dd>
</dl>
<a id="passing-parameters"></a><h3>Passing parameters</h3>
<p>When using GET requests, method parameters must be present in a URL (see above for full overview). <pre>URL: ...vosManager/getVoById?id=123</pre>
<p>When using POST, expected parameters are properties of JSON object in request body, eg.: <pre>URL: ...vosManager/getVoById
Body: { "id": 123 }</pre>
<p>If you are passing whole objects (e.g. on object creation), you usually omit not relevant properties and id is set to 0.
<pre>URL: ...vosManager/createVo
Body: { "id": 0 , "name" : "My VO" , "shortName" : "myVo" }</pre>
<p><i>Note: VO object is missing properties like: beanName, createdAt, createdBy etc.</i>
<p>Perun API is using mostly only IDs of objects, so you don’t have to care about exact object properties values. Objects are retrieved only internally and must pass existence and authorization checks.
<a id="http-return-codes"></a><h3>HTTP return codes</h3>
<p><b>If OK, all requests to Perun API returns 200 return code.</b>
<p>When processing of your request throws an exception in java code, <b>response is still 200, but it’s body is replaced with serialized Exception object.</b> Any other return code signalize problem with a server (not found, internal error etc.).
<a id="return-values"></a><h3>Return values</h3>
<p>If response of method call is an object or list of them, correct JSON representation is returned.
<p>If response of method call is null or any primitive type (integer, string, boolean), returned value is simple string. So you will get: <code>null</code> and <b>NOT</b>: <code>{ "value": null }</code>
<h3>Usage of JSON/JSONP formats</h3>
<p>Perun can handle both formats. While <b>both consumes valid JSON as input</b>, second one produces response with padding:
<pre>Request: someUrl?callback=call1&param=value
Response: call1(response);
</pre>
<p>If you omit <em>callback</em> query parameter, you will get: </p><pre>Response: null(response);</pre><p></p>
<p>When using JSONP, returned objects are stripped of non relevant properties like <em>createdAt</em>, <em>createdBy</em>, <em>modifiedAt</em>, <em>modifiedBy</em> etc. You can get them when using standard JSON.</p>
</div>
</div>
</div>
};
print FILE qq{
<script type="text/javascript">
\$(document).ready(function() {
\$("#nav-documentation").addClass('active');
});
</script>
};
close (FILE);
foreach my $manager (sort(keys %{$managers})) {
open FILE,">$OUTPUT_DIR/rpc-javadoc-$manager.html" or die "Cannot open $OUTPUT_DIR/rpc-javadoc-$manager.html: $! \n";
print FILE qq{
<!DOCTYPE html>
<html class=" js flexbox canvas canvastext webgl no-touch geolocation postmessage websqldatabase indexeddb hashchange history draganddrop websockets rgba hsla multiplebgs backgroundsize borderimage borderradius boxshadow textshadow opacity cssanimations csscolumns cssgradients cssreflections csstransforms csstransforms3d csstransitions fontface generatedcontent video audio localstorage sessionstorage webworkers applicationcache svg inlinesvg smil svgclippaths overthrow-enabled"><!--<![endif]--><head><meta http-equiv="Content-Type" content="text/html; charset=UTF-8">
<meta charset="utf-8">
<!--[if IE]>
<meta http-equiv="X-UA-Compatible" content="IE=edge,chrome=1">
<![endif]-->
<title>RPC API documentation $printVer - $manager | Perun - Identity and Access Management System</title>
<meta name="description" content="">
<meta name="viewport" content="width=device-width">
<link rel="stylesheet" href="$importPathCss/fonts.css" type="text/css">
<link rel="stylesheet" href="$importPathCss/bootstrap.css" type="text/css">
<link rel="stylesheet" href="$importPathCss/main.css" type="text/css">
<link rel="stylesheet" href="$importPathCss/style.css" type="text/css">
<link rel="shortcut icon" href="$importPathImg/favicons/favicon.ico">
<link rel="icon" sizes="16x16 32x32 64x64" href="$importPathImg/favicons/favicon.ico">
<link rel="icon" type="image/png" sizes="64x64" href="$importPathImg/favicons/favicon-64.png">
<link rel="icon" type="image/png" sizes="32x32" href="$importPathImg/favicons/favicon-32.png">
<link rel="apple-touch-icon" href="$importPathImg/favicons/favicon-57.png">
<link rel="apple-touch-icon" sizes="144x144" href="$importPathImg/favicons/favicon-144.png">
<meta name="msapplication-TileImage" content="$importPathImg/favicons/favicon-white-144.png">
<meta name="msapplication-TileColor" content="#00569c">
<script src="$importPathJs/jquery-1.10.2.min.js"></script>
<script src="$importPathJs/bootstrap.js" type="text/javascript"></script>
</head>
<body class="front-page">
<div id="wrap">
<div class="techspec content">
<div class="push-under-menu"></div>
<div class="container">
<a id="$manager-title"></a><h1>RPC API documentation $printVer</h1>
<div class="col-md-3 list-group">
<a style="color: #005b99; text-align: right;" class="list-group-item" href="/documentation/technical-documentation">Back to Documentation<i style="margin-top: 3px; vertical-align: baseline;" class="glyphicon glyphicon-chevron-left pull-left"></i></a>
<span class="list-group-item"><b>Version: </b><select id="versionSelect" style="width: 100%">
};
my $counter = 1;
for my $v (@allVersions) {
my $idx = index $v, "v3.";
my $pv = $v;
if ($idx > 0) {
$pv = substr($v, 1);
}
print FILE qq^<option value="$pv">$v</option>^;
$counter = $counter+1;
if ($counter > $versionLimit) {
last;
}
}
print FILE qq^
</select>
<script>
if (window.location.href.indexOf("$printVer")) {
\$('select#versionSelect').val("$printVer");
}
\$('select#versionSelect').on('change', function() {
var version = \$('select#versionSelect').children("option:selected").val();
^;
if ($latest) {
print FILE qq^ window.location.assign(version+"/"+window.location.href.split("/").pop()); ^;
} else {
print FILE qq^
if (("v"+version) == "$allVersions[0]") {
window.location.assign("../"+window.location.href.split("/").pop());
} else if (version == "$allVersions[0]") {
window.location.assign("../"+window.location.href.split("/").pop());
} else {
window.location.assign("../"+version+"/"+window.location.href.split("/").pop());
}
^;
}
print FILE qq^
});
</script>
</span>
<span class="list-group-item"><b><u>General</u></b></span>
<a class="list-group-item" style="color: #005b99;" href="index.html">How to use RPC</a>
<span class="list-group-item"><b><u>Managers</u></b></span>
^;
foreach my $menuManager (sort(keys %{$managers})) {
my $activeLink = "";
if ($menuManager eq $manager) {
$activeLink = "<b>" . $menuManager . "</b>";
} else {
$activeLink = $menuManager;
}
print FILE qq{<a class="list-group-item" style="color: #005b99;" href="rpc-javadoc-$menuManager.html">$activeLink</a>};
}
print FILE "</div>";
#print FILE qq{<div class="panel-group" id="$manager">};
print FILE qq{<div class="col-md-9 pull-right">};
print FILE qq{<h2>$manager</h2>};
my $methods = $managers->{$manager};
my $sortedMethods={};
#prepare sorted methods
foreach my $notSortedMethod (@{$methods}) {
#get names of methods
my $methodName = $notSortedMethod->{'name'};
my $javadocs = $notSortedMethod->{'javadocs'};
$sortedMethods->{$methodName}=$notSortedMethod->{'javadocs'};
}
#print sorted methods
foreach my $sortedMethod (sort(keys %{$sortedMethods})) {
my $javadocs = $sortedMethods->{$sortedMethod};
#print info about javadocs
my $counter = 0;
foreach my $javadoc (@{$javadocs}) {
$counter++;
my $throws = $javadoc->{'throws'};
my $return = $javadoc->{'return'};
my $params = $javadoc->{'params'};
my $texts = $javadoc->{'text'};
my $deprecated = $javadoc->{'deprecated'};
my $exampleResponse = $javadoc->{'exampleResponse'};
my $exampleParamsLocal = $javadoc->{'exampleParams'};
# FILL MAP with example params
# $exampleParams{'param'}->"example_itself"
my %exampleParams = ();
foreach my $parameter (@$exampleParamsLocal) {
if (defined $parameter && $parameter ne "") {
my @rest = split(/ /, $parameter);
splice(@rest, 0, 1);
my $restPar = join(" ", @rest);
$exampleParams{(split(/ /, $parameter))[0]} = $restPar;
}
}
# CREATE ANNOTATION
my $methodAnnotation = "";
if (defined $params) {
foreach my $par (@$params) {
if (defined $par && $par ne "") {
my $par1 = (split(/ /, $par))[1];
$par1 =~ s/\Q<\E/</g;
$par1 =~ s/\Q>\E/>/g;
unless($par1) {
print $sortedMethod . "\n";
}
$methodAnnotation .= $par1;
$methodAnnotation .= " ";
$methodAnnotation .= (split(/ /, $par))[0];
$methodAnnotation .= ", "
}
}
}
if (length($methodAnnotation) >= 2) { $methodAnnotation = substr($methodAnnotation, 0, -2) }
# is deprecated ?
my $depr = "";
if (defined $deprecated) {
if (length $deprecated > 0) {
$depr = qq^<abbr class="pull-right" title="^ . $deprecated . qq^"><b>Deprecated</b></abbr>^;
} else {
#$depr = "<span style=\"padding: 10px 20px; color: #005b99;\" class=\"pull-right\"><b>Deprecated</b></span>";
$depr = qq^<abbr class="pull-right" title="Method is NOT recommended for use, it can be removed in any time."><b>Deprecated</b></abbr>^;
}
}
# PRINT ANNOTATION
print FILE qq{
<div class="panel panel-default" style="margin-bottom: 5px;">
<div class="panel-heading" style="background-color: white;">
<span class="panel-title">
$depr
<a style="color: #005b99;" data-toggle="collapse" data-parent="#$manager" href="#$manager$sortedMethod$counter">
$sortedMethod ( $methodAnnotation )
</a>
</span>
</div>
<div id="$manager$sortedMethod$counter" class="panel-collapse collapse">
<div class="panel-body">
};
# <i class="icon-chevron-left" style="margin-top: 4px; transition: all 0.2s ease-out 0s;"></i>
# PRINT MAIN TEXT
print FILE "<p>";
print FILE join(" " , @{$texts});
# PRINT PARAM TABLE
if (@{$params}) {
print FILE "<table class=\"table\"><tr><th>Parameter name</th><th>Data type</th><th width=\"60%\">Description</th></tr>";
#print params
foreach my $param (@{$params}) {
my @par = split(/ /, $param);
$par[1] =~ s/\Q<\E/</g;
$par[1] =~ s/\Q>\E/>/g;
print FILE '<tr><td>' . $par[0] . "</td><td>" . $par[1] . "</td><td>";
splice(@par, 0, 2);
print FILE join(" ", @par);
print FILE "</td></tr>\n";
}
print FILE "</table>";
}
# PRINT THROWS TABLE
print FILE "<table class=\"table\"><tr><th>Thrown exception</th><th width=\"60%\">Description</th></tr>";
push (@{$throws}, "PrivilegeException When caller is not allowed to call this method. Result may vary based on caller identity and provided parameter values.");
push (@{$throws}, "InternalErrorException When unspecified error occur. See exception param <code>message</code> for explanation.");
push (@{$throws}, "RpcException Wrong usage of API (wrong url, missing param etc.). See exception params <code>message</code> and <code>type</code> for explanation.");
foreach my $throw (sort @{$throws}) {
my @tro = split(/ /, $throw);
splice(@tro, 0, 1);
my $restTro = join(" ", @tro);
print FILE "<tr><td>" . (split(/ /, $throw))[0] . "</td><td>" . $restTro . "</td></tr>"
}
#print FILE '<tr><td>PrivilegeException</td><td>When caller is not allowed to call this method. Result may vary based on caller identity and provided parameter values.</td></tr>';
#print FILE '<tr><td>InternalErrorException</td><td>When unspecified error occur. See exception <code>message</code> param for explanation.</td></tr>';
#print FILE '<tr><td>RpcException</td><td>When caller is using API wrong way (wrong url, missing param etc.). See exception <code>message</code> and <code>type</code> params for explanation.</td></tr>';
print FILE "</table>";
# PRINT RETURN TABLE
print FILE "<table class=\"table\"><tr><th>Return type</th><th width=\"60%\">Description</th></tr>";
if(defined $return) {
my @ret = split(/ /, $return);
# escape <> in return type
$ret[0] =~ s/\Q<\E/</g;
$ret[0] =~ s/\Q>\E/>/g;
print FILE '<tr><td>' . $ret[0] . "</td><td>";
splice(@ret, 0, 1);
print FILE join(" ", @ret);
print FILE "</td></tr>\n";
} else {
print FILE '<tr><td>void</td><td></td></tr>';
}
print FILE "</table>";
# PRINT EXAMPLE URL
my $managerUrl = lcfirst($manager);
print FILE qq{
<p><b>Example URL</b><pre><code>https://[hostname]/krb/rpc/json/$managerUrl/$sortedMethod</code></pre>
};
print FILE "<ul><li><a href=\"index.html#url-structure\"><i>see URL structure</i></a></li></ul>";
# PRINT EXAMPLE PARAMS
if (@{$params}) {
print FILE "<p><b>Example params</b><pre><code>{ ";
#print params
for (my $count = 0; $count < scalar @{$params}; $count++) {
my $param = @{$params}[$count];
my @par = split(/ /, $param);
my $printPar = "{...}";
# If we have fixed example for param, use it
if (exists($exampleParams{$par[0]})) {
$printPar = $exampleParams{$par[0]};
# We don't have fixed example, use generic definition
} elsif ($par[1] eq "int") {
$printPar = int(rand(100));
} elsif ($par[1] eq "List") {
$printPar = "[ {...} , {...} ]";
} elsif ($par[1] eq "String[]" || $par[1] eq "List<String>") {
$printPar = "[ \"text\" , \"text\" ]";
} elsif ($par[1] eq "int[]" || $par[1] eq "List<Integer>") {
$printPar = "[ " . int(rand(100)) . " , " . int(rand(100)) ." ]";
} elsif (exists($objectExamples{$par[1]})) {
$printPar = $objectExamples{$par[1]};
}
print FILE "\"" . $par[0] . "\" : " . $printPar;
if ($count < (scalar @{$params})-1) {
print FILE " , ";
}
}
print FILE " }</code></pre>";
print FILE "<ul><li><a href=\"index.html#passing-parameters\"><i>see Passing params</i></a></li></ul>";
}
# PRINT EXAMPLE RESPONSE
print FILE "<p><b>Example response</b><pre><code>";
if(defined $return) {
my @rt = split(/ /, $return);
if (defined $exampleResponse) {
print FILE $exampleResponse;
} elsif (exists($objectExamples{$rt[0]})) {
print FILE $objectExamples{$rt[0]};
} elsif ($rt[0] eq "int") {
print FILE int(rand(100));
} else {
print FILE "{ ... TODO ... }";
}
} else {
print FILE "null";
}
print FILE "</code></pre>";
print FILE "<ul><li><a href=\"index.html#return-values\"><i>see Return values</i></a></li><li><a href=\"index.html#http-return-codes\"><i>see HTTP return codes</i></a></li></ul>";
print FILE "</p></div></div></div>";
}
}
print FILE qq{</div>};
print FILE qq{
<script type="text/javascript">
\$(document).ready(function() {
\$("#nav-documentation").addClass('active');
});
var url = document.location.toString();
if ( url.match('#') ) {
\$('#'+url.split('#')[1]).addClass('in');
}
</script>
};
close (FILE);
}
}
#START OF MAIN PROGRAM
# Command-line state.
my $version;   # -v : a single version (git tag) to build
my $buildAll;  # -a : build every tagged version (capped by $versionLimit)
my $commit;    # -c : a specific commit/branch to build in addition to tags
GetOptions ("help|h" => sub {print help(); exit 0;},
	"version|v=s" => \$version ,
	"all-versions|a" => \$buildAll ,
	"commit|c=s" => \$commit ,
	) || die help();
# Always start from a fresh clone of the perun sources: wipe any previous
# checkout first.  (Both branches of the old unless/else duplicated the
# "Checking out" + clone step; only the wipe differs, so fold them.)
if (-d $SOURCE_DIR) {
	print "Wiping-out previously checkouted perun sources...\n";
	`rm -rf ./perun/`;
}
print "Checking out latest perun...\n";
`git clone http://github.com/CESNET/perun.git perun`;
# determine all possible versions, newest first (version-aware sort)
@allVersions = `git -C ./perun/ tag --list`;
chomp @allVersions;
@allVersions = reverse sort versioncmp @allVersions;
if ($buildAll) {
	if (defined($commit)) {
		# we add specific commit to the top
		unshift @allVersions, $commit;
	}
	# Build all versions, stopping once $versionLimit builds were made.
	my $counter = 1;
	for my $ver (@allVersions) {
		# second argument flags whether this is the latest (first) entry
		buildVersion($ver, ($allVersions[0] eq $ver));
		$counter = $counter+1;
		if ($counter > $versionLimit) {
			last;
		}
	}
} else {
	if (defined($commit)) {
		print "Building version: $commit\n";
		# add commit to all versions
		unshift @allVersions, $commit;
		buildVersion($commit, 1);
	} else {
		# Build specified or latest versions
		unless (defined($version)) {
			# latest version if no build version specified
			$version = $allVersions[0];
		}
		print "Building version: $version\n";
		buildVersion($version, ($allVersions[0] eq $version));
	}
}
#END OF MAIN PROGRAM
#closing DIR
closedir(DIR);
exit 0;
| zwejra/perun | perun-utils/rpc-methods-javadoc-generator/parseRpcMethods.pl | Perl | bsd-2-clause | 46,806 |
=head1 AmiGO::Worker::GOlr::Closure

Experimental. Get the closure list (default over isa_partof_closure)
for a list of term accs.

=cut

package AmiGO::Worker::GOlr::Closure;
use base ("AmiGO::Worker::GOlr");

=item new

Constructor.

Arguments: n/a

=cut
sub new {
  my $class = shift;
  my $self = $class->SUPER::new();

  ## Default closure field; callers may switch it via set_closure_field().
  $self->{AWGC_GOLR_CFNAME} = 'isa_partof_closure';

  bless $self, $class;
  return $self;
}

=item set_closure_field

Set the closure field name.

Args: string
Returns: n/a

=cut
sub set_closure_field {
  my ($self, $cfname) = @_;
  $cfname || die 'need a closure field to function';
  $self->{AWGC_GOLR_CFNAME} = $cfname;
}

=item get_closure

Get the closure set list for the input accs.

Args: term acc or term acc list ref
Returns: term acc list ref

=cut
sub get_closure {
  my $self = shift;
  my $term_ids = shift || die 'need a term id or ids to function';

  ## Normalize a bare scalar into a single-element list ref.
  $term_ids = [$term_ids] unless ref $term_ids eq 'ARRAY';

  ## Union the closure fields of every retrievable doc, plus the doc's
  ## own acc, into one deduplicated set.
  my %seen = ();
  for my $acc (@$term_ids){
    my $doc = $self->{AEJS_GOLR_DOC}->get_by_id($acc);
    next unless $doc && $doc->{$self->{AWGC_GOLR_CFNAME}};
    $seen{$_} = 1 for @{ $doc->{$self->{AWGC_GOLR_CFNAME}} };
    $seen{$acc} = 1;
  }

  return [ keys %seen ];
}

1;
| geneontology/amigo | perl/lib/AmiGO/Worker/GOlr/Closure.pm | Perl | bsd-3-clause | 1,609 |
#!/usr/bin/perl -w
use DB_File;
use Fcntl ':flock';
# Require exactly one argument: the path to a dump of class layouts to check.
if (!defined($ARGV[0])) {
print "usage: requires .class dump as parameter!\n";
exit;
}
# Release shared resources (tied DB, advisory lock), print the given
# message, and abort with exit status 5.
sub bailout
{
# BUGFIX: "defined(%hash)" was never a valid tied-check and is a fatal
# error on Perl >= 5.22; test with tied() instead.
untie %bcheckdb if (tied %bcheckdb);
# BUGFIX: "defined(MYLOCK)" treated MYLOCK as a bareword string (always
# true); check whether the handle is actually open via fileno().
if (defined(fileno(MYLOCK))) {
flock MYLOCK, LOCK_UN;
close(MYLOCK);
}
print @_;
exit 5;
}
# Interactively ask what to do about a changed chunk for key $dbkey.
# With BCHECK_UPDATE set in the environment, silently accept the new
# chunk into the database; otherwise prompt on STDIN:
#   i - ignore this difference
#   q - quit via bailout()
#   u - store the new chunk under $dbkey
# Aborts if STDIN is not a terminal (no way to ask).
sub ask_user
{
my ($dbkey, $dbchunk) = @_;
if (defined($ENV{"BCHECK_UPDATE"})) {
$bcheckdb{$dbkey} = $dbchunk;
return;
}
&bailout("BC problem detected") if (! -t STDIN);
print "(I)gnore / (Q)uit / (U)pdate: ";
my $key;
# Reads one byte at a time; newlines just echo and re-prompt below.
while(defined(read STDIN, $key, 1)) {
$key = lc($key);
print "got: >$key<\n";
return if ($key eq 'i');
&bailout("BC problem. aborted") if ($key eq 'q');
if ($key eq 'u') {
$bcheckdb{$dbkey} = $dbchunk;
return;
}
print "\n(I)gnore / (Q)uit / (U)pdate: ";
}
}
# Compare the stored chunk ($oldl) against the freshly dumped chunk ($newl),
# printing a unified-ish diff of changed lines.  Line 0 of each chunk is the
# key and must match.  Returns true iff a real difference was found.
sub diff_chunk($$)
{
my ($oldl, $newl) = @_;
my @old = split /^/m, $oldl;
my @new = split /^/m, $newl;
my $haschanges = 0;

my $max = $#old > $#new ? $#old : $#new;
die "whoops. key different" if ($old[0] ne $new[0]);
if ($#old != $#new) {
warn ("Structural difference.\n");
print @old;
print "-----------------------------------------------\n";
print @new;
# BUGFIX: this branch used to end in a bare "return;" (undef), so the
# caller never treated a structural difference as a change.
return 1;
}
print $old[0];
my ($class) = ($old[0] =~ /^(?:Class |Vtable for )(\S+)/);
# Walk both chunks line by line (index 0 is the key, already compared).
# BUGFIX: compare through the last line as well ("<=" rather than "<").
my $c = 1;
while ($c <= $max) {
my ($o, $n) = ($old[$c], $new[$c]);
chomp $o;
chomp $n;
$c++;
next if ($o eq $n);
# A line that differs only by the class-name prefix (inherited member
# reported under the derived class) is not considered a real change.
if(defined($class) and $n =~ /^(\d+\s+)\w+(::\S+\s*.*)$/) {
print "comparing >$n< against >$1$class$2<\n";
next if ($n eq "$1$class$2");
}
$haschanges = 1;
print "-$o\n+$n\n\n";
}
return $haschanges;
}
# Paths for the per-user baseline database and its advisory lock file.
# NOTE(review): "local" on $dblock looks unintentional (a plain global
# assignment would behave the same here) — confirm before changing.
local $dblock = $ENV{"HOME"} . "/bcheck.lock";
my $dbfile = $ENV{"HOME"} . "/bcheck.db";
my $cdump = $ARGV[0];
die "file $cdump is not readable: $!" if (! -f $cdump);
# make sure the advisory lock exists
open(MYLOCK, ">$dblock");
print MYLOCK "";
flock MYLOCK, LOCK_EX;
# %bcheckdb maps chunk key (first line) -> full normalized chunk text.
tie %bcheckdb, 'DB_File', $dbfile;
my $chunk = "";
open (IN, "<$cdump") or die "cannot open $cdump: $!";
while (<IN>) {
chop;
# Normalize addresses so pointer values don't cause spurious diffs.
s/0x[0-9a-fA-F]+/0x......../g;
$chunk .= $_ . "\n";
# A blank line terminates the current chunk; compare it to the baseline.
if(/^\s*$/) {
my @lines = split /^/m, $chunk;
my $key = $lines[0];
chomp $key;
# Anonymous aggregates have no stable key, so skip them entirely.
if($key !~ /<anonymous struct>/ &&
$key !~ /<anonymous union>/) {
if(defined($bcheckdb{$key})) {
my $dbversion = $bcheckdb{$key};
if($dbversion ne $chunk) {
# Only prompt the user when diff_chunk reports a real change.
&ask_user($key, $chunk) if(&diff_chunk($dbversion, $chunk));
}
}
else {
# First time we see this key: record it as the baseline.
$bcheckdb{$key} = $chunk;
print "NEW: $key\n";
}
}
$chunk = "";
next;
}
}
close(IN);
untie %bcheckdb;
flock MYLOCK, LOCK_UN;
close(MYLOCK);
exit 0;
| cdarkz/kscope-1.6-enhancement | admin/bcheck.pl | Perl | bsd-2-clause | 3,079 |
#!/usr/bin/perl
use strict;
use warnings;
use Getopt::Long;
use List::Util qw( max sum );
#-----------------------------------------------------------------------------
#----------------------------------- MAIN ------------------------------------
#-----------------------------------------------------------------------------
my $usage = "

Synopsis:

perl whamToBed.pl --append --file wham.vcf > wham.bed 2> wham.err

Description:

Converts a classified or unclassified WHAM VCF into the BED format.

If the WC and WP fields are not present in the VCF header whamToBed does not
provide the type of structural variant.

Options:

file - <STRING> - required - filename
-h,--help - <FLAG> - optional - print help statement and die
-a,--append - <FLAG> - optional - concatenate WHAM name to SV annotations (bcbio compatible)
-p,--paired - <FLAG> - optional - output paired breakpoints only (increase specificity)
-b,--buffer - <INT> - optional - add basepair to both sides of the SV call [0]
-s,--singletons - <INT> - optional - add basepair to only unpaired breakpoints [0]

Info:

paired - This option filters out sites were there was no split read support for
the other side of the breakpoint.

";

# Option state with documented defaults.
my ($help);
my $FILE;
my $APPEND = 0;
my $PAIRED = 0;
my $BUFFER = 0;
my $SINGLE_BUFFER = 0;

# BUGFIX: buffer/singletons are documented as <INT> but were parsed with
# "=s"; "=i" makes Getopt::Long reject non-integer values up front instead
# of letting garbage reach the position arithmetic.
my $opt_success = GetOptions('help'         => \$help,
                             'file=s'       => \$FILE,
                             'buffer=i'     => \$BUFFER,
                             'append'       => \$APPEND,
                             'singletons=i' => \$SINGLE_BUFFER,
                             'paired'       => \$PAIRED
    );

die $usage if $help || ! $opt_success;

if(! defined $FILE){
    print STDERR "\nFATAL: file was not provided.\n ";
    die $usage;
}

# Set to 1 by checkForAnnotations() if the VCF header declares the WC field.
my $ANNOTATED_FLAG = 0;

checkForAnnotations();

if($ANNOTATED_FLAG){
    print STDERR "INFO: BED will include SV type and annotation score\n";
}
else{
    print STDERR "INFO: BED will NOT include SV type and annotation score\n";
    print STDERR "      Classify WHAM ouput to get these features in the bed\n";
}

processLines();
#-----------------------------------------------------------------------------
#-------------------------------- SUBROUTINES --------------------------------
#-----------------------------------------------------------------------------
#Checking if the WHAM vcf has been annotated by looking for the WC field in
#the VCF file.
# Scan the VCF header for an "ID=WC" INFO declaration and raise the global
# $ANNOTATED_FLAG when it is present.  Stops at the first non-header line.
sub checkForAnnotations {
    open(my $vcf, "<", $FILE) || die "FATAL: could not open $FILE for reading\n";
    while (my $row = <$vcf>) {
        if ($row =~ /ID=WC/) {
            $ANNOTATED_FLAG = 1;
            last;
        }
        last if $row !~ /^#/;
    }
    close $vcf;
}
#-----------------------------------------------------------------------------
# Convert each VCF record in $FILE to a BED line on STDOUT.
# Uses globals: $FILE, $PAIRED, $BUFFER, $SINGLE_BUFFER, $APPEND,
# $ANNOTATED_FLAG.  Skips header lines and translocations.
sub processLines{
    open(my $FH, "<", $FILE) || die "FATAL: could not open $FILE for reading\n";
    VCF: while(my $line = <$FH>){
	chomp $line;
	next VCF if $line =~ /^#/;
	my @vcfLine = split /\t/, $line;
	# INFO column (col 8) parsed into key => value pairs.
	# NOTE(review): flags without "=" would misalign this map — assumes
	# WHAM always emits key=value entries; confirm against WHAM output.
	my %info = map{ split /=|;/} $vcfLine[7];
	# BED is 0-based half-open; VCF POS is 1-based.
	my $startPos = $vcfLine[1] - 1 ;
	my $endPos = $vcfLine[1] - 1 ;
	# BE appears to be the mate breakpoint as "CHR,POS" with "." meaning
	# unpaired — TODO confirm against the WHAM VCF spec.
	if($info{'BE'} ne '.'){
	    $startPos -= $BUFFER;
	    $endPos += $BUFFER;
	}
	if($info{"BE"} eq '.'){
	    # Unpaired breakpoint: optionally dropped, else padded separately.
	    next VCF if $PAIRED;
	    $startPos -= $SINGLE_BUFFER;
	    $endPos += $SINGLE_BUFFER;
	}
	else{
	    my @end = split /,/, $info{BE};
	    # Mate on a different chromosome = translocation; BED can't hold it.
	    if($end[0] ne $vcfLine[0]){
		print STDERR "WARNING: translocations cannot be represented
 in bed, skipping: $line\n";
		next VCF;
	    }
	    else{
		$endPos = $end[1];
	    }
	}
	# Breakpoints may arrive reversed; BED requires start <= end.
	if($startPos > $endPos){
	    my $temp = $startPos;
	    $startPos = $endPos;
	    $endPos = $temp;
	}
	my $bedLine = "$vcfLine[0]\t$startPos";
	$bedLine .= "\t$endPos";
	if($ANNOTATED_FLAG){
	    # Column 4: SV class (WC), optionally prefixed for bcbio.
	    if($APPEND){
		$bedLine .= "\twham\_$info{WC}";
	    }
	    else{
		$bedLine .= "\t$info{WC}";
	    }
	    # Column 5: max class probability normalized by the sum (WP).
	    my @probs = split /,/, $info{WP};
	    my $maxP = max @probs;
	    my $sumP = sum @probs;
	    my $normP = $maxP / $sumP;
	    $bedLine .= "\t$normP";
	}
	else{
	    # Unclassified VCF: synthesize a name and leave the score empty.
	    $bedLine .= "\t$vcfLine[0]:$startPos:$endPos\t\.";
	}
	print "$bedLine\n";
    }
    close $FH;
}
| zeeev/wham | utils/whamToBed.pl | Perl | mit | 4,158 |
# !!!!!!! DO NOT EDIT THIS FILE !!!!!!!
# This file is machine-generated by lib/unicore/mktables from the Unicode
# database, Version 6.1.0. Any changes made here will be lost!
# !!!!!!! INTERNAL PERL USE ONLY !!!!!!!
# This file is for internal use by core Perl only. The format and even the
# name or existence of this file are subject to change without notice. Don't
# use it directly.
return <<'END';
10280 1029C
END
| efortuna/AndroidSDKClone | ndk_experimental/prebuilt/linux-x86_64/lib/perl5/5.16.2/unicore/lib/Scx/Lyci.pl | Perl | apache-2.0 | 435 |
########################################################################
# Bio::KBase::ObjectAPI::KBaseExpression::DB::RNASeqDifferentialExpressionFile - This is the moose object corresponding to the KBaseExpression.RNASeqDifferentialExpressionFile object
# Authors: Christopher Henry, Scott Devoid, Paul Frybarger
# Contact email: chenry@mcs.anl.gov
# Development location: Mathematics and Computer Science Division, Argonne National Lab
########################################################################
package Bio::KBase::ObjectAPI::KBaseExpression::DB::RNASeqDifferentialExpressionFile;
use Bio::KBase::ObjectAPI::BaseObject;
use Moose;
use namespace::autoclean;
extends 'Bio::KBase::ObjectAPI::BaseObject';


# PARENT:
has parent => (is => 'rw', isa => 'Ref', weak_ref => 1, type => 'parent', metaclass => 'Typed');
# ATTRIBUTES:
has uuid => (is => 'rw', lazy => 1, isa => 'Str', type => 'msdata', metaclass => 'Typed',builder => '_build_uuid');
has _reference => (is => 'rw', lazy => 1, isa => 'Str', type => 'msdata', metaclass => 'Typed',builder => '_build_reference');
has shock_ref => (is => 'rw', isa => 'Str', printOrder => '-1', type => 'attribute', metaclass => 'Typed');
has name => (is => 'rw', isa => 'Str', printOrder => '-1', type => 'attribute', metaclass => 'Typed');


# LINKS:
has shock => (is => 'rw', type => 'link(,,shock_ref)', metaclass => 'Typed', lazy => 1, builder => '_build_shock', clearer => 'clear_shock', isa => 'Ref', weak_ref => 1);


# BUILDERS:
# Lazily resolve the shock_ref attribute into the referenced object.
sub _build_shock {
	 my ($self) = @_;
	 return $self->getLinkedObject($self->shock_ref());
}


# CONSTANTS:
sub _type { return 'KBaseExpression.RNASeqDifferentialExpressionFile'; }
sub _module { return 'KBaseExpression'; }
sub _class { return 'RNASeqDifferentialExpressionFile'; }
sub _top { return 0; }

my $attributes = [
          {
            'req' => 0,
            'printOrder' => -1,
            'name' => 'shock_ref',
            'type' => 'Str',
            'perm' => 'rw'
          },
          {
            'req' => 0,
            'printOrder' => -1,
            'name' => 'name',
            'type' => 'Str',
            'perm' => 'rw'
          }
        ];

my $attribute_map = {shock_ref => 0, name => 1};

my $links = [
          {
            'parent' => undef,
            'name' => 'shock',
            'attribute' => 'shock_ref',
            'clearer' => 'clear_shock',
            'class' => undef,
            'method' => undef,
            'module' => undef,
            'field' => undef
          }
        ];

my $link_map = {shock => 0};

my $subobjects = [];

my $subobject_map = {};

# Shared spec lookup used by _attributes/_links/_subobjects (these three
# accessors previously triplicated the same logic).  With a key, return that
# entry's spec hashref, or nothing when the key is unknown; without a key,
# return the full spec list.
sub _lookup {
	my ($key, $map, $specs) = @_;
	return $specs unless defined($key);
	my $ind = $map->{$key};
	return $specs->[$ind] if defined($ind);
	return;
}

sub _attributes {
	 my ($self, $key) = @_;
	 return _lookup($key, $attribute_map, $attributes);
}

sub _links {
	 my ($self, $key) = @_;
	 return _lookup($key, $link_map, $links);
}

sub _subobjects {
	 my ($self, $key) = @_;
	 return _lookup($key, $subobject_map, $subobjects);
}


__PACKAGE__->meta->make_immutable;
1;
| kbase/KBaseFBAModeling | lib/Bio/KBase/ObjectAPI/KBaseExpression/DB/RNASeqDifferentialExpressionFile.pm | Perl | mit | 3,334 |
package DBM::Deep::Storage;

use 5.008_004;

use strict;
use warnings FATAL => 'all';

=head2 flush()

This flushes the filehandle. This takes no parameters and returns nothing.

=head2 is_writable()

This takes no parameters. It returns a boolean saying if this filehandle is
writable.

Taken from L<http://www.perlmonks.org/?node_id=691054/>.

=head1 LOCKING

This is where the actual locking of the storage medium is performed.
Nested locking is supported.

B<NOTE>: It is unclear what will happen if a read lock is taken, then
a write lock is taken as a nested lock, then the write lock is released.

Currently, the only locking method supported is flock(1). This is a
whole-file lock. In the future, more granular locking may be supported.
The API for that is unclear right now.

The following methods manage the locking status. In all cases, they take
a L<DBM::Deep> object and returns nothing.

=over 4

=item * lock_exclusive( $obj )

Take a lock usable for writing.

=item * lock_shared( $obj )

Take a lock usable for reading.

=item * unlock( $obj )

Releases the last lock taken. If this is the outermost lock, then the
object is actually unlocked.

=back

=cut

# This class is an abstract interface: every storage backend must override
# each of the methods below.  Generate one failing stub per required method
# instead of five copy-pasted subs; the die messages are unchanged.
for my $must_override (qw( flush is_writable lock_exclusive lock_shared unlock )) {
    no strict 'refs';
    *{ __PACKAGE__ . "::$must_override" } = sub {
        die "$must_override must be implemented in a child class";
    };
}

1;
__END__
| leighpauls/k2cro4 | third_party/perl/perl/vendor/lib/DBM/Deep/Storage.pm | Perl | bsd-3-clause | 1,560 |
#-----------------------------------------------------------
# ctrlpnl.pl
# Get Control Panel info from the Software hive
#
# Change history:
# 20100116 - created
#
# References:
# http://support.microsoft.com/kb/292463
# http://learning.infocollections.com/ebook%202/Computer/
# Operating%20Systems/Windows/Windows.XP.Hacks/
# 0596005113_winxphks-chp-2-sect-3.html
# http://msdn.microsoft.com/en-us/library/cc144195%28VS.85%29.aspx
#
# Notes:
#
# copyright 2010 Quantum Analytics Research, LLC
#-----------------------------------------------------------
package ctrlpnl;
use strict;

# RegRipper plugin metadata, exposed through the accessor subs below.
my %config = (
    hive          => "Software",
    osmask        => 22,
    hasShortDescr => 1,
    hasDescr      => 0,
    hasRefs       => 0,
    version       => 20100116,
);

sub getConfig     { return %config; }
sub getShortDescr { return "Get Control Panel info from Software hive"; }
sub getDescr      { }
sub getRefs       { }
sub getHive       { return $config{hive}; }
sub getVersion    { return $config{version}; }

my $VERSION = getVersion();
my %comp;    # appears unused in this plugin; retained as-is
# Plugin entry point, called by RegRipper with the plugin class and the
# path to a Software hive.  Reports Control Panel configuration from
# Microsoft\Windows\CurrentVersion\Control Panel via the ::rptMsg sink.
sub pluginmain {
	my $class = shift;
	my $hive = shift;
	::logMsg("Launching ctrlpnl v.".$VERSION);
	::rptMsg("ctrlpnl v.".$VERSION); # banner
	::rptMsg("(".$config{hive}.") ".getShortDescr()."\n"); # banner
	my $reg = Parse::Win32Registry->new($hive);
	my $root_key = $reg->get_root_key;

	my $key_path = "Microsoft\\Windows\\CurrentVersion\\Control Panel";
	my $key;
	if ($key = $root_key->get_subkey($key_path)) {
		::rptMsg("");
		::rptMsg($key_path);
		::rptMsg("");
# Cpls section: registered Control Panel applets (name -> .cpl path).
		if (my $cpl = $key->get_subkey("Cpls")) {
			my @vals = $cpl->get_list_of_values();
			if (scalar @vals > 0) {
				::rptMsg("Cpls key");
				foreach my $v (@vals) {
					my $str = sprintf "%-10s %-50s",$v->get_name(),$v->get_data();
					::rptMsg($str);
				}
				::rptMsg("");
			}
			else {
				::rptMsg("Cpls key has no values.");
			}
		}
		else {
			::rptMsg("Cpls key not found.");
		}
# don't load section
# The 'don't load' key prevents applets from being loaded
# Be sure to check the user's don't load key, as well
		if (my $cpl = $key->get_subkey("don't load")) {
			my @vals = $cpl->get_list_of_values();
			if (scalar @vals > 0) {
				::rptMsg("don't load key");
				foreach my $v (@vals) {
					::rptMsg($v->get_name());
				}
				::rptMsg("");
			}
			else {
				::rptMsg("don't load key has no values.");
			}
		}
		else {
			::rptMsg("don't load key not found.");
		}
# Extended Properties section: applets grouped by Control Panel category.
		if (my $ext = $key->get_subkey("Extended Properties")) {
			my @sk = $ext->get_list_of_subkeys();
			if (scalar @sk > 0) {
				foreach my $s (@sk) {
					my @vals = $s->get_list_of_values();
					if (scalar @vals > 0) {
						::rptMsg($s->get_name()." [".gmtime($s->get_timestamp)." UTC]");
						# Category IDs per MS KB292463 (see header references).
						# Ref: http://support.microsoft.com/kb/292463
						my %cat = (0x00000000 => "Other Control Panel Options",
						           0x00000001 => "Appearance and Themes",
						           0x00000002 => "Printers and Other Hardware",
						           0x00000003 => "Network and Internet Connections",
						           0x00000004 => "Sounds, Speech, and Audio Devices",
						           0x00000005 => "Performance and Maintenance",
						           0x00000006 => "Date, Time, Language, and Regional Options",
						           0x00000007 => "Accessibility Options",
						           0xFFFFFFFF => "No Category");

						# Group applet names (value names) by category ID
						# (value data), then print each category's applets.
						my %prop;
						foreach my $v (@vals) {
							push(@{$prop{$v->get_data()}},$v->get_name());
						}

						foreach my $t (sort {$a <=> $b} keys %prop) {
							(exists $cat{$t}) ? (::rptMsg($cat{$t})) : (::rptMsg("Category ".$t));
							foreach my $i (@{$prop{$t}}) {
								::rptMsg("  ".$i);
							}
							::rptMsg("");
						}
					}
				}
				::rptMsg("");
			}
			else {
				::rptMsg("Extended Properties key has no subkeys.");
			}
		}
		else {
			::rptMsg("Extended Properties key not found.");
		}
	}
	else {
		::rptMsg($key_path." not found.");
	}
}
1; | millmanorama/autopsy | thirdparty/rr-full/plugins/ctrlpnl.pl | Perl | apache-2.0 | 4,046 |
# CGI page: "Featured Lab" article on the De Koeyer Lab (June 2007).
# Emits a complete HTML page via the SGN CXGN::Page framework; the article
# body itself is printed from the large heredoc below.
use strict;
use CXGN::Page;
use CXGN::Page::FormattingHelpers qw / page_title_html /;   # provides page_title_html()

# Build the page object and emit the standard SGN site header.
my $page = CXGN::Page->new();
$page->header();

# Pre-formatted title markup, interpolated into the heredoc below.
my $title = page_title_html("Feature: The De Koeyer Lab");
# Web-root-relative directory holding this feature's photographs.
my $photo_dir = "/static_content/community/feature";
print <<HTML;
$title
<center><img src="$photo_dir/200706-01.jpg" alt="De Koeyer Lab Photo" />
<p class="tinytype">
Back row, left to right: R. Griffiths, L. Nolan, M. Lagüe, D. De Koeyer, S. Whitney<br />
Front row, left to right: K. Douglass, Y. Song.
</p>
</center>
<p>Our lab is part of the Agriculture and Agri-Food Canada (AAFC) Potato Research Centre in Fredericton, New Brunswick, Canada. Our on-going research focuses on the application of genomics and bioinformatics to potato improvement.</p>
<p>Our bioinformatics resources were initially established through the <a href="http://www.cpgp.ca">Canadian Potato Genome Project</a> (principle investigators Sharon Regan and Barry Flinn) funded by Genome Canada, Atlantic Canada Opportunities Agency, and the New Brunswick Department of Agriculture and Aquaculture. We established a bioinformatics pipeline for the analysis of potato Expressed Sequence Tags (ESTs) generated by the Canadian Potato Genome Project, the Institute for Genomics Research , and other international collaborators. We were an integral part of the Potato Oligo Chip Initiative (POCI) consortium led by Dr. Christian Bachem, Wageningen University, which developed a 44K 60-mer Agilent microarray chip. This chip is being used by consortium members in several countries for gene expression profiling experiments. The Canadian Potato Genome Project also developed a large collection of activation-tagged potato lines.</p>
<p>Our main research interest currently is the development of bioinformatics and molecular tools for polymorphism discovery. We have developed a pipeline for the identification of single nucleotide polymorphisms (SNP) and haplotypes within potato unigenes. Concurrently with the bioinformatics work on polymorphism discovery, we are adopting high-resolution DNA-melting with an Idaho technology Lightscanner instrument for variant scanning and genotyping. This approach has proven successful for identifying distinct genotypes in homozygous lines, differentiating heterozygous and homozygous lines, and determining the genotypes of both diploid and tetraploid lines. This technology has the potential to replace conventional gel electrophoresis marker assays.</p>
<table width="100%" cellpadding="10">
<tr><td style="text-align:center; vertical-align:middle">
<img src="$photo_dir/200706-02.jpg" alt="shifted melting curves"><br />
<p class="tinytype">
<strong>Figure 1.</strong> Lightscanner melting curves of the PCR products of four tetraploid potato varieties. The red lines represent 3 replicates samples of a homozygote; the grey lines two different varieties with the same heterozygous genotype (3 samples each); and the blue lines represent 3 samples of another heterozygous genotype.
</p>
</td></tr>
</table>
<p>We also participate in collaborative research on genetic mapping of common scab (in collaboration with Claudia Goyer), after-cooking darkening (Gefu-Wang-Pruski; Nova Scotia Agricultural College), cold-sweetening (Xiu-Qing Li), and Colorado potato beetle (Yvan Pelletier) resistance genes and quantitative trait loci using simple-sequence repeat and SNP markers. We have initiated pilot marker-assisted selection projects for PVY resistance, late blight resistance, <em>Verticillium</em> wilt resistance, and tuber-quality traits in collaboration with AAFC potato breeders. Our lab is also involved in microarray data analyses and other aspects of bioinformatics for functional genomics research.</p>
<p>The final and more applied aspects of our work involve developing informatics tools for the potato breeding program and insect resistant germplasm. Potato breeders, like genomics researchers, generate vast quantities of data. Information management systems for breeding programs can improve efficiency as well as contribute to improved decisions related to advancing lines or selecting parents. Mining of breeding data can benefit both breeders and genomics researchers and the development of tools to integrate breeding and genomics data is a long-term goal of our research program.</p>
<p>Wild <em>Solanum</em> species hold a wealth of un-tapped beneficial diversity. We have been involved in a small germplasm enhancement program to introgress insect resistance into cultivated potato germplasm. A success story to date has been the use of <em>S. oplocense</em> as a source of resistance to Colorado potato beetle. Work is continuing to improve the adaptation of this material to the long-day conditions of Canada; however, unexpectedly, the BC1 generation has exceptional processing quality characteristics.</p>
<table width="100%" cellpadding="10">
<tr><td style="text-align:center; vertical-align:middle">
<img src="$photo_dir/200706-03.jpg" alt="potato chips following"><br />
<img src="$photo_dir/200706-04.jpg" alt="storage and defoliation"><br />
<p class="tinytype">
<strong>Figure 2.</strong> Potato chips following 4 months of 7°C storage and defoliation in field by CPB. The left photos represent resistant BC<sub>1</sub> lines derived from <em>S. oplocense</em>, and the right photos represent the susceptible <em>S. tuberosum</em> variety, Shepody.
</p>
</td></tr>
</table>
<p>The research described above is part of a National AAFC project that I am principal investigator for, titled: “Genomics approaches to potato improvement”. Collaborators on the project include Drs. H.W. (Bud) Platt (Charlottetown); Claudia Goyer, Xiu-Qing Li, Agnes Murphy, Xianzhou Nie, Helen Tai, Richard Tarn, and Bernie Zebarth (Fredericton); Qiang Liu (Guelph); and Benoit Bizimungu, Qin Chen, and Larry Kawchuk (Lethbridge). External funding has been provided by the Canada-New Brunswick Embracing Innovation in Agriculture program (New Brunswick Department of Agriculture and Aquaculture) for another current project titled “Genomics-assisted breeding resources for potato improvement”. In collaboration with Dr. Yvan Pelletier, funding for the development of insect resistance germplasm has been provided by the potato industry from Canada and France.</p>
<div style='clear:all;width:100%'> </div>
<p>
<b><u>Contact Information</u></b><br />
Dr. David De Koeyer<br />
Research Scientist/Chercheur<br />
Agriculture and Agri-Food Canada/Agriculture et Agroalimentaire Canada<br />
Telephone/Téléphone: 506-452-4885<br />
Facsimile/Télécopieur: 506-452-3316<br />
P.O. Box 20280/C.P. 20280<br />
850 Lincoln Road/ 850, chemin Lincoln<br />
Fredericton, New Brunswick/Fredericton (Nouveau Brunswick)<br />
CANADA<br />
E3B 4Z7<br />
email: dekoeyerd\@agr.gc.ca
</p>
<img src="$photo_dir/200706-05.jpg" alt="Agriculture and Agri-Food Canada"><br />
<p>
<b><u>Selected Publications</u></b>
<ul style="list-style:none">
<li>X.-Q. Li, R. Griffiths, M. Lagüe, D. DeKoeyer, C. Rothwell, M. Haroon, B. Stevens, C. Xu, V. Gustafson, M. Bonierbale, S. Regan, and B. Flinn. 2007. EST sequencing and analysis from cold-stored and reconditioned potato tubers. Acta Horticulturae (in press).</li>
<li>Pelletier, Y., C. Clark, and D. De Koeyer. 2007. Level and genetic variability of resistance to the Colorado potato beetle (Leptinotarsa decemlineata (Say)) in wild Solanum species. Am. J. Potato Res. (in press).</li>
<li>Gustafson, V., S. Mallubhotla, J. MacDonnell, M. Sanyal-Bagchi, B. Chakravarty, G. Wang-Pruski, C. Rothwell, P. Audy, D. De Koeyer, M. Siahbazi, B. Flinn, and S. Regan. 2006. Transformation and plant regeneration from leaf explants of Solanum tuberosum L. cv. 'Shepody'. Plant Cell Tissue Organ Cult. 85: 361-366.</li>
<li>Regan, S., V. Gustafson, S. Mallubhotla, B. Chakravarty, M. Bagchi, M. Siahbazi, C. Rothwell, R. Sardana, C. Goyer, P. Audy, X.-Q. Li, G. Wang-Pruski, , D. De Koeyer, and B. Flinn. 2005. Finding the perfect potato: using functional genomics to improve disease resistance and tuber quality traits. Can. J. Plant Path. 28: s247-s255.</li>
<li>Flinn, B., C. Rothwell, R. Griffiths, M. Lague, D. De Koeyer, R. Sardana, P. Audy, C. Goyer, X.-Q. Li, G. Wang-Pruski, and S. Regan. 2005. Potato Expressed Sequence Tag Generation and Analyses Using Standard and Unique cDNA Libraries. Plant Mol. Biol. 59: 405-431.</li>
<li>De Koeyer, D.L., N.A. Tinker, C.P. Wight, J. Deyl, V.D. Burrows, L.S. O’Donoughue, A. Lybaert, S.J. Molnar, K.C. Armstrong, G. Fedak, D.M. Wesenberg, B.G. Rossnagel, A.R. McElroy. 2004. A molecular linkage map with associated QTLs from a hulless x covered spring oat population. Theor. Appl. Genet. 108:1285-1298.</li>
</ul></p>
HTML
$page->footer();
| solgenomics/sgn | cgi-bin/community/feature/200706.pl | Perl | mit | 8,844 |
package Codex::Collection::SampleCollection;
use Mouse;

extends 'Codex::Collection';

# Constrain the inherited 'items' attribute to Codex::Sample objects.
has '+items' => (
    isa => 'ArrayRef[Codex::Sample]'
);

# _samples_with_status($status)
#
# Private helper shared by the status filters below: returns an array ref
# of every sample whose upload_status string equals $status exactly, or -1
# when no sample matches.  The -1 sentinel (rather than an empty list) is
# the legacy contract existing callers rely on, so it is preserved.
sub _samples_with_status {
    my ($self, $status) = @_;
    my @matched = grep { $_->upload_status eq $status } @{ $self->items };
    return @matched ? \@matched : -1;
}

# completed_samples()
#
# Array ref of samples that finished uploading, or -1 if there are none.
sub completed_samples {
    my $self = shift;
    return $self->_samples_with_status("Successfully uploaded.");
}

# processing_samples()
#
# Array ref of samples still uploading/processing, or -1 if there are none.
sub processing_samples {
    my $self = shift;
    return $self->_samples_with_status("Uploading and processing...");
}

__PACKAGE__->meta->make_immutable();
1; | mbiokyle29/codex | Codex/Collection/SampleCollection.pm | Perl | mit | 697 |
#!/usr/bin/env perl
use strict;
use warnings;
use File::Basename; # dirname()
# Use current directory to find modules
use FindBin;
use lib $FindBin::Bin;
use McCortexScripts;
use McCortexLinks;
# Report any supplied error messages, print the usage text, and terminate
# the program with a non-zero exit status.  All output goes to STDERR.
sub print_usage {
  print STDERR "Error: $_\n" for @_;
  print STDERR "Usage: $0 [--kmer <k>] <graph.ctx> <in.ctp>\n" .
               "  Interweave link file with graph file in human readable text format.\n";
  exit(-1);
}
# k-mer size: selects the matching `mccortex<k>` view mode.  Default 31,
# overridable with --kmer/-k on the command line.
my $k = 31;

# Consume leading options; only --kmer (any case, one or two dashes) is
# recognised.  Whatever remains must be the two positional file arguments.
while(@ARGV > 1 && $ARGV[0] =~ /^-./) {
  if($ARGV[0] =~ /^-?-k(mer)?$/i) {
    my $arg = shift;
    $k = shift;
    if(!defined($k) || $k !~ /^\d+$/) {
      print_usage("$arg <k> requires an argument");
    }
  }
  else { print_usage("Unknown option '$ARGV[0]'"); }
}

# Locate the mccortex binary relative to this script's own directory.
my $mccortex = dirname(__FILE__)."/../bin/mccortex";
if(!(-x $mccortex)) { die("Have you compiled McCortex with `make`?"); }

if(@ARGV != 2) { print_usage(); }
my ($ctx_path, $ctp_path) = @ARGV;

# Open the link (.ctp) file through the McCortexLinks record reader.
# NOTE(review): indirect object syntax; McCortexLinks->new(...) would be
# safer — confirm before changing.
my $ctp_fh = open_file($ctp_path);
my $ctp_file = new McCortexLinks($ctp_fh, $ctp_path);

# Pipe graph through McCortex
# graph file reader command
my $cmdline = "$mccortex $k view --quiet --kmers $ctx_path";
my $in;
open($in, '-|', $cmdline) or die $!;

my %kmerlinks = (); # kmer->path string

# Read paths
# Load every (kmer, links) record into memory, keyed by kmer sequence.
# Duplicate kmers in the link file are treated as a fatal data error.
while(1)
{
  my ($kmer, @links) = $ctp_file->next();
  if(!defined($kmer)) { last; }
  if(defined($kmerlinks{$kmer})) { die("Duplicate kmer: $kmer"); }
  $kmerlinks{$kmer} = ctp_link_to_str(@links);
}

close($ctp_fh);

# Read graph file
# Stream the graph dump line by line, echoing each kmer line and appending
# its link text (if any) immediately after it.
while(defined(my $line = <$in>))
{
  if($line =~ /^([ACGT]+)/) {
    my $kmer = $1;
    print $line;
    if(defined($kmerlinks{$kmer})) {
      print $kmerlinks{$kmer};
    }
  } else {
    die("Bad line: '$line'");
  }
}

close($in);
| mcveanlab/mccortex | scripts/perl/links-view.pl | Perl | mit | 1,692 |
package ProteomeComparison::ProteomeComparisonClient;
use JSON::RPC::Client;
use POSIX;
use strict;
use Data::Dumper;
use URI;
use Bio::KBase::Exceptions;
my $get_time = sub { time, 0 };
eval {
require Time::HiRes;
$get_time = sub { Time::HiRes::gettimeofday() };
};
use Bio::KBase::AuthToken;
# Client version should match Impl version
# This is a Semantic Version number,
# http://semver.org
our $VERSION = "0.1.0";
=head1 NAME
ProteomeComparison::ProteomeComparisonClient
=head1 DESCRIPTION
A KBase module: ProteomeComparison
=cut
# new($class, $url, @auth_args)
#
# Construct an authenticated JSON-RPC client for the ProteomeComparison
# service at $url.  @auth_args are passed through to
# Bio::KBase::AuthToken->new; construction dies if no valid token can be
# obtained, since every method of this service requires authentication.
sub new
{
    my($class, $url, @args) = @_;

    # Minimal state: a JSON-RPC transport, the endpoint URL and a list of
    # extra HTTP headers sent with every call.
    my $self = {
        client => ProteomeComparison::ProteomeComparisonClient::RpcClient->new,
        url => $url,
        headers => [],
    };

    chomp($self->{hostname} = `hostname`);
    $self->{hostname} ||= 'unknown-host';

    #
    # Set up for propagating KBRPC_TAG and KBRPC_METADATA environment variables through
    # to invoked services. If these values are not set, we create a new tag
    # and a metadata field with basic information about the invoking script.
    #
    if ($ENV{KBRPC_TAG})
    {
        $self->{kbrpc_tag} = $ENV{KBRPC_TAG};
    }
    else
    {
        # Synthesize a call tag of the form C:<script>:<host>:<pid>:<UTC ts>.
        my ($t, $us) = &$get_time();
        $us = sprintf("%06d", $us);
        my $ts = strftime("%Y-%m-%dT%H:%M:%S.${us}Z", gmtime $t);
        $self->{kbrpc_tag} = "C:$0:$self->{hostname}:$$:$ts";
    }
    push(@{$self->{headers}}, 'Kbrpc-Tag', $self->{kbrpc_tag});

    if ($ENV{KBRPC_METADATA})
    {
        $self->{kbrpc_metadata} = $ENV{KBRPC_METADATA};
        push(@{$self->{headers}}, 'Kbrpc-Metadata', $self->{kbrpc_metadata});
    }

    if ($ENV{KBRPC_ERROR_DEST})
    {
        $self->{kbrpc_error_dest} = $ENV{KBRPC_ERROR_DEST};
        push(@{$self->{headers}}, 'Kbrpc-Errordest', $self->{kbrpc_error_dest});
    }

    #
    # This module requires authentication.
    #
    # We create an auth token, passing through the arguments that we were (hopefully) given.
    {
        my $token = Bio::KBase::AuthToken->new(@args);

        if (!$token->error_message)
        {
            # Store the token both on ourselves and on the transport, which
            # sends it as the Authorization header (see RpcClient::_post).
            $self->{token} = $token->token;
            $self->{client}->{token} = $token->token;
        }
        else
        {
            #
            # All methods in this module require authentication. In this case, if we
            # don't have a token, we can't continue.
            #
            die "Authentication failed: " . $token->error_message;
        }
    }

    my $ua = $self->{client}->ua;
    # Allow long-running service calls: default 30 minutes, overridable via
    # the CDMI_TIMEOUT environment variable.
    my $timeout = $ENV{CDMI_TIMEOUT} || (30 * 60);
    $ua->timeout($timeout);
    bless $self, $class;
    #    $self->_validate_version();
    return $self;
}
=head2 compare_proteomes
$return = $obj->compare_proteomes($input)
=over 4
=item Parameter and return types
=begin html
<pre>
$input is a ProteomeComparison.ProteomeComparisonParams
$return is a ProteomeComparison.ProteomeComparisonResult
ProteomeComparisonParams is a reference to a hash where the following keys are defined:
genome1ws has a value which is a string
genome1id has a value which is a string
genome2ws has a value which is a string
genome2id has a value which is a string
sub_bbh_percent has a value which is a float
max_evalue has a value which is a string
output_ws has a value which is a string
output_id has a value which is a string
ProteomeComparisonResult is a reference to a hash where the following keys are defined:
report_name has a value which is a string
report_ref has a value which is a string
pc_ref has a value which is a string
</pre>
=end html
=begin text
$input is a ProteomeComparison.ProteomeComparisonParams
$return is a ProteomeComparison.ProteomeComparisonResult
ProteomeComparisonParams is a reference to a hash where the following keys are defined:
genome1ws has a value which is a string
genome1id has a value which is a string
genome2ws has a value which is a string
genome2id has a value which is a string
sub_bbh_percent has a value which is a float
max_evalue has a value which is a string
output_ws has a value which is a string
output_id has a value which is a string
ProteomeComparisonResult is a reference to a hash where the following keys are defined:
report_name has a value which is a string
report_ref has a value which is a string
pc_ref has a value which is a string
=end text
=item Description
=back
=cut
# compare_proteomes($input)
#
# Invoke the ProteomeComparison.compare_proteomes service method over
# JSON-RPC.  Takes exactly one argument, a ProteomeComparisonParams hash
# reference (see the POD above), and returns a ProteomeComparisonResult.
# Throws Bio::KBase::Exceptions subclasses on argument-validation, RPC, or
# HTTP failure.
sub compare_proteomes
{
    my($self, @args) = @_;

    # Authentication: required

    # Exactly one positional argument (the params hashref) is expected.
    if ((my $n = @args) != 1)
    {
        Bio::KBase::Exceptions::ArgumentValidationError->throw(error =>
            "Invalid argument count for function compare_proteomes (received $n, expecting 1)");
    }
    {
        my($input) = @args;

        my @_bad_arguments;
        (ref($input) eq 'HASH') or push(@_bad_arguments, "Invalid type for argument 1 \"input\" (value was \"$input\")");
        if (@_bad_arguments) {
            my $msg = "Invalid arguments passed to compare_proteomes:\n" . join("", map { "\t$_\n" } @_bad_arguments);
            Bio::KBase::Exceptions::ArgumentValidationError->throw(error => $msg,
                                                                   method_name => 'compare_proteomes');
        }
    }

    # Dispatch the JSON-RPC request through the shared transport.
    my $result = $self->{client}->call($self->{url}, $self->{headers}, {
        method => "ProteomeComparison.compare_proteomes",
        params => \@args,
    });
    if ($result) {
        if ($result->is_error) {
            Bio::KBase::Exceptions::JSONRPC->throw(error => $result->error_message,
                           code => $result->content->{error}->{code},
                           method_name => 'compare_proteomes',
                           data => $result->content->{error}->{error} # JSON::RPC::ReturnObject only supports JSONRPC 1.1 or 1.0
                          );
        } else {
            # List context gets the full result list; scalar context gets the
            # first (and for this method, only) result element.
            return wantarray ? @{$result->result} : $result->result->[0];
        }
    } else {
        # No response object at all: treat as a transport/HTTP-level failure.
        Bio::KBase::Exceptions::HTTP->throw(error => "Error invoking method compare_proteomes",
                        status_line => $self->{client}->status_line,
                        method_name => 'compare_proteomes',
                       );
    }
}
# version()
#
# Ask the server for the module version string it reports, via the
# ProteomeComparison.version JSON-RPC method.
#
# NOTE(review): the error handlers below report method_name
# 'compare_proteomes'; this is an artifact of the KBase code generator,
# not an actual call to that method.
sub version {
    my ($self) = @_;
    my $result = $self->{client}->call($self->{url}, $self->{headers}, {
        method => "ProteomeComparison.version",
        params => [],
    });
    if ($result) {
        if ($result->is_error) {
            Bio::KBase::Exceptions::JSONRPC->throw(
                error => $result->error_message,
                code => $result->content->{code},
                method_name => 'compare_proteomes',
            );
        } else {
            return wantarray ? @{$result->result} : $result->result->[0];
        }
    } else {
        Bio::KBase::Exceptions::HTTP->throw(
            error => "Error invoking method compare_proteomes",
            status_line => $self->{client}->status_line,
            method_name => 'compare_proteomes',
        );
    }
}
# Compare the client's semantic version against the server's and fail fast
# on incompatibility: the major versions must match, and the server's minor
# version must not be older than the client's.  A newer server minor
# version or a pre-1.0 server only triggers a warning.
sub _validate_version {
    my ($self) = @_;
    my $server_ver = $self->version();
    my $client_ver = $VERSION;
    my ($client_major, $client_minor) = split(/\./, $client_ver);
    my ($server_major, $server_minor) = split(/\./, $server_ver);

    if ($server_major != $client_major) {
        Bio::KBase::Exceptions::ClientServerIncompatible->throw(
            error => "Major version numbers differ.",
            server_version => $server_ver,
            client_version => $client_ver
        );
    }
    if ($server_minor < $client_minor) {
        Bio::KBase::Exceptions::ClientServerIncompatible->throw(
            error => "Client minor version greater than Server minor version.",
            server_version => $server_ver,
            client_version => $client_ver
        );
    }
    warn "New client version available for ProteomeComparison::ProteomeComparisonClient\n"
        if $server_minor > $client_minor;
    warn "ProteomeComparison::ProteomeComparisonClient version is $server_ver. API subject to change.\n"
        if $server_major == 0;
}
=head1 TYPES
=head2 ProteomeComparisonParams
=over 4
=item Description
string genome1ws - workspace of genome1
string genome1id - id of genome1
string genome2ws - workspace of genome2
string genome2id - id of genome2
float sub_bbh_percent - optional parameter, minimum percent of bit score compared to best bit score, default is 90
string max_evalue - optional parameter, maximum evalue, default is 1e-10
string output_ws - workspace of output object
string output_id - future id of output object
=item Definition
=begin html
<pre>
a reference to a hash where the following keys are defined:
genome1ws has a value which is a string
genome1id has a value which is a string
genome2ws has a value which is a string
genome2id has a value which is a string
sub_bbh_percent has a value which is a float
max_evalue has a value which is a string
output_ws has a value which is a string
output_id has a value which is a string
</pre>
=end html
=begin text
a reference to a hash where the following keys are defined:
genome1ws has a value which is a string
genome1id has a value which is a string
genome2ws has a value which is a string
genome2id has a value which is a string
sub_bbh_percent has a value which is a float
max_evalue has a value which is a string
output_ws has a value which is a string
output_id has a value which is a string
=end text
=back
=head2 ProteomeComparisonResult
=over 4
=item Definition
=begin html
<pre>
a reference to a hash where the following keys are defined:
report_name has a value which is a string
report_ref has a value which is a string
pc_ref has a value which is a string
</pre>
=end html
=begin text
a reference to a hash where the following keys are defined:
report_name has a value which is a string
report_ref has a value which is a string
pc_ref has a value which is a string
=end text
=back
=cut
package ProteomeComparison::ProteomeComparisonClient::RpcClient;
use base 'JSON::RPC::Client';
use POSIX;
use strict;
#
# Override JSON::RPC::Client::call because it doesn't handle error returns properly.
#
#
# Override JSON::RPC::Client::call because it doesn't handle error returns properly.
#
# Dispatches a GET for URIs carrying a query string, otherwise POSTs the
# JSON-RPC payload (which must be a hash reference).  Unlike the upstream
# implementation, an error response with a JSON body is still decoded into
# a ReturnObject so the caller can inspect the RPC error details.
sub call {
    my ($self, $uri, $headers, $obj) = @_;
    my $result;

    if ($uri =~ /\?/) {
        $result = $self->_get($uri);
    }
    else {
        Carp::croak "not hashref." unless (ref $obj eq 'HASH');
        $result = $self->_post($uri, $headers, $obj);
    }

    # True when this was a JSON-RPC "system.*" service call.  The original
    # wrote `my $service = EXPR if ($obj);` — the `my ... if` statement
    # modifier has explicitly undefined behavior in Perl (perlsyn), so it is
    # replaced with a well-defined guarded expression.
    my $service = $obj && $obj->{method} =~ /^system\./;

    $self->status_line($result->status_line);

    if ($result->is_success) {
        return unless($result->content); # notification?

        if ($service) {
            return JSON::RPC::ServiceObject->new($result, $self->json);
        }

        return JSON::RPC::ReturnObject->new($result, $self->json);
    }
    elsif ($result->content_type eq 'application/json')
    {
        return JSON::RPC::ReturnObject->new($result, $self->json);
    }
    else {
        return;
    }
}
# _post($uri, $headers, $obj)
#
# Encode $obj as JSON and POST it to $uri, honouring the JSON-RPC version
# negotiated for this request (default 1.1).  Extra headers from @$headers
# and, when present, the stored auth token (as the Authorization header)
# are attached to the request.  Returns the HTTP::Response from the UA.
sub _post {
    my ($self, $uri, $headers, $obj) = @_;
    my $json = $self->json;

    # Default to JSON-RPC 1.1 unless the request or client says otherwise.
    $obj->{version} ||= $self->{version} || '1.1';

    if ($obj->{version} eq '1.0') {
        # JSON-RPC 1.0 has no "version" member; an undef id marks a
        # notification (no response expected).
        delete $obj->{version};
        if (exists $obj->{id}) {
            $self->id($obj->{id}) if ($obj->{id}); # if undef, it is notification.
        }
        else {
            $obj->{id} = $self->id || ($self->id('JSON::RPC::Client'));
        }
    }
    else {
        # $obj->{id} = $self->id if (defined $self->id);
        # Assign a random number to the id if one hasn't been set
        $obj->{id} = (defined $self->id) ? $self->id : substr(rand(),2);
    }

    my $content = $json->encode($obj);

    $self->ua->post(
        $uri,
        Content_Type => $self->{content_type},
        Content => $content,
        Accept => 'application/json',
        @$headers,
        ($self->{token} ? (Authorization => $self->{token}) : ()),
    );
}
1;
| mdejongh/ProteomeComparison | lib/ProteomeComparison/ProteomeComparisonClient.pm | Perl | mit | 11,433 |
#!/usr/bin/env perl
use 5.010;
use strict;
use warnings;
use Bio::Tools::GFF;
use File::Find;
use Cwd;
use Sort::Naturally;
use List::UtilsBy qw(nsort_by);
my $gff = shift;
my $cwd = getcwd();
my @files;
find( sub { push @files, $File::Find::name if -f and /tidy_Ha\d+\.gff3$/ }, $cwd );
my ($expct, $pepct, $mrnact, $cdsct, $genect, $threeprutrct, $fiveprutrct,
$headct, $ingene) = (0, 0, 0, 0, 0, 0, 0, 1, 1);
my ($has_mrna, $has_cds) = (0, 0);
for my $file (nsort @files) {
my ($header, $features) = collect_gff_features($gff, $file);
say $header if $headct;
for my $id (nsort_by { m/\w+\.(\d+)\.\d+/ and $1 } keys %$features) {
my ($parentid, $start, $stop) = split /\./, $id;
$genect++ if $parentid =~ /gene/;
$pepct++ if $parentid =~ /protein_match/;
$expct++ if $parentid =~ /expressed_sequence_match/;
for my $parent (keys %{$features->{$id}}) {
my @parent_feats = split /\|\|/, $parent;
$parent_feats[8] =
_format_parent_attribute($parent_feats[8], $genect, $pepct, $expct);
say join "\t", @parent_feats;
($mrnact, $cdsct, $threeprutrct, $fiveprutrct)
= _check_part_features(\@{$features->{$id}{$parent}}, $mrnact, $cdsct, $threeprutrct, $fiveprutrct);
for my $feat (@{$features->{$id}{$parent}}) {
my @part_feats = split /\|\|/, $feat;
$part_feats[8] =
_format_part_attribute($part_feats[2], $part_feats[8], $genect, $mrnact, $cdsct, $threeprutrct, $fiveprutrct);
say join "\t", @part_feats;
}
}
say "###";
}
$headct = 0;
}
sub collect_gff_features {
my ($gff, $file) = @_;
my $header;
open my $in, '<', $gff or die "\nERROR: Could not open file: $gff\n";
while (<$in>) {
chomp;
next if /^###$/;
if (/^##?\w+/) {
$header .= $_."\n";
}
else {
last;
}
}
close $in;
chomp $header;
my $gffio = Bio::Tools::GFF->new( -file => $file, -gff_version => 3 );
my ($start, $end, $region, $parent, %features);
FEATURE:
while (my $feature = $gffio->next_feature()) {
if ($feature->primary_tag =~ /protein_match|expressed_sequence_match|gene/) {
my @string = split /\t/, $feature->gff_string;
($region) = ($string[8] =~ /ID=?\s+?(protein_match\d+|expressed_sequence_match\d+|gene\d+)/);
($start, $end) = ($feature->start, $feature->end);
$parent = join "||", @string;
}
next FEATURE unless defined $start && defined $end;
if ($feature->primary_tag !~ /protein_match|expressed_sequence_match|gene/) {
if ($feature->start >= $start && $feature->end <= $end) {
push @{$features{$region.".".$start.".".$end}{$parent}},
join "||", split /\t/, $feature->gff_string;
}
}
}
return ($header, \%features);
}
sub _check_part_features {
my ($feats, $mrnact, $cdsct, $threeprutrct, $fiveprutrct) = @_;
my ($has_mrna, $has_cds, $has_thrprutr, $has_fiveprutr) = (0, 0);
for my $feat (@$feats) {
my @part_feats = split /\|\|/, $feat;
#$has_mrna = 1 if $part_feats[2] =~ /mRNA/;
$mrnact++ if $part_feats[2] =~ /mRNA/;
$has_cds = 1
if $part_feats[2] =~ /CDS/ && $part_feats[8] =~ /ID=?\s+?CDS/;
$has_thrprutr = 1
if $part_feats[2] =~ /three_prime_UTR/ && $part_feats[8] =~ /ID=?\s+?three_prime_UTR/;
$has_fiveprutr = 1
if $part_feats[2] =~ /five_prime_UTR/ && $part_feats[8] =~ /ID=?\s+?five_prime_UTR/;
}
#$mrnact++ if $has_mrna;
$cdsct++ if $has_cds;
$threeprutrct++ if $has_thrprutr;
$fiveprutrct++ if $has_fiveprutr;
return ($mrnact, $cdsct, $threeprutrct, $fiveprutrct);
}
sub _format_parent_attribute {
my ($str, $genect, $pepct, $expct) = @_;
$str =~ s/\s\;\s/\;/g;
$str =~ s/\s+/=/g;
$str =~ s/\s+$//;
$str =~ s/=$//;
$str =~ s/=\;/;/g;
$str =~ s/\"//g;
$str =~ s/gene\d+/gene$genect/
if $str =~ /gene/;
$str =~ s/protein_match\d+/protein_match$pepct/
if $str =~ /protein_match/;
$str =~ s/expressed_sequence_match\d+/expressed_sequence_match$expct/
if $str =~ /expressed_sequence_match/;
return $str;
}
sub _format_part_attribute {
my ($tag, $str, $genect, $mrnact, $cdsct, $threeprutrct, $fiveprutrct) = @_;
$str =~ s/\s\;\s/\;/g;
#$str =~ s/\s+/=/g;
$str =~ s/\s+$//;
$str =~ s/=$//;
$str =~ s/=\;/;/g;
$str =~ s/\"//g;
$str =~ s/gene\d+/gene$genect/
if $tag =~ /mRNA/;
$str =~ s/mRNA\d+/mRNA$mrnact/
if $tag =~ /mRNA|exon|three_prime_UTR|five_prime_UTR|CDS/;
$str =~ s/CDS\d+/CDS$cdsct/
if $tag =~ /CDS/;
$str =~ s/three_prime_UTR\d+/three_prime_UTR$threeprutrct/
if $tag =~ /three_prime_UTR/;
$str =~ s/three_prime_UTR\d+/five_prime_UTR$fiveprutrct/
if $tag =~ /five_prime_UTR/;
return $str;
}
| sestaton/sesbio | gene_annotation/gff_create_unique_id.pl | Perl | mit | 4,713 |
/* Part of Optic Planner interface for SWI-Prolog
Author: Andrew Dougherty, Douglas Miles
E-mail: andrewdo@frdcsa.org, logicmoo@gmail.com
WWW: https://github.com/logicmoo/planner_external_api
Copyright (C): 2017, Process Design Center, Breda, The Netherlands.
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/*
":typing" | ":strips" | ":equality" | ":fluents" | ":durative-actions" | ":duration-inequalities" | ":numeric-fluents" | ":action-costs" | ":adl" | ":negative-preconditions" | ":disjunctive-preconditions" | ":existential-preconditions" | "universal-preconditions" | "quantified-preconditions" | ":conditional-effects" | ":timed-initial-literals" | ":preferences" | ":constraints"
":domain-axioms" | ":derived-predicates" ":action-expansions" | ":foreach-expansions" | ":dag-expansions" | ":subgoal-through-axioms" | ":safety-constraints" | ":expression-evaluation" | ":open-world" | ":true-negation" | ":ucpop"
*/
:- module(planner_external_interface, [
planner_program/2, % planner_program(Program)
planner_workspace/2, % planner_workspace(Opr,W)
planner_workspace_program/3, % planner_workspace_program(Opr,W,Program)
planner_requirement/3, % planner_requirement(Opr,W,Require)
planner_init/3, % planner_init(Opr,W,Fact)
planner_predicate/3, % planner_predicate(Opr,W,Predicate)
planner_function/3, % planner_function(Opr,W,Function)
planner_type/3, % planner_type(Opr,W,Sort)
planner_object/3, % planner_object(Opr,W,Object)
planner_derived/4, % planner_derived(Opr,W,Fact,Condition)
planner_axiom/3, % planner_axiom(Opr,W,Axiom)
planner_action/4, % planner_action(Opr,W,Action,Info)
planner_copy_workspace/2, % planner_copy_workspace(+W,?NewWorkspace)
planner_load_file/2, % planner_load_file(+W,+FileName)
planner_current_program/1,
planner_add_program/1,
planner_remove_program/1,
planner_current_workspace/1,
planner_add_workspace/1,
planner_remove_workspace/1,
planner_current_workspace_program/2,
planner_add_workspace_program/2,
planner_remove_workspace_program/2,
planner_current_requirement/2,
planner_add_requirement/2,
planner_remove_requirement/2,
planner_current_init/2,
planner_add_init/2,
planner_remove_init/2,
planner_current_predicate/2,
planner_add_predicate/2,
planner_remove_predicate/2,
planner_current_function/2,
planner_add_function/2,
planner_remove_function/2,
planner_current_type/2,
planner_add_type/2,
planner_remove_type/2,
planner_current_object/2,
planner_add_object/2,
planner_remove_object/2,
planner_current_derived/3,
planner_add_derived/3,
planner_remove_derived/3,
planner_current_axiom/2,
planner_add_axiom/2,
planner_remove_axiom/2,
planner_current_action/3,
planner_add_action/3,
planner_remove_action/3,
planner_get_plan/3, % planner_get_plan(+W,+Goal,-Plan)
planner_get_plan/4, % planner_get_plan(+W,+Planner,+Goal,-Plan)
planner_apply_step/3, % planner_apply_step(+W,+Step,-NewWorkspace)
planner_apply_step/4, % planner_apply_step(+W,+Planner,+Step,-NewWorkspace)
ensure_external_planners/0,
planner_debug/1,
make_api/0
]).
make_api:-
maplist(make_one_api,
[program/2, % program(Program)
workspace/2, % workspace(Opr,W)
workspace_program/3, % workspace_program(Opr,W,Program)
requirement/3, % requirement(Opr,W,Require)
init/3, % init(Opr,W,Fact)
predicate/3, % predicate(Opr,W,Predicate)
function/3, % function(Opr,W,Function)
type/3, % type(Opr,W,Sort)
object/3, % object(Opr,W,Object)
derived/4, % derived(Opr,W,Fact,Conditions)
axiom/3, % axiom(Opr,W,Axiom)
action/4]). % action(Opr,W,Action,Info)
make_exports:-
maplist(make_one_export,
[program/2, % program(Program)
workspace/2, % workspace(Opr,W)
workspace_program/3, % workspace_program(Opr,W,Program)
requirement/3, % requirement(Opr,W,Require)
init/3, % init(Opr,W,Fact)
predicate/3, % predicate(Opr,W,Predicate)
function/3, % function(Opr,W,Function)
type/3, % type(Opr,W,Sort)
object/3, % object(Opr,W,Object)
derived/4, % derived(Opr,W,Fact,Conditions)
axiom/3, % axiom(Opr,W,Axiom)
action/4]). % action(Opr,W,Action,Info)
make_one_export(F/Am1):- A is Am1-1,
make_one_export_fa(current,F,A),
make_one_export_fa(add,F,A),
make_one_export_fa(remove,F,A),!.
make_one_export_fa(C,F,A):- format('\n planner_~w_~w/~w, ',[C,F,A]).
make_one_api(F/4):- !,
make_one_api_34(current,"+Workspace, ?",F,nondet,"Gets each",?,", Conds"),
make_one_api_34(add,"+Workspace, +",F,det,"Adds one",+,", Conds"),
make_one_api_34(remove,"+Workspace, +",F,det,"Removes one",-,", Conds"),!.
make_one_api(F/3):- !,
make_one_api_34(current,"+Workspace, ?",F,nondet,"Gets each",?,""),
make_one_api_34(add,"+Workspace, +",F,det,"Adds one",+,""),
make_one_api_34(remove,"+Workspace, +",F,det,"Removes one",-,""),!.
make_one_api(F/2):- !,
make_one_api_2(current,"?",F,nondet,"Gets each",?,""),
make_one_api_2(add,"+",F,det,"Adds one",+,""),
make_one_api_2(remove,"+",F,det,"Removes one",-,""),!.
% Shared emitters: _2 passes no Workspace argument, _34 threads one
% through, and _234 does the actual printing of a wrapper clause plus
% its pldoc header.
make_one_api_2(Current,Mode,F,Det,String,Quest,ExtraArgs):-
make_one_api_234(Current,Mode,F,Det,String,Quest,"",ExtraArgs).
make_one_api_34(Current,Mode,F,Det,String,Quest,ExtraArgs):-
make_one_api_234(Current,Mode,F,Det,String,Quest,"Workspace, ",ExtraArgs).
% From is the preposition matching the mode marker Quest; the format/2
% template below yields the generated blocks seen later in this file.
make_one_api_234(Current,Mode,F,Det,String,Quest,WSC,ExtraArgs):-
toPropercase(F,CapsF),
(Quest=='?' -> From = "contained in";
Quest=='+' -> From = "into";
Quest=='-' -> From = "from"),
upcase_atom(F,UP),
format('
%! planner_~w_~w(~w~w~w) is ~w.
%
% ~w ~w ~w the Workspace.
%
% (PDDL''s :~w directive)
%
planner_~w_~w(~w~w~w):-
planner_~w(~w, ~w~w~w).
',[Current,F,Mode,CapsF,ExtraArgs,Det,
String,CapsF,From,
UP,
Current,F,WSC,CapsF,ExtraArgs,
F,Quest,WSC,CapsF,ExtraArgs]).
% ---------------------------------------------------------------------
% Generated wrapper API (the output of make_api/0 above). Each triple
% of planner_current_*/planner_add_*/planner_remove_* predicates simply
% forwards to the corresponding planner_* predicate with Opr bound to
% ?, + or -. Regenerate with make_api/0 rather than editing by hand.
% ---------------------------------------------------------------------
%! planner_current_program(?Program) is nondet.
%
% Gets each Program contained in the Workspace.
%
% (PDDL's :PROGRAM directive)
%
planner_current_program(Program):-
planner_program(?, Program).
%! planner_add_program(+Program) is det.
%
% Adds one Program into the Workspace.
%
% (PDDL's :PROGRAM directive)
%
planner_add_program(Program):-
planner_program(+, Program).
%! planner_remove_program(+Program) is det.
%
% Removes one Program from the Workspace.
%
% (PDDL's :PROGRAM directive)
%
planner_remove_program(Program):-
planner_program(-, Program).
%! planner_current_workspace(?Workspace) is nondet.
%
% Gets each Workspace contained in the Workspace.
%
% (PDDL's :WORKSPACE directive)
%
planner_current_workspace(Workspace):-
planner_workspace(?, Workspace).
%! planner_add_workspace(+Workspace) is det.
%
% Adds one Workspace into the Workspace.
%
% (PDDL's :WORKSPACE directive)
%
planner_add_workspace(Workspace):-
planner_workspace(+, Workspace).
%! planner_remove_workspace(+Workspace) is det.
%
% Removes one Workspace from the Workspace.
%
% (PDDL's :WORKSPACE directive)
%
planner_remove_workspace(Workspace):-
planner_workspace(-, Workspace).
%! planner_current_workspace_program(+Workspace, ?Workspace_Program) is nondet.
%
% Gets each Workspace_Program contained in the Workspace.
%
% (PDDL's :WORKSPACE_PROGRAM directive)
%
planner_current_workspace_program(Workspace, Workspace_Program):-
planner_workspace_program(?, Workspace, Workspace_Program).
%! planner_add_workspace_program(+Workspace, +Workspace_Program) is det.
%
% Adds one Workspace_Program into the Workspace.
%
% (PDDL's :WORKSPACE_PROGRAM directive)
%
planner_add_workspace_program(Workspace, Workspace_Program):-
planner_workspace_program(+, Workspace, Workspace_Program).
%! planner_remove_workspace_program(+Workspace, +Workspace_Program) is det.
%
% Removes one Workspace_Program from the Workspace.
%
% (PDDL's :WORKSPACE_PROGRAM directive)
%
planner_remove_workspace_program(Workspace, Workspace_Program):-
planner_workspace_program(-, Workspace, Workspace_Program).
%! planner_current_requirement(+Workspace, ?Requirement) is nondet.
%
% Gets each Requirement contained in the Workspace.
%
% (PDDL's :REQUIREMENT directive)
%
planner_current_requirement(Workspace, Requirement):-
planner_requirement(?, Workspace, Requirement).
%! planner_add_requirement(+Workspace, +Requirement) is det.
%
% Adds one Requirement into the Workspace.
%
% (PDDL's :REQUIREMENT directive)
%
planner_add_requirement(Workspace, Requirement):-
planner_requirement(+, Workspace, Requirement).
%! planner_remove_requirement(+Workspace, +Requirement) is det.
%
% Removes one Requirement from the Workspace.
%
% (PDDL's :REQUIREMENT directive)
%
planner_remove_requirement(Workspace, Requirement):-
planner_requirement(-, Workspace, Requirement).
%! planner_current_init(+Workspace, ?Init) is nondet.
%
% Gets each Init contained in the Workspace.
%
% (PDDL's :INIT directive)
%
planner_current_init(Workspace, Init):-
planner_init(?, Workspace, Init).
%! planner_add_init(+Workspace, +Init) is det.
%
% Adds one Init into the Workspace.
%
% (PDDL's :INIT directive)
%
planner_add_init(Workspace, Init):-
planner_init(+, Workspace, Init).
%! planner_remove_init(+Workspace, +Init) is det.
%
% Removes one Init from the Workspace.
%
% (PDDL's :INIT directive)
%
planner_remove_init(Workspace, Init):-
planner_init(-, Workspace, Init).
%! planner_current_predicate(+Workspace, ?Predicate) is nondet.
%
% Gets each Predicate contained in the Workspace.
%
% (PDDL's :PREDICATE directive)
%
planner_current_predicate(Workspace, Predicate):-
planner_predicate(?, Workspace, Predicate).
%! planner_add_predicate(+Workspace, +Predicate) is det.
%
% Adds one Predicate into the Workspace.
%
% (PDDL's :PREDICATE directive)
%
planner_add_predicate(Workspace, Predicate):-
planner_predicate(+, Workspace, Predicate).
%! planner_remove_predicate(+Workspace, +Predicate) is det.
%
% Removes one Predicate from the Workspace.
%
% (PDDL's :PREDICATE directive)
%
planner_remove_predicate(Workspace, Predicate):-
planner_predicate(-, Workspace, Predicate).
%! planner_current_function(+Workspace, ?Function) is nondet.
%
% Gets each Function contained in the Workspace.
%
% (PDDL's :FUNCTION directive)
%
planner_current_function(Workspace, Function):-
planner_function(?, Workspace, Function).
%! planner_add_function(+Workspace, +Function) is det.
%
% Adds one Function into the Workspace.
%
% (PDDL's :FUNCTION directive)
%
planner_add_function(Workspace, Function):-
planner_function(+, Workspace, Function).
%! planner_remove_function(+Workspace, +Function) is det.
%
% Removes one Function from the Workspace.
%
% (PDDL's :FUNCTION directive)
%
planner_remove_function(Workspace, Function):-
planner_function(-, Workspace, Function).
%! planner_current_type(+Workspace, ?Type) is nondet.
%
% Gets each Type contained in the Workspace.
%
% (PDDL's :TYPE directive)
%
planner_current_type(Workspace, Type):-
planner_type(?, Workspace, Type).
%! planner_add_type(+Workspace, +Type) is det.
%
% Adds one Type into the Workspace.
%
% (PDDL's :TYPE directive)
%
planner_add_type(Workspace, Type):-
planner_type(+, Workspace, Type).
%! planner_remove_type(+Workspace, +Type) is det.
%
% Removes one Type from the Workspace.
%
% (PDDL's :TYPE directive)
%
planner_remove_type(Workspace, Type):-
planner_type(-, Workspace, Type).
%! planner_current_object(+Workspace, ?Object) is nondet.
%
% Gets each Object contained in the Workspace.
%
% (PDDL's :OBJECT directive)
%
planner_current_object(Workspace, Object):-
planner_object(?, Workspace, Object).
%! planner_add_object(+Workspace, +Object) is det.
%
% Adds one Object into the Workspace.
%
% (PDDL's :OBJECT directive)
%
planner_add_object(Workspace, Object):-
planner_object(+, Workspace, Object).
%! planner_remove_object(+Workspace, +Object) is det.
%
% Removes one Object from the Workspace.
%
% (PDDL's :OBJECT directive)
%
planner_remove_object(Workspace, Object):-
planner_object(-, Workspace, Object).
%! planner_current_derived(+Workspace, ?Derived, ?Conds) is nondet.
%
% Gets each Derived contained in the Workspace.
%
% (PDDL's :DERIVED directive)
%
planner_current_derived(Workspace, Derived, Conds):-
planner_derived(?, Workspace, Derived, Conds).
%! planner_add_derived(+Workspace, +Derived, +Conds) is det.
%
% Adds one Derived into the Workspace.
%
% (PDDL's :DERIVED directive)
%
planner_add_derived(Workspace, Derived, Conds):-
planner_derived(+, Workspace, Derived, Conds).
%! planner_remove_derived(+Workspace, +Derived, +Conds) is det.
%
% Removes one Derived from the Workspace.
%
% (PDDL's :DERIVED directive)
%
planner_remove_derived(Workspace, Derived, Conds):-
planner_derived(-, Workspace, Derived, Conds).
%! planner_current_axiom(+Workspace, ?Axiom) is nondet.
%
% Gets each Axiom contained in the Workspace.
%
% (PDDL's :AXIOM directive)
%
planner_current_axiom(Workspace, Axiom):-
planner_axiom(?, Workspace, Axiom).
%! planner_add_axiom(+Workspace, +Axiom) is det.
%
% Adds one Axiom into the Workspace.
%
% (PDDL's :AXIOM directive)
%
planner_add_axiom(Workspace, Axiom):-
planner_axiom(+, Workspace, Axiom).
%! planner_remove_axiom(+Workspace, +Axiom) is det.
%
% Removes one Axiom from the Workspace.
%
% (PDDL's :AXIOM directive)
%
planner_remove_axiom(Workspace, Axiom):-
planner_axiom(-, Workspace, Axiom).
%! planner_current_action(+Workspace, ?Action, ?Conds) is nondet.
%
% Gets each Action contained in the Workspace.
%
% (PDDL's :ACTION directive)
%
planner_current_action(Workspace, Action, Conds):-
planner_action(?, Workspace, Action, Conds).
%! planner_add_action(+Workspace, +Action, +Conds) is det.
%
% Adds one Action into the Workspace.
%
% (PDDL's :ACTION directive)
%
planner_add_action(Workspace, Action, Conds):-
planner_action(+, Workspace, Action, Conds).
%! planner_remove_action(+Workspace, +Action, +Conds) is det.
%
% Removes one Action from the Workspace.
%
% (PDDL's :ACTION directive)
%
planner_remove_action(Workspace, Action, Conds):-
planner_action(-, Workspace, Action, Conds).
%! planner_copy_workspace(+Workspace, ?NewWorkspace) is det.
%
% Clones every ws_data/3 fact of Workspace into NewWorkspace; when
% NewWorkspace is unbound a fresh name is generated with gensym/2.
planner_copy_workspace(Workspace,NewWorkspace):-
(var(NewWorkspace)->gensym(Workspace,NewWorkspace);true),
forall(mdata:ws_data(Workspace,P,V),call_ws_data_hook(+,NewWorkspace,P,V)).
% Stub: loading PDDL files is not implemented; only logged via
% planner_missing/1 (whose first clause cuts, so nothing is thrown).
planner_load_file(W,FileName):- planner_missing(planner_load_file(W,FileName)).
%! planner_workspace(+Opr,+Workspace) is det.
%
% + = Adds a workspace name
% - = Deletes workspace freeing up resources
% ? = Enumerates workspace names
%
% Workspaces are represented as ws_data(W,isa,tWorkspace) facts.
planner_workspace(Opr,Workspace):-
call_settings_data(Opr,ws_data(Workspace,isa,tWorkspace)).
%! planner_program(+Opr,?Program) is nondet.
%
% + = Adds a planner program name
% - = Deletes program freeing up resources
% ? = Enumerates planner program names
%
planner_program(Opr,Program):- call_settings_data(Opr,current_planner_program(Program)).
% Templates of the dynamic storage facts used by this module (see the
% dynamic declarations below).
planner_data_template(current_planner_program(_Program)).
planner_data_template(ws_data(_W,_P,_D)).
% pre_existing_clause(+MData, -R): succeeds if a clause structurally
% equivalent (=@=) to MData already exists, binding R to its clause
% reference; used to avoid asserting duplicates.
pre_existing_clause(MData,R):- strip_module(MData,M,Data),
clause(M:Data,true,R),clause(MCData,true,R),strip_module(MCData,_,CData),Data=@=CData,!.
:- dynamic(mdata:current_planner_program/1).
:- dynamic(mdata:ws_data/3).
% to_mdata(+Data, -MData): re-homes any module-qualified term into the
% 'mdata' storage module.
to_mdata(Data,mdata:BData):- strip_module(Data,_,BData).
% call_ws_data_hook(+Opr,+W,+Prop,+DataL): applies Opr to one
% ws_data(W,Prop,Data) fact per element of DataL (lists are expanded).
call_ws_data_hook(Opr,W,Prop,DataL):-
check_opr(W,Opr),
forall(delistify_data(DataL,Data),
call_settings_data(Opr,ws_data(W,Prop,Data))).
% Re-homes Data into the mdata module, then dispatches on Opr.
call_settings_data(Opr,Data):- to_mdata(Data,MData), call_settings_mdata(Opr,MData).
% ? = query, + = assert (skipping structural duplicates), - = retract.
call_settings_mdata(?,MData):- !, call(MData).
call_settings_mdata(+,MData):- !, (pre_existing_clause(MData,_R)->true;
(asserta(MData),planner_debug(asserta(MData)))).
call_settings_mdata(-,MData):- ignore(call(MData)),retractall(MData).
% Yields each element of a list, or the term itself when not a list.
delistify_data(DataL,Data):- is_list(DataL),!,member(Data,DataL).
delistify_data(Data,Data).
% Manipulate PDDL Workspace Default Planner Program
% Falls back to the global current_planner_program/1 when the
% workspace has no program fact of its own (soft-cut *->).
planner_workspace_program(Opr,W,Program):-
(call_settings_data(Opr,ws_data(W,program,Program))
*->true;
call_settings_data(Opr,current_planner_program(Program))).
% Manipulate PDDL Workspace Problem/Domains (:Requirements ...)
planner_requirement(Opr,W,Require):- call_ws_data_hook(Opr,W,requirement,Require).
% Manipulate PDDL Workspace Problem/Domains (:Init ...)
% On assert, atoms inside the fact are auto-registered as objects.
planner_init(Opr,W,Fact):-
glean_objs(Opr,W,Fact),
call_ws_data_hook(Opr,W,init,Fact).
% Manipulate PDDL Workspace Problem/Domains (:Predicates ...)
planner_predicate(Opr,W,Predicate):-
glean_types(Opr,W,Predicate),
call_ws_data_hook(Opr,W,predicate,Predicate).
% Manipulate PDDL Workspace Problem/Domains (:Functions ...)
planner_function(Opr,W,Function):-
glean_types(Opr,W,Function),
call_ws_data_hook(Opr,W,function,Function).
% Manipulate PDDL Workspace Problem/Domains (:TYPE ...)
planner_type(Opr,W,Type):-
glean_types(Opr,W,Type),
call_ws_data_hook(Opr,W,type,Type).
% Manipulate PDDL Workspace Problem/Domains (:OBJECTS ...)
planner_object(Opr,W,Object):-
glean_types(Opr,W,Object),
call_ws_data_hook(Opr,W,object,Object).
% Manipulate a PDDL Workspace Problem/Domains (:derived-predicates ...)
% A derived predicate with empty conditions degenerates to an init fact.
planner_derived(Opr,W,Fact,Cond) :- Cond==[], !, planner_init(Opr,W,Fact).
planner_derived(Opr,W,Fact,Condition) :-
FULL= derived(Fact,Condition),
numbervars(FULL),
call_ws_data_hook(Opr,W,derived,FULL).
% Manipulate a PDDL Workspace Problem/Domains (:axiom ...)
planner_axiom(Opr,W,Axiom):- call_ws_data_hook(Opr,W,axiom,Axiom).
% Manipulate a PDDL Workspace Problem/Domains (:action Action (...Info...))
planner_action(Opr,W,Action,InfoList):- glean_types(Opr,W,Action),
numbervars(InfoList),
planner_action_info(Opr,W,Action,InfoList),
!. % call_ws_data_hook(Opr,W,action,act_info(Action,InfoList)).
% Info may be a list of entries, a Type:Info pair, or bare metadata.
planner_action_info(Opr,W,Action,InfoList):-
is_list(InfoList),!,maplist(planner_action(Opr,W,Action),InfoList).
planner_action_info(Opr,W,Action,Type:Info):-!,
call_ws_data_hook(Opr,W,action,act_inf(Action,Type,Info)).
planner_action_info(Opr,W,Action,Info):-
call_ws_data_hook(Opr,W,action,act_inf(Action,meta,Info)).
%% planner_get_plan(+W,+Goal,-Plan) is nondet.
%
% Plans with the workspace's configured planner program.
planner_get_plan(W,Goal,Plan):-
planner_workspace_program(?,W,Planner),
planner_get_plan(Planner,W,Goal,Plan).
%% planner_get_plan(+Planner,+W,+Goal,-Plan) is nondet.
%
% Stub: binds a placeholder plan ([s1,s2,s3]) and logs the missing
% implementation via planner_missing/1.
planner_get_plan(Planner,W,Goal,Plan):-
check_workspace(W),
ignore(Plan=[s1,s2,s3]),
planner_missing(planner_get_plan(Planner,W,Goal,Plan)).
%% planner_apply_step(+W,+Step,-NewWorkspace) is det.
planner_apply_step(W,Step,NewWorkspace):-
planner_workspace_program(?,W,Planner),
planner_apply_step(Planner,W,Step,NewWorkspace).
%% planner_apply_step(+Planner,+W,+Step,-NewWorkspace) is det.
%
% Stub: names (and auto-creates) the result workspace, then logs the
% missing implementation.
planner_apply_step(Planner,W,Step,NewWorkspace):-
check_workspace(W),
(var(NewWorkspace)->gensym(W,NewWorkspace);true),
check_workspace(NewWorkspace),
planner_missing(planner_apply_step(Planner,W,Step,NewWorkspace)).
% Placeholder hook for wiring in external planner executables.
ensure_external_planners.
% glean_types(+Opr,+W,+Any): on assert (+) only, auto-registers every
% Var-Type / Type:Var annotation found anywhere inside the term as a
% workspace type; a no-op for other operations.
glean_types(Opr,W,Any):- Opr == +,!,
check_opr(W,Opr),
forall((sub_term(Sub,Any),
compound(Sub),member(Sub,[_-Type, Type:_])),
planner_type(Opr,W,Type)).
glean_types(_,_,_).
% glean_objs(+Opr,+W,+Any): on assert (+) only, auto-registers Obj-_
% annotations and every atom inside the term as workspace objects.
glean_objs(Opr,W,Any):- Opr == +,!,
forall((sub_term(Sub,Any),
compound(Sub),member(Sub,[Obj-_])),
planner_object(Opr,W,Obj)),
forall((sub_term(Obj,Any),
atom(Obj)),planner_object(Opr,W,Obj)).
glean_objs(_,_,_).
%! check_opr(+Workspace, +Opr) is det.
%
% Validates an operation marker before data access: '+' (assert),
% '-' (retract) and '?' (query) ensure the workspace exists; anything
% else raises opr_missing(Opr).
%
% Bug fix: the '-' clause was missing, so every planner_remove_* call
% (which reaches here with Opr = '-' via call_ws_data_hook/4) threw
% opr_missing(-). The legacy 'del' clause is kept for compatibility.
check_opr(W,+):- check_workspace(W).
check_opr(W,-):- check_workspace(W).
check_opr(W,del):- check_workspace(W).
check_opr(W,?):- check_workspace(W).
check_opr(_Workspace,Opr):- throw(opr_missing(Opr)).
% A workspace "exists" when it has the isa/tWorkspace marker fact;
% otherwise it is created on demand.
check_workspace(W):- mdata:ws_data(W,isa,tWorkspace),!.
check_workspace(W):- asserta(mdata:ws_data(W,isa,tWorkspace)).
% Debug tracing: prints the term as a Prolog comment line.
planner_debug(Info):- format('~N% ~q.~n',[Info]).
% Stub toggle: the cut in the first clause makes the throwing clause
% unreachable, so missing features are currently only logged.
planner_missing(Goal):- !,planner_debug(g(Goal)).
planner_missing(Goal):- throw(planner_missing(Goal)).
%e_member([L|ST],E):- nonvar(L),!,member(E,[L|ST]).
%e_member(E,E).
end_of_file.
/*
(:constraints (and (always-until (charged ?r) (at ?r rechargepoint))
(always-within 10 (< (charge ?r) 5) (at ?r rechargingpoint))))
(:constraints
(and (preference
(always (forall (?b1 ?b2 - block ?c1 ?c2 - color)
(implies (and (on ?b1 ?b2)
(color ?b1 ?c1)
(color ?b2 ?c2))
(= ?c1 ?c2))))))
)
(:constraints
(and (always (forall (?b1 ?b2 - block)
(implies (and (fragile ?b1) (on ?b2 ?b1))
(clear ?b2)))))
)
*/
| TeamSPoon/logicmoo_workspace | packs_sys/logicmoo_ec/prolog/planner_api.pl | Perl | mit | 22,303 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2019] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
use strict;
use warnings;

use SeqStoreConverter::BasicConverter;

package SeqStoreConverter::AnophelesGambiae;

# Inherit from BasicConverter; 'our @ISA' replaces the obsolete
# "use vars qw(@ISA)" idiom with identical runtime behavior.
our @ISA = qw(SeqStoreConverter::BasicConverter);
# Create the Anopheles-specific coordinate systems (chunk at sequence
# level, scaffold, and chromosome on the default assembly) in the
# target database, and record the assembly.mapping meta entries that
# relate them.
sub create_coord_systems {
my $self = shift;
$self->debug("AnophelesGambiae Specific: creating scaffold, chunk and, " .
"chromosome coord systems");
my $target = $self->target();
my $dbh = $self->dbh();
my $ass_def = $self->get_default_assembly();
# Rows are [name, version, attrib, rank] for the coord_system table.
my @coords =
(['chunk', undef, 'default_version,sequence_level', 3],
['chromosome', $ass_def, 'default_version', 1],
["scaffold" , undef, "default_version", 2]);
my @assembly_mappings = ("chromosome:$ass_def|chunk",
"chromosome:$ass_def|scaffold",
"scaffold|chromosome:$ass_def|chunk");
$self->debug("Building coord_system table");
my $sth = $dbh->prepare("INSERT INTO $target.coord_system " .
"(name, version, attrib,rank) VALUES (?,?,?,?)");
my %coord_system_ids;
foreach my $cs (@coords) {
$sth->execute(@$cs);
# NOTE(review): the collected insert ids are never read within this
# method — confirm whether they were meant to be used elsewhere.
$coord_system_ids{$cs->[0]} = $sth->{'mysql_insertid'};
}
$sth->finish();
$sth = $dbh->prepare("INSERT INTO $target.meta(meta_key, meta_value) " .
"VALUES ('assembly.mapping', ?)");
foreach my $mapping (@assembly_mappings) {
$sth->execute($mapping);
}
$sth->finish();
return;
}
# Populate the seq_region table from the old-schema contig (stored as
# 'chunk'), supercontig (as 'scaffold') and chromosome tables, using
# converters provided by the base class.
sub create_seq_regions {
my $self = shift;
$self->debug("AnophelesGambiae Specific: creating seq_regions");
$self->contig_to_seq_region('chunk');
$self->supercontig_to_seq_region('scaffold');
$self->chromosome_to_seq_region();
}
# Load the assembly table with contig->chromosome and
# supercontig->chromosome mappings via base-class helpers.
sub create_assembly {
my $self = shift;
$self->debug("AnophelesGambiae Specific: loading assembly table");
$self->assembly_contig_chromosome();
$self->assembly_supercontig_chromosome();
return;
}
# Convert prediction transcripts/exons from contig coordinates in the
# source schema to chromosomal coordinates in the target schema,
# merging exons that become adjacent after projection. Rows must
# arrive ordered by transcript id and exon rank for the merge logic
# to be correct.
sub transfer_prediction_transcripts {
my $self = shift;
my $source = $self->source();
my $target = $self->target();
my $dbh = $self->dbh();
$self->debug("AnophelesGambiae Specific: building prediction_exon table");
#
# In Anopheles the predicion transcripts were computed in chromosomal
# coords, so convert them to chromosomal coords and merge any adjacent
# exons
#
# The IF(contig_ori=1,...) pairs project contig coordinates onto the
# chromosome for forward- and reverse-oriented contigs respectively.
my $sql =
"SELECT pt.prediction_transcript_id, tcm.new_id as seq_region_id, " .
" IF(a.contig_ori=1,(pt.contig_start+a.chr_start-a.contig_start),".
" (a.chr_start+a.contig_end-pt.contig_end)) as start, " .
" IF(a.contig_ori=1,(pt.contig_end+a.chr_start-a.contig_start)," .
" (a.chr_start+a.contig_end-pt.contig_start)) as end, " .
" a.contig_ori * pt.contig_strand as strand, " .
" pt.start_phase, pt.score, pt.p_value " .
"FROM $source.assembly a, $target.tmp_chr_map tcm, " .
" $source.prediction_transcript pt " .
"WHERE pt.contig_id = a.contig_id " .
"AND a.chromosome_id = tcm.old_id " .
"ORDER BY pt.prediction_transcript_id, exon_rank";
my $sth = $dbh->prepare($sql);
$sth->execute();
# State carried across rows: coordinates of the previous exon so that
# touching exons of the same transcript can be merged.
my $prev_end = undef;
my $prev_start = undef;
my $prev_id = undef;
my $rank = undef;
my %prev_exon = ();
while(my $row = $sth->fetchrow_arrayref()) {
my ($pt_id, $sr_id, $sr_start, $sr_end, $sr_strand, $start_phase,
$score, $p_value) = @$row;
if(defined($prev_id) && ($prev_id == $pt_id)) {
#still in the same transcript
if($sr_strand == 1 &&
defined($prev_end) && $prev_end == $sr_start-1) {
$self->debug("merged exon $rank in prediction_transcript $pt_id\n");
#adjacent exons forward strand - merge them
$prev_exon{'seq_region_end'} = $sr_end;
$prev_end = $sr_end;
} elsif($sr_strand == -1 &&
defined($prev_start) && $prev_start == $sr_end+1) {
$self->debug("merged exon $rank in prediction_transcript $pt_id\n");
#adjacent exons negative strand - merge them
$prev_exon{'seq_region_start'} = $sr_start;
$prev_start = $sr_start;
} else {
#non-adjacent exons in the same transcript - no merge
$rank++;
#store the previous exon
$self->store_pexon(\%prev_exon);
#make current exon the previous exon
%prev_exon = ('prediction_transcript_id' => $pt_id,
'seq_region_id' => $sr_id,
'seq_region_start' => $sr_start,
'seq_region_end' => $sr_end,
'seq_region_strand' => $sr_strand,
'start_phase' => $start_phase,
'score' => $score,
'p_value' => $p_value,
'rank' => $rank);
}
} else {
#store previous exon
$self->store_pexon(\%prev_exon) if(%prev_exon);
#new ptranscript
$rank = 1;
$prev_id = $pt_id;
$prev_end = $sr_end;
$prev_start = $sr_start;
%prev_exon = ('prediction_transcript_id' => $pt_id,
'seq_region_id' => $sr_id,
'seq_region_start' => $sr_start,
'seq_region_end' => $sr_end,
'seq_region_strand' => $sr_strand,
'start_phase' => $start_phase,
'score' => $score,
'p_value' => $p_value,
'rank' => $rank);
}
}
#store the very last exon in the table
$self->store_pexon(\%prev_exon) if(%prev_exon);
$sth->finish();
# The transcript rows themselves can be built in a single grouped
# INSERT...SELECT: min/max of the projected exon bounds per transcript.
$self->debug("AnophelesGambiae Specific: building prediction_transcript " .
"table");
$dbh->do
("INSERT INTO $target.prediction_transcript (prediction_transcript_id, " .
" seq_region_id, seq_region_start, seq_region_end, " .
" seq_region_strand, analysis_id ) " .
"SELECT pt.prediction_transcript_id, tcm.new_id as seq_region_id, " .
" MIN(IF(a.contig_ori=1,(pt.contig_start+a.chr_start-a.contig_start),".
" (a.chr_start+a.contig_end-pt.contig_end))) as start, " .
" MAX(IF(a.contig_ori=1,(pt.contig_end+a.chr_start-a.contig_start)," .
" (a.chr_start+a.contig_end-pt.contig_start))) as end, " .
" a.contig_ori * pt.contig_strand as strand, " .
" pt.analysis_id " .
"FROM $source.assembly a, $target.tmp_chr_map tcm, " .
" $source.prediction_transcript pt " .
"WHERE pt.contig_id = a.contig_id " .
"AND a.chromosome_id = tcm.old_id " .
"GROUP BY prediction_transcript_id");
return;
}
#
# helper function to store prediction exon
#
# Inserts a single prediction_exon row (built by
# transfer_prediction_transcripts) into the target database. $pexon is
# a hashref with keys: prediction_transcript_id, rank, seq_region_id,
# seq_region_start, seq_region_end, seq_region_strand, start_phase,
# score, p_value. Returns nothing.
#
sub store_pexon {
my $self = shift;
my $pexon = shift;

my $target = $self->target();
# (the unused $source handle from the original version was removed)
my $dbh = $self->dbh();

my $store_sth = $dbh->prepare
("INSERT INTO $target.prediction_exon (prediction_transcript_id, " .
" exon_rank, seq_region_id, seq_region_start, seq_region_end, " .
" seq_region_strand, start_phase, score, p_value) " .
"VALUES (?,?,?,?,?,?,?,?,?)");

$store_sth->execute($pexon->{'prediction_transcript_id'},
$pexon->{'rank'},
$pexon->{'seq_region_id'},
$pexon->{'seq_region_start'},
$pexon->{'seq_region_end'},
$pexon->{'seq_region_strand'},
$pexon->{'start_phase'},
$pexon->{'score'},
$pexon->{'p_value'});
$store_sth->finish();
return;
}
1;
| muffato/ensembl | misc-scripts/surgery/SeqStoreConverter/AnophelesGambiae.pm | Perl | apache-2.0 | 8,433 |
# Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
# Copyright [2016-2018] EMBL-European Bioinformatics Institute
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
=head1 NAME
Bio::EnsEMBL::Analysis::RunnableDB::Finished::RepeatMasker
=head1 SYNOPSIS
my $repeat_masker = Bio::EnsEMBL::Analysis::RunnableDB::Finished::RepeatMasker->
new(
-input_id => 'contig::AL805961.22.1.166258:1:166258:1',
-db => $db,
-analysis => $analysis,
);
$repeat_masker->fetch_input;
$repeat_masker->run;
$repeat_masker->write_output;
=head1 DESCRIPTION
This module provides an interface between the ensembl database and
the Runnable RepeatMasker which wraps the program RepeatMasker
This module can fetch appropriate input from the database
pass it to the runnable then write the results back to the database
in the repeat_feature and repeat_consensus tables
=head1 CONTACT
Post questions to : anacode@sanger.ac.uk
=cut
package Bio::EnsEMBL::Analysis::RunnableDB::Finished::RepeatMasker;
use strict;
use warnings;
use Bio::EnsEMBL::Analysis::RunnableDB::RepeatMasker;
use Bio::EnsEMBL::Analysis::Runnable::Finished::RepeatMasker;
use vars qw(@ISA);
@ISA = qw(Bio::EnsEMBL::Analysis::RunnableDB::RepeatMasker);
=head2 fetch_input
Arg [1] : Bio::EnsEMBL::Analysis::RunnableDB::Finished::RepeatMasker
Function : fetch data out of database and create runnable
Returntype: 1
Exceptions: none
Example :
=cut
sub fetch_input {
    my ($self) = @_;

    # Fetch the slice named by the input id and make it the query.
    my $slice = $self->fetch_sequence;
    $self->query($slice);

    # Optional analysis parameters are passed through to the runnable.
    my %extra_args;
    if ($self->parameters_hash) {
        %extra_args = %{ $self->parameters_hash };
    }

    my $runnable = Bio::EnsEMBL::Analysis::Runnable::Finished::RepeatMasker->new(
        -query    => $self->query,
        -program  => $self->analysis->program_file,
        -analysis => $self->analysis,
        %extra_args,
    );
    $self->runnable($runnable);

    return 1;
}
1;
| kiwiroy/ensembl-analysis | modules/Bio/EnsEMBL/Analysis/RunnableDB/Finished/RepeatMasker.pm | Perl | apache-2.0 | 2,467 |
=head1 LICENSE
Copyright [1999-2015] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Copyright [2016-2017] EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
package Bio::EnsEMBL::Compara::GenomeMF;
use strict;
use warnings;
use Bio::EnsEMBL::Utils::Argument;
use Bio::EnsEMBL::Utils::Scalar qw(:assert);
use Bio::EnsEMBL::Utils::IO::GFFParser;
use JSON;
use Bio::SeqIO;
use FileHandle;
use Data::Dumper;
# Constructor: locates a single genome entry inside a JSON registry
# file. Named arguments (via rearrange): -FILENAME (registry path)
# and -INDEX (1-based position within the registry array). Returns
# the blessed entry hashref.
sub new {
    my ($class, @args) = @_;

    my ($filename, $index);
    if (scalar @args) {
        ($filename, $index) = rearrange([qw(FILENAME INDEX)], @args);
    }

    # Fail with a reason instead of a bare "Died at ...".
    die "-FILENAME argument is required" unless defined $filename;
    die "-INDEX argument is required" unless defined $index;

    # Registry indexes are 1-based; the parsed array is 0-based.
    return $class->all_from_file($filename)->[$index-1];
}
# Parses a JSON registry file and returns an arrayref of blessed
# GenomeMF entries (one per genome). Validates that the top level is
# an array of hashes of scalars, with all obligatory fields present
# and no unregistered keys; dies with a descriptive message otherwise.
sub all_from_file {
    my $self = shift;
    my $filename = shift;

    # Loads the file with JSON
    die "'filename' must be defined" unless defined $filename;
    die "Can't read from '$filename'" unless -r $filename;
    # Slurp the file directly instead of shelling out to `cat`, which
    # was unportable and unsafe for filenames containing shell
    # metacharacters.
    my $json_text = do {
        open(my $fh, '<', $filename) or die "Cannot open '$filename': $!";
        local $/ = undef;
        <$fh>;
    };
    my $json_parser = JSON->new->relaxed;
    my $perl_array = $json_parser->decode($json_text);

    # List of fields that must / can be present
    my @obligatory_fields = qw(production_name taxonomy_id prot_fasta cds_fasta source);
    my $possible_fields = {map {$_ => 1} (@obligatory_fields, qw(gene_coord_gff is_high_coverage has_karyotype))};

    # Checks the integrity of the file
    my $i = 0;
    die "The first level structure in '$filename' must be an array" unless ref($perl_array) eq 'ARRAY';
    foreach my $entry (@$perl_array) {
        die "The second level structures in '$filename' must be hashes" unless ref($entry) eq 'HASH';
        map {die "'$_' must map to a scalar in the registry file '$filename'" if ref($entry->{$_})} keys %$entry;
        map {die "'$_' is not a registered key in the registry file '$filename'" unless exists $possible_fields->{$_}} keys %$entry;
        map {die "'$_' must be present in every entry of the registry file '$filename'" unless exists $entry->{$_}} @obligatory_fields;
        # Remember where this entry came from so locator() can rebuild it.
        $entry->{'_registry_file'} = $filename;
        $entry->{'_registry_index'} = ++$i;
        bless $entry, $self;
    }
    return $perl_array;
}
# Returns a Bio::EnsEMBL-style locator string from which this object
# can be recreated: "<class>/filename=<registry file>;index=<position>".
sub locator {
    my ($self) = @_;
    my $class = ref($self);
    return "$class/filename=$self->{'_registry_file'};index=" . int($self->{'_registry_index'});
}
## Coordinates

# Returns (gene_coordinates_hashref, cds_coordinates_hashref),
# parsing the GFF file on first use only.
sub get_coordinates {
    my $self = shift;
    # Bug fix: the guard used to test '_coordinates', a key that
    # _load_coordinates() never sets (it populates '_gene_coordinates'
    # and '_cds_coordinates'), so the GFF was re-parsed on every call.
    # Test a key the loader actually populates.
    $self->_load_coordinates unless exists $self->{'_gene_coordinates'};
    return ($self->{'_gene_coordinates'}, $self->{'_cds_coordinates'})
}
# Parses the gene_coord_gff file (when configured) and caches two
# hashes on the object: _gene_coordinates and _cds_coordinates, both
# keyed by protein id with values [seqid, start, end, strand].
# Handles three annotation sources differently: "refseq" and
# "augustus_maker" (gene/mRNA/CDS hierarchy, with species-specific
# quirks) and "gigascience" (flat mRNA records).
sub _load_coordinates {
my $self = shift;
# local_* hashes are keyed by GFF feature ids; the final hashes are
# re-keyed by protein id once the hierarchy is resolved.
my %local_gene_coordinates = ();
my %local_mrna_coordinates = ();
my %local_cds_coordinates = ();
my %gene_coordinates = ();
my %cds_coordinates = ();
#my %gene_mapper = ();
#my %mRNA_mapper = ();
if ( exists $self->{'gene_coord_gff'} ) {
my $fh = FileHandle->new;
$fh->open( "<" . $self->{'gene_coord_gff'} ) || die "Could not open coordinates file (gff): " . $self->{'gene_coord_gff'};
my $parser = Bio::EnsEMBL::Utils::IO::GFFParser->new($fh);
$parser->parse_header();
my $feature;
my $parent;
my $mitochondrial_genome;
while ( $feature = $parser->parse_next_feature() ) {
my %feature = %{$feature};
if ( ( $self->{"source"} eq "refseq" ) || ( $self->{"source"} eq "augustus_maker") ) {
#Check for mitochondrial genomes
# NOTE(review): $mitochondrial_genome is tracked here but not read
# later in this method — confirm whether it should gate anything.
if ( ${ $feature{attribute} }{genome} ) {
if ( ${ $feature{attribute} }{genome} =~ /mitochondrion/ ) {
$mitochondrial_genome = 1;
}
if ( ${ $feature{attribute} }{genome} =~ /genomic/ ) {
$mitochondrial_genome = 0;
}
}
#get gene coordinates:
$local_gene_coordinates{${ $feature{attribute} }{ID}} = [ map { $feature{$_} } qw(seqid start end strand) ] if $feature{type} eq 'gene';
#get CDS or mRNA coordinates according to genome type
if ($feature{type} eq 'CDS') {
if ( ${ $feature{attribute} }{Parent} ){
my $parent = ${ $feature{attribute} }{Parent} || warn "CDS does not have a parent:".${ $feature{attribute} }{ID}; #Some mito CDS may not have parents, so we should study closely.
}
else {
# python GFF has parent-less CDS features; synthesize a gene id.
if ( $self->{"production_name"} eq "python_molurus_bivittatus" ) {
my $local_protein_id;
my $source = ${ $feature{attribute} }{Dbxref};
if ( $source =~ /Genbank:/ ) {
my @tok = split( /\:/, $source );
$local_protein_id = $tok[1];
}
my $local_id = ${ $feature{attribute} }{ID};
my $local_parent = "gene_$local_id";
$local_cds_coordinates{$local_id}{'parent'} = $local_parent;
$local_cds_coordinates{$local_id}{'coord'} = [ map { $feature{$_} } qw(seqid start end strand) ];
$local_cds_coordinates{$local_id}{'protein_id'} = $local_protein_id;
$local_gene_coordinates{$local_parent} = [ map { $feature{$_} } qw(seqid start end strand) ];
}
}
my $local_id = ${ $feature{attribute} }{ID};
my $protein_id;
#Tuatara has postfixes on the feature names (':cds' && ':exon')
if ( $self->{"source"} eq "augustus_maker"){
my @tok = split(/\:/,$local_id);
$local_id = $tok[0];
$protein_id = $local_id;
}
else{
# refseq: the protein id hides in the Dbxref list as "Genbank:XP_...".
if (ref(${$feature{attribute}}{Dbxref}) eq 'ARRAY'){
foreach my $source ( @{ ${ $feature{attribute} }{Dbxref} } ) {
if ( $source =~ /Genbank:/ ) {
my @tok = split( /\:/, $source );
$protein_id = $tok[1];
}
}
}
}
if ( $self->{"production_name"} eq "ophiophagus_hannah" ) {
$protein_id = ${ $feature{attribute} }{protein_id};
}
if ($protein_id){
$local_cds_coordinates{$local_id}{'coord'} = [ map { $feature{$_} } qw(seqid start end strand) ];
$local_cds_coordinates{$local_id}{'parent'} = $parent;
$local_cds_coordinates{$local_id}{'protein_id'} = $protein_id;
}
}
if ($feature{type} eq 'mRNA'){
$parent = ${ $feature{attribute} }{Parent};
my $local_id = ${ $feature{attribute} }{ID};
#Tuatara has postfixes on the feature names (':cds' && ':exon')
if ( $self->{"source"} eq "augustus_maker"){
my @tok = split(/\:/,$local_id);
$local_id = $tok[0];
}
$local_mrna_coordinates{$local_id}{'parent'} = $parent;
$local_mrna_coordinates{$local_id}{'coord'} = [ map { $feature{$_} } qw(seqid start end strand) ];
}
}
elsif ( $self->{"source"} eq "gigascience" ) {
#if ( $self->{"production_name"} eq "ophisaurus_gracilis" ) {
#ophisaurus_gracilis gff file is very simple, the mRNA spams across the whole gene, so they have the same coordinates:
#get gene and cds coordinates:
$gene_coordinates{ ${ $feature{attribute} }{ID} } = [ map { $feature{$_} } qw(seqid start end strand) ] if $feature{type} eq 'mRNA';
$cds_coordinates{ ${ $feature{attribute} }{ID} } = [ map { $feature{$_} } qw(seqid start end strand) ] if $feature{type} eq 'mRNA';
#}
}
}
}
print scalar( keys %local_cds_coordinates ), " LOCAL cds coordinates\n";
print scalar( keys %local_mrna_coordinates ), " LOCAL mrna coordinates\n";
print scalar( keys %local_gene_coordinates ), " LOCAL gene coordinates\n";
if ( ( $self->{"source"} eq "refseq" ) || ( $self->{"source"} eq "augustus_maker" ) ) {
#Build hierarchy
# Walk CDS -> mRNA -> gene and re-key both result hashes by protein id.
foreach my $cds_id (keys %local_cds_coordinates){
my $mRNA_id = $local_cds_coordinates{$cds_id}{'parent'};
my $protein_id = $local_cds_coordinates{$cds_id}{'protein_id'};
my $gene_id;
#my $mRNA_len;
#if mitochondrial the CDS will have as a parent a gene and not an mRNA
if ($mRNA_id =~ /gene/){
$gene_id = $mRNA_id;
$mRNA_id = $cds_id;
$cds_coordinates{$protein_id} = $local_cds_coordinates{$mRNA_id}{'coord'};
}
else{
$gene_id = $local_mrna_coordinates{$mRNA_id}{'parent'};
$cds_coordinates{$protein_id} = $local_mrna_coordinates{$mRNA_id}{'coord'};
}
$gene_coordinates{$protein_id} = $local_gene_coordinates{$gene_id};
}
}
print scalar( keys %gene_coordinates ), " gene coordinates\n";
print scalar( keys %cds_coordinates ), " cds coordinates\n";
$self->{'_gene_coordinates'} = \%gene_coordinates;
$self->{'_cds_coordinates'} = \%cds_coordinates;
}
## Sequences

# Returns (protein_seqs_hashref, cds_seqs_hashref), keyed by protein
# id, as loaded from the configured fasta files.
# NOTE(review): unlike get_coordinates(), this calls _load_sequences()
# unconditionally, so the fasta files are re-read on every call —
# confirm whether caching was intended here too.
sub get_sequences {
my $self = shift;
$self->_load_sequences();
return ($self->{'_seqs'}{'prot'},$self->{'_seqs'}{'cds'});
}
# Reads the cds_fasta and prot_fasta files and stores, per type, a
# hash { id => { seq_obj => Bio::Seq, display_name => ... } } under
# $self->{_seqs}{cds|prot}. For refseq/augustus_maker CDS entries the
# key is the protein id parsed from the fasta description line.
sub _load_sequences {
my $self = shift;
foreach my $type ( @{ [ 'cds', 'prot' ] } ) {
#sequence hash
my %sequence2hash = ();
$self->{'_seqs'}->{ ${type} } = \%sequence2hash;
#Test if fasta file was declared and exists
next unless exists $self->{"${type}_fasta"};
my $input_file = $self->{"${type}_fasta"};
die "Cannot find the file '$input_file'\n" unless -e $input_file;
my $in_file = Bio::SeqIO->new( -file => $input_file, '-format' => 'Fasta' );
while ( my $seq = $in_file->next_seq() ) {
if ( ( $self->{"source"} eq "refseq" ) || ( $self->{"source"} eq "augustus_maker") ) {
if ( $type eq "cds" ) {
my @fields = split( /\s+/, $seq->desc );
#Get protein ID
# refseq headers carry "[protein_id=XP_...]"; fall back to the
# fasta id when the tag is absent.
my $protein_id;
if ( $seq->desc =~ /protein_id=/ ) {
my @tok = split( /protein_id=/, $seq->desc );
my $tmp = $tok[1];
@tok = split( /\]/, $tmp );
$protein_id = $tok[0];
}
else{
$protein_id = $seq->id;
}
# "[gene=...]" tag, when present, becomes the display name.
if ( $seq->desc =~ /gene=/ ) {
my @tok = split( /gene=/, $seq->desc );
my $tmp = $tok[1];
@tok = split( /\]/, $tmp );
$sequence2hash{$protein_id}{'display_name'} = $tok[0];
}
$sequence2hash{$protein_id}{'seq_obj'} = $seq;
}
else {
$sequence2hash{ $seq->id }{'seq_obj'} = $seq;
}
}
elsif( $self->{"source"} eq "gigascience" ){
$sequence2hash{ $seq->id }{'seq_obj'} = $seq;
$sequence2hash{ $seq->id }{'display_name'} = $seq->id;
}
print scalar( keys %sequence2hash ), " sequences of type $type\n";
if ( !keys(%sequence2hash) ) {
die "Error while loading fasta sequences from $input_file\n";
}
}
}
}
## CoreDBAdaptor
## CoreDBAdaptor-style accessor: this object acts as its own GenomeContainer.
sub get_GenomeContainer {
    my ($self) = @_;
    return $self;
}
## CoreDBAdaptor-style accessor: this object acts as its own MetaContainer.
sub get_MetaContainer {
    my ($self) = @_;
    return $self;
}
## GenomeDB fields
## GenomeDB field: the taxonomy id recorded in the manifest.
sub get_taxonomy_id {
    my ($self) = @_;
    return $self->{taxonomy_id};
}
## GenomeDB field: the genebuild string recorded in the manifest.
sub get_genebuild {
    my ($self) = @_;
    return $self->{genebuild};
}
## GenomeDB field: the production name recorded in the manifest.
sub get_production_name {
    my ($self) = @_;
    return $self->{production_name};
}
## GenomeDB flag: whether the assembly has karyotype data; defaults to 0.
sub has_karyotype {
    my ($self) = @_;
    my $flag = $self->{'has_karyotype'};
    $flag ||= 0;
    return $flag;
}
## GenomeDB flag: whether this is a high-coverage assembly; defaults to 0.
sub is_high_coverage {
    my ($self) = @_;
    my $flag = $self->{'is_high_coverage'};
    $flag ||= 0;
    return $flag;
}
## GenomeDB field: the assembly name, or 'unknown_assembly' when unset.
sub assembly_name {
    my ($self) = @_;
    my $name = $self->{'assembly'};
    $name ||= 'unknown_assembly';
    return $name;
}
1;
| danstaines/ensembl-compara | modules/Bio/EnsEMBL/Compara/GenomeMF.pm | Perl | apache-2.0 | 13,416 |
# Copyright 2020, Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
package Google::Ads::GoogleAds::V10::Services::AdService::GetAdRequest;
use strict;
use warnings;
use base qw(Google::Ads::GoogleAds::BaseEntity);
use Google::Ads::GoogleAds::Utils::GoogleAdsHelper;
# Construct a GetAdRequest from the supplied arguments hash.  Only the
# resourceName field is recognised; fields left unset are stripped so the
# serialised JSON payload stays concise.
sub new {
    my ( $proto, $args ) = @_;

    my $self = { resourceName => $args->{resourceName} };

    # Delete the unassigned fields in this object for a more concise JSON payload
    remove_unassigned_fields( $self, $args );

    return bless $self, $proto;
}
1;
| googleads/google-ads-perl | lib/Google/Ads/GoogleAds/V10/Services/AdService/GetAdRequest.pm | Perl | apache-2.0 | 1,038 |
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package network::citrix::netscaler::snmp::mode::connections;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
# Declare the counter layout consumed by the counter-template base class.
# One pseudo-instance group 'global' (type 0) whose metrics are joined with
# ' - ' in the plugin output; each metric gets its own --warning-*/
# --critical-* threshold label and a perfdata series in connections ('con').
sub set_counters {
    my ($self, %options) = @_;

    $self->{maps_counters_type} = [
        { name => 'global', type => 0, message_separator => ' - ' }
    ];

    $self->{maps_counters}->{global} = [
        # Server-side connections in the ESTABLISHED state.
        { label => 'active', set => {
                key_values => [ { name => 'active' } ],
                output_template => 'Active Server TCP connections : %s',
                perfdatas => [
                    { label => 'active_server', value => 'active_absolute', template => '%s',
                      unit => 'con', min => 0 },
                ],
            }
        },
        # All NetScaler-to-server TCP connections.
        { label => 'server', set => {
                key_values => [ { name => 'server' } ],
                output_template => 'Server TCP connections : %s',
                perfdatas => [
                    { label => 'server', value => 'server_absolute', template => '%s',
                      unit => 'con', min => 0 },
                ],
            }
        },
        # All client-to-NetScaler TCP connections.
        { label => 'client', set => {
                key_values => [ { name => 'client' } ],
                output_template => 'Client TCP connections : %s',
                perfdatas => [
                    { label => 'client', value => 'client_absolute', template => '%s',
                      unit => 'con', min => 0 },
                ],
            }
        },
    ];
}
# Constructor: delegates to the counter-template base class and registers
# an (empty) extra option set; the --warning-*/--critical-* thresholds are
# handled generically by the template from the counters declared above.
sub new {
    my ( $class, %options ) = @_;

    my $self = $class->SUPER::new( package => __PACKAGE__, %options );
    bless $self, $class;

    $options{options}->add_options( arguments => {} );

    return $self;
}
# Fetch the three connection gauges in a single SNMP get and store them in
# $self->{global} for the counter template to threshold/report.
sub manage_selection {
    my ($self, %options) = @_;

    $self->{global} = { client => 0, server => 0, active => 0 };

    # Citrix NetScaler NS-ROOT-MIBv2 scalar OIDs (enterprise 5951).
    my $oid_tcpCurServerConn = '.1.3.6.1.4.1.5951.4.1.1.46.1.0';
    my $oid_tcpCurClientConn = '.1.3.6.1.4.1.5951.4.1.1.46.2.0';
    my $oid_tcpActiveServerConn = '.1.3.6.1.4.1.5951.4.1.1.46.8.0';
    # nothing_quit => 1 aborts the plugin if none of the OIDs answers.
    my $result = $options{snmp}->get_leef(oids => [$oid_tcpCurServerConn, $oid_tcpCurClientConn, $oid_tcpActiveServerConn ], nothing_quit => 1);
    $self->{global}->{client} = $result->{$oid_tcpCurClientConn};
    $self->{global}->{server} = $result->{$oid_tcpCurServerConn};
    $self->{global}->{active} = $result->{$oid_tcpActiveServerConn};
}
1;
__END__
=head1 MODE
Check connections usage (Client, Server, ActiveServer) (NS-ROOT-MIBv2).
=over 8
=item B<--warning-*>
Threshold warning.
Can be: 'server', 'active', 'client'.
=item B<--critical-*>
Threshold critical.
Can be: 'server', 'active', 'client'.
=back
=cut | Sims24/centreon-plugins | network/citrix/netscaler/snmp/mode/connections.pm | Perl | apache-2.0 | 3,491 |
# Example for the match_l option: both the input text and the check list are
# a 99-character run of 'A'; match_l => 9 truncates the text quoted in the
# result messages to 9 characters.
use strict;
use Outthentic::DSL;

my $input = 'A' x 99;
my $dsl   = Outthentic::DSL->new( $input, { match_l => 9 } );
$dsl->validate($input);

print "status\tcheck\n";
print "==========================\n";
for my $result ( @{ $dsl->results } ) {
    my $status = $result->{status} ? 'true' : 'false';
    print $status, "\t", $result->{message}, "\n";
}
| melezhik/outthentic-dsl | examples/match_l.pl | Perl | apache-2.0 | 277 |
#! /usr/local/bin/perl
#
# translate_pop3.pl - migrate mailboxes by draining each POP3 account listed
# in a text file into its local Maildir/new directory.
#
# Usage: ./translate_pop3.pl <txt file> <domain_id> <pop3_host>
#   <txt file>   lines of "mailid,rawpassword"
#   <domain_id>  s_idx key into DomainTransport (supplies the Maildir base dir)
#   <pop3_host>  POP3 server to fetch the messages from
#
use strict;
use warnings;

use DBI;
use Mail::POP3Client;
use Time::HiRes qw(time);

$|++;    # unbuffered output so per-user progress appears immediately

die "./translate_pop3.pl <txt file> <domain_id> <pop3_host>" if ( scalar(@ARGV) != 3 );

my ( $file, $domain_id, $pop3_host ) = @ARGV;
die "File " . $file . " seem like doesn't exist!" if ( !-e $file );

my $g_start          = time();
my $cnt              = 0;
my $retr_size_total  = 0;
my $retr_bulks_total = 0;

# XXX(security): database credentials are hard-coded in the source; they
# should be read from a protected configuration file instead.
# BUGFIX: the original called die_db(), a subroutine that was never defined,
# so a failed connect crashed with "Undefined subroutine" instead of a
# diagnostic.  Die with the DBI error string instead.
my $dbh = DBI->connect( "DBI:mysql:mail_db;host=210.200.211.3", "rmail", "xxxxxxx" )
    or die "Cannot connect to database: $DBI::errstr";

# Resolve the Maildir base directory for this domain (column 3 of the row).
my $sth = $dbh->prepare( sprintf( "select * from DomainTransport where s_idx=%d", $domain_id ) );
$sth->execute();
if ( $sth->rows != 1 ) {
    $dbh->disconnect();
    die "Domain id $domain_id doesn't exist!";
}
my $s_basedir = ( $sth->fetchrow_array() )[2];

open( my $list_fh, '<', $file ) or die "Cannot open $file: $!";
while ( my $line = <$list_fh> ) {
    my $s_start    = time();
    my $retr_size  = 0;
    my $retr_bulks = 0;
    chomp($line);
    my ( $s_mailid, $s_rawpass ) = split( /,/, $line );
    $s_mailid = lc($s_mailid);

    # Placeholders instead of string-built SQL: the mail id comes from an
    # external file and must not be interpolated into the statement.
    $sth = $dbh->prepare("select s_mhost, s_mbox from MailCheck where s_mailid=? and s_domain=?");
    $sth->execute( $s_mailid, $domain_id );
    next if $sth->rows != 1;
    my ( $s_mhost, $s_mbox ) = $sth->fetchrow_array();

    my $s_path = sprintf( "%s/%s/%s/Maildir/new", $s_basedir, $s_mhost, $s_mbox );
    next unless -e $s_path;
    chdir($s_path) or die "Cannot chdir to $s_path: $!";

    my $pop = Mail::POP3Client->new(
        USER     => $s_mailid,
        PASSWORD => $s_rawpass,
        HOST     => $pop3_host
    ) or die "Cannot connect to $pop3_host: $@";

    # Count() is constant for the session; hoist it out of the loop instead
    # of re-querying the server on every iteration.
    my $msg_count = $pop->Count();
    for my $i ( 1 .. $msg_count ) {
        # Maildir-style unique name: epoch.random+index.....mailhost
        my $file_name = sprintf( "%d.%05d%d.00000000.00.00.%s",
            time(), rand(10000), $i, $s_mhost );
        open( my $msg_fh, '>', $file_name ) or die "Cannot write $file_name: $!";
        foreach my $msg_line ( $pop->HeadAndBody($i) ) {
            print {$msg_fh} $msg_line, "\n";
            $retr_size       += length($msg_line);
            $retr_size_total += length($msg_line);
        }
        close($msg_fh) or die "Error closing $file_name: $!";
        $retr_bulks++;
        $retr_bulks_total++;
    }
    $pop->Close;

    printf( "%s\t%sRetrieve %d bulks, %d bytes, cost %f secs\n",
        $s_mailid, ( length($s_mailid) > 7 ) ? "" : "\t",
        $retr_bulks, $retr_size, time() - $s_start );
    $cnt++;
}
close($list_fh);
undef $sth;
$dbh->disconnect;
printf( "Total User: %d, Total Bulks: %d, Total Size: %d, Cost: %f\n",
    $cnt, $retr_bulks_total, $retr_size_total, time() - $g_start );
exit;
| TonyChengTW/PerlTools | translate_pop3.pl | Perl | apache-2.0 | 2,117 |
#!/usr/bin/env perl
#
# Write a disk image to an SD card (/dev/mmcblk0) with dd.
# Usage: etch.pl <image-file>
use strict;
use warnings;

my $src = $ARGV[0];
my $dst = '/dev/mmcblk0';

if ( not defined $src ) {
    print "First argument should be the source file. Aborting!\n";
    exit(1);
}
if ( not -e $src ) {
    print "Source file $src does not exist. Aborting!\n";
    exit(1);
}
if ( not -e $dst ) {
    print "Destination $dst doesn't seem to exist. Please insert your memory card and try again!\n";
    exit(1);
}

# SECURITY FIX: list-form system() bypasses the shell, so a filename
# containing spaces or shell metacharacters cannot inject extra commands
# (the old single-string form interpolated $src straight into a shell
# command line).  Also check dd's exit status instead of ignoring it.
my @cmd = (
    'sudo', 'dd',
    "if=$src", "of=$dst",
    'bs=16M', 'conv=noerror,sync', 'status=progress',
);
print "Running command '@cmd'\n";
system(@cmd) == 0
    or die "dd failed: exit status " . ( $? >> 8 ) . "\n";
| mahtuag/DistroSetup | DistroAgnostic/etch.pl | Perl | apache-2.0 | 523 |
package Search::Elasticsearch::Client::Compat;
$Search::Elasticsearch::Client::Compat::VERSION = '0.10';
use Moo;
with 'Search::Elasticsearch::Role::Client';
use strict;
use warnings;
use Any::URI::Escape qw(uri_escape);
use JSON;
use Search::Elasticsearch::Util qw(throw parse_params);
use Carp;
has 'JSON' => ( is => 'lazy' );
has '_base_qs' => ( is => 'ro', default => sub { {} } );
has '_default' => ( is => 'ro', default => sub { {} } );
has 'builder' => ( is => 'lazy' );
has 'builder_class' =>
( is => 'ro', default => 'ElasticSearch::SearchBuilder' );
use constant {
ONE_REQ => 1,
ONE_OPT => 2,
ONE_ALL => 3,
MULTI_ALL => 4,
MULTI_BLANK => 5,
MULTI_REQ => 6,
};
use constant {
CMD_NONE => [],
CMD_INDEX_TYPE_ID => [ index => ONE_REQ, type => ONE_REQ, id => ONE_REQ ],
CMD_INDEX_TYPE_id => [ index => ONE_REQ, type => ONE_REQ, id => ONE_OPT ],
CMD_INDEX_type_ID => [ index => ONE_REQ, type => ONE_ALL, id => ONE_REQ ],
CMD_Index => [ index => ONE_OPT ],
CMD_index => [ index => MULTI_BLANK ],
CMD_indices => [ index => MULTI_ALL ],
CMD_INDICES => [ index => MULTI_REQ ],
CMD_INDEX => [ index => ONE_REQ ],
CMD_INDEX_TYPE => [ index => ONE_REQ, type => ONE_REQ ],
CMD_INDEX_type => [ index => ONE_REQ, type => MULTI_BLANK ],
CMD_index_TYPE => [ index => MULTI_ALL, type => ONE_REQ ],
CMD_index_types => [ index => MULTI_ALL, type => MULTI_REQ ],
CMD_INDICES_TYPE => [ index => MULTI_REQ, type => ONE_REQ ],
CMD_index_type => [ index => MULTI_ALL, type => MULTI_BLANK ],
CMD_index_then_type => [ index => ONE_OPT, type => ONE_OPT ],
CMD_RIVER => [ river => ONE_REQ ],
CMD_nodes => [ node => MULTI_BLANK ],
CMD_NAME => [ name => ONE_REQ ],
CMD_INDEX_PERC => [ index => ONE_REQ, percolator => ONE_REQ ],
CONSISTENCY => [ 'enum', [ 'one', 'quorum', 'all' ] ],
REPLICATION => [ 'enum', [ 'async', 'sync' ] ],
SEARCH_TYPE => [
'enum',
[ 'dfs_query_then_fetch', 'dfs_query_and_fetch',
'query_then_fetch', 'query_and_fetch',
'count', 'scan'
]
],
IGNORE_INDICES => [ 'enum', [ 'missing', 'none' ] ],
};
our %QS_Format = (
boolean => '1 | 0',
duration => "'5m' | '10s'",
optional => "'scalar value'",
flatten => "'scalar' or ['scalar_1', 'scalar_n']",
'int' => "integer",
string => sub {
my $k = shift;
return $k eq 'preference'
? '_local | _primary | _primary_first | $string'
: $k eq 'percolate' || $k eq 'q' ? '$query_string'
: $k eq 'scroll_id' ? '$scroll_id'
: $k eq 'df' ? '$default_field'
: '$string';
},
float => 'float',
enum => sub { join " | ", @{ $_[1][1] } },
coderef => 'sub {..} | "IGNORE"',
);
our %QS_Formatter = (
boolean => sub {
my $key = shift;
my $val = $_[0] ? $_[1] : $_[2];
return unless defined $val;
return ref $val ? $val : [ $key, $val ? 'true' : 'false' ];
},
duration => sub {
my ( $k, $t ) = @_;
return unless defined $t;
return [ $k, $t ] if $t =~ /^\d+([smh]|ms)$/i;
die "$k '$t' is not in the form $QS_Format{duration}\n";
},
flatten => sub {
my $key = shift;
my $array = shift or return;
return [ $key, ref $array ? join( ',', @$array ) : $array ];
},
'int' => sub {
my $key = shift;
my $int = shift;
return unless defined $int;
eval { $int += 0; 1 } or die "'$key' is not an integer";
return [ $key, $int ];
},
'float' => sub {
my $key = shift;
my $float = shift;
return unless defined $float;
$key = shift if @_;
eval { $float += 0; 1 } or die "'$key' is not a float";
return [ $key, $float ];
},
'string' => sub {
my $key = shift;
my $string = shift;
return unless defined $string;
return [ $key, $string ];
},
'coderef' => sub {
my $key = shift;
my $coderef = shift;
return unless defined $coderef;
unless ( ref $coderef ) {
die "'$key' is not a code ref or the string 'IGNORE'"
unless $coderef eq 'IGNORE';
$coderef = sub { };
}
return [ $key, $coderef ];
},
'enum' => sub {
my $key = shift;
my $val = shift;
return unless defined $val;
my $vals = $_[0];
for (@$vals) {
return [ $key, $val ] if $val eq $_;
}
die "Unrecognised value '$val'. Allowed values: "
. join( ', ', @$vals );
},
);
#===================================
sub _build_JSON {
#===================================
JSON->new->utf8(1);
}
#===================================
sub _build_builder {
#===================================
my $self = shift;
my $class = $self->builder_class
or throw( 'Param', "No builder_class specified" );
eval "require $class; 1"
or throw( 'Internal',
"Couldn't load class $class: " . ( $@ || 'Unknown error' ) );
return $self->{_builder} = $class->new(@_);
}
#===================================
sub request {
#===================================
    # Send a prepared request through the transport layer and return the
    # decoded result, optionally post-processed and/or re-encoded as JSON.
    my ( $self, $params ) = parse_params(@_);
    my $result;
    eval { $result = $self->transport->perform_request($params); 1 };
    if ( my $error = $@ ) {
        # Rename the exception's 'vars' slot to '-vars' before rethrowing.
        # NOTE(review): this assumes $@ is always a hashref-style exception
        # object produced by the transport; a plain-string die would fail on
        # the hash access below -- confirm the transport guarantees that.
        $error->{-vars} = delete $error->{vars};
        die $error;
    }
    # Caller-supplied hook to massage the raw response, if given.
    $result
        = $params->{post_process}
        ? $params->{post_process}->($result)
        : $result;
    # as_json => return the serialised JSON string instead of the structure.
    return $params->{as_json} ? $self->JSON->encode($result) : $result;
}
#===================================
#===================================
sub use_index {
#===================================
    # Combined getter/setter for the default index name that is applied to
    # subsequent requests when no explicit index is given.
    my ( $self, @new ) = @_;
    $self->{_default}{index} = $new[0] if @new;
    return $self->{_default}{index};
}
#===================================
#===================================
sub use_type {
#===================================
    # Combined getter/setter for the default document type that is applied
    # to subsequent requests when no explicit type is given.
    my ( $self, @new ) = @_;
    $self->{_default}{type} = $new[0] if @new;
    return $self->{_default}{type};
}
#===================================
sub reindex {
#===================================
my ( $self, $params ) = parse_params(@_);
my $source = $params->{source}
or throw( 'Param', 'Missing source param' );
my $transform = $params->{transform} || sub { shift() };
my $verbose = !$params->{quiet};
my $dest_index = $params->{dest_index};
my $bulk_size = $params->{bulk_size} || 1000;
my $method = $params->{_method_name} || 'next';
local $| = $verbose;
printf( "Reindexing %d docs\n", $source->total )
if $verbose;
my @docs;
while (1) {
my $doc = $source->$method();
if ( !$doc or @docs == $bulk_size ) {
my $results = $self->bulk_index(
docs => \@docs,
map { $_ => $params->{$_} } qw(on_conflict on_error),
);
if ( my $err = $results->{errors} ) {
my @errors = splice @$err, 0, 5;
push @errors, sprintf "...and %d more", scalar @$err
if @$err;
throw( 'Request', "Errors occurred while reindexing:",
\@errors );
}
@docs = ();
print "." if $verbose;
}
last unless $doc;
$doc = $transform->($doc) or next;
$doc->{version_type} = 'external'
if defined $doc->{_version};
if ( my $fields = delete $doc->{fields} ) {
$doc->{parent} = $fields->{_parent}
if defined $fields->{_parent};
}
$doc->{_index} = $dest_index
if $dest_index;
push @docs, $doc;
}
print "\nDone\n" if $verbose;
}
#===================================
sub query_parser {
#===================================
require Search::Elasticsearch::Compat::QueryParser;
shift; # drop class/$self
Search::Elasticsearch::Compat::QueryParser->new(@_);
}
##################################
## DOCUMENT MANAGEMENT
##################################
#===================================
sub get {
#===================================
shift()->_do_action(
'get',
{ cmd => CMD_INDEX_type_ID,
qs => {
fields => ['flatten'],
ignore_missing => [ 'boolean', 1 ],
preference => ['string'],
refresh => [ 'boolean', 1 ],
routing => ['string'],
parent => ['string'],
},
},
@_
);
}
#===================================
sub exists : method {
#===================================
shift()->_do_action(
'exists',
{ method => 'HEAD',
cmd => CMD_INDEX_TYPE_ID,
qs => {
preference => ['string'],
refresh => [ 'boolean', 1 ],
routing => ['string'],
parent => ['string'],
},
fixup => sub { $_[1]->{qs}{ignore_missing} = 1 },
post_process => sub { $_[0] ? { ok => 1 } : undef },
},
@_
);
}
#===================================
sub mget {
#===================================
my ( $self, $params ) = parse_params(@_);
$params->{$_} ||= $self->{_default}{$_} for qw(index type);
if ( $params->{index} ) {
if ( my $ids = delete $params->{ids} ) {
throw( 'Param', 'mget',
'Cannot specify both ids and docs in mget()' )
if $params->{docs};
$params->{docs} = [ map { +{ _id => $_ } } @$ids ];
}
}
else {
throw( 'Param',
'Cannot specify a type for mget() without specifying index' )
if $params->{type};
throw( 'Param', 'Use of the ids param with mget() requires an index' )
if $params->{ids};
}
my $filter;
$self->_do_action(
'mget',
{ cmd => [ index => ONE_OPT, type => ONE_OPT ],
postfix => '_mget',
data => { docs => 'docs' },
qs => {
fields => ['flatten'],
filter_missing => [ 'boolean', 1 ],
},
fixup => sub {
$_[1]->{skip} = [] unless @{ $_[1]{body}{docs} };
$filter = delete $_[1]->{qs}{filter_missing};
},
post_process => sub {
my $result = shift;
my $docs = $result->{docs};
return $filter ? [ grep { $_->{exists} } @$docs ] : $docs;
}
},
$params
);
}
my %Index_Defn = (
cmd => CMD_INDEX_TYPE_id,
qs => {
consistency => CONSISTENCY,
create => [ 'boolean', [ op_type => 'create' ] ],
parent => ['string'],
percolate => ['string'],
refresh => [ 'boolean', 1 ],
replication => REPLICATION,
routing => ['string'],
timeout => ['duration'],
timestamp => ['string'],
ttl => ['int'],
version => ['int'],
version_type => [ 'enum', [ 'internal', 'external' ] ],
},
data => { data => 'data' },
fixup => sub {
$_[1]{body} = $_[1]{body}{data};
}
);
#===================================
sub index {
#===================================
my ( $self, $params ) = parse_params(@_);
$self->_index( 'index', \%Index_Defn, $params );
}
#===================================
sub set {
#===================================
my ( $self, $params ) = parse_params(@_);
$self->_index( 'set', \%Index_Defn, $params );
}
#===================================
sub create {
#===================================
my ( $self, $params ) = parse_params(@_);
$self->_index( 'create', \%Index_Defn, { %$params, create => 1 } );
}
#===================================
sub _index {
#===================================
my $self = shift;
$_[1]->{method} = $_[2]->{id} ? 'PUT' : 'POST';
$self->_do_action(@_);
}
#===================================
sub update {
#===================================
shift()->_do_action(
'update',
{ method => 'POST',
cmd => CMD_INDEX_TYPE_ID,
postfix => '_update',
data => {
script => ['script'],
params => ['params'],
doc => ['doc'],
upsert => ['upsert'],
},
qs => {
consistency => CONSISTENCY,
fields => ['flatten'],
ignore_missing => [ 'boolean', 1 ],
parent => ['string'],
percolate => ['string'],
retry_on_conflict => ['int'],
routing => ['string'],
timeout => ['duration'],
replication => REPLICATION,
}
},
@_
);
}
#===================================
sub delete {
#===================================
shift()->_do_action(
'delete',
{ method => 'DELETE',
cmd => CMD_INDEX_TYPE_ID,
qs => {
consistency => CONSISTENCY,
ignore_missing => [ 'boolean', 1 ],
refresh => [ 'boolean', 1 ],
parent => ['string'],
routing => ['string'],
version => ['int'],
replication => REPLICATION,
}
},
@_
);
}
#===================================
sub analyze {
#===================================
shift()->_do_action(
'analyze',
{ method => 'GET',
cmd => CMD_Index,
postfix => '_analyze',
qs => {
text => ['string'],
analyzer => ['string'],
tokenizer => ['string'],
filters => ['flatten'],
field => ['string'],
format => [ 'enum', [ 'detailed', 'text' ] ],
prefer_local => [ 'boolean', undef, 0 ],
}
},
@_
);
}
##################################
## BULK INTERFACE
##################################
#===================================
sub bulk {
#===================================
my $self = shift;
$self->_bulk( 'bulk', $self->_bulk_params( 'actions', @_ ) );
}
#===================================
sub _bulk {
#===================================
my ( $self, $method, $params ) = @_;
my %callbacks;
my $actions = $params->{actions} || [];
$self->_do_action(
$method,
{ cmd => CMD_index_then_type,
method => 'POST',
postfix => '_bulk',
qs => {
consistency => CONSISTENCY,
replication => REPLICATION,
refresh => [ 'boolean', 1 ],
timeout => ['duration'],
on_conflict => ['coderef'],
on_error => ['coderef'],
},
data => { actions => 'actions' },
fixup => sub {
die "Cannot specify type without index"
if $params->{type} && !$params->{index};
$_[1]->{body} = $self->_bulk_request($actions);
$_[1]->{skip} = { actions => [], results => [] }
unless $_[1]->{body};
$callbacks{$_} = delete $_[1]->{qs}{$_}
for qw(on_error on_conflict);
},
post_process => sub {
$self->_bulk_response( \%callbacks, $actions, @_ );
},
},
$params
);
}
#===================================
# Convenience wrappers: each forwards its docs/params to _bulk_action with
# the corresponding bulk operation name ('index', 'create' or 'delete').
sub bulk_index { shift->_bulk_action( 'index', @_ ) }
sub bulk_create { shift->_bulk_action( 'create', @_ ) }
sub bulk_delete { shift->_bulk_action( 'delete', @_ ) }
#===================================
#===================================
sub _bulk_action {
#===================================
my $self = shift;
my $action = shift;
my $params = $self->_bulk_params( 'docs', @_ );
$params->{actions}
= [ map { +{ $action => $_ } } @{ delete $params->{docs} } ];
return $self->_bulk( "bulk_$action", $params );
}
#===================================
#===================================
sub _bulk_params {
#===================================
    # Normalise the various historical calling conventions of the bulk
    # methods into a single params hashref keyed by $key ('actions'/'docs').
    my ( $self, $key, @args ) = @_;

    # Flat key/value pairs (or no arguments at all).
    return { $key => [], @args } unless ref $args[0];

    # Single ref argument: either the doc/action list itself, or a params hash.
    unless ( @args > 1 ) {
        return ref $args[0] eq 'ARRAY'
            ? { $key => $args[0] }
            : { $key => [], %{ $args[0] } };
    }

    # Legacy multi-argument signatures -- still supported, but deprecated.
    carp "The method signature for bulk methods has changed. "
        . "Please check the docs.";
    if ( ref $args[0] eq 'ARRAY' ) {
        my $docs   = shift @args;
        my $params = ref $args[0] ? shift @args : {@args};
        $params->{$key} = $docs;
        return $params;
    }
    return { $key => \@args };
}
my %Bulk_Actions = (
'delete' => {
index => ONE_OPT,
type => ONE_OPT,
id => ONE_REQ,
parent => ONE_OPT,
routing => ONE_OPT,
version => ONE_OPT,
version_type => ONE_OPT,
},
'index' => {
index => ONE_OPT,
type => ONE_OPT,
id => ONE_OPT,
data => ONE_REQ,
routing => ONE_OPT,
parent => ONE_OPT,
percolate => ONE_OPT,
timestamp => ONE_OPT,
ttl => ONE_OPT,
version => ONE_OPT,
version_type => ONE_OPT,
},
);
$Bulk_Actions{create} = $Bulk_Actions{index};
#===================================
sub _bulk_request {
#===================================
my $self = shift;
my $actions = shift;
my $json = $self->JSON;
my $indenting = $json->get_indent;
$json->indent(0);
my $json_docs = '';
my $error;
eval {
for my $data (@$actions) {
die "'actions' must be an ARRAY ref of HASH refs"
unless ref $data eq 'HASH';
my ( $action, $params ) = %$data;
$action ||= '';
my $defn = $Bulk_Actions{$action}
|| die "Unknown action '$action'";
my %metadata;
$params = {%$params};
delete @{$params}{qw(_score sort)};
$params->{data} ||= delete $params->{_source}
if $params->{_source};
for my $key ( keys %$defn ) {
my $val = delete $params->{$key};
$val = delete $params->{"_$key"} unless defined $val;
unless ( defined $val ) {
next if $defn->{$key} == ONE_OPT;
die "Missing required param '$key' for action '$action'";
}
$metadata{"_$key"} = $val;
}
die "Unknown params for bulk action '$action': "
. join( ', ', keys %$params )
if keys %$params;
my $data = delete $metadata{_data};
my $request = $json->encode( { $action => \%metadata } ) . "\n";
if ($data) {
$data = $json->encode($data) if ref $data eq 'HASH';
$request .= $data . "\n";
}
$json_docs .= $request;
}
1;
} or $error = $@ || 'Unknown error';
$json->indent($indenting);
die $error if $error;
return $json_docs;
}
#===================================
sub _bulk_response {
#===================================
my $self = shift;
my $callbacks = shift;
my $actions = shift;
my $results = shift;
my $items = ref($results) eq 'HASH' && $results->{items}
|| throw( 'Request', 'Malformed response to bulk query', $results );
my ( @errors, %matches );
my ( $on_conflict, $on_error ) = @{$callbacks}{qw(on_conflict on_error)};
for ( my $i = 0; $i < @$actions; $i++ ) {
my ( $action, $item ) = ( %{ $items->[$i] } );
if ( my $match = $item->{matches} ) {
push @{ $matches{$_} }, $item for @$match;
}
my $error = $items->[$i]{$action}{error} or next;
if ( $on_conflict
and $error =~ /
VersionConflictEngineException
| DocumentAlreadyExistsException
/x
)
{
$on_conflict->( $action, $actions->[$i]{$action}, $error, $i );
}
elsif ($on_error) {
$on_error->( $action, $actions->[$i]{$action}, $error, $i );
}
else {
push @errors, { action => $actions->[$i], error => $error };
}
}
return {
actions => $actions,
results => $items,
matches => \%matches,
took => $results->{took},
( @errors ? ( errors => \@errors ) : () )
};
}
##################################
## DSL FIXUP
##################################
#===================================
sub _to_dsl {
#===================================
    # Convert SearchBuilder-style clauses into native query DSL in place.
    #
    # $ops maps source keys to destination keys (eg queryb => query).  For
    # each clause hashref passed, any present source key is removed, run
    # through the SearchBuilder, and the result stored under the destination
    # key.  Dies if a clause carries both the builder-style and the native
    # key at once.
    my $self = shift;
    my $ops  = shift;
    my $builder;
    foreach my $clause (@_) {
        while ( my ( $old, $new ) = each %$ops ) {
            my $src = delete $clause->{$old} or next;
            die "Cannot specify $old and $new parameters.\n"
                if $clause->{$new};
            # Builder is created lazily, only once a *b key is actually seen.
            $builder ||= $self->builder;
            # 'query' destinations go through query(); everything else
            # (filter, facet_filter) through filter().
            my $method = $new eq 'query' ? 'query' : 'filter';
            my $sub_clause = $builder->$method($src) or next;
            $clause->{$new} = $sub_clause->{$method};
        }
    }
}
#===================================
sub _data_fixup {
#===================================
my $self = shift;
my $data = shift;
$self->_to_dsl( { queryb => 'query', filterb => 'filter' }, $data );
my $facets = $data->{facets} or return;
die "(facets) must be a HASH ref" unless ref $facets eq 'HASH';
$facets = $data->{facets} = {%$facets};
for ( values %$facets ) {
die "All (facets) must be HASH refs" unless ref $_ eq 'HASH';
$_ = my $facet = {%$_};
$self->_to_dsl(
{ queryb => 'query',
filterb => 'filter',
facet_filterb => 'facet_filter'
},
$facet
);
}
}
#===================================
sub _query_fixup {
#===================================
my $self = shift;
my $args = shift;
$self->_to_dsl( { queryb => 'query' }, $args->{body} );
if ( my $query = delete $args->{body}{query} ) {
my ( $k, $v ) = %$query;
$args->{body}{$k} = $v;
}
}
#===================================
sub _warmer_fixup {
#===================================
my ( $self, $args ) = @_;
my $warmers = $args->{body}{warmers} or return;
$warmers = $args->{body}{warmers} = {%$warmers};
for ( values %$warmers ) {
$_ = {%$_};
my $source = $_->{source} or next;
$_->{source} = $source = {%$source};
$self->_data_fixup($source);
}
}
##################################
## QUERIES
##################################
my %Search_Data = (
explain => ['explain'],
facets => ['facets'],
fields => ['fields'],
filter => ['filter'],
filterb => ['filterb'],
from => ['from'],
highlight => ['highlight'],
indices_boost => ['indices_boost'],
min_score => ['min_score'],
script_fields => ['script_fields'],
size => ['size'],
'sort' => ['sort'],
track_scores => ['track_scores'],
);
my %Search_Defn = (
cmd => CMD_index_type,
postfix => '_search',
data => {
%Search_Data,
query => ['query'],
queryb => ['queryb'],
partial_fields => ['partial_fields']
},
qs => {
search_type => SEARCH_TYPE,
ignore_indices => IGNORE_INDICES,
preference => ['string'],
routing => ['flatten'],
timeout => ['duration'],
scroll => ['duration'],
stats => ['flatten'],
version => [ 'boolean', 1 ]
},
fixup => sub { $_[0]->_data_fixup( $_[1]->{body} ) },
);
my %SearchQS_Defn = (
cmd => CMD_index_type,
postfix => '_search',
qs => {
q => ['string'],
df => ['string'],
analyze_wildcard => [ 'boolean', 1 ],
analyzer => ['string'],
default_operator => [ 'enum', [ 'OR', 'AND' ] ],
explain => [ 'boolean', 1 ],
fields => ['flatten'],
from => ['int'],
ignore_indices => IGNORE_INDICES,
lenient => [ 'boolean', 1 ],
lowercase_expanded_terms => [ 'boolean', 1 ],
min_score => ['float'],
preference => ['string'],
quote_analyzer => ['string'],
quote_field_suffix => ['string'],
routing => ['flatten'],
scroll => ['duration'],
search_type => SEARCH_TYPE,
size => ['int'],
'sort' => ['flatten'],
stats => ['flatten'],
timeout => ['duration'],
version => [ 'boolean', 1 ],
},
);
my %Query_Defn = (
data => {
query => ['query'],
queryb => ['queryb'],
},
deprecated => {
bool => ['bool'],
boosting => ['boosting'],
constant_score => ['constant_score'],
custom_score => ['custom_score'],
dis_max => ['dis_max'],
field => ['field'],
field_masking_span => ['field_masking_span'],
filtered => ['filtered'],
flt => [ 'flt', 'fuzzy_like_this' ],
flt_field => [ 'flt_field', 'fuzzy_like_this_field' ],
fuzzy => ['fuzzy'],
has_child => ['has_child'],
ids => ['ids'],
match_all => ['match_all'],
mlt => [ 'mlt', 'more_like_this' ],
mlt_field => [ 'mlt_field', 'more_like_this_field' ],
prefix => ['prefix'],
query_string => ['query_string'],
range => ['range'],
span_first => ['span_first'],
span_near => ['span_near'],
span_not => ['span_not'],
span_or => ['span_or'],
span_term => ['span_term'],
term => ['term'],
terms => [ 'terms', 'in' ],
text => ['text'],
text_phrase => ['text_phrase'],
text_phrase_prefix => ['text_phrase_prefix'],
top_children => ['top_children'],
wildcard => ['wildcard'],
}
);
#===================================
sub search { shift()->_do_action( 'search', \%Search_Defn, @_ ) }
sub searchqs { shift()->_do_action( 'searchqs', \%SearchQS_Defn, @_ ) }
#===================================
#===================================
sub msearch {
#===================================
my ( $self, $params ) = parse_params(@_);
my $queries = $params->{queries} || [];
my $order;
if ( ref $queries eq 'HASH' ) {
$order = {};
my $i = 0;
my @queries;
for ( sort keys %$queries ) {
$order->{$_} = $i++;
push @queries, $queries->{$_};
}
$queries = \@queries;
}
$self->_do_action(
'msearch',
{ cmd => CMD_index_type,
method => 'GET',
postfix => '_msearch',
qs => { search_type => SEARCH_TYPE },
data => { queries => 'queries' },
fixup => sub {
my ( $self, $args ) = @_;
$args->{body} = $self->_msearch_queries($queries);
$args->{skip} = $order ? {} : [] unless $args->{body};
},
post_process => sub {
my $responses = shift->{responses};
return $responses unless $order;
return {
map { $_ => $responses->[ $order->{$_} ] }
keys %$order
};
},
},
$params
);
}
my %MSearch = (
( map { $_ => 'h' } 'index', 'type', keys %{ $Search_Defn{qs} } ),
( map { $_ => 'b' } 'timeout', 'stats',
'version', keys %{ $Search_Defn{data} }
)
);
delete $MSearch{scroll};
#===================================
sub _msearch_queries {
#===================================
my $self = shift;
my $queries = shift;
my $json = $self->JSON;
my $indenting = $json->get_indent;
$json->indent(0);
my $json_docs = '';
my $error;
eval {
for my $query (@$queries) {
die "'queries' must contain HASH refs\n"
unless ref $query eq 'HASH';
my %request = ( h => {}, b => {} );
for ( keys %$query ) {
my $dest = $MSearch{$_}
or die "Unknown param for msearch: $_\n";
$request{$dest}{$_} = $query->{$_};
}
# flatten arrays
for (qw(index type stats routing)) {
$request{h}{$_} = join ",", @{ $request{h}{$_} }
if ref $request{h}{$_} eq 'ARRAY';
}
$self->_data_fixup( $request{b} );
$json_docs .= $json->encode( $request{h} ) . "\n"
. $json->encode( $request{b} ) . "\n";
}
1;
} or $error = $@ || 'Unknown error';
$json->indent($indenting);
die $error if $error;
return $json_docs;
}
#===================================
# GET /{index}/{type}/_validate/query
# Checks whether a query is valid without executing it.
sub validate_query {
#===================================
    my $self = shift;
    return $self->_do_action(
        'validate_query',
        {   cmd     => CMD_index_type,
            postfix => '_validate/query',
            data    => {
                query  => ['query'],
                queryb => ['queryb'],
            },
            qs => {
                q              => ['string'],
                explain        => [ 'boolean', 1 ],
                ignore_indices => IGNORE_INDICES,
            },
            fixup => sub {
                my $args = $_[1];
                unless ( defined $args->{qs}{q} ) {
                    eval { _query_fixup(@_); 1 } or do {
                        # an empty body is acceptable here; only re-throw
                        # the query/queryb conflict error
                        die $@ if $@ =~ /Cannot specify queryb and query/;
                    };
                    return;
                }
                die "Cannot specify q and query/queryb parameters.\n"
                    if %{ $args->{body} };
                delete $args->{body};
            },
        },
        @_
    );
}
#===================================
# GET /{index}/{type}/{id}/_explain
# Explains how a query scores (or fails to match) a single document.
sub explain {
#===================================
    my $self = shift;
    return $self->_do_action(
        'explain',
        {   cmd     => CMD_INDEX_TYPE_ID,
            postfix => '_explain',
            data    => {
                query  => ['query'],
                queryb => ['queryb'],
            },
            qs => {
                preference               => ['string'],
                routing                  => ['string'],
                q                        => ['string'],
                df                       => ['string'],
                analyzer                 => ['string'],
                analyze_wildcard         => [ 'boolean', 1 ],
                default_operator         => [ 'enum', [ 'OR', 'AND' ] ],
                fields                   => ['flatten'],
                lowercase_expanded_terms => [ 'boolean', undef, 0 ],
                lenient                  => [ 'boolean', 1 ],
            },
            fixup => sub {
                my $args = $_[1];
                unless ( defined $args->{qs}{q} ) {
                    $_[0]->_data_fixup( $args->{body} );
                    return;
                }
                # a lite-string query and a structured query are exclusive
                die "Cannot specify q and query/queryb parameters.\n"
                    if %{ $args->{body} };
                delete $args->{body};
            },
        },
        @_
    );
}
#===================================
# GET /_search/scroll — fetch the next batch of a scrolled search.
sub scroll {
#===================================
    my $self = shift;
    return $self->_do_action(
        'scroll',
        {   cmd    => [],
            prefix => '_search/scroll',
            qs     => {
                scroll_id => ['string'],
                scroll    => ['duration'],
            }
        },
        @_
    );
}
#===================================
# Convenience wrapper returning a ScrolledSearch iterator object.
sub scrolled_search {
#===================================
    my ( $self, @args ) = @_;
    require Search::Elasticsearch::Compat::ScrolledSearch;
    return Search::Elasticsearch::Compat::ScrolledSearch->new( $self, @args );
}
#===================================
# DELETE /{index}/{type}/_query — delete all docs matching a query.
# NOTE: key order below matters — later keys override %Search_Defn,
# and %Query_Defn overrides both.
sub delete_by_query {
#===================================
    my $self = shift;
    return $self->_do_action(
        'delete_by_query',
        {   %Search_Defn,
            method  => 'DELETE',
            postfix => '_query',
            qs      => {
                consistency => CONSISTENCY,
                replication => REPLICATION,
                routing     => ['flatten'],
            },
            %Query_Defn,
            fixup => sub {
                my ( undef, $args ) = @_;
                _query_fixup(@_);
                die "Missing required param 'query' or 'queryb'\n"
                    unless %{ $args->{body} };
            },
        },
        @_
    );
}
#===================================
# GET /{index}/{type}/_count — count docs matching a query.
# NOTE: key order matters — %Query_Defn overrides %Search_Defn keys.
sub count {
#===================================
    my $self = shift;
    return $self->_do_action(
        'count',
        {   %Search_Defn,
            postfix => '_count',
            %Query_Defn,
            qs => {
                routing        => ['flatten'],
                ignore_indices => IGNORE_INDICES,
            },
            fixup => sub {
                my ( undef, $args ) = @_;
                _query_fixup(@_);
                # an empty body means "count everything": send no body
                delete $args->{body} unless %{ $args->{body} };
            },
        },
        @_
    );
}
#===================================
# GET /{index}/{type}/{id}/_mlt — "more like this" search based on an
# existing document.
sub mlt {
#===================================
    my $self = shift;
    return $self->_do_action(
        'mlt',
        {   cmd     => CMD_INDEX_TYPE_ID,
            method  => 'GET',
            postfix => '_mlt',
            qs      => {
                mlt_fields         => ['flatten'],
                pct_terms_to_match => [ 'float', 'percent_terms_to_match' ],
                min_term_freq      => ['int'],
                max_query_terms    => ['int'],
                stop_words         => ['flatten'],
                min_doc_freq       => ['int'],
                max_doc_freq       => ['int'],
                min_word_len       => ['int'],
                max_word_len       => ['int'],
                boost_terms        => ['float'],
                routing            => ['flatten'],
                search_indices     => ['flatten'],
                search_from        => ['int'],
                search_size        => ['int'],
                search_type        => SEARCH_TYPE,
                search_types       => ['flatten'],
                search_scroll      => ['string'],
            },
            data => {
                explain       => ['explain'],
                facets        => ['facets'],
                fields        => ['fields'],
                filter        => ['filter'],
                filterb       => ['filterb'],
                highlight     => ['highlight'],
                indices_boost => ['indices_boost'],
                min_score     => ['min_score'],
                script_fields => ['script_fields'],
                'sort'        => ['sort'],
                track_scores  => ['track_scores'],
            },
            fixup => sub {
                my ( $es, $args ) = @_;
                $es->_to_dsl( { filterb => 'filter' }, $args->{body} );
            },
        },
        @_
    );
}
##################################
## PERCOLATOR
##################################
#===================================
# PUT /_percolator/{index}/{percolator} — register a percolator query.
sub create_percolator {
#===================================
    my $self = shift;
    return $self->_do_action(
        'create_percolator',
        {   cmd    => CMD_INDEX_PERC,
            prefix => '_percolator',
            method => 'PUT',
            data   => {
                query  => ['query'],
                queryb => ['queryb'],
                data   => ['data']
            },
            fixup => sub {
                my ( $es, $args ) = @_;
                my $body = $args->{body};
                $es->_to_dsl( { queryb => 'query' }, $body );
                die('create_percolator() requires either the query or queryb param'
                ) unless $body->{query};
                # extra metadata must not clash with the query itself
                die 'The "data" param cannot include a "query" key'
                    if $body->{data}{query};
                $args->{body} = {
                    query => $body->{query},
                    %{ $body->{data} }
                };
            },
        },
        @_
    );
}
#===================================
# DELETE /_percolator/{index}/{percolator}
sub delete_percolator {
#===================================
    my $self = shift;
    return $self->_do_action(
        'delete_percolator',
        {   cmd    => CMD_INDEX_PERC,
            prefix => '_percolator',
            method => 'DELETE',
            qs     => { ignore_missing => [ 'boolean', 1 ], }
        },
        @_
    );
}
#===================================
# GET /_percolator/{index}/{percolator} — fetch a registered percolator,
# unpacking the raw doc into { index, percolator, query, data }.
sub get_percolator {
#===================================
    my $self = shift;
    return $self->_do_action(
        'get_percolator',
        {   cmd          => CMD_INDEX_PERC,
            prefix       => '_percolator',
            method       => 'GET',
            qs           => { ignore_missing => [ 'boolean', 1 ], },
            post_process => sub {
                my $doc = shift;
                # e.g. as_json responses pass through untouched
                return $doc unless ref $doc eq 'HASH';
                my $source = $doc->{_source};
                return {
                    index      => $doc->{_type},
                    percolator => $doc->{_id},
                    query      => delete $source->{query},
                    data       => $source,
                };
            },
        },
        @_
    );
}
#===================================
# GET /{index}/{type}/_percolate — find which percolators match a doc.
sub percolate {
#===================================
    my $self = shift;
    return $self->_do_action(
        'percolate',
        {   cmd     => CMD_INDEX_TYPE,
            postfix => '_percolate',
            method  => 'GET',
            qs      => { prefer_local => [ 'boolean', undef, 0 ] },
            data    => { doc => 'doc', query => ['query'] },
        },
        @_
    );
}
##################################
## INDEX ADMIN
##################################
#===================================
# GET /{index}/_status
sub index_status {
#===================================
    my $self = shift;
    return $self->_do_action(
        'index_status',
        {   cmd     => CMD_index,
            postfix => '_status',
            qs      => {
                recovery       => [ 'boolean', 1 ],
                snapshot       => [ 'boolean', 1 ],
                ignore_indices => IGNORE_INDICES,
            },
        },
        @_
    );
}
#===================================
# GET /{index}/_stats
sub index_stats {
#===================================
    my $self = shift;
    return $self->_do_action(
        'index_stats',
        {   cmd     => CMD_index,
            postfix => '_stats',
            qs      => {
                docs           => [ 'boolean', 1, 0 ],
                store          => [ 'boolean', 1, 0 ],
                indexing       => [ 'boolean', 1, 0 ],
                get            => [ 'boolean', 1, 0 ],
                search         => [ 'boolean', 1, 0 ],
                clear          => [ 'boolean', 1 ],
                all            => [ 'boolean', 1 ],
                merge          => [ 'boolean', 1 ],
                flush          => [ 'boolean', 1 ],
                refresh        => [ 'boolean', 1 ],
                types          => ['flatten'],
                groups         => ['flatten'],
                level          => [ 'enum', [qw(shards)] ],
                ignore_indices => IGNORE_INDICES,
            },
        },
        @_
    );
}
#===================================
# GET /{index}/_segments
sub index_segments {
#===================================
    my $self = shift;
    return $self->_do_action(
        'index_segments',
        {   cmd     => CMD_index,
            postfix => '_segments',
            qs      => { ignore_indices => IGNORE_INDICES, }
        },
        @_
    );
}
#===================================
# PUT /{index} — create an index with optional settings/mappings/warmers.
sub create_index {
#===================================
    my $self = shift;
    return $self->_do_action(
        'create_index',
        {   method  => 'PUT',
            cmd     => CMD_INDEX,
            postfix => '',
            data    => {
                settings => ['settings'],
                mappings => ['mappings'],
                warmers  => ['warmers'],
            },
            fixup => \&_warmer_fixup
        },
        @_
    );
}
#===================================
# DELETE /{indices}
sub delete_index {
#===================================
    my $self = shift;
    return $self->_do_action(
        'delete_index',
        {   method  => 'DELETE',
            cmd     => CMD_INDICES,
            postfix => '',
            qs      => { ignore_missing => [ 'boolean', 1 ], },
        },
        @_
    );
}
#===================================
# HEAD /{index} — returns { ok => 1 } if the index exists, else undef.
sub index_exists {
#===================================
    my $self = shift;
    return $self->_do_action(
        'index_exists',
        {   method       => 'HEAD',
            cmd          => CMD_index,
            fixup        => sub { $_[1]{qs}{ignore_missing} = 1 },
            post_process => sub { return $_[0] ? { ok => 1 } : undef },
        },
        @_
    );
}
#===================================
# POST /{index}/_open
sub open_index {
#===================================
    my $self = shift;
    return $self->_do_action(
        'open_index',
        {   method  => 'POST',
            cmd     => CMD_INDEX,
            postfix => '_open'
        },
        @_
    );
}
#===================================
# POST /{index}/_close
sub close_index {
#===================================
    my $self = shift;
    return $self->_do_action(
        'close_index',
        {   method  => 'POST',
            cmd     => CMD_INDEX,
            postfix => '_close'
        },
        @_
    );
}
#===================================
# POST /_aliases — add/remove index aliases; accepts a single action
# hashref or an arrayref of them. filterb specs are translated to the
# native filter DSL without mutating the caller's data.
sub aliases {
#===================================
    my ( $self, $params ) = parse_params(@_);
    my $actions = $params->{actions};
    $params->{actions} = [$actions]
        if defined $actions && ref $actions ne 'ARRAY';
    return $self->_do_action(
        'aliases',
        {   prefix => '_aliases',
            method => 'POST',
            cmd    => [],
            data   => { actions => 'actions' },
            fixup  => sub {
                my ( $es, $args ) = @_;
                my @converted = map {
                    my ( $action, $spec ) = %$_;
                    # shallow-copy so the caller's hash isn't modified
                    my $copy = {%$spec};
                    $es->_to_dsl( { filterb => 'filter' }, $copy );
                    { $action => $copy };
                } @{ $args->{body}{actions} };
                $args->{body}{actions} = \@converted;
            },
        },
        $params
    );
}
#===================================
# GET /{index}/_aliases — retrieve alias definitions.
# FIX: the action label was 'aliases' (copy-pasted from aliases()),
# which made usage/error messages report the wrong method name.
sub get_aliases {
#===================================
    my $self = shift;
    return $self->_do_action(
        'get_aliases',
        {   postfix => '_aliases',
            cmd     => CMD_index,
            qs      => { ignore_missing => [ 'boolean', 1 ] },
        },
        @_
    );
}
#===================================
# PUT /{index}/{type}/_warmer/{warmer} — register an index warmer.
# The warmer name is appended to the request path by the fixup.
sub create_warmer {
#===================================
    my $self = shift;
    return $self->_do_action(
        'create_warmer',
        {   method  => 'PUT',
            cmd     => CMD_index_type,
            postfix => '_warmer/',
            data    => {
                warmer        => 'warmer',
                facets        => ['facets'],
                filter        => ['filter'],
                filterb       => ['filterb'],
                script_fields => ['script_fields'],
                'sort'        => ['sort'],
                query         => ['query'],
                queryb        => ['queryb'],
            },
            fixup => sub {
                my ( $es, $args ) = @_;
                # move the warmer name out of the body and onto the path
                $args->{path} .= delete $args->{body}{warmer};
                $es->_data_fixup( $args->{body} );
            },
        },
        @_
    );
}
#===================================
# GET /{indices}/_warmer/{warmer} — fetch warmer definitions.
# Defaults to '*' (all warmers) when no warmer name is given.
sub warmer {
#===================================
    my ( $self, $params ) = parse_params(@_);
    $params->{warmer} = '*'
        unless defined $params->{warmer} and length $params->{warmer};
    return $self->_do_action(
        'warmer',
        {   method  => 'GET',
            cmd     => CMD_indices,
            postfix => '_warmer/',
            data    => { warmer => ['warmer'] },
            qs      => { ignore_missing => [ 'boolean', 1 ] },
            fixup   => sub {
                my ( undef, $args ) = @_;
                $args->{path} .= delete $args->{body}{warmer};
            },
        },
        $params
    );
}
#===================================
# DELETE /{indices}/_warmer/{warmer}
sub delete_warmer {
#===================================
    my $self = shift;
    return $self->_do_action(
        'delete_warmer',
        {   method  => 'DELETE',
            cmd     => CMD_INDICES,
            postfix => '_warmer/',
            data    => { warmer => 'warmer' },
            qs      => { ignore_missing => [ 'boolean', 1 ] },
            fixup   => sub {
                my ( undef, $args ) = @_;
                $args->{path} .= delete $args->{body}{warmer};
            },
        },
        @_
    );
}
#===================================
# PUT /_template/{name} — register an index template.
sub create_index_template {
#===================================
    my $self = shift;
    return $self->_do_action(
        'create_index_template',
        {   method => 'PUT',
            cmd    => CMD_NAME,
            prefix => '_template',
            data   => {
                template => 'template',
                settings => ['settings'],
                mappings => ['mappings'],
                warmers  => ['warmers'],
                order    => ['order'],
            },
            fixup => \&_warmer_fixup
        },
        @_
    );
}
#===================================
# DELETE /_template/{name}
sub delete_index_template {
#===================================
    my $self = shift;
    return $self->_do_action(
        'delete_index_template',
        {   method => 'DELETE',
            cmd    => CMD_NAME,
            prefix => '_template',
            qs     => { ignore_missing => [ 'boolean', 1 ] },
        },
        @_
    );
}
#===================================
# GET /_template/{name}
sub index_template {
#===================================
    my $self = shift;
    return $self->_do_action(
        'index_template',
        {   method => 'GET',
            cmd    => CMD_NAME,
            prefix => '_template',
        },
        @_
    );
}
#===================================
# POST /{index}/_flush
sub flush_index {
#===================================
    my $self = shift;
    return $self->_do_action(
        'flush_index',
        {   method  => 'POST',
            cmd     => CMD_index,
            postfix => '_flush',
            qs      => {
                refresh        => [ 'boolean', 1 ],
                full           => [ 'boolean', 1 ],
                ignore_indices => IGNORE_INDICES,
            },
        },
        @_
    );
}
#===================================
# POST /{index}/_refresh
sub refresh_index {
#===================================
    my $self = shift;
    return $self->_do_action(
        'refresh_index',
        {   method  => 'POST',
            cmd     => CMD_index,
            postfix => '_refresh',
            qs      => { ignore_indices => IGNORE_INDICES, }
        },
        @_
    );
}
#===================================
# POST /{index}/_optimize — merge index segments.
sub optimize_index {
#===================================
    my $self = shift;
    return $self->_do_action(
        'optimize_index',
        {   method  => 'POST',
            cmd     => CMD_index,
            postfix => '_optimize',
            qs      => {
                only_deletes =>
                    [ 'boolean', [ only_expunge_deletes => 'true' ] ],
                max_num_segments => ['int'],
                refresh          => [ 'boolean', undef, 0 ],
                flush            => [ 'boolean', undef, 0 ],
                wait_for_merge   => [ 'boolean', undef, 0 ],
                ignore_indices   => IGNORE_INDICES,
            },
        },
        @_
    );
}
#===================================
# POST /{index}/_gateway/snapshot
sub snapshot_index {
#===================================
    my $self = shift;
    return $self->_do_action(
        'snapshot_index',
        {   method  => 'POST',
            cmd     => CMD_index,
            postfix => '_gateway/snapshot',
            qs      => { ignore_indices => IGNORE_INDICES, }
        },
        @_
    );
}
#===================================
# POST /{index}/_gateway/snapshot (same endpoint as snapshot_index,
# without the ignore_indices option)
sub gateway_snapshot {
#===================================
    my $self = shift;
    return $self->_do_action(
        'gateway_snapshot',
        {   method  => 'POST',
            cmd     => CMD_index,
            postfix => '_gateway/snapshot'
        },
        @_
    );
}
#===================================
# PUT /{index}/{type}/_mapping — register or update a type mapping.
# The mapping may be passed either as a single 'mapping' param (current
# style) or as separate top-level fields (deprecated style); the body
# is always sent to the server wrapped as { $type => $mapping }.
sub put_mapping {
#===================================
    my ( $self, $params ) = parse_params(@_);
    my %defn = (
        data       => { mapping => 'mapping' },
        # deprecated: mapping fields passed directly as top-level params
        deprecated => {
            dynamic           => ['dynamic'],
            dynamic_templates => ['dynamic_templates'],
            properties        => ['properties'],
            _all              => ['_all'],
            _analyzer         => ['_analyzer'],
            _boost            => ['_boost'],
            _id               => ['_id'],
            _index            => ['_index'],
            _meta             => ['_meta'],
            _parent           => ['_parent'],
            _routing          => ['_routing'],
            _source           => ['_source'],
        },
    );
    # If no 'mapping' param was given but deprecated-style fields were,
    # make 'mapping' optional (an undef source is skipped by _build_data)
    # so the old calling convention still works.
    $defn{deprecated}{mapping} = undef
        if !$params->{mapping} && grep { exists $params->{$_} }
        keys %{ $defn{deprecated} };
    # capture the type BEFORE _do_action consumes it from $params
    my $type = $params->{type} || $self->{_default}{type};
    $self->_do_action(
        'put_mapping',
        { method  => 'PUT',
            cmd     => CMD_index_TYPE,
            postfix => '_mapping',
            qs      => { ignore_conflicts => [ 'boolean', 1 ] },
            %defn,
            fixup => sub {
                my $args = $_[1];
                # deprecated style leaves fields directly in the body
                my $mapping = $args->{body}{mapping} || $args->{body};
                $args->{body} = { $type => $mapping };
            },
        },
        $params
    );
}
#===================================
# DELETE /{indices}/{type} — remove a type mapping (and its docs).
sub delete_mapping {
#===================================
    my ( $self, $params ) = parse_params(@_);
    return $self->_do_action(
        'delete_mapping',
        {   method => 'DELETE',
            cmd    => CMD_INDICES_TYPE,
            qs     => { ignore_missing => [ 'boolean', 1 ], }
        },
        $params
    );
}
#===================================
# GET /{index}/{type}/_mapping
sub mapping {
#===================================
    my ( $self, $params ) = parse_params(@_);
    return $self->_do_action(
        'mapping',
        {   method  => 'GET',
            cmd     => CMD_index_type,
            postfix => '_mapping',
            qs      => { ignore_missing => [ 'boolean', 1 ], }
        },
        $params
    );
}
#===================================
# HEAD /{index}/{types} — returns { ok => 1 } if the type exists.
sub type_exists {
#===================================
    my $self = shift;
    return $self->_do_action(
        'type_exists',
        {   method       => 'HEAD',
            cmd          => CMD_index_types,
            qs           => { ignore_indices => IGNORE_INDICES, },
            fixup        => sub { $_[1]{qs}{ignore_missing} = 1 },
            post_process => sub { return $_[0] ? { ok => 1 } : undef },
        },
        @_
    );
}
#===================================
# POST /{index}/_cache/clear
sub clear_cache {
#===================================
    my $self = shift;
    return $self->_do_action(
        'clear_cache',
        {   method  => 'POST',
            cmd     => CMD_index,
            postfix => '_cache/clear',
            qs      => {
                id             => [ 'boolean', 1 ],
                filter         => [ 'boolean', 1 ],
                field_data     => [ 'boolean', 1 ],
                bloom          => [ 'boolean', 1 ],
                fields         => ['flatten'],
                ignore_indices => IGNORE_INDICES,
            }
        },
        @_
    );
}
#===================================
# GET /{index}/_settings
sub index_settings {
#===================================
    my ( $self, $params ) = parse_params(@_);
    return $self->_do_action(
        'index_settings',
        {   method  => 'GET',
            cmd     => CMD_index,
            postfix => '_settings'
        },
        $params
    );
}
#===================================
# PUT /{index}/_settings — body is sent as { index => {...settings...} }.
sub update_index_settings {
#===================================
    my ( $self, $params ) = parse_params(@_);
    return $self->_do_action(
        'update_index_settings',
        {   method  => 'PUT',
            cmd     => CMD_index,
            postfix => '_settings',
            data    => { index => 'settings' }
        },
        $params
    );
}
##################################
## RIVER MANAGEMENT
##################################
#===================================
# PUT /_river/{river}/_meta — register a river; the type-specific
# config is passed under a param named after the river type itself.
sub create_river {
#===================================
    my ( $self, $params ) = parse_params(@_);
    my $type = $params->{type}
        or throw( 'Param', 'No river type specified', $params );
    return $self->_do_action(
        'create_river',
        {   method  => 'PUT',
            prefix  => '_river',
            cmd     => CMD_RIVER,
            postfix => '_meta',
            data => { type => 'type', index => ['index'], $type => [$type] },
        },
        $params
    );
}
#===================================
# GET /_river/{river}/_meta
sub get_river {
#===================================
    my ( $self, $params ) = parse_params(@_);
    return $self->_do_action(
        'get_river',
        {   method  => 'GET',
            prefix  => '_river',
            cmd     => CMD_RIVER,
            postfix => '_meta',
            qs      => { ignore_missing => [ 'boolean', 1 ] }
        },
        $params
    );
}
#===================================
# DELETE /_river/{river}
sub delete_river {
#===================================
    my ( $self, $params ) = parse_params(@_);
    return $self->_do_action(
        'delete_river',
        {   method => 'DELETE',
            prefix => '_river',
            cmd    => CMD_RIVER,
        },
        $params
    );
}
#===================================
# GET /_river/{river}/_status
sub river_status {
#===================================
    my ( $self, $params ) = parse_params(@_);
    return $self->_do_action(
        'river_status',
        {   method  => 'GET',
            prefix  => '_river',
            cmd     => CMD_RIVER,
            postfix => '_status',
            qs      => { ignore_missing => [ 'boolean', 1 ] }
        },
        $params
    );
}
##################################
## CLUSTER MANAGEMENT
##################################
#===================================
# GET /_cluster/state
sub cluster_state {
#===================================
    my $self = shift;
    return $self->_do_action(
        'cluster_state',
        {   prefix => '_cluster/state',
            qs     => {
                filter_blocks        => [ 'boolean', 1 ],
                filter_nodes         => [ 'boolean', 1 ],
                filter_metadata      => [ 'boolean', 1 ],
                filter_routing_table => [ 'boolean', 1 ],
                filter_indices       => ['flatten'],
            }
        },
        @_
    );
}
#===================================
# GET / — extract just the {version} section of the server's root
# response. Takes no parameters.
sub current_server_version {
#===================================
    my $self = shift;
    return $self->_do_action(
        'current_server_version',
        {   cmd          => CMD_NONE,
            prefix       => '',
            post_process => sub { shift->{version} },
        }
    );
}
#===================================
# GET /_cluster/nodes/{nodes} — node info.
sub nodes {
#===================================
    my $self = shift;
    return $self->_do_action(
        'nodes',
        {   prefix => '_cluster/nodes',
            cmd    => CMD_nodes,
            qs     => {
                settings    => [ 'boolean', 1 ],
                http        => [ 'boolean', 1 ],
                jvm         => [ 'boolean', 1 ],
                network     => [ 'boolean', 1 ],
                os          => [ 'boolean', 1 ],
                process     => [ 'boolean', 1 ],
                thread_pool => [ 'boolean', 1 ],
                transport   => [ 'boolean', 1 ],
            },
        },
        @_
    );
}
#===================================
# GET /_cluster/nodes/{nodes}/stats — per-node statistics.
# FIX: the action label was 'nodes' (copy-pasted from nodes()), which
# made usage/error messages report the wrong method name.
sub nodes_stats {
#===================================
    my $self = shift;
    return $self->_do_action(
        'nodes_stats',
        {   prefix  => '_cluster/nodes',
            postfix => 'stats',
            cmd     => CMD_nodes,
            qs      => {
                indices     => [ 'boolean', 1, 0 ],
                clear       => [ 'boolean', 1 ],
                all         => [ 'boolean', 1 ],
                fs          => [ 'boolean', 1 ],
                http        => [ 'boolean', 1 ],
                jvm         => [ 'boolean', 1 ],
                network     => [ 'boolean', 1 ],
                os          => [ 'boolean', 1 ],
                process     => [ 'boolean', 1 ],
                thread_pool => [ 'boolean', 1 ],
                transport   => [ 'boolean', 1 ],
            },
        },
        @_
    );
}
#===================================
# POST /_cluster/nodes/{nodes}/_shutdown
sub shutdown {
#===================================
    my $self = shift;
    return $self->_do_action(
        'shutdown',
        {   method  => 'POST',
            prefix  => '_cluster/nodes',
            cmd     => CMD_nodes,
            postfix => '_shutdown',
            qs      => { delay => ['duration'] }
        },
        @_
    );
}
#===================================
# POST /_cluster/nodes/{nodes}/_restart
# FIX: the action label was 'shutdown' (copy-pasted from shutdown()),
# which made usage/error messages report the wrong method name.
sub restart {
#===================================
    my $self = shift;
    return $self->_do_action(
        'restart',
        {   method  => 'POST',
            prefix  => '_cluster/nodes',
            cmd     => CMD_nodes,
            postfix => '_restart',
            qs      => { delay => ['duration'] }
        },
        @_
    );
}
#===================================
# GET /_cluster/health/{index}
sub cluster_health {
#===================================
    my $self = shift;
    return $self->_do_action(
        'cluster_health',
        {   prefix => '_cluster/health',
            cmd    => CMD_index,
            qs     => {
                level           => [ 'enum', [qw(cluster indices shards)] ],
                wait_for_status => [ 'enum', [qw(green yellow red)] ],
                wait_for_relocating_shards => ['int'],
                wait_for_nodes             => ['string'],
                timeout                    => ['duration']
            }
        },
        @_
    );
}
#===================================
# GET /_cluster/settings
sub cluster_settings {
#===================================
    my ( $self, $params ) = parse_params(@_);
    return $self->_do_action(
        'cluster_settings',
        {   method  => 'GET',
            cmd     => CMD_NONE,
            postfix => '_cluster/settings'
        },
        $params
    );
}
#===================================
# PUT /_cluster/settings — update persistent and/or transient settings.
sub update_cluster_settings {
#===================================
    my ( $self, $params ) = parse_params(@_);
    return $self->_do_action(
        'update_cluster_settings',
        {   method  => 'PUT',
            cmd     => CMD_NONE,
            postfix => '_cluster/settings',
            data    => {
                persistent => ['persistent'],
                transient  => ['transient']
            }
        },
        $params
    );
}
#===================================
# POST /_cluster/reroute — apply shard allocation commands.
# A single command hashref is promoted to a one-element list.
sub cluster_reroute {
#===================================
    my ( $self, $params ) = parse_params(@_);
    my $commands = $params->{commands};
    $params->{commands} = [$commands]
        if $commands and ref($commands) ne 'ARRAY';
    return $self->_do_action(
        'cluster_reroute',
        {   prefix => '_cluster/reroute',
            cmd    => [],
            method => 'POST',
            data   => { commands => ['commands'] },
            qs     => { dry_run => [ 'boolean', 1 ], },
        },
        $params
    );
}
##################################
## FLAGS
##################################
#===================================
# Getter/setter for the 'case=camelCase' base query-string flag.
# Returns 1 if camelCase responses are enabled, 0 otherwise.
sub camel_case {
#===================================
    my $self = shift;
    if (@_) {
        my $enable = shift;
        if ($enable) {
            $self->{_base_qs}{case} = 'camelCase';
        }
        else {
            delete $self->{_base_qs}{case};
        }
    }
    return $self->{_base_qs}{case} ? 1 : 0;
}
#===================================
# Getter/setter for the 'error_trace=true' base query-string flag.
# Returns 1 if server-side error traces are enabled, 0 otherwise.
sub error_trace {
#===================================
    my $self = shift;
    if (@_) {
        my $enable = shift;
        if ($enable) {
            $self->{_base_qs}{error_trace} = 'true';
        }
        else {
            delete $self->{_base_qs}{error_trace};
        }
    }
    return $self->{_base_qs}{error_trace} ? 1 : 0;
}
##################################
## INTERNAL
##################################
#===================================
# Thin alias delegating to _doc_action (defined earlier in this file).
# NOTE(review): the name suggests it should perhaps call _do_action —
# confirm against the definition of _doc_action before changing.
sub parse_request { shift->_doc_action(@_) }
#===================================
#===================================
# Core request dispatcher. Given an action name, an action definition
# (cmd/qs/data/fixup/post_process...) and caller params, builds the
# request path, query string and body, then performs the request.
# Any build/validation error is wrapped in a Param exception that
# includes a usage message for the action.
sub _do_action {
#===================================
    my $self            = shift;
    my $action          = shift || '';
    my $defn            = shift || {};
    # shallow-copy the caller's params so we can destructively consume them
    my $original_params = ref $_[0] eq 'HASH' ? { %{ shift() } } : {@_};
    my $error;
    my $params = {%$original_params};
    my %args = ( method => $defn->{method} || 'GET' );
    $args{as_json} = delete $params->{as_json};
    eval {
        # _build_* helpers delete the params they consume from $params
        $args{path}
            = $self->_build_cmd( $params, @{$defn}{qw(prefix cmd postfix)} );
        $args{qs} = $self->_build_qs( $params, $defn->{qs} );
        $args{body}
            = $self->_build_data( $params, @{$defn}{ 'data', 'deprecated' } );
        # ignore_missing is implemented as "treat HTTP 404 as success"
        $args{ignore} = 404 if delete $args{qs}{ignore_missing};
        if ( my $fixup = $defn->{fixup} ) {
            $fixup->( $self, \%args );
        }
        # anything left over was not recognised by this action
        die "Unknown parameters: " . join( ', ', keys %$params ) . "\n"
            if keys %$params;
        1;
    } or $error = $@ || 'Unknown error';
    $args{post_process} = $defn->{post_process};
    if ($error) {
        # already-structured exceptions propagate untouched
        die $error if ref $error;
        throw(
            'Param',
            $error . $self->_usage( $action, $defn ),
            { params => $original_params }
        );
    }
    # a fixup may set 'skip' to short-circuit without hitting the server
    if ( my $skip = $args{skip} ) {
        return $self->_skip_request( $args{as_json}, $skip );
    }
    $args{serialize} ||= 'std';
    $args{mime_type} = 'application/json';
    $self->request( \%args );
}
#===================================
# Return a short-circuited result directly, JSON-encoding it when the
# caller asked for an as_json response.
sub _skip_request {
#===================================
    my ( $self, $as_json, $result ) = @_;
    return $result unless $as_json;
    return $self->JSON->encode($result);
}
#===================================
# Build a human-readable usage message for an action, listing its
# path (cmd) params, body (data) params and query-string params.
sub _usage {
#===================================
    my $self   = shift;
    my $action = shift;
    my $defn   = shift;
    my $usage  = "Usage for '$action()':\n";
    my @cmd    = @{ $defn->{cmd} || [] };
    # cmd is a flat list of (name => type) pairs
    while ( my $key = shift @cmd ) {
        my $type = shift @cmd;
        my $arg_format
            = $type == ONE_REQ ? "\$$key"
            : $type == ONE_OPT ? "\$$key"
            :                    "\$$key | [\$${key}_1,\$${key}_n]";
        my $required
            = ( $type == ONE_REQ or $type == MULTI_REQ )
            ? 'required'
            : 'optional';
        $usage .= sprintf( " - %-26s => %-45s # %s\n",
            $key, $arg_format, $required );
    }
    # FIX: body params are declared under $defn->{data} (every action
    # definition in this file uses the 'data' key, and _do_action reads
    # @{$defn}{'data','deprecated'}); this previously read the
    # non-existent $defn->{body}, so body params were never listed.
    if ( my $data = $defn->{data} ) {
        # an arrayref source means the param is optional
        my @keys = sort { $a->[0] cmp $b->[0] }
            map { ref $_ ? [ $_->[0], 'optional' ] : [ $_, 'required' ] }
            values %$data;
        for (@keys) {
            $usage .= sprintf(
                " - %-26s => %-45s # %s\n",
                $_->[0], '{' . $_->[0] . '}',
                $_->[1]
            );
        }
    }
    if ( my $qs = $defn->{qs} ) {
        for ( sort keys %$qs ) {
            my $arg_format = $QS_Format{ $qs->{$_}[0] };
            my @extra;
            $arg_format = $arg_format->( $_, $qs->{$_} )
                if ref $arg_format;
            # wrap over-long alternatives onto continuation lines
            if ( length($arg_format) > 45 ) {
                ( $arg_format, @extra ) = split / [|] /, $arg_format;
            }
            $usage .= sprintf( " - %-26s => %-45s # optional\n", $_,
                $arg_format );
            $usage .= ( ' ' x 34 ) . " | $_\n" for @extra;
        }
    }
    return $usage;
}
#===================================
# Build the request query-string hash: start from the client-wide base
# flags, then run each supplied param through its declared formatter.
# Consumes (deletes) the params it recognises.
sub _build_qs {
#===================================
    my $self   = shift;
    my $params = shift;
    my $defn   = shift || {};
    my %qs     = %{ $self->{_base_qs} };
    for my $key ( keys %$defn ) {
        next unless exists $params->{$key};
        my ( $format_name, @format_args ) = @{ $defn->{$key} || [] };
        $format_name ||= '';
        my $formatter = $QS_Formatter{$format_name}
            or die "Unknown QS formatter '$format_name'";
        # a formatter may return false to suppress the pair entirely
        my $pair = $formatter->( $key, delete $params->{$key}, @format_args )
            or next;
        $qs{ $pair->[0] } = $pair->[1];
    }
    return \%qs;
}
#===================================
# Build the request body from the action's data definition, consuming
# (deleting) the params it recognises. An arrayref source lists
# optional aliases (first defined one wins); a plain string source is
# a required param.
sub _build_data {
#===================================
    my $self   = shift;
    my $params = shift;
    my $defn   = shift or return;
    if ( my $deprecated = shift ) {
        $defn = { %$defn, %$deprecated };
    }
    my %data;
KEY: while ( my ( $key, $source ) = each %$defn ) {
        # an undef source marks a param as explicitly optional/ignored
        next unless defined $source;
        if ( ref $source eq 'ARRAY' ) {
            foreach (@$source) {
                my $val = delete $params->{$_};
                next unless defined $val;
                $data{$key} = $val;
                next KEY;
            }
        }
        else {
            # FIX: previously `$data{$key} = delete $params->{$source}
            # or die ...`, which wrongly died when a required param held
            # a defined-but-false value such as 0 or ''.
            my $val = delete $params->{$source};
            die "Missing required param '$source'\n"
                unless defined $val;
            $data{$key} = $val;
        }
    }
    return \%data;
}
#===================================
# Build the request path from (prefix, cmd spec, postfix). The cmd spec
# is a flat list of (param_name => type) pairs; values come from the
# caller's params (consumed) or from per-client defaults. Types are the
# ONE_*/MULTI_* constants defined elsewhere in this file — numerically
# ordered so that $type <= ONE_ALL selects the single-value kinds
# (TODO confirm against the constant definitions).
sub _build_cmd {
#===================================
    my $self   = shift;
    my $params = shift;
    my ( $prefix, $defn, $postfix ) = @_;
    my @defn = ( @{ $defn || [] } );
    my @cmd;
    while (@defn) {
        my $key  = shift @defn;
        my $type = shift @defn;
        # explicit param wins; otherwise fall back to the client default
        my $val
            = exists $params->{$key}
            ? delete $params->{$key}
            : $self->{_default}{$key};
        $val = '' unless defined $val;
        if ( ref $val eq 'ARRAY' ) {
            die "'$key' must be a single value\n"
                if $type <= ONE_ALL;
            # multi-value path parts are joined as CSV
            $val = join ',', @$val;
        }
        unless ( length $val ) {
            next if $type == ONE_OPT || $type == MULTI_BLANK;
            die "Param '$key' is required\n"
                if $type == ONE_REQ || $type == MULTI_REQ;
            # remaining multi types default to the '_all' wildcard
            $val = '_all';
        }
        push @cmd, uri_escape($val);
    }
    # leading '' produces the leading '/' in the joined path
    return join '/', '', grep {defined} ( $prefix, @cmd, $postfix );
}
1;
=pod
=encoding UTF-8
=head1 NAME
Search::Elasticsearch::Client::Compat - The client compatibility layer for migrating from ElasticSearch.pm
=head1 VERSION
version 0.10
=head1 SYNOPSIS
use ElasticSearch::Compat;
my $es = ElasticSearch::Compat->new(
servers => 'search.foo.com:9200', # default '127.0.0.1:9200'
transport => 'http' # default 'http'
| 'httptiny',
trace_calls => 'log_file',
no_refresh => 0 | 1,
);
$es->index(
index => 'twitter',
type => 'tweet',
id => 1,
data => {
user => 'kimchy',
post_date => '2009-11-15T14:12:12',
message => 'trying out Elastic Search'
}
);
$data = $es->get(
index => 'twitter',
type => 'tweet',
id => 1
);
# native elasticsearch query language
$results = $es->search(
index => 'twitter',
type => 'tweet',
query => {
text => { user => 'kimchy' }
}
);
# ElasticSearch::SearchBuilder Perlish query language
$results = $es->search(
index => 'twitter',
type => 'tweet',
queryb => {
message => 'Perl API',
user => 'kimchy',
post_date => {
'>' => '2010-01-01',
'<=' => '2011-01-01',
}
}
);
$dodgy_qs = "foo AND AND bar";
$results = $es->search(
index => 'twitter',
type => 'tweet',
query => {
query_string => {
query => $es->query_parser->filter($dodgy_qs)
},
}
);
=head1 DESCRIPTION
See L<Search::Elasticsearch::Compat> for an explanation of why this module exists.
=head1 CALLING CONVENTIONS
I've tried to follow the same terminology as used in the Elasticsearch docs
when naming methods, so it should be easy to tie the two together.
Some methods require a specific C<index> and a specific C<type>, while others
allow a list of indices or types, or allow you to specify all indices or
types. I distinguish between them as follows:
$es->method( index => multi, type => single, ...)
C<single> values must be a scalar, and are required parameters
type => 'tweet'
C<multi> values can be:
index => 'twitter' # specific index
index => ['twitter','user'] # list of indices
index => undef # (or not specified) = all indices
C<multi_req> values work like C<multi> values, but at least one value is
required, so:
index => 'twitter' # specific index
index => ['twitter','user'] # list of indices
index => '_all' # all indices
index => [] # error
index => undef # error
Also, see L</"use_index()/use_type()">.
=head2 as_json
If you pass C<< as_json => 1 >> to any request to the Elasticsearch server,
it will return the raw UTF8-decoded JSON response, rather than a Perl
datastructure.
=head1 RETURN VALUES AND EXCEPTIONS
Methods that query the Elasticsearch cluster return the raw data structure
that the cluster returns. This may change in the future, but as these
data structures are still in flux, I thought it safer not to try to interpret.
Anything that is known to be an error throws an exception, eg trying to delete
a non-existent index.
=head1 INTEGRATION WITH ElasticSearch::SearchBuilder
L<ElasticSearch::SearchBuilder> provides a concise Perlish
L<SQL::Abstract>-style query language, which gets translated into the native
L<Query DSL|http://www.elasticsearch.org/guide/reference/query-dsl> that
Elasticsearch uses.
For instance:
{
content => 'search keywords',
-filter => {
tags => ['perl','ruby'],
date => {
'>' => '2010-01-01',
'<=' => '2011-01-01'
},
}
}
Would be translated to:
{ query => {
filtered => {
query => { text => { content => "search keywords" } },
filter => {
and => [
{ terms => { tags => ["perl", "ruby"] } },
{ numeric_range => {
date => {
gt => "2010-01-01",
lte => "2011-01-01"
}}},
],
}
}}}
All you have to do to start using L<ElasticSearch::SearchBuilder> is to change
your C<query> or C<filter> parameter to C<queryb> or C<filterb> (where the
extra C<b> stands for C<builder>):
$es->search(
queryb => { content => 'keywords' }
)
If you want to see what your SearchBuilder-style query is being converted into,
you can either use L</"trace_calls()"> or access it directly with:
$native_query = $es->builder->query( $query )
$native_filter = $es->builder->filter( $filter )
See the L<ElasticSearch::SearchBuilder> docs for more information about
the syntax.
=head1 METHODS
=head2 Document-indexing methods
=head3 index()
$result = $es->index(
index => single,
type => single,
id => $document_id, # optional, otherwise auto-generated
data => {
key => value,
...
},
# optional
consistency => 'quorum' | 'one' | 'all',
create => 0 | 1,
parent => $parent,
percolate => $percolate,
refresh => 0 | 1,
replication => 'sync' | 'async',
routing => $routing,
timeout => eg '1m' or '10s'
version => int,
version_type => 'internal' | 'external',
);
eg:
$result = $es->index(
index => 'twitter',
type => 'tweet',
id => 1,
data => {
user => 'kimchy',
post_date => '2009-11-15T14:12:12',
message => 'trying out Elastic Search'
},
);
Used to add a document to a specific C<index> as a specific C<type> with
a specific C<id>. If the C<index/type/id> combination already exists,
then that document is updated, otherwise it is created.
Note:
=over
=item *
If the C<id> is not specified, then Elasticsearch autogenerates a unique
ID and a new document is always created.
=item *
If C<version> is passed, and the current version in Elasticsearch is
different, then a C<Conflict> error will be thrown.
=item *
C<data> can also be a raw JSON encoded string (but ensure that it is correctly
encoded, otherwise you will see errors when trying to retrieve it from Elasticsearch).
$es->index(
index => 'foo',
type => 'bar',
id => 1,
data => '{"foo":"bar"}'
);
=item *
C<timeout> for all CRUD methods and L</"search()"> is a query timeout,
specifying the amount of time Elasticsearch will spend (roughly) processing a
query. Units can be concatenated with the integer value, e.g., C<500ms> or
C<1s>.
See also: L<http://www.elasticsearch.org/guide/reference/api/search/request-body.html>
Note: this is distinct from the transport timeout, see L</"timeout()">.
=back
See also: L<http://www.elasticsearch.org/guide/reference/api/index_.html>,
L</"bulk()"> and L</"put_mapping()">
=head3 set()
C<set()> is a synonym for L</"index()">
=head3 create()
$result = $es->create(
index => single,
type => single,
id => $document_id, # optional, otherwise auto-generated
data => {
key => value,
...
},
# optional
consistency => 'quorum' | 'one' | 'all',
parent => $parent,
percolate => $percolate,
refresh => 0 | 1,
replication => 'sync' | 'async',
routing => $routing,
timeout => eg '1m' or '10s',
version => int,
version_type => 'internal' | 'external',
);
eg:
$result = $es->create(
index => 'twitter',
type => 'tweet',
id => 1,
data => {
user => 'kimchy',
post_date => '2009-11-15T14:12:12',
message => 'trying out Elastic Search'
},
);
Used to add a NEW document to a specific C<index> as a specific C<type> with
a specific C<id>. If the C<index/type/id> combination already exists,
then a C<Conflict> error is thrown.
If the C<id> is not specified, then Elasticsearch autogenerates a unique
ID.
If you pass a C<version> parameter to C<create>, then it must be C<0> unless
you also set C<version_type> to C<external>.
See also: L</"index()">
=head3 update()
$result = $es->update(
index => single,
type => single,
id => single,
# required
script => $script,
| doc => $doc
# optional
params => { params },
upsert => { new_doc },
consistency => 'quorum' | 'one' | 'all',
fields => ['_source'],
ignore_missing => 0 | 1,
parent => $parent,
percolate => $percolate,
retry_on_conflict => 2,
routing => $routing,
timeout => '10s',
replication => 'sync' | 'async'
)
The C<update()> method accepts a C<script> to update, or a C<doc> to be merged
with, an existing doc, without having to retrieve and reindex the doc yourself,
eg:
$es->update(
index => 'test',
type => 'foo',
id => 123,
script => 'ctx._source.tags+=[tag]',
params => { tag => 'red' }
);
You can also pass a new doc which will be inserted if the doc does not
already exist, via the C<upsert> parameter.
See L<http://www.elasticsearch.org/guide/reference/api/update.html> for more.
=head3 get()
$result = $es->get(
index => single,
type => single or blank,
id => single,
# optional
fields => 'field' or ['field1',...]
preference => '_local' | '_primary' | '_primary_first' | $string,
refresh => 0 | 1,
routing => $routing,
parent => $parent,
ignore_missing => 0 | 1,
);
Returns the document stored at C<index/type/id> or throws an exception if
the document doesn't exist.
Example:
$es->get( index => 'twitter', type => 'tweet', id => 1)
Returns:
{
_id => 1,
_index => "twitter",
_source => {
message => "trying out Elastic Search",
post_date=> "2009-11-15T14:12:12",
user => "kimchy",
},
_type => "tweet",
}
By default the C<_source> field is returned. Use C<fields> to specify
a list of (stored) fields to return instead, or C<[]> to return no fields.
Pass a true value for C<refresh> to force an index refresh before performing
the get.
If the requested C<index>, C<type> or C<id> is not found, then a C<Missing>
exception is thrown, unless C<ignore_missing> is true.
See also: L</"bulk()">, L<http://www.elasticsearch.org/guide/reference/api/get.html>
=head3 exists()
$bool = $es->exists(
index => single,
type => single,
id => single,
preference => '_local' | '_primary' | '_primary_first' | $string,
refresh => 0 | 1,
routing => $routing,
parent => $parent,
);
Returns true or false depending on whether the doc exists.
=head3 mget()
$docs = $es->mget(
index => single,
type => single or blank,
ids => \@ids,
fields => ['field_1','field_2'],
filter_missing => 0 | 1
);
$docs = $es->mget(
index => single or blank,
type => single or blank,
docs => \@doc_info,
fields => ['field_1','field_2'],
filter_missing => 0 | 1
);
C<mget> or "multi-get" returns multiple documents at once. There are two
ways to call C<mget()>:
If all docs come from the same index (and potentially the same type):
$docs = $es->mget(
index => 'myindex',
type => 'mytype', # optional
ids => [1,2,3],
)
Alternatively you can specify each doc separately:
$docs = $es->mget(
docs => [
{ _index => 'index_1', _type => 'type_1', _id => 1 },
{ _index => 'index_2', _type => 'type_2', _id => 2 },
]
)
Or:
$docs = $es->mget(
index => 'myindex', # default index
type => 'mytype', # default type
fields => ['field_1','field_2'], # default fields
docs => [
{ _id => 1 }, # uses defaults
{ _index => 'index_2',
_type => 'type_2',
_id => 2,
fields => ['field_2','field_3'],
},
]
);
If C<$docs> or C<$ids> is an empty array ref, then C<mget()> will just return
an empty array ref.
Returns an array ref containing all of the documents requested. If a document
is not found, then its entry will include C<< {exists => 0} >>. If you would
rather filter these missing docs, pass C<< filter_missing => 1 >>.
See L<http://www.elasticsearch.org/guide/reference/api/multi-get.html>
=head3 delete()
$result = $es->delete(
index => single,
type => single,
id => single,
# optional
consistency => 'quorum' | 'one' | 'all'
ignore_missing => 0 | 1
refresh => 0 | 1
parent => $parent,
routing => $routing,
replication => 'sync' | 'async'
version => int
);
Deletes the document stored at C<index/type/id> or throws an C<Missing>
exception if the document doesn't exist and C<ignore_missing> is not true.
If you specify a C<version> and the current version of the document is
different (or if the document is not found), a C<Conflict> error will
be thrown.
If C<refresh> is true, an index refresh will be forced after the delete has
completed.
Example:
$es->delete( index => 'twitter', type => 'tweet', id => 1);
See also: L</"bulk()">,
L<http://www.elasticsearch.org/guide/reference/api/delete.html>
=head3 bulk()
$result = $es->bulk( [ actions ] )
$result = $es->bulk(
actions => [ actions ] # required
index => 'foo', # optional
type => 'bar', # optional
consistency => 'quorum' | 'one' | 'all' # optional
refresh => 0 | 1, # optional
replication => 'sync' | 'async', # optional
on_conflict => sub {...} | 'IGNORE' # optional
on_error => sub {...} | 'IGNORE' # optional
);
Perform multiple C<index>, C<create> and C<delete> actions in a single request.
This is about 10x as fast as performing each action in a separate request.
Each C<action> is a HASH ref with a key indicating the action type (C<index>,
C<create> or C<delete>), whose value is another HASH ref containing the
associated metadata.
The C<index> and C<type> parameters can be specified for each individual action,
or inherited from the top level C<index> and C<type> parameters, as shown
above.
NOTE: C<bulk()> also accepts the C<_index>, C<_type>, C<_id>, C<_source>,
C<_parent>, C<_routing> and C<_version> parameters so that you can pass search
results directly to C<bulk()>.
=head4 C<index> and C<create> actions
{ index => {
index => 'foo',
type => 'bar',
id => 123,
data => { text => 'foo bar'},
# optional
routing => $routing,
parent => $parent,
percolate => $percolate,
timestamp => $timestamp,
ttl => $ttl,
version => $version,
version_type => 'internal' | 'external'
}}
{ create => { ... same options as for 'index' }}
The C<index> and C<type> parameters, if not specified, are inherited from
the top level bulk request.
C<data> can also be a raw JSON encoded string (but ensure that it is correctly
encoded, otherwise you see errors when trying to retrieve it from Elasticsearch).
actions => [{
index => {
index => 'foo',
type => 'bar',
id => 1,
data => '{"foo":"bar"}'
}
}]
=head4 C<delete> action
{ delete => {
index => 'foo',
type => 'bar',
id => 123,
# optional
routing => $routing,
parent => $parent,
version => $version,
version_type => 'internal' | 'external'
}}
The C<index> and C<type> parameters, if not specified, are inherited from
the top level bulk request.
=head4 Error handlers
The C<on_conflict> and C<on_error> parameters accept either a coderef or the
string C<'IGNORE'>. Normally, any errors are returned under the C<errors>
key (see L</Return values>).
The logic works as follows:
=over
=item *
If the error is a versioning conflict error, or if you try to C<create> a doc
whose ID already exists, and there is an C<on_conflict>
handler, then call the handler and move on to the next document
=item *
If the error is still unhandled, and we have an C<on_error> handler, then call
it and move on to the next document.
=item *
If no handler exists, then add the error to the C<@errors> array which is
returned by L</bulk()>
=back
Setting C<on_conflict> or C<on_error> to C<'IGNORE'> is the equivalent
of passing an empty no-op handler.
The handler callbacks are called as:
$handler->( $action, $document, $error, $req_no );
For instance:
=over
=item C<$action>
"index"
=item C<$document>
{ id => 1, data => { count => "foo" }}
=item C<$error>
"MapperParsingException[Failed to parse [count]]; ... etc ... "
=item C<$req_no>
0
=back
The C<$req_no> is the array index of the current C<$action> from the original
array of C<@actions>.
=head4 Return values
The L</"bulk()"> method returns a HASH ref containing:
{
actions => [ the list of actions you passed in ],
results => [ the result of each of the actions ],
errors => [ a list of any errors ]
}
The C<results> ARRAY ref contains the same values that would be returned
for individual C<index>/C<create>/C<delete> statements, eg:
results => [
{ create => { _id => 123, _index => "foo", _type => "bar", _version => 1 } },
{ index => { _id => 123, _index => "foo", _type => "bar", _version => 2 } },
{ delete => { _id => 123, _index => "foo", _type => "bar", _version => 3 } },
]
The C<errors> key is only present if an error has occurred and has not been handled
by an C<on_conflict> or C<on_error> handler, so you can do:
$results = $es->bulk(\@actions);
if ($results->{errors}) {
# handle errors
}
Each error element contains the C<error> message plus the C<action> that
triggered the error. Each C<result> element will also contain the error
message, eg:
$result = {
actions => [
## NOTE - num is numeric
{ index => { index => 'bar', type => 'bar', id => 123,
data => { num => 123 } } },
## NOTE - num is a string
{ index => { index => 'bar', type => 'bar', id => 123,
data => { num => 'foo bar' } } },
],
errors => [
{
action => {
index => { index => 'bar', type => 'bar', id => 123,
data => { num => 'text foo' } }
},
error => "MapperParsingException[Failed to parse [num]]; ...",
},
],
results => [
{ index => { _id => 123, _index => "bar", _type => "bar", _version => 1 }},
{ index => {
error => "MapperParsingException[Failed to parse [num]];...",
id => 123, index => "bar", type => "bar",
},
},
],
};
See L<http://www.elasticsearch.org/guide/reference/api/bulk.html> for
more details.
=head3 bulk_index(), bulk_create(), bulk_delete()
These are convenience methods which allow you to pass just the metadata, without
the C<index>, C<create> or C<delete> action for each record.
These methods accept the same parameters as the L</"bulk()"> method, except
that the C<actions> parameter is replaced by C<docs>, eg:
$result = $es->bulk_index( [ docs ] );
$result = $es->bulk_index(
docs => [ docs ], # required
index => 'foo', # optional
type => 'bar', # optional
consistency => 'quorum' | 'one' | 'all' # optional
refresh => 0 | 1, # optional
replication => 'sync' | 'async', # optional
on_conflict => sub {...} | 'IGNORE' # optional
on_error => sub {...} | 'IGNORE' # optional
);
For instance:
$es->bulk_index(
index => 'foo',
type => 'bar',
refresh => 1,
docs => [
{ id => 123, data => { text=>'foo'} },
{ id => 124, type => 'baz', data => { text=>'bar'} },
]
);
=head3 reindex()
$es->reindex(
source => $scrolled_search,
# optional
bulk_size => 1000,
dest_index => $index,
quiet => 0 | 1,
transform => sub {....},
on_conflict => sub {...} | 'IGNORE'
on_error => sub {...} | 'IGNORE'
)
C<reindex()> is a utility method which can be used for reindexing data
from one index to another (eg if the mapping has changed), or copying
data from one cluster to another.
=head4 Params
=over
=item *
C<source> is a required parameter, and should be an instance of
L<Search::Elasticsearch::Compat::ScrolledSearch>.
=item *
C<dest_index> is the name of the destination index, ie where the docs are
indexed to. If you are indexing your data from one cluster to another,
and you want to use the same index name in your destination cluster, then
you can leave this blank.
=item *
C<bulk_size> - the number of docs that will be indexed at a time. Defaults
to 1,000
=item *
Set C<quiet> to C<1> if you don't want any progress information to be
printed to C<STDOUT>
=item *
C<transform> should be a sub-ref which will be called for each doc, allowing
you to transform some element of the doc, or to skip the doc by returning
C<undef>.
=item *
See L</Error handlers> for an explanation C<on_conflict> and C<on_error>.
=back
=head4 Examples:
To copy the Elasticsearch website index locally, you could do:
my $local = Search::Elasticsearch::Compat->new(
servers => 'localhost:9200'
);
my $remote = Search::Elasticsearch::Compat->new(
servers => 'search.elasticsearch.org:80',
no_refresh => 1
);
my $source = $remote->scrolled_search(
search_type => 'scan',
scroll => '5m'
);
$local->reindex(source=>$source);
To copy one local index to another, make the title upper case,
exclude docs of type C<boring>, and to preserve the version numbers
from the original index:
my $source = $es->scrolled_search(
index => 'old_index',
search_type => 'scan',
scroll => '5m',
version => 1
);
$es->reindex(
source => $source,
dest_index => 'new_index',
transform => sub {
my $doc = shift;
return if $doc->{_type} eq 'boring';
$doc->{_source}{title} = uc( $doc->{_source}{title} );
return $doc;
}
);
B<NOTE:> If some of your docs have parent/child relationships, and you want
to preserve this relationship, then you should add this to your
scrolled search parameters: C<< fields => ['_source','_parent'] >>.
For example:
my $source = $es->scrolled_search(
index => 'old_index',
search_type => 'scan',
fields => ['_source','_parent'],
version => 1
);
$es->reindex(
source => $source,
dest_index => 'new_index',
);
See also L</"scrolled_search()">, L<Search::Elasticsearch::Compat::ScrolledSearch>,
and L</"search()">.
=head3 analyze()
$result = $es->analyze(
text => $text_to_analyze, # required
index => single, # optional
# either
field => 'type.fieldname', # requires index
analyzer => $analyzer,
tokenizer => $tokenizer,
filters => \@filters,
# other options
format => 'detailed' | 'text',
prefer_local => 1 | 0
);
The C<analyze()> method allows you to see how Elasticsearch is analyzing
the text that you pass in, eg:
$result = $es->analyze( text => 'The Man' )
$result = $es->analyze(
text => 'The Man',
analyzer => 'simple'
);
$result = $es->analyze(
text => 'The Man',
tokenizer => 'keyword',
filters => ['lowercase'],
);
$result = $es->analyze(
text => 'The Man',
index => 'my_index',
analyzer => 'my_custom_analyzer'
);
$result = $es->analyze(
text => 'The Man',
index => 'my_index',
field => 'my_type.my_field',
);
See L<http://www.elasticsearch.org/guide/reference/api/admin-indices-analyze.html> for
more.
=head2 Query methods
=head3 search()
$result = $es->search(
index => multi,
type => multi,
# optional
query => { native query },
queryb => { searchbuilder query },
filter => { native filter },
filterb => { searchbuilder filter },
explain => 1 | 0,
facets => { facets },
fields => [$field_1,$field_n],
partial_fields => { my_field => { include => 'foo.bar.*' }},
from => $start_from,
highlight => { highlight }.
ignore_indices => 'none' | 'missing',
indices_boost => { index_1 => 1.5,... },
min_score => $score,
preference => '_local' | '_primary' | '_primary_first' | $string,
routing => [$routing, ...]
script_fields => { script_fields }
search_type => 'dfs_query_then_fetch'
| 'dfs_query_and_fetch'
| 'query_then_fetch'
| 'query_and_fetch'
| 'count'
| 'scan'
size => $no_of_results
sort => ['_score',$field_1]
scroll => '5m' | '30s',
stats => ['group_1','group_2'],
track_scores => 0 | 1,
timeout => '10s'
version => 0 | 1
);
Searches for all documents matching the query, with a request-body search.
Documents can be matched against multiple indices and multiple types, eg:
$result = $es->search(
index => undef, # all
type => ['user','tweet'],
query => { term => {user => 'kimchy' }}
);
You can provide either the C<query> parameter, which uses the native
Elasticsearch Query DSL, or the C<queryb> parameter, which uses the
more concise L<ElasticSearch::SearchBuilder> query syntax.
Similarly, use C<filterb> instead of C<filter>. SearchBuilder can also be
used in facets, for instance, instead of:
$es->search(
facets => {
wow_facet => {
query => { text => { content => 'wow' }},
facet_filter => { term => {status => 'active' }},
}
}
)
You can use:
$es->search(
facets => {
wow_facet => {
queryb => { content => 'wow' }, # note the extra 'b'
facet_filterb => { status => 'active' }, # note the extra 'b'
}
}
)
See L</"INTEGRATION WITH ElasticSearch::SearchBuilder"> for more.
For all of the options that can be included in the native C<query> parameter,
see L<http://www.elasticsearch.org/guide/reference/api/search>,
L<http://www.elasticsearch.org/guide/reference/api/search/request-body.html>
and L<http://www.elasticsearch.org/guide/reference/query-dsl>
=head3 searchqs()
$result = $es->searchqs(
index => multi,
type => multi,
# optional
q => $query_string,
analyze_wildcard => 0 | 1,
analyzer => $analyzer,
default_operator => 'OR | AND ',
df => $default_field,
explain => 1 | 0,
fields => [$field_1,$field_n],
from => $start_from,
ignore_indices => 'none' | 'missing',
lenient => 0 | 1,
lowercase_expanded_terms => 0 | 1,
preference => '_local' | '_primary' | '_primary_first' | $string,
quote_analyzer => $analyzer,
quote_field_suffix => '.unstemmed',
routing => [$routing, ...]
search_type => $search_type
size => $no_of_results
sort => ['_score:asc','last_modified:desc'],
scroll => '5m' | '30s',
stats => ['group_1','group_2'],
timeout => '10s'
version => 0 | 1
);
Searches for all documents matching the C<q> query_string, with a URI request.
Documents can be matched against multiple indices and multiple types, eg:
$result = $es->searchqs(
index => undef, # all
type => ['user','tweet'],
q => 'john smith'
);
For all of the options that can be included in the C<query> parameter, see
L<http://www.elasticsearch.org/guide/reference/api/search> and
L<http://www.elasticsearch.org/guide/reference/api/search/uri-request.html>.
=head3 scroll()
$result = $es->scroll(
scroll_id => $scroll_id,
scroll => '5m' | '30s',
);
If a search has been executed with a C<scroll> parameter, then the returned
C<scroll_id> can be used like a cursor to scroll through the rest of the
results.
If a further scroll request will be issued, then the C<scroll> parameter
should be passed as well. For instance;
my $result = $es->search(
query=>{match_all=>{}},
scroll => '5m'
);
while (1) {
my $hits = $result->{hits}{hits};
last unless @$hits; # if no hits, we're finished
do_something_with($hits);
$result = $es->scroll(
scroll_id => $result->{_scroll_id},
scroll => '5m'
);
}
See L<http://www.elasticsearch.org/guide/reference/api/search/scroll.html>
=head3 scrolled_search()
C<scrolled_search()> returns a convenience iterator for scrolled
searches. It accepts the standard search parameters that would be passed
to L</"search()"> and requires a C<scroll> parameter, eg:
$scroller = $es->scrolled_search(
query => {match_all=>{}},
scroll => '5m' # keep the scroll request
# live for 5 minutes
);
See L<Search::Elasticsearch::Compat::ScrolledSearch>, L</"search()">, L</"searchqs()">
and L</"scroll()">.
=head3 count()
$result = $es->count(
index => multi,
type => multi,
# optional
routing => [$routing,...]
ignore_indices => 'none' | 'missing',
# one of:
query => { native query },
queryb => { search builder query },
);
Counts the number of documents matching the query. Documents can be matched
against multiple indices and multiple types, eg
$result = $es->count(
index => undef, # all
type => ['user','tweet'],
queryb => { user => 'kimchy' }
);
B<Note>: C<count()> supports L<ElasticSearch::SearchBuilder>-style
queries via the C<queryb> parameter. See
L</"INTEGRATION WITH ElasticSearch::SearchBuilder"> for more details.
C<query> defaults to C<< {match_all=>{}} >> unless specified.
B<DEPRECATION>: C<count()> previously took query types at the top level, eg
C<< $es->count( term=> { ... }) >>. This form still works, but is deprecated.
Instead use the C<queryb> or C<query> parameter as you would in L</"search()">.
See also L</"search()">,
L<http://www.elasticsearch.org/guide/reference/api/count.html>
and L<http://www.elasticsearch.org/guide/reference/query-dsl>
=head3 msearch()
$results = $es->msearch(
index => multi,
type => multi,
queries => \@queries | \%queries,
search_type => $search_type,
);
With L</"msearch()"> you can run multiple searches in parallel. C<queries>
can contain either an array of queries, or a hash of named queries. C<$results>
will return either an array or hash of results, depending on what you pass in.
The top-level C<index>, C<type> and C<search_type> parameters define default
values which will be used for each query, although these can be overridden in
the query parameters:
$results = $es->msearch(
index => 'my_index',
type => 'my_type',
queries => {
first => {
query => { match_all => {} } # my_index/my_type
},
second => {
index => 'other_index',
query => { match_all => {} } # other_index/my_type
},
}
)
In the above example, C<$results> would look like:
{
first => { hits => ... },
second => { hits => ... }
}
A query can contain the following options:
{
index => 'index_name' | ['index_1',...],
type => 'type_name' | ['type_1',...],
query => { native query },
queryb => { search_builder query },
filter => { native filter },
filterb => { search_builder filter },
facets => { facets },
from => 0,
size => 10,
sort => { sort },
highlight => { highlight },
fields => [ 'field1', ... ],
explain => 0 | 1,
indices_boost => { index_1 => 5, ... },
ignore_indices => 'none' | 'missing',
min_score => 2,
partial_fields => { partial fields },
preference => '_local' | '_primary' | '_primary_first' | $string,
routing => 'routing' | ['route_1',...],
script_fields => { script fields },
search_type => $search_type,
stats => 'group_1' | ['group_1','group_2'],
timeout => '30s',
track_scores => 0 | 1,
version => 0 | 1,
}
See L<http://www.elasticsearch.org/guide/reference/api/multi-search.html>.
=head3 delete_by_query()
$result = $es->delete_by_query(
index => multi,
type => multi,
# optional
consistency => 'quorum' | 'one' | 'all'
replication => 'sync' | 'async'
routing => [$routing,...]
# one of:
query => { native query },
queryb => { search builder query },
);
Deletes any documents matching the query. Documents can be matched against
multiple indices and multiple types, eg
$result = $es->delete_by_query(
index => undef, # all
type => ['user','tweet'],
queryb => {user => 'kimchy' },
);
B<Note>: C<delete_by_query()> supports L<ElasticSearch::SearchBuilder>-style
queries via the C<queryb> parameter. See
L</"INTEGRATION WITH ElasticSearch::SearchBuilder"> for more details.
B<DEPRECATION>: C<delete_by_query()> previously took query types at the top level,
eg C<< $es->delete_by_query( term=> { ... }) >>. This form still works, but is
deprecated. Instead use the C<queryb> or C<query> parameter as you would in
L</"search()">.
See also L</"search()">,
L<http://www.elasticsearch.org/guide/reference/api/delete-by-query.html>
and L<http://www.elasticsearch.org/guide/reference/query-dsl>
=head3 mlt()
# mlt == more_like_this
$results = $es->mlt(
index => single, # required
type => single, # required
id => $id, # required
# optional more-like-this params
boost_terms => float
mlt_fields => 'scalar' or ['scalar_1', 'scalar_n']
max_doc_freq => integer
max_query_terms => integer
max_word_len => integer
min_doc_freq => integer
min_term_freq => integer
min_word_len => integer
pct_terms_to_match => float
stop_words => 'scalar' or ['scalar_1', 'scalar_n']
# optional search params
explain => {explain}
facets => {facets}
fields => {fields}
filter => { native filter },
filterb => { search builder filter },
indices_boost => { index_1 => 1.5,... }
min_score => $score
routing => [$routing,...]
script_fields => { script_fields }
search_scroll => '5m' | '10s',
search_indices => ['index1','index2'],
search_from => integer,
search_size => integer,
search_type => $search_type
search_types => ['type1','type2'],
sort => {sort}
scroll => '5m' | '30s'
)
More-like-this (mlt) finds related/similar documents. It is possible to run
a search query with a C<more_like_this> clause (where you pass in the text
you're trying to match), or to use this method, which uses the text of
the document referred to by C<index/type/id>.
This gets transformed into a search query, so all of the search parameters
are also available.
Note: C<mlt()> supports L<ElasticSearch::SearchBuilder>-style filters via
the C<filterb> parameter. See L</"INTEGRATION WITH ElasticSearch::SearchBuilder">
for more details.
See L<http://www.elasticsearch.org/guide/reference/api/more-like-this.html>
and L<http://www.elasticsearch.org/guide/reference/query-dsl/mlt-query.html>
=head3 explain()
$result = $es->explain(
index => single,
type => single,
id => single,
query => { native query}
| queryb => { search builder query }
| q => $query_string,
analyze_wildcard => 1 | 0,
analyzer => $string,
default_operator => 'OR' | 'AND',
df => $default_field
fields => ['_source'],
lenient => 1 | 0,
lowercase_expanded_terms => 1 | 0,
preference => _local | _primary | _primary_first | $string,
routing => $routing
);
The C<explain()> method is very useful for debugging queries. It will run
the query on the specified document and report whether the document matches
the query or not, and why.
See L<http://www.elasticsearch.org/guide/reference/api/search/explain.html>
=head3 validate_query()
$bool = $es->validate_query(
index => multi,
type => multi,
query => { native query }
| queryb => { search builder query }
| q => $query_string
explain => 0 | 1,
ignore_indices => 'none' | 'missing',
);
Returns a hashref with C<< { valid => 1} >> if the passed in C<query>
(native ES query) C<queryb> (SearchBuilder style query) or C<q> (Lucene
query string) is valid. Otherwise C<valid> is false. Set C<explain> to C<1>
to include the explanation of why the query is invalid.
See L<http://www.elasticsearch.org/guide/reference/api/validate.html>
=head2 Index Admin methods
=head3 index_status()
$result = $es->index_status(
index => multi,
recovery => 0 | 1,
snapshot => 0 | 1,
ignore_indices => 'none' | 'missing',
);
Returns the status of all, one or many indices:
$result = $es->index_status(); #all
$result = $es->index_status( index => ['twitter','buzz'] );
$result = $es->index_status( index => 'twitter' );
Throws a C<Missing> exception if the specified indices do not exist.
See L<http://www.elasticsearch.org/guide/reference/api/admin-indices-status.html>
=head3 index_stats()
$result = $es->index_stats(
index => multi,
types => multi,
docs => 1|0,
store => 1|0,
indexing => 1|0,
get => 1|0,
all => 0|1, # returns all stats
clear => 0|1, # clears default docs,store,indexing,get,search
flush => 0|1,
merge => 0|1
refresh => 0|1,
level => 'shards',
ignore_indices => 'none' | 'missing',
);
Throws a C<Missing> exception if the specified indices do not exist.
See L<http://www.elasticsearch.org/guide/reference/api/admin-indices-stats.html>
=head3 index_segments()
$result = $es->index_segments(
index => multi,
ignore_indices => 'none' | 'missing',
);
Returns low-level Lucene segments information for the specified indices.
Throws a C<Missing> exception if the specified indices do not exist.
See L<http://www.elasticsearch.org/guide/reference/api/admin-indices-segments.html>
=head3 create_index()
$result = $es->create_index(
index => single,
# optional
settings => {...},
mappings => {...},
warmers => {...},
);
Creates a new index, optionally passing index settings and mappings, eg:
$result = $es->create_index(
index => 'twitter',
settings => {
number_of_shards => 3,
number_of_replicas => 2,
analysis => {
analyzer => {
default => {
tokenizer => 'standard',
char_filter => ['html_strip'],
filter => [qw(standard lowercase stop asciifolding)],
}
}
}
},
mappings => {
tweet => {
properties => {
user => { type => 'string' },
content => { type => 'string' },
date => { type => 'date' }
}
}
},
warmers => {
warmer_1 => {
types => ['tweet'],
source => {
queryb => { date => { gt => '2012-01-01' }},
facets => {
content => {
terms => {
field=>'content'
}
}
}
}
}
}
);
Throws an exception if the index already exists.
See L<http://www.elasticsearch.org/guide/reference/api/admin-indices-create-index.html>
=head3 delete_index()
$result = $es->delete_index(
index => multi_req,
ignore_missing => 0 | 1 # optional
);
Deletes one or more existing indices, or throws a C<Missing> exception if a
specified index doesn't exist and C<ignore_missing> is not true:
$result = $es->delete_index( index => 'twitter' );
See L<http://www.elasticsearch.org/guide/reference/api/admin-indices-delete-index.html>
=head3 index_exists()
$result = $e->index_exists(
index => multi
);
Returns C<< {ok => 1} >> if all specified indices exist, or an empty list
if they don't.
See L<http://www.elasticsearch.org/guide/reference/api/admin-indices-indices-exists.html>
=head3 index_settings()
$result = $es->index_settings(
index => multi,
);
Returns the current settings for all, one or many indices.
$result = $es->index_settings( index=> ['index_1','index_2'] );
See L<http://www.elasticsearch.org/guide/reference/api/admin-indices-get-settings.html>
=head3 update_index_settings()
$result = $es->update_index_settings(
index => multi,
settings => { ... settings ...},
);
Update the settings for all, one or many indices. Currently only the
C<number_of_replicas> is exposed:
$result = $es->update_index_settings(
settings => { number_of_replicas => 1 }
);
Throws a C<Missing> exception if the specified indices do not exist.
See L<http://www.elasticsearch.org/guide/reference/api/admin-indices-update-settings.html>
=head3 aliases()
$result = $es->aliases( actions => [actions] | {actions} )
Adds or removes an alias for an index, eg:
$result = $es->aliases( actions => [
{ remove => { index => 'foo', alias => 'bar' }},
{ add => { index => 'foo', alias => 'baz' }}
]);
C<actions> can be a single HASH ref, or an ARRAY ref containing multiple HASH
refs.
Note: C<aliases()> supports L<ElasticSearch::SearchBuilder>-style
filters via the C<filterb> parameter. See
L</"INTEGRATION WITH ElasticSearch::SearchBuilder"> for more details.
$result = $es->aliases( actions => [
{ add => {
index => 'foo',
alias => 'baz',
index_routing => '1',
search_routing => '1,2',
filterb => { foo => 'bar' }
}}
]);
See L<http://www.elasticsearch.org/guide/reference/api/admin-indices-aliases.html>
=head3 get_aliases()
$result = $es->get_aliases(
index => multi,
ignore_missing => 0 | 1,
);
Returns a hashref listing all indices and their corresponding aliases, eg:
{
"foo" : {
"aliases" : {
"foo_1" : {
"search_routing" : "1,2",
"index_routing" : "1"
"filter" : {
"term" : {
"foo" : "bar"
}
}
},
"foo_2" : {}
}
}
}
If you pass in the optional C<index> argument, which can be an index name
or an alias name, then it will only return the indices related
to that argument.
See L<http://www.elasticsearch.org/guide/reference/api/admin-indices-aliases.html>
=head3 open_index()
$result = $es->open_index( index => single);
Opens a closed index.
The open and close index APIs allow you to close an index, and later on open
it.
A closed index has almost no overhead on the cluster (except for maintaining
its metadata), and is blocked for read/write operations. A closed index can
be opened which will then go through the normal recovery process.
See L<http://www.elasticsearch.org/guide/reference/api/admin-indices-open-close.html> for more
=head3 close_index()
$result = $es->close_index( index => single);
Closes an open index. See
L<http://www.elasticsearch.org/guide/reference/api/admin-indices-open-close.html> for more
=head3 create_index_template()
$result = $es->create_index_template(
name => single,
template => $template, # required
mappings => {...}, # optional
settings => {...}, # optional
warmers => {...}, # optional
order => $order, # optional
);
Index templates allow you to define templates that will automatically be
applied to newly created indices. You can specify both C<settings> and
C<mappings>, and a simple pattern C<template> that controls whether
the template will be applied to a new index.
For example:
$result = $es->create_index_template(
name => 'my_template',
template => 'small_*',
settings => { number_of_shards => 1 }
);
See L<http://www.elasticsearch.org/guide/reference/api/admin-indices-templates.html> for more.
=head3 index_template()
$result = $es->index_template(
name => single
);
Retrieves the named index template.
See L<http://www.elasticsearch.org/guide/reference/api/admin-indices-templates.html#GETting_a_Template>
=head3 delete_index_template()
$result = $es->delete_index_template(
name => single,
ignore_missing => 0 | 1 # optional
);
Deletes the named index template.
See L<http://www.elasticsearch.org/guide/reference/api/admin-indices-templates.html#Deleting_a_Template>
=head3 flush_index()
$result = $es->flush_index(
index => multi,
full => 0 | 1,
refresh => 0 | 1,
ignore_indices => 'none' | 'missing',
);
Flushes one or more indices, which frees
memory from the index by flushing data to the index storage and clearing the
internal transaction log. By default, Elasticsearch uses memory heuristics
in order to automatically trigger flush operations as required in order to
clear memory.
Example:
$result = $es->flush_index( index => 'twitter' );
Throws a C<Missing> exception if the specified indices do not exist.
See L<http://www.elasticsearch.org/guide/reference/api/admin-indices-flush.html>
=head3 refresh_index()
$result = $es->refresh_index(
index => multi,
ignore_indices => 'none' | 'missing',
);
Explicitly refreshes one or more indices, making all operations performed
since the last refresh available for search. The (near) real-time capabilities
depends on the index engine used. For example, the robin one requires
refresh to be called, but by default a refresh is scheduled periodically.
Example:
$result = $es->refresh_index( index => 'twitter' );
Throws a C<Missing> exception if the specified indices do not exist.
See L<http://www.elasticsearch.org/guide/reference/api/admin-indices-refresh.html>
=head3 optimize_index()
$result = $es->optimize_index(
index => multi,
only_deletes => 0 | 1, # only_expunge_deletes
flush => 0 | 1, # flush after optimization
refresh => 0 | 1, # refresh after optimization
wait_for_merge => 1 | 0, # wait for merge to finish
max_num_segments => int, # number of segments to optimize to
ignore_indices => 'none' | 'missing',
)
Throws a C<Missing> exception if the specified indices do not exist.
See L<http://www.elasticsearch.org/guide/reference/api/admin-indices-optimize.html>
=head3 gateway_snapshot()
$result = $es->gateway_snapshot(
index => multi,
ignore_indices => 'none' | 'missing',
);
Explicitly performs a snapshot through the gateway of one or more indices
(backs them up ). By default, each index gateway periodically snapshot changes,
though it can be disabled and be controlled completely through this API.
Example:
$result = $es->gateway_snapshot( index => 'twitter' );
Throws a C<Missing> exception if the specified indices do not exist.
See L<http://www.elasticsearch.org/guide/reference/api/admin-indices-gateway-snapshot.html>
and L<http://www.elasticsearch.org/guide/reference/modules/gateway>
=head3 snapshot_index()
C<snapshot_index()> is a synonym for L</"gateway_snapshot()">
=head3 clear_cache()
$result = $es->clear_cache(
index => multi,
bloom => 0 | 1,
field_data => 0 | 1,
filter => 0 | 1,
id => 0 | 1,
fields => 'field1' | ['field1','fieldn',...],
ignore_indices => 'none' | 'missing',
);
Clears the caches for the specified indices. By default, clears all caches,
but if any of C<id>, C<filter>, C<field_data> or C<bloom> are true, then
it clears just the specified caches.
Throws a C<Missing> exception if the specified indices do not exist.
See L<http://www.elasticsearch.org/guide/reference/api/admin-indices-clearcache.html>
=head2 Mapping methods
=head3 put_mapping()
$result = $es->put_mapping(
index => multi,
type => single,
mapping => { ... } # required
ignore_conflicts => 0 | 1
);
A C<mapping> is the data definition of a C<type>. If no mapping has been
specified, then Elasticsearch tries to infer the types of each field in
document, by looking at its contents, eg
'foo' => string
123 => integer
1.23 => float
However, these heuristics can be confused, so it safer (and much more powerful)
to specify an official C<mapping> instead, eg:
$result = $es->put_mapping(
index => ['twitter','buzz'],
type => 'tweet',
mapping => {
_source => { compress => 1 },
properties => {
user => {type => "string", index => "not_analyzed"},
message => {type => "string", null_value => "na"},
post_date => {type => "date"},
priority => {type => "integer"},
rank => {type => "float"}
}
}
);
See also: L<http://www.elasticsearch.org/guide/reference/api/admin-indices-put-mapping.html>
and L<http://www.elasticsearch.org/guide/reference/mapping>
B<DEPRECATION>: C<put_mapping()> previously took the mapping parameters
at the top level, eg C<< $es->put_mapping( properties=> { ... }) >>.
This form still works, but is deprecated. Instead use the C<mapping>
parameter.
=head3 delete_mapping()
$result = $es->delete_mapping(
index => multi_req,
type => single,
ignore_missing => 0 | 1,
);
Deletes a mapping/type in one or more indices.
See also L<http://www.elasticsearch.org/guide/reference/api/admin-indices-delete-mapping.html>
Throws a C<Missing> exception if the indices or type don't exist and
C<ignore_missing> is false.
=head3 mapping()
$mapping = $es->mapping(
index => single,
type => multi
);
Returns the mappings for all types in an index, or the mapping for the specified
type(s), eg:
$mapping = $es->mapping(
index => 'twitter',
type => 'tweet'
);
$mappings = $es->mapping(
index => 'twitter',
type => ['tweet','user']
);
# { twitter => { tweet => {mapping}, user => {mapping}} }
Note: the index name which is used in the results is the actual index name. If
you pass an alias name as the C<index> name, then this key will be the
index (or indices) that the alias points to.
See also: L<http://www.elasticsearch.org/guide/reference/api/admin-indices-get-mapping.html>
=head3 type_exists()
$result = $e->type_exists(
index => multi, # optional
type => multi, # required
ignore_indices => 'none' | 'missing',
);
Returns C<< {ok => 1} >> if all specified types exist in all specified indices,
or an empty list if they don't.
See L<http://www.elasticsearch.org/guide/reference/api/admin-indices-types-exists.html>
=head2 Warmer methods
Index warming allow you to run typical search requests to "warm up"
new segments before they become available for search.
Warmup searches typically include requests that require heavy loading of
data, such as faceting or sorting on specific fields.
=head3 create_warmer()
$es->create_warmer(
warmer => $warmer,
index => multi,
type => multi,
# optional
query => { raw query }
| queryb => { search builder query },
filter => { raw filter }
| filterb => { search builder filter},
facets => { facets },
script_fields => { script fields },
sort => { sort },
);
Create an index warmer called C<$warmer>: a search which is run whenever a
matching C<index>/C<type> segment is about to be brought online.
See L<https://github.com/elasticsearch/elasticsearch/issues/1913> for more.
=head2 warmer()
$result = $es->warmer(
index => multi, # optional
warmer => $warmer, # optional
ignore_missing => 0 | 1
);
Returns any matching registered warmers. The C<$warmer> can be blank,
the name of a particular warmer, or use wildcards, eg C<"warmer_*">. Throws
an error if no matching warmer is found, and C<ignore_missing> is false.
See L<https://github.com/elasticsearch/elasticsearch/issues/1913> for more.
=head2 delete_warmer()
$result = $es->delete_warmer(
index => multi, # required
warmer => $warmer, # required
ignore_missing => 0 | 1
);
Deletes any matching registered warmers. The C<index> parameter is
required and can be set to C<_all> to match all indices. The C<$warmer> can be
the name of a particular warmer, or use wildcards, eg C<"warmer_*">
or C<"*"> for any warmer. Throws an error if no matching warmer is found,
and C<ignore_missing> is false.
See L<https://github.com/elasticsearch/elasticsearch/issues/1913> for more.
=head2 River admin methods
See L<http://www.elasticsearch.org/guide/reference/river/>
and L<http://www.elasticsearch.org/guide/reference/river/twitter.html>.
=head3 create_river()
$result = $es->create_river(
river => $river_name, # required
type => $type, # required
$type => {...}, # depends on river type
index => {...}, # depends on river type
);
Creates a new river with name C<$name>, eg:
$result = $es->create_river(
river => 'my_twitter_river',
type => 'twitter',
twitter => {
user => 'user',
password => 'password',
},
index => {
index => 'my_twitter_index',
type => 'status',
bulk_size => 100
}
)
=head3 get_river()
$result = $es->get_river(
river => $river_name,
ignore_missing => 0 | 1 # optional
);
Returns the river details eg
$result = $es->get_river ( river => 'my_twitter_river' )
Throws a C<Missing> exception if the river doesn't exist and C<ignore_missing>
is false.
=head3 delete_river()
$result = $es->delete_river( river => $river_name );
Deletes the corresponding river, eg:
$result = $es->delete_river ( river => 'my_twitter_river' )
See L<http://www.elasticsearch.org/guide/reference/river/>.
=head3 river_status()
$result = $es->river_status(
river => $river_name,
ignore_missing => 0 | 1 # optional
);
Returns the status doc for the named river.
Throws a C<Missing> exception if the river doesn't exist and C<ignore_missing>
is false.
=head2 Percolate methods
See also: L<http://www.elasticsearch.org/guide/reference/api/percolate.html>
and L<http://www.elasticsearch.org/blog/2011/02/08/percolator.html>
=head3 create_percolator()
$es->create_percolator(
index => single
percolator => $percolator
# one of queryb or query is required
query => { native query }
queryb => { search builder query }
# optional
data => {data}
)
Create a percolator, eg:
$es->create_percolator(
index => 'myindex',
percolator => 'mypercolator',
queryb => { field => 'foo' },
data => { color => 'blue' }
)
Note: C<create_percolator()> supports L<ElasticSearch::SearchBuilder>-style
queries via the C<queryb> parameter. See
L</"INTEGRATION WITH ElasticSearch::SearchBuilder"> for more details.
=head3 get_percolator()
$es->get_percolator(
index => single
percolator => $percolator,
ignore_missing => 0 | 1,
)
Retrieves a percolator, eg:
$es->get_percolator(
index => 'myindex',
percolator => 'mypercolator',
)
Throws a C<Missing> exception if the specified index or percolator does not exist,
and C<ignore_missing> is false.
=head3 delete_percolator()
$es->delete_percolator(
index => single
percolator => $percolator,
ignore_missing => 0 | 1,
)
Deletes a percolator, eg:
$es->delete_percolator(
index => 'myindex',
percolator => 'mypercolator',
)
Throws a C<Missing> exception if the specified index or percolator does not exist,
and C<ignore_missing> is false.
=head3 percolate()
$result = $es->percolate(
index => single,
type => single,
doc => { doc to percolate },
# optional
query => { query to filter percolators },
prefer_local => 1 | 0,
)
Check for any percolators which match a document, optionally filtering
which percolators could match by passing a C<query> param, for instance:
$result = $es->percolate(
index => 'myindex',
type => 'mytype',
doc => { text => 'foo' },
query => { term => { color => 'blue' }}
);
Returns:
{
ok => 1,
matches => ['mypercolator']
}
=head2 Cluster admin methods
=head3 cluster_state()
$result = $es->cluster_state(
# optional
filter_blocks => 0 | 1,
filter_nodes => 0 | 1,
filter_metadata => 0 | 1,
filter_routing_table => 0 | 1,
filter_indices => [ 'index_1', ... 'index_n' ],
);
Returns cluster state information.
See L<http://www.elasticsearch.org/guide/reference/api/admin-cluster-state.html>
=head3 cluster_health()
$result = $es->cluster_health(
index => multi,
level => 'cluster' | 'indices' | 'shards',
timeout => $seconds
wait_for_status => 'red' | 'yellow' | 'green',
| wait_for_relocating_shards => $number_of_shards,
| wait_for_nodes => eg '>=2',
);
Returns the status of the cluster, or index|indices or shards, where the
returned status means:
=over
=item C<red>: Data not allocated
=item C<yellow>: Primary shard allocated
=item C<green>: All shards allocated
=back
It can block to wait for a particular status (or better), or can block to
wait until the specified number of shards have been relocated (where 0 means
all) or the specified number of nodes have been allocated.
If waiting, then a timeout can be specified.
For example:
$result = $es->cluster_health( wait_for_status => 'green', timeout => '10s')
See: L<http://www.elasticsearch.org/guide/reference/api/admin-cluster-health.html>
=head3 cluster_settings()
$result = $es->cluster_settings()
Returns any cluster wide settings that have been set with
L</"update_cluster_settings">.
See L<http://www.elasticsearch.org/guide/reference/api/admin-cluster-update-settings.html>
=head3 update_cluster_settings()
$result = $es->update_cluster_settings(
persistent => {...},
transient => {...},
)
For example:
$result = $es->update_cluster_settings(
persistent => {
"discovery.zen.minimum_master_nodes" => 2
},
)
C<persistent> settings will survive a full cluster restart. C<transient>
settings won't.
See L<http://www.elasticsearch.org/guide/reference/api/admin-cluster-update-settings.html>
=head3 nodes()
$result = $es->nodes(
nodes => multi,
settings => 0 | 1,
http => 0 | 1,
jvm => 0 | 1,
network => 0 | 1,
os => 0 | 1,
process => 0 | 1,
thread_pool => 0 | 1,
transport => 0 | 1
);
Returns information about one or more nodes or servers in the cluster.
See: L<http://www.elasticsearch.org/guide/reference/api/admin-cluster-nodes-info.html>
=head3 nodes_stats()
$result = $es->nodes_stats(
node => multi,
indices => 1 | 0,
clear => 0 | 1,
all => 0 | 1,
fs => 0 | 1,
http => 0 | 1,
jvm => 0 | 1,
network => 0 | 1,
os => 0 | 1,
process => 0 | 1,
thread_pool => 0 | 1,
transport => 0 | 1,
);
Returns various statistics about one or more nodes in the cluster.
See: L<http://www.elasticsearch.org/guide/reference/api/admin-cluster-nodes-stats.html>
=head3 cluster_reroute()
$result = $es->cluster_reroute(
commands => [
{ move => {
index => 'test',
shard => 0,
from_node => 'node1',
to_node => 'node2',
}},
{ allocate => {
index => 'test',
shard => 1,
node => 'node3',
allow_primary => 0 | 1
}},
{ cancel => {
index => 'test',
shard => 2,
node => 'node4',
allow_primary => 0 | 1
}},
],
dry_run => 0 | 1
);
The L</cluster_reroute> command allows you to explicitly affect shard allocation
within a cluster. For example, a shard can be moved from one node to another,
an allocation can be cancelled, or an unassigned shard can be explicitly
allocated on a specific node.
B<NOTE:> after executing the commands, the cluster will automatically
rebalance itself if it is out of balance. Use the C<dry_run> parameter
to see what the final outcome will be after automatic rebalancing, before
executing the real L</cluster_reroute> call.
Without any C<\@commands>, the current cluster routing will be returned.
See L<http://www.elasticsearch.org/guide/reference/api/admin-cluster-reroute.html>
=head3 shutdown()
$result = $es->shutdown(
node => multi,
delay => '5s' | '10m' # optional
);
Shuts down one or more nodes (or the whole cluster if no nodes specified),
optionally with a delay.
C<node> can also have the values C<_local>, C<_master> or C<_all>.
See: L<http://www.elasticsearch.org/guide/reference/api/admin-cluster-nodes-shutdown.html>
=head3 restart()
$result = $es->restart(
node => multi,
delay => '5s' | '10m' # optional
);
Restarts one or more nodes (or the whole cluster if no nodes specified),
optionally with a delay.
C<node> can also have the values C<_local>, C<_master> or C<_all>.
See: L</"KNOWN ISSUES">
=head3 current_server_version()
$version = $es->current_server_version()
Returns a HASH containing the version C<number> string and
whether or not the current server is a C<snapshot_build>.
=head2 Other methods
=head3 use_index()/use_type()
C<use_index()> and C<use_type()> can be used to set default values for
any C<index> or C<type> parameter. The default value can be overridden
by passing a parameter (including C<undef>) to any request.
$es->use_index('one');
$es->use_type(['foo','bar']);
$es->index( # index: one, types: foo,bar
data=>{ text => 'my text' }
);
$es->index( # index: two, type: foo,bar
index=>'two',
data=>{ text => 'my text' }
)
$es->search( type => undef ); # index: one, type: all
=head3 trace_calls()
$es->trace_calls(1); # log to STDERR
$es->trace_calls($filename); # log to $filename.$PID
$es->trace_calls(\*STDOUT); # log to STDOUT
$es->trace_calls($fh); # log to given filehandle
$es->trace_calls(0 | undef); # disable logging
C<trace_calls()> is used for debugging. All requests to the cluster
are logged either to C<STDERR>, or the specified filehandle,
or the specified filename, with the
current C<$PID> appended, in a form that can be rerun with curl.
The cluster response will also be logged, and commented out.
Example: C<< $es->cluster_health >> is logged as:
# [Tue Oct 19 15:32:31 2010] Protocol: http, Server: 127.0.0.1:9200
curl -XGET 'http://127.0.0.1:9200/_cluster/health'
# [Tue Oct 19 15:32:31 2010] Response:
# {
# "relocating_shards" : 0,
# "active_shards" : 0,
# "status" : "green",
# "cluster_name" : "elasticsearch",
# "active_primary_shards" : 0,
# "timed_out" : false,
# "initializing_shards" : 0,
# "number_of_nodes" : 1,
# "unassigned_shards" : 0
# }
=head3 query_parser()
$qp = $es->query_parser(%opts);
Returns an L<Search::Elasticsearch::Compat::QueryParser> object for tidying up
query strings so that they won't cause an error when passed to Elasticsearch.
See L<Search::Elasticsearch::Compat::QueryParser> for more information.
=head3 transport()
$transport = $es->transport
Returns the Transport object, eg L<Search::Elasticsearch::Compat::Transport::HTTP>.
=head3 timeout()
$timeout = $es->timeout($timeout)
Convenience method which does the same as:
$es->transport->timeout($timeout)
=head3 refresh_servers()
$es->refresh_servers()
Convenience method which does the same as:
$es->transport->refresh_servers()
This tries to retrieve a list of all known live servers in the Elasticsearch
cluster by connecting to each of the last known live servers (and the initial
list of servers passed to C<new()>) until it succeeds.
This list of live servers is then used in a round-robin fashion.
C<refresh_servers()> is called on the first request and every C<max_requests>.
This automatic refresh can be disabled by setting C<max_requests> to C<0>:
$es->transport->max_requests(0)
Or:
$es = Search::Elasticsearch::Compat->new(
servers => '127.0.0.1:9200',
max_requests => 0,
);
=head3 builder_class() | builder()
The C<builder_class> is set to L<ElasticSearch::SearchBuilder> by default.
This can be changed, eg:
$es = Search::Elasticsearch::Compat->new(
servers => '127.0.0.1:9200',
builder_class => 'My::Builder'
);
C<builder()> will C<require> the module set in C<builder_class()>, create
an instance, and store that instance for future use. The C<builder_class>
should implement the C<filter()> and C<query()> methods.
=head3 camel_case()
$bool = $es->camel_case($bool)
Gets/sets the camel_case flag. If true, then all JSON keys returned by
Elasticsearch are in camelCase, instead of with_underscores. This flag
does not apply to the source document being indexed or fetched.
Defaults to false.
=head3 error_trace()
$bool = $es->error_trace($bool)
If the Elasticsearch server is returning an error, setting C<error_trace>
to true will return some internal information about where the error originates.
Mostly useful for debugging.
=head1 AUTHOR
Clinton Gormley, C<< <drtech at cpan.org> >>
=head1 KNOWN ISSUES
=over
=item L</"get()">
The C<_source> key that is returned from a L</"get()"> contains the original JSON
string that was used to index the document initially. Elasticsearch parses
JSON more leniently than L<JSON::XS>, so if invalid JSON is used to index the
document (eg unquoted keys) then C<< $es->get(....) >> will fail with a
JSON exception.
Any documents indexed via this module will be not susceptible to this problem.
=item L</"restart()">
C<restart()> is currently disabled in Elasticsearch as it doesn't work
correctly. Instead you can L</"shutdown()"> one or all nodes and then
start them up from the command line.
=back
=head1 AUTHOR
Clinton Gormley <drtech@cpan.org>
=head1 COPYRIGHT AND LICENSE
This software is Copyright (c) 2014 by Elasticsearch BV.
This is free software, licensed under:
The Apache License, Version 2.0, January 2004
=cut
__END__
# ABSTRACT: The client compatibility layer for migrating from ElasticSearch.pm
| gitpan/Search-Elasticsearch-Compat | lib/Search/Elasticsearch/Client/Compat.pm | Perl | apache-2.0 | 141,288 |
=head1 LICENSE
See the NOTICE file distributed with this work for additional information
regarding copyright ownership.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=head1 NAME
Bio::EnsEMBL::Compara::RunnableDB::CopyMembersByGenomeDB
=head1 DESCRIPTION
This module imports all the members (and their sequences and hmm-hits) for a given genome_db_id.
=head1 APPENDIX
The rest of the documentation details each of the object methods.
Internal methods are usually preceded with a _
=cut
package Bio::EnsEMBL::Compara::RunnableDB::CopyMembersByGenomeDB;
use strict;
use warnings;
use Bio::EnsEMBL::Compara::DBSQL::DBAdaptor;
use Bio::EnsEMBL::Compara::Utils::CopyData qw(:table_copy);
use base ('Bio::EnsEMBL::Compara::RunnableDB::BaseRunnable');
sub fetch_input {
    my $self = shift;

    # Open a connection to the source ("reuse") database and make sure it
    # actually resolved before any copying is attempted.
    my $source_dba = $self->get_cached_compara_dba('reuse_db');
    die $self->param('reuse_db').' cannot be found' unless $source_dba;
    $self->param('reuse_dba', $source_dba);

    # A genome_db_id is mandatory: every copied table is filtered on it.
    $self->param_required('genome_db_id');
}
sub run {
    my $self = shift;

    # The tables are copied in this exact order so that referenced rows
    # (dnafrag, gene_member, sequence, seq_member) exist before the tables
    # that point at them are populated. Each entry is [table_name, query].
    my @copy_plan = (
        [ 'dnafrag',                         'SELECT * FROM dnafrag' ],
        [ 'gene_member',                     'SELECT * FROM gene_member' ],
        [ 'sequence',                        'SELECT sequence.* FROM seq_member JOIN sequence USING (sequence_id)' ],
        [ 'seq_member',                      'SELECT * FROM seq_member' ],
        [ 'other_member_sequence',           'SELECT other_member_sequence.* FROM seq_member JOIN other_member_sequence USING (seq_member_id)' ],
        [ 'exon_boundaries',                 'SELECT exon_boundaries.* FROM seq_member JOIN exon_boundaries USING (seq_member_id)' ],
        [ 'hmm_annot',                       'SELECT hmm_annot.* FROM seq_member JOIN hmm_annot USING (seq_member_id)' ],
        [ 'seq_member_projection_stable_id', 'SELECT seq_member_projection_stable_id.* FROM seq_member JOIN seq_member_projection_stable_id ON seq_member_id = target_seq_member_id' ],
    );

    foreach my $copy_job (@copy_plan) {
        $self->_copy_data_wrapper(@$copy_job);
    }
}
# Copy the rows selected by $input_query from the reuse database into the
# current one, restricted to the genome_db_id this job is running for.
# $genome_db_id_prefix (optional) qualifies the column name, e.g. 'sm.' for
# queries where genome_db_id would otherwise be ambiguous.
sub _copy_data_wrapper {
    my ($self, $table, $input_query, $genome_db_id_prefix) = @_;

    my $genome_db_id = $self->param('genome_db_id');
    my $src_dbc      = $self->param('reuse_dba')->dbc;
    my $dest_dbc     = $self->compara_dba->dbc;

    # Append the genome_db_id filter: start a WHERE clause, or extend an
    # existing one with AND.
    my $connector      = ($input_query =~ /\bwhere\b/i) ? ' AND ' : ' WHERE ';
    my $filtered_query = $input_query
                       . $connector
                       . ($genome_db_id_prefix // '') . 'genome_db_id = ' . $genome_db_id;

    # 'skip_disable_vars' tells copy_data *not* to disable and re-enable
    # keys: the per-genome data volume is too small to make that worthwhile.
    copy_data($src_dbc, $dest_dbc, $table, $filtered_query, undef, 'skip_disable_vars', $self->debug);
}
1;
| Ensembl/ensembl-compara | modules/Bio/EnsEMBL/Compara/RunnableDB/CopyMembersByGenomeDB.pm | Perl | apache-2.0 | 3,488 |
#
# Copyright 2018 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package centreon::common::powershell::exchange::2010::activesyncmailbox;
use strict;
use warnings;
use centreon::plugins::misc;
use centreon::common::powershell::exchange::2010::powershell;
# Build the encoded powershell script that runs Test-ActiveSyncConnectivity
# against the mailbox given in %options. Returns '' when script generation is
# disabled via the no_ps option.
sub get_powershell {
    my (%options) = @_;
    # options: no_ps

    # Nothing to build when powershell generation is disabled.
    return '' if defined($options{no_ps});

    # Unless explicitly disabled, let the connectivity test accept any SSL
    # certificate presented by the server.
    my $ssl_flag = defined($options{no_trust_ssl}) ? '' : '-TrustAnySSLCertificate';

    my $script = centreon::common::powershell::exchange::2010::powershell::powershell_init(%options);

    # NOTE(review): the mailbox credentials are interpolated verbatim into the
    # generated script; values containing double quotes would break it.
    $script .= '
try {
    $ErrorActionPreference = "Stop"
    $username = "' . $options{mailbox} . '"
    $password = "' . $options{password} . '"
    $secstr = New-Object -TypeName System.Security.SecureString
    $password.ToCharArray() | ForEach-Object {$secstr.AppendChar($_)}
    $cred = new-object -typename System.Management.Automation.PSCredential -argumentlist $username, $secstr
    $results = Test-ActiveSyncConnectivity -MailboxCredential $cred ' . $ssl_flag . '
} catch {
    Write-Host $Error[0].Exception
    exit 1
}

Foreach ($result in $results) {
    Write-Host "[scenario=" $result.Scenario "][result=" $result.Result "][latency=" $result.Latency.TotalMilliseconds "][[error=" $Result.Error "]]"
}

exit 0
';

    return centreon::plugins::misc::powershell_encoded($script);
}
# Parse the powershell output in $options{stdout} and turn each ActiveSync
# scenario result into plugin output/perfdata for mailbox $options{mailbox}.
# Emits UNKNOWN when no scenario line can be parsed at all.
sub check {
    my ($self, %options) = @_;
    # options: stdout
    # Following output:
    #[scenario= Options ][result= Failure ][latency= 52,00 ][[error=...]]

    # Start optimistic: overall OK unless a scenario downgrades the status.
    $self->{output}->output_add(severity => 'OK',
                                short_msg => "ActiveSync to '" . $options{mailbox} . "' is ok.");

    my $checked = 0;
    $self->{output}->output_add(long_msg => $options{stdout});
    # /g keeps match position state between iterations, so each pass consumes
    # one "[scenario=...][result=...][latency=...][[error=...]]" record.
    while ($options{stdout} =~ /\[scenario=(.*?)\]\[result=(.*?)\]\[latency=(.*?)\]\[\[error=(.*?)\]\]/msg) {
        # $self->{data} exposes the current record to the user-supplied
        # warning/critical expressions evaluated below.
        $self->{data} = {};
        ($self->{data}->{scenario}, $self->{data}->{result}, $self->{data}->{latency}, $self->{data}->{error}) =
            ($self->{output}->to_utf8($1), centreon::plugins::misc::trim($2),
             centreon::plugins::misc::trim($3), centreon::plugins::misc::trim($4));
        $checked++;

        my ($status, $message) = ('ok');
        eval {
            # Capture warnings/dies raised by the user expressions so a bad
            # filter is reported instead of aborting the whole check.
            local $SIG{__WARN__} = sub { $message = $_[0]; };
            local $SIG{__DIE__} = sub { $message = $_[0]; };

            # The warning/critical options are Perl expressions supplied by the
            # operator and evaluated with string eval (critical checked first).
            if (defined($self->{option_results}->{critical}) && $self->{option_results}->{critical} ne '' &&
                eval "$self->{option_results}->{critical}") {
                $status = 'critical';
            } elsif (defined($self->{option_results}->{warning}) && $self->{option_results}->{warning} ne '' &&
                     eval "$self->{option_results}->{warning}") {
                $status = 'warning';
            }
        };
        if (defined($message)) {
            $self->{output}->output_add(long_msg => 'filter status issue: ' . $message);
        }

        # Only non-OK scenarios get their own short message.
        if (!$self->{output}->is_status(value => $status, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity => $status,
                                        short_msg => sprintf("ActiveSync scenario '%s' to '%s' is '%s'",
                                                             $self->{data}->{scenario}, $options{mailbox}, $self->{data}->{result}));
        }

        # Latency arrives in milliseconds (possibly with a decimal part, e.g.
        # "52,00"); only the leading integer part is used, converted to seconds.
        if ($self->{data}->{latency} =~ /^(\d+)/) {
            $self->{output}->perfdata_add(label => $self->{data}->{scenario}, unit => 's',
                                          value => sprintf("%.3f", $1 / 1000),
                                          min => 0);
        }
    }

    if ($checked == 0) {
        $self->{output}->output_add(severity => 'UNKNOWN',
                                    short_msg => 'Cannot find informations');
    }
}
1;
__END__
=head1 DESCRIPTION
Method to check Exchange 2010 activesync on a specific mailbox.
=cut | wilfriedcomte/centreon-plugins | centreon/common/powershell/exchange/2010/activesyncmailbox.pm | Perl | apache-2.0 | 4,709 |
# Copyright Yahoo. Licensed under the terms of the Apache 2.0 license. See LICENSE in the project root.
#
# Defines structs to represent a cluster state
#
package Yahoo::Vespa::ClusterState;
use strict;
use warnings;
use Class::Struct;
# A snapshot of a content cluster's state: one global state string plus a map
# of distributor and storage nodes, each carrying several State records.
struct( ClusterState => {
    globalState => '$', # A state primitive
    distributor => '%', # Index to Node map
    storage => '%' # Index to Node map
});

# One node in the cluster; the three State fields track the different state
# sources (reported unit state, generated state, user-set wanted state).
struct( Node => {
    group => '$', # Hierarchical group node belongs to
    unit => 'State',
    generated => 'State',
    user => 'State',
    partition => '%' # Index to Partition map
});

# Per-partition (disk) statistics and generated state for a storage node.
struct( Partition => {
    generated => 'State',
    bucketcount => '$',
    doccount => '$',
    totaldocsize => '$'
});

# A single state value together with where/when/why it was set.
struct( State => {
    state => '$', # A state primitive
    reason => '$', # Textual reason for it to be set.
    timestamp => '$', # Timestamp of the time it got set.
    source => '$' # What type of state is it (unit/generated/user)
});

# File-level return: a module loaded via use/require must evaluate to true.
return 1;
# Whether the supplied string is one of the recognised state primitives.
sub legalState { # (State) -> Bool
    my ($candidate) = @_;
    return ($candidate =~ /^(?: up | down | maintenance | retired | stopping | initializing )$/x);
}
| vespa-engine/vespa | vespaclient/src/perl/lib/Yahoo/Vespa/ClusterState.pm | Perl | apache-2.0 | 1,102 |
=head1 LICENSE
Copyright (c) 1999-2011 The European Bioinformatics Institute and
Genome Research Limited. All rights reserved.
This software is distributed under a modified Apache license.
For license details, please see
http://www.ensembl.org/info/about/code_licence.html
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <dev@ensembl.org>.
Questions may also be sent to the Ensembl help desk at
<helpdesk@ensembl.org>.
=cut
#
# Ensembl module for Bio::EnsEMBL::Variation::DBSQL::VariationSetAdaptor
#
# Copyright (c) 2010 Ensembl
#
# You may distribute this module under the same terms as perl itself
#
#
=head1 NAME
Bio::EnsEMBL::Variation::DBSQL::VariationSetAdaptor
=head1 SYNOPSIS
$db = Bio::EnsEMBL::Variation::DBSQL::DBAdaptor->new(...);
$vsa = $db->get_VariationSetAdaptor();
# retrieve a variation set by its name
$vs = $vsa->fetch_by_name('Phenotype-associated variations');
# retrieve a variation set by its internal identifier
$vs = $vsa->fetch_by_dbID(12);
# retrieve all variation sets which a variation is a part of
@vs = @{$vsa->fetch_all_by_Variation($var)};
=head1 DESCRIPTION
This adaptor provides database connectivity for VariationSet objects.
VariationSets may be retrieved from the Ensembl variation database by
several means using this module.
=head1 METHODS
=cut
use strict;
use warnings;
package Bio::EnsEMBL::Variation::DBSQL::VariationSetAdaptor;
use Bio::EnsEMBL::DBSQL::BaseAdaptor;
use Bio::EnsEMBL::Utils::Exception qw(throw warning);
use Bio::EnsEMBL::Utils::Scalar qw(assert_ref wrap_array);
use Bio::EnsEMBL::Variation::VariationSet;
our @ISA = ('Bio::EnsEMBL::DBSQL::BaseAdaptor');
our $MAX_VARIATION_SET_ID = 64;
=head2 fetch_all_top_VariationSets
Example : $vs = $vs_adaptor->fetch_all_top_VariationSets();
Description: Retrieves all VariationSet objects that are 'toplevel',
i.e. they are not subsets of any other variation set.
Returntype : listref of Bio::EnsEMBL::Variation::VariationSet
Exceptions : none
Caller : general
Status : At Risk
=cut
sub fetch_all_top_VariationSets {
    my $self = shift;

    # Keep only the sets that never occur as a subset in the set hierarchy,
    # i.e. the sets without any parent set.
    my $no_parent_constraint = qq{
        NOT EXISTS (
            SELECT
                *
            FROM
                variation_set_structure vss
            WHERE
                vss.variation_set_sub = vs.variation_set_id
        )
    };

    # Delegate the actual retrieval to the generic fetch machinery.
    return $self->generic_fetch($no_parent_constraint);
}
=head2 fetch_all_by_sub_VariationSet
Arg [1] : Bio::EnsEMBL::Variation::VariationSet $sub
Arg [2] : (optional) boolean $only_immediate
If true, only the direct supersets of this variation set will be fetched. The default behaviour is
to recursively fetch all supersets.
Example : @vs_supersets = @{$vs_adaptor->fetch_all_by_sub_VariationSet($vs)};
Description: Retrieves all VariationSets that are direct supersets of the specified VariationSet.
Returntype : listref of Bio::EnsEMBL::Variation::VariationSet
Exceptions : throw if sub arg is not valid
Caller : general
Status : At Risk
=cut
sub fetch_all_by_sub_VariationSet {
    my $self = shift;
    my $set = shift;
    my $only_immediate = shift;

    # Check the input set
    assert_ref($set,'Bio::EnsEMBL::Variation::VariationSet');

    # First, get all VariationSets that are direct supersets of this one
    my $dbID = $set->dbID();

    my $stmt = qq{
        SELECT
            vss.variation_set_super
        FROM
            variation_set_structure vss
        WHERE
            vss.variation_set_sub = ?
    };
    my $sth = $self->prepare($stmt);
    $sth->execute($dbID);

    # %vs deduplicates by dbID: the same superset can be reached through
    # several paths in the hierarchy but must be returned only once.
    my %vs;
    while (my $result = $sth->fetchrow_arrayref()) {
        # For each superset, fetch all of its supersets, unless specifically told not to
        my $vs_sup = $self->fetch_by_dbID($result->[0]);
        $vs{$vs_sup->dbID()} = $vs_sup;
        # Recurse to collect the transitive closure of supersets.
        # NOTE(review): assumes the set hierarchy is acyclic — a cycle in
        # variation_set_structure would make this recursion loop forever.
        if (!defined($only_immediate)) {
            foreach my $v (@{$self->fetch_all_by_sub_VariationSet($vs_sup)}) {
                $vs{$v->dbID()} = $v;
            }
        }
    }
    my @res = values(%vs);
    return \@res;
}
=head2 fetch_all_by_super_VariationSet
Arg [1] : Bio::EnsEMBL::Variation::VariationSet $super
Arg [2] : (optional) boolean $only_immediate
If true, only the direct subsets of this variation set will be fetched. The default behaviour is
to recursively fetch all subsets.
Example : @vs_subsets = @{$vs_adaptor->fetch_all_by_super_VariationSet($vs)};
Description: Retrieves all VariationSets that are subsets of the specified VariationSet.
Returntype : listref of Bio::EnsEMBL::Variation::VariationSet
Exceptions : throw if super arg is not valid
Caller : general
Status : At Risk
=cut
sub fetch_all_by_super_VariationSet {
    my ($self, $set, $only_immediate) = @_;

    # Validate the input set
    assert_ref($set, 'Bio::EnsEMBL::Variation::VariationSet');

    # Look up the direct subsets of the supplied set.
    my $sth = $self->prepare(qq{
        SELECT
            vss.variation_set_sub
        FROM
            variation_set_structure vss
        WHERE
            vss.variation_set_super = ?
    });
    $sth->execute($set->dbID());

    # Collect results keyed on dbID so a set reachable via several paths
    # appears only once.
    my %subsets;
    while (my $row = $sth->fetchrow_arrayref()) {
        my $child = $self->fetch_by_dbID($row->[0]);
        $subsets{$child->dbID()} = $child;

        # Unless restricted to the immediate children, recurse downwards
        # to pick up the entire subtree below this subset.
        next if defined $only_immediate;
        $subsets{$_->dbID()} = $_
            for @{ $self->fetch_all_by_super_VariationSet($child) };
    }

    return [values %subsets];
}
=head2 fetch_by_name
Arg [1] : string $name
Example : $vs = $vsa->fetch_by_name('Phenotype-associated variations');
Description: Retrieves a variation set by its name.
Returntype : Bio::EnsEMBL::Variation::VariationSet
Exceptions : throw if name argument is not provided
Caller : general
Status : At Risk
=cut
sub fetch_by_name {
    my ($self, $name) = @_;

    throw('name argument expected') unless defined $name;

    # Constrain on the name column; LIKE means SQL wildcards in $name
    # are honoured. Bind the value for the generic fetch to pick up.
    $self->bind_param_generic_fetch($name, SQL_VARCHAR);
    my $result = wrap_array($self->generic_fetch(qq{ vs.name LIKE ? }));

    # Return the first hit, or undef when nothing matched.
    return undef unless scalar @{$result};
    return $result->[0];
}
=head2 fetch_by_short_name
Arg [1] : string $name
Example : $vs = $vsa->fetch_by_short_name('ph_variants');
Description: Retrieves a variation set by its short name.
Returntype : Bio::EnsEMBL::Variation::VariationSet
Exceptions : throw if short name argument is not provided
Caller : general
=cut
sub fetch_by_short_name {
    my ($self, $name) = @_;

    throw('short name argument expected') unless defined $name;

    # Short names are stored as attribs: translate the supplied name into
    # the attrib_id for the 'short_name' attrib type.
    my $aa = $self->db->get_AttributeAdaptor();
    my $attrib_id =
        $aa->attrib_id_for_type_value($self->_short_name_attrib_type_code(), $name);
    return undef unless defined $attrib_id;

    # Constrain on the short_name_attrib_id column and bind the id.
    $self->bind_param_generic_fetch($attrib_id, SQL_INTEGER);
    my $result = wrap_array($self->generic_fetch(qq{ vs.short_name_attrib_id = ? }));

    # Return the first hit, or undef when nothing matched.
    return undef unless scalar @{$result};
    return $result->[0];
}
=head2 fetch_all_by_Variation
Arg [1] : Bio::EnsEMBL::Variation::Variation
Example : my $vgs = $vga->fetch_all_by_Variation($var);
Description: Retrieves all variation sets which a particular variation
is present in.
Returntype : reference to list of Bio::EnsEMBL::Variation::VariationSets
Exceptions : throw on incorrect argument
Caller : general
Status : At Risk
=cut
sub fetch_all_by_Variation {
    my ($self, $var) = @_;

    assert_ref($var, 'Bio::EnsEMBL::Variation::Variation');

    # Fetch the sets the variation belongs to directly.
    my $cols = join(',', $self->_columns());
    my $sth  = $self->prepare(qq{
        SELECT
            $cols
        FROM
            variation_set vs,
            variation_set_variation vsv
        WHERE
            vs.variation_set_id = vsv.variation_set_id AND
            vsv.variation_id = ?
    });
    $sth->bind_param(1, $var->dbID, SQL_INTEGER);
    $sth->execute();
    my $direct_sets = $self->_objs_from_sth($sth);
    $sth->finish();

    # Membership of a set implies membership of all its supersets. A set may
    # occur at several places in the hierarchy, so de-duplicate via a hash
    # keyed on dbID.
    my %unique;
    foreach my $set (@{$direct_sets}) {
        $unique{$set->dbID()} = $set;
        $unique{$_->dbID()} = $_
            for @{ $self->fetch_all_by_sub_VariationSet($set) };
    }

    return [values %unique];
}
# API-internal: compute the bitvalue of the specified variation_set and
# (unless $no_subsets is true) all of its subsets, for use against the
# variation_set_id SET column. Bit N-1 represents the set with dbID N.
sub _get_bitvalue {
    my $self = shift;
    my $set = shift;
    my $no_subsets = shift;

    # Check the input set
    assert_ref($set,'Bio::EnsEMBL::Variation::VariationSet');

    # Store the dbIDs of the set and its subsets in an array
    my @dbIDs = ($set->dbID());
    unless ($no_subsets) {
        push @dbIDs, map { $_->dbID() } @{ $set->adaptor->fetch_all_by_super_VariationSet($set) };
    }

    # Warn about dbIDs too large to be represented in the SET construct;
    # entries for those sets won't be returned.
    my @non_compatible = grep {$_ > $MAX_VARIATION_SET_ID} @dbIDs;
    if (scalar(@non_compatible)) {
        warn ("Variation set(s) with dbID " . join(", ",@non_compatible) . " cannot be stored in the variation_set_id SET construct. Entries for these sets won't be returned");
    }

    # OR the per-set bits together using integer shift/OR rather than summing
    # 2 ** ($id - 1) floating-point powers: an NV carries only ~53 mantissa
    # bits, so combining a high dbID (up to $MAX_VARIATION_SET_ID = 64) with
    # low dbIDs would silently drop the low-order bits. Integer ops require a
    # 64-bit perl, which the 64-member SET already implies.
    my $bitvalue = 0;
    $bitvalue |= (1 << ($_ - 1)) for grep { $_ <= $MAX_VARIATION_SET_ID } @dbIDs;

    return $bitvalue;
}
# API-internal: the attrib_type code under which variation-set short names
# are stored in the attrib tables.
sub _short_name_attrib_type_code {
    return 'short_name';
}
# Columns selected by generic_fetch, in the bind order _objs_from_sth expects.
sub _columns {
    return (
        'vs.variation_set_id',
        'vs.name',
        'vs.description',
        'vs.short_name_attrib_id',
    );
}
# Single table (with the 'vs' alias used by every constraint in this adaptor).
sub _tables {
    my @tables = (['variation_set', 'vs']);
    return @tables;
}
# No default restriction; a constant-true '1' keeps generated WHERE clauses valid.
sub _default_where_clause {
    return '1';
}
# Build VariationSet objects from an executed statement handle whose columns
# arrive in _columns() order: variation_set_id, name, description,
# short_name_attrib_id. Consecutive rows sharing a variation_set_id collapse
# into a single object. Returns a listref of the constructed sets.
sub _objs_from_sth {
    my $self = shift;
    my $sth = shift;

    # Bind output columns in the same order as _columns().
    my ($vs_id, $name, $description, $short_name_attrib_id);
    $sth->bind_columns(\$vs_id, \$name, \$description, \$short_name_attrib_id);

    my @results;
    my ($cur_vs, $cur_vs_id);

    # The attribute adaptor resolves the short-name attrib_id to its string value.
    my $aa = $self->db->get_AttributeAdaptor();

    # Construct all variation sets
    while($sth->fetch()) {
        # Only a row with a new set id produces a new object; repeats of the
        # previous id (e.g. from joins) are skipped.
        # NOTE(review): this assumes rows for the same set arrive
        # consecutively -- an interleaved result set would yield duplicates.
        if (!defined($cur_vs) || $vs_id != $cur_vs_id) {
            $cur_vs = Bio::EnsEMBL::Variation::VariationSet->new
            (
                -dbID => $vs_id,
                -adaptor => $self,
                -name => $name,
                -description => $description,
                -short_name => $aa->attrib_value_for_id($short_name_attrib_id)
            );
            $cur_vs_id = $vs_id;
            push(@results,$cur_vs);
        }
    }

    return \@results;
}
1;
| adamsardar/perl-libs-custom | EnsemblAPI/ensembl-variation/modules/Bio/EnsEMBL/Variation/DBSQL/VariationSetAdaptor.pm | Perl | apache-2.0 | 11,488 |
#!/usr/bin/perl
=head1 LICENSE
Copyright (c) 1999-2011 The European Bioinformatics Institute and
Genome Research Limited. All rights reserved.
This software is distributed under a modified Apache license.
For license details, please see
http://www.ensembl.org/info/about/code_licence.html
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <ensembl-dev@ebi.ac.uk>.
Questions may also be sent to the Ensembl help desk at
<helpdesk@ensembl.org>.
=cut
=head1 NAME
Variant Effect Predictor - a script to predict the consequences of genomic variants
http://www.ensembl.org/info/docs/variation/vep/vep_script.html
Version 2.1
by Will McLaren (wm2@ebi.ac.uk)
=cut
use strict;
use Getopt::Long;
use FileHandle;
use Bio::EnsEMBL::Registry;
use Bio::EnsEMBL::Variation::DBSQL::VariationFeatureAdaptor;
use Bio::EnsEMBL::Variation::Utils::VariationEffect qw(MAX_DISTANCE_FROM_TRANSCRIPT);
use Bio::EnsEMBL::Utils::Sequence qw(reverse_comp);
use Bio::EnsEMBL::Variation::Utils::Sequence qw(unambiguity_code);
use Storable qw(nstore_fd fd_retrieve);
# we need to manually include all these modules for caching to work
use Bio::EnsEMBL::CoordSystem;
use Bio::EnsEMBL::Transcript;
use Bio::EnsEMBL::Translation;
use Bio::EnsEMBL::Exon;
use Bio::EnsEMBL::DBSQL::GeneAdaptor;
use Bio::EnsEMBL::DBSQL::SliceAdaptor;
use Bio::EnsEMBL::DBSQL::TranslationAdaptor;
use Bio::EnsEMBL::DBSQL::TranscriptAdaptor;
use Bio::EnsEMBL::DBSQL::MetaContainer;
use Bio::EnsEMBL::DBSQL::CoordSystemAdaptor;
# debug
#use Time::HiRes qw(tv_interval gettimeofday);
# output columns, printed in this order by print_line()
my @OUTPUT_COLS = qw(
    Uploaded_variation
    Location
    Allele
    Gene
    Feature
    Feature_type
    Consequence
    cDNA_position
    CDS_position
    Protein_position
    Amino_acids
    Codons
    Existing_variation
    Extra
);

# global vars
my $VERSION = '2.1';

# set output autoflush for progress bars
$| = 1;

# configure from command line opts
# NOTE: ampersand-free call syntax -- &sub(...) is an obsolete idiom; with
# parentheses the two forms behave identically here.
my $config = configure(scalar @ARGV);

# run the main sub routine
main($config);
# Main driver. Reads variants from the configured input handle, normalises
# each input line, builds VariationFeature objects, and either buffers them
# for whole-genome batch processing or prints their consequences immediately.
# Takes the configured $config hash produced by configure().
sub main {
    my $config = shift;

    debug("Starting...") unless defined $config->{quiet};

    my ($include_regions, $transcript_cache);

    # scan file if requested
    $include_regions = &scan_file($config) if defined($config->{scan});

    # build transcript cache upfront if requested
    $transcript_cache = &cache_transcripts($config, $include_regions) if defined($config->{upfront});

    # create a hash to hold slices so we don't get the same one twice
    my %slice_cache = ();

    # load slices from the transcript cache if we have it
    # saves us fetching them again
    %slice_cache = %{&build_slice_cache($config, $transcript_cache)} if defined($transcript_cache);

    my @new_vfs;
    my %vf_hash;

    my $line_number = 0;
    my ($vf_count, $total_vf_count);
    my $in_file_handle = $config->{in_file_handle};

    # read the file
    while(<$in_file_handle>) {
        chomp;

        $line_number++;

        # header line?
        next if /^\#/;

        # some lines (pileup) may actually parse out into more than one variant
        foreach my $sub_line(@{&parse_line($config, $_)}) {

            # get the sub-line into named variables
            my ($chr, $start, $end, $allele_string, $strand, $var_name) = @{$sub_line};

            next if defined($config->{chr}) && !$config->{chr}->{$chr};

            # non-variant line from VCF
            next if $chr eq 'non-variant';

            # fix inputs: strip "chr" prefixes, map mito naming, normalise
            # strand to "1"/"-1" and uppercase the alleles
            $chr =~ s/chr//ig unless $chr =~ /^chromosome$/i;
            $chr = 'MT' if $chr eq 'M';
            $strand = ($strand =~ /\-/ ? "-1" : "1");
            $allele_string =~ tr/acgt/ACGT/;

            # sanity checks
            unless($start =~ /^\d+$/ && $end =~ /^\d+$/) {
                warn("WARNING: Start $start or end $end coordinate invalid on line $line_number\n") unless defined $config->{quiet};
                next;
            }

            unless($allele_string =~ /([ACGT-]+\/*)+/) {
                warn("WARNING: Invalid allele string $allele_string on line $line_number\n") unless defined $config->{quiet};
                next;
            }

            # now get the slice
            my $slice;

            # don't get slices if we're using cache
            # we can steal them from transcript objects later
            if((!defined($config->{cache}) && !defined($config->{whole_genome})) || defined($config->{check_ref})) {

                # check if we have fetched this slice already
                if(defined $slice_cache{$chr}) {
                    $slice = $slice_cache{$chr};
                }

                # if not create a new one
                else {
                    $slice = &get_slice($config, $chr);

                    # if failed, warn and skip this line
                    if(!defined($slice)) {
                        warn("WARNING: Could not fetch slice named $chr on line $line_number\n") unless defined $config->{quiet};
                        next;
                    }

                    # store the hash
                    $slice_cache{$chr} = $slice;
                }
            }

            # check reference allele if requested
            if(defined $config->{check_ref}) {
                my $ref_allele = (split /\//, $allele_string)[0];

                my $ok = 0;
                my $slice_ref_allele;

                # insertion, therefore no ref allele to check
                if($ref_allele eq '-') {
                    $ok = 1;
                }
                else {
                    my $slice_ref = $slice->sub_Slice($start, $end, $strand);

                    if(!defined($slice_ref)) {
                        warn "WARNING: Could not fetch sub-slice from $start\-$end\($strand\) on line $line_number" unless defined $config->{quiet};
                    }
                    else {
                        $slice_ref_allele = $slice_ref->seq;
                        $ok = ($slice_ref_allele eq $ref_allele ? 1 : 0);
                    }
                }

                if(!$ok) {
                    warn
                        "WARNING: Specified reference allele $ref_allele ",
                        "does not match Ensembl reference allele",
                        ($slice_ref_allele ? " $slice_ref_allele" : ""),
                        " on line $line_number" unless defined $config->{quiet};
                    next;
                }
            }

            # create a new VariationFeature object
            my $new_vf = Bio::EnsEMBL::Variation::VariationFeature->new(
                -start          => $start,
                -end            => $end,
                -slice          => $slice,           # the variation must be attached to a slice
                -allele_string  => $allele_string,
                -strand         => $strand,
                -map_weight     => 1,
                -adaptor        => $config->{vfa},   # we must attach a variation feature adaptor
                -variation_name => (defined $var_name ? $var_name : $chr.'_'.$start.'_'.$allele_string),
            );

            if(defined $config->{whole_genome}) {
                # buffer variants chunked by position for batch processing
                push @{$vf_hash{$chr}{int($start / $config->{chunk_size})}{$start}}, $new_vf;
                $vf_count++;
                $total_vf_count++;

                # buffer full: process and flush it
                if($vf_count == $config->{buffer_size}) {
                    debug("Read $vf_count variants into buffer") unless defined($config->{quiet});

                    # FIX: restored "&regions_from_hash" -- the source had been
                    # corrupted to "(R)ions_from_hash" by HTML-entity mangling
                    # of the "&reg" prefix.
                    $include_regions ||= &regions_from_hash($config, \%vf_hash);

                    &check_existing_hash($config, \%vf_hash) if defined($config->{check_existing});
                    &whole_genome_fetch($config, \%vf_hash, $transcript_cache, $include_regions);

                    debug("Processed $total_vf_count total variants") unless defined($config->{quiet});

                    undef $include_regions unless defined($config->{scan});
                    %vf_hash = ();
                    $vf_count = 0;
                }
            }
            else {
                &print_consequences($config, [$new_vf]);
                $vf_count++;
                $total_vf_count++;
                debug("Processed $vf_count variants") if $vf_count =~ /0$/ && defined($config->{verbose});
            }
        }
    }

    # if in whole-genome mode, finish off the rest of the buffer
    if(defined $config->{whole_genome} && %vf_hash) {
        debug("Read $vf_count variants into buffer") unless defined($config->{quiet});

        # FIX: same "&reg" entity-mangling repair as above.
        $include_regions ||= &regions_from_hash($config, \%vf_hash);

        &check_existing_hash($config, \%vf_hash) if defined($config->{check_existing});
        &whole_genome_fetch($config, \%vf_hash, $transcript_cache, $include_regions);
    }

    debug("Executed ", defined($Bio::EnsEMBL::DBSQL::StatementHandle::count_queries) ? $Bio::EnsEMBL::DBSQL::StatementHandle::count_queries : 'unknown number of', " SQL statements") if defined($config->{count_queries}) && !defined($config->{quiet});

    debug("Finished!") unless defined $config->{quiet};
}
# Takes a listref of VariationFeature objects and prints one output row per
# consequence: regulatory-feature and motif-feature rows (when --regulatory),
# then one row per alternate TranscriptVariationAllele (or a single summary /
# most-severe row). Writes via print_line() to the configured output handle.
sub print_consequences {
    my $config = shift;
    my $vfs = shift;

    my $out_file_handle = $config->{out_file_handle};

    # method name for consequence terms (e.g. "SO_term", "display_term")
    my $term_method = $config->{terms}.'_term';

    my ($vf_count, $vf_counter);
    $vf_count = scalar @$vfs;

    foreach my $new_vf(@$vfs) {
        &progress($config, $vf_counter++, $vf_count) unless $vf_count == 1;

        # find any co-located existing VFs
        my $existing_vf = $new_vf->{existing};
        $existing_vf ||= &find_existing($config, $new_vf) if defined $config->{check_existing};

        # initiate line hash for this variation; per-feature fields are
        # filled in (and overwritten) below before each print_line() call
        my $line = {
            Uploaded_variation  => $new_vf->variation_name,
            Location            => $new_vf->seq_region_name.':'.&format_coords($new_vf->start, $new_vf->end),
            Existing_variation  => $existing_vf,
            Extra               => {},
        };

        # force empty hash into object's transcript_variations if undefined from whole_genome_fetch
        # this will stop the API trying to go off and fill it again
        $new_vf->{transcript_variations} ||= {} if defined $config->{whole_genome};

        # regulatory stuff
        if(!defined $config->{coding_only} && defined $config->{regulatory}) {

            for my $rfv (@{ $new_vf->get_all_RegulatoryFeatureVariations }) {

                my $rf = $rfv->regulatory_feature;

                $line->{Feature_type}   = 'RegulatoryFeature';
                $line->{Feature}        = $rf->stable_id;

                # this currently always returns 'RegulatoryFeature', so we ignore it for now
                #$line->{Extra}->{REG_FEAT_TYPE} = $rf->feature_type->name;

                for my $rfva (@{ $rfv->get_all_alternate_RegulatoryFeatureVariationAlleles }) {

                    $line->{Allele}         = $rfva->variation_feature_seq;
                    $line->{Consequence}    = join ',',
                        map { $_->$term_method || $_->display_term }
                            @{ $rfva->get_all_OverlapConsequences };

                    print_line($line);
                }
            }

            for my $mfv (@{ $new_vf->get_all_MotifFeatureVariations }) {

                my $mf = $mfv->motif_feature;

                $line->{Feature_type}   = 'MotifFeature';
                $line->{Feature}        = $mf->binding_matrix->name;

                for my $mfva (@{ $mfv->get_all_alternate_MotifFeatureVariationAlleles }) {

                    # NOTE(review): the trailing comma below joins the two
                    # statements with the comma operator; behaviour is the
                    # same as two statements, but it looks accidental.
                    $line->{Extra}->{MATRIX} = $mf->binding_matrix->description.'_'.$mf->display_label,
                    $line->{Extra}->{MATRIX} =~ s/\s+/\_/g;

                    my $high_inf_pos = $mfva->in_informative_position;

                    if (defined $high_inf_pos) {
                        $line->{Extra}->{HIGH_INF_POS} = ($high_inf_pos ? 'Y' : 'N');
                    }

                    $line->{Allele}         = $mfva->variation_feature_seq;
                    $line->{Consequence}    = join ',',
                        map { $_->$term_method || $_->display_term }
                            @{ $mfva->get_all_OverlapConsequences };

                    print_line($line);
                }
            }
        }

        # get TVs
        my $tvs = $new_vf->get_all_TranscriptVariations;

        # no TVs (intergenic) or only most severe
        if(!@$tvs || defined($config->{most_severe}) || defined($config->{summary})) {
            if(defined($config->{summary})) {
                $line->{Consequence} = join ",", @{$new_vf->consequence_type($config->{terms}) || $new_vf->consequence_type};
            }
            else {
                $line->{Consequence} = $new_vf->display_consequence($config->{terms}) || $new_vf->display_consequence;
            }

            &print_line($line);
        }

        else {
            foreach my $tv(@$tvs) {

                next if(defined $config->{coding_only} && !($tv->affects_transcript));

                my $t = $tv->transcript;

                $line->{Feature_type}       = 'Transcript';
                $line->{Feature}            = $t->stable_id if defined $t;
                $line->{cDNA_position}      = &format_coords($tv->cdna_start, $tv->cdna_end);
                $line->{CDS_position}       = &format_coords($tv->cds_start, $tv->cds_end);
                $line->{Protein_position}   = &format_coords($tv->translation_start, $tv->translation_end);

                # get gene (prefer the id cached on the transcript, fall back to a DB fetch)
                my $gene;

                if(defined($config->{gene})) {
                    $line->{Gene} = $tv->transcript->{_gene_stable_id};

                    if(!defined($line->{Gene})) {
                        $gene = $config->{ga}->fetch_by_transcript_stable_id($t->stable_id);
                        $line->{Gene}= $gene->stable_id;
                    }
                }

                foreach my $tva(@{$tv->get_all_alternate_TranscriptVariationAlleles}) {

                    # basic stuff
                    $line->{Allele}         = $tva->variation_feature_seq;
                    $line->{Amino_acids}    = $tva->pep_allele_string;
                    $line->{Codons}         = $tva->display_codon_allele_string;
                    $line->{Consequence}    = join ",", map {$_->$term_method || $_->display_term} @{$tva->get_all_OverlapConsequences};

                    # HGNC
                    if(defined $config->{hgnc}) {
                        my $hgnc;
                        $hgnc = $tv->transcript->{_gene_hgnc};

                        if(!defined($hgnc)) {
                            if(!defined($gene)) {
                                $gene = $config->{ga}->fetch_by_transcript_stable_id($tv->transcript->stable_id);
                            }

                            my @entries = grep {$_->database eq 'HGNC'} @{$gene->get_all_DBEntries()};
                            if(scalar @entries) {
                                $hgnc = $entries[0]->display_id;
                            }
                        }

                        # NOTE(review): $hgnc may still be undef here, which
                        # makes this eq comparison warn under "use warnings".
                        $hgnc = undef if $hgnc eq '-';

                        $line->{Extra}->{HGNC} = $hgnc if defined($hgnc);
                    }

                    # protein ID
                    if(defined $config->{protein} && $t->translation) {
                        $line->{Extra}->{ENSP} = $t->translation->stable_id;
                    }

                    # HGVS
                    if(defined $config->{hgvs}) {
                        $line->{Extra}->{HGVSc} = $tva->hgvs_coding if defined($tva->hgvs_coding);
                        $line->{Extra}->{HGVSp} = $tva->hgvs_protein if defined($tva->hgvs_protein);
                    }

                    # nsSNP predictions: each tool option is p(rediction),
                    # s(core) or b(oth)
                    foreach my $tool (qw(SIFT PolyPhen Condel)) {
                        my $lc_tool = lc($tool);

                        if (my $opt = $config->{$lc_tool}) {
                            my $want_pred   = $opt =~ /^p/i;
                            my $want_score  = $opt =~ /^s/i;
                            my $want_both   = $opt =~ /^b/i;

                            if ($want_both) {
                                $want_pred  = 1;
                                $want_score = 1;
                            }

                            next unless $want_pred || $want_score;

                            my $pred_meth   = $lc_tool.'_prediction';
                            my $score_meth  = $lc_tool.'_score';

                            my $pred = $tva->$pred_meth;

                            if($pred) {

                                if ($want_pred) {
                                    $pred =~ s/\s+/\_/;
                                    $line->{Extra}->{$tool} = $pred;
                                }

                                if ($want_score) {
                                    my $score = $tva->$score_meth;

                                    if(defined $score) {
                                        if($want_pred) {
                                            $line->{Extra}->{$tool} .= "($score)";
                                        }
                                        else {
                                            $line->{Extra}->{$tool} = $score;
                                        }
                                    }
                                }
                            }
                        }
                    }

                    &print_line($line);
                }
            }
        }
    }

    &end_progress($config) unless $vf_count == 1;
}
# Render one output row from the line hash. Flattens the Extra hash into
# "key=value;key=value" form, substitutes '-' for absent columns, writes the
# tab-joined row to the configured output handle, then resets Extra so the
# caller can reuse the same hash for the next row.
sub print_line {
    my $line = shift;

    my $extra = $line->{Extra} || {};
    $line->{Extra} = join ';', map { "$_=$extra->{$_}" } keys %$extra;

    my $fh = $config->{out_file_handle};
    my $output = join "\t", map { $line->{$_} || '-' } @OUTPUT_COLS;
    print $fh "$output\n";

    # clear out the Extra column for the next line
    $line->{Extra} = {};
}
# Sets up the configuration hash that is used throughout the script: parses
# command-line options (optionally merged with a config file), validates
# them, applies defaults, connects to the databases, resolves the cache
# directory and opens the input/output file handles. Takes the number of
# command-line arguments; returns the completed $config hashref. Dies on
# invalid or conflicting options.
sub configure {
    my $args = shift;

    my $config = {};

    GetOptions(
        $config,
        'help',                    # displays help message

        # input options,
        'config=s',                # config file name
        'input_file=s',            # input file name
        'format=s',                # input file format

        # DB options
        'species=s',               # species e.g. human, homo_sapiens
        'registry=s',              # registry file
        'host=s',                  # database host
        'port=s',                  # database port
        'user=s',                  # database user name
        'password=s',              # database password
        'db_version=i',            # Ensembl database version to use e.g. 62
        'genomes',                 # automatically sets DB params for e!Genomes
        #'no_disconnect',          # disables disconnect_when_inactive

        # runtime options
        'most_severe',             # only return most severe consequence
        'summary',                 # only return one line per variation with all consequence types
        'buffer_size=i',           # number of variations to read in before analysis
        'chunk_size=s',            # size in bases of "chunks" used in internal hash structure
        'check_ref',               # check supplied reference allele against DB
        'check_existing',          # find existing co-located variations
        'check_alleles',           # only attribute co-located if alleles are the same
        'failed=i',                # include failed variations when finding existing
        'no_whole_genome',         # disables now default whole-genome mode
        'whole_genome',            # proxy for whole genome mode - now just warns user
        'gp',                      # read coords from GP part of INFO column in VCF (probably only relevant to 1KG)
        'chr=s',                   # analyse only these chromosomes, e.g. 1-5,10,MT

        # verbosity options
        'verbose',                 # print out a bit more info while running
        'quiet',                   # print nothing to STDOUT (unless using -o stdout)
        'no_progress',             # don't display progress bars

        # output options
        'output_file=s',           # output file name
        'force_overwrite',         # force overwrite of output file if already exists
        'terms=s',                 # consequence terms to use e.g. NCBI, SO
        'coding_only',             # only return results for consequences in coding regions
        'protein',                 # add e! protein ID to extra column
        'hgnc',                    # add HGNC gene ID to extra column
        'hgvs',                    # add HGVS names to extra column
        'sift=s',                  # SIFT predictions
        'polyphen=s',              # PolyPhen predictions
        'condel=s',                # Condel predictions
        'gene',                    # force gene column to be populated (disabled by default, enabled when using cache)
        'regulatory',              # enable regulatory stuff

        # cache stuff
        'cache',                   # use cache
        'write_cache',             # enables writing to the cache
        'build=s',                 # builds cache from DB from scratch; arg is either all (all top-level seqs) or a list of chrs
        'scan',                    # scan the whole input file at the beginning to get regions
        'upfront',                 # fetch transcripts and prefetch upfront before analysis starts (requires scan)
        'prefetch',                # prefetch exons, translation, introns, codon table etc for each transcript
        'strip',                   # strips adaptors etc from objects before caching them
        'rebuild=s',               # rebuilds cache by reading in existing then redumping - probably don't need to use this any more
        'dir=s',                   # dir where cache is found (defaults to $HOME/.vep/)
        'cache_region_size=i',     # size of region in bases for each cache file
        'no_slice_cache',          # tell API not to cache features on slice
        'standalone',              # standalone mode uses minimal set of modules installed in same dir, no DB connection
        'skip_db_check',           # don't compare DB parameters with cached
        'compress=s',              # by default we use zcat to decompress; user may want to specify gzcat or "gzip -dc"

        # debug
        'cluck',                   # these two need some mods to Bio::EnsEMBL::DBSQL::StatementHandle to work. Clucks callback trace and SQL
        'count_queries',           # counts SQL queries executed
    );

    # print usage message if requested or no args supplied
    if(defined($config->{help}) || !$args) {
        &usage;
        exit(0);
    }

    # config file? Lines are "key value" or "key=value"; command-line
    # options take precedence over the file.
    # NOTE(review): bareword 2-arg open without an explicit mode; a lexical
    # handle and 3-arg open would be safer.
    if(defined $config->{config}) {

        open CONFIG, $config->{config} or die "ERROR: Could not open config file \"".$config->{config}."\"\n";

        while(<CONFIG>) {
            next if /^\#/;
            my ($key, $value) = split /\s+|\=/;
            $key =~ s/^\-//g;
            $config->{$key} = $value unless defined $config->{$key};
        }

        close CONFIG;
    }

    # can't be both quiet and verbose
    die "ERROR: Can't be both quiet and verbose!" if defined($config->{quiet}) && defined($config->{verbose});

    # check file format
    if(defined $config->{format}) {
        die "ERROR: Unrecognised input format specified \"".$config->{format}."\"\n" unless $config->{format} =~ /pileup|vcf|guess/i;
    }

    # connection settings for Ensembl Genomes
    if($config->{genomes}) {
        $config->{host} ||= 'mysql.ebi.ac.uk';
        $config->{port} ||= 4157;
    }

    # connection settings for main Ensembl
    else {
        $config->{species} ||= "homo_sapiens";
        $config->{host}    ||= 'ensembldb.ensembl.org';
        $config->{port}    ||= 5306;
    }

    # output term: normalise to the method prefix used when looking up
    # consequence term methods ("display", "SO", "NCBI")
    if(defined $config->{terms}) {
        die "ERROR: Unrecognised consequence term type specified \"".$config->{terms}."\" - must be one of ensembl, so, ncbi\n" unless $config->{terms} =~ /ensembl|display|so|ncbi/i;
        if($config->{terms} =~ /ensembl|display/i) {
            $config->{terms} = 'display';
        }
        else {
            $config->{terms} = uc($config->{terms});
        }
    }

    # check nsSNP tools
    foreach my $tool(grep {defined $config->{lc($_)}} qw(SIFT PolyPhen Condel)) {
        die "ERROR: Unrecognised option for $tool \"", $config->{lc($tool)}, "\" - must be one of p (prediction), s (score) or b (both)\n" unless $config->{lc($tool)} =~ /^(s|p|b)/;
        die "ERROR: $tool not available for this species\n" unless $config->{species} =~ /human|homo/i;
        die "ERROR: $tool not available in standalone mode\n" if defined($config->{standalone});
    }

    # force quiet if outputting to STDOUT
    if(defined($config->{output_file}) && $config->{output_file} =~ /stdout/i) {
        delete $config->{verbose} if defined($config->{verbose});
        $config->{quiet} = 1;
    }

    # summarise options if verbose
    if(defined $config->{verbose}) {
        my $header =<<INTRO;
#----------------------------------#
# ENSEMBL VARIANT EFFECT PREDICTOR #
#----------------------------------#

version $VERSION

By Will McLaren (wm2\@ebi.ac.uk)

Configuration options:

INTRO
        print $header;

        my $max_length = (sort {$a <=> $b} map {length($_)} keys %$config)[-1];

        foreach my $key(sort keys %$config) {
            print $key.(' ' x (($max_length - length($key)) + 4)).$config->{$key}."\n";
        }

        print "\n".("-" x 20)."\n\n";
    }

    # set defaults
    $config->{user}              ||= 'anonymous';
    $config->{buffer_size}       ||= 5000;
    $config->{chunk_size}        ||= '50kb';
    $config->{output_file}       ||= "variant_effect_output.txt";
    $config->{tmpdir}            ||= '/tmp';
    $config->{format}            ||= 'guess';
    $config->{terms}             ||= 'display';
    $config->{gene}              ||= 1 unless defined($config->{whole_genome});
    $config->{cache_region_size} ||= 1000000;
    $config->{dir}               ||= join '/', ($ENV{'HOME'}, '.vep');
    $config->{compress}          ||= 'zcat';

    # warn users still using whole_genome flag
    if(defined($config->{whole_genome})) {
        debug("INFO: Whole-genome mode is now the default run-mode for the script. To disable it, use --no_whole_genome") unless defined($config->{quiet});
    }

    $config->{whole_genome}   = 1 unless defined $config->{no_whole_genome};
    $config->{include_failed} = 1 unless defined $config->{include_failed};

    # expand size suffixes: "50kb" -> 50000, "1mb" -> 1000000
    $config->{chunk_size} =~ s/mb?/000000/i;
    $config->{chunk_size} =~ s/kb?/000/i;
    $config->{cache_region_size} =~ s/mb?/000000/i;
    $config->{cache_region_size} =~ s/kb?/000/i;

    # cluck and display executed SQL?
    $Bio::EnsEMBL::DBSQL::StatementHandle::cluck = 1 if defined($config->{cluck});

    # standalone needs cache, can't use HGVS
    if(defined($config->{standalone})) {
        $config->{cache} = 1;

        die("ERROR: Cannot generate HGVS coordinates in standalone mode") if defined($config->{hgvs});

        die("ERROR: Cannot analyse regulatory features in standalone mode") if defined($config->{regulatory});
    }

    # no_slice_cache, prefetch and whole_genome have to be on to use cache or upfront
    if(defined($config->{cache}) || defined($config->{upfront})) {
        $config->{prefetch} = 1;
        $config->{no_slice_cache} = 1;
        $config->{whole_genome} = 1;
        $config->{strip} = 1;

        # scan should also be on for upfront
        $config->{scan} = 1 if defined($config->{upfront});
    }

    $config->{build} = $config->{rebuild} if defined($config->{rebuild});

    # force options for full build
    if(defined($config->{build})) {
        $config->{prefetch} = 1;
        $config->{gene} = 1;
        $config->{hgnc} = 1;
        $config->{no_slice_cache} = 1;
        $config->{cache} = 1;
        $config->{strip} = 1;
        $config->{write_cache} = 1;
    }

    # connect to databases
    $config->{reg} = &connect_to_dbs($config);

    # complete dir with species name and db_version
    $config->{dir} .= '/'.(
        join '/', (
            defined($config->{standalone}) ? $config->{species} : ($config->{reg}->get_alias($config->{species}) || $config->{species}),
            $config->{db_version} || $config->{reg}->software_version
        )
    );

    # warn user cache directory doesn't exist
    if(!-e $config->{dir}) {

        # if using write_cache
        if(defined($config->{write_cache})) {
            debug("INFO: Cache directory ", $config->{dir}, " not found - it will be created") unless defined($config->{quiet});
        }

        # want to read cache, not found
        elsif(defined($config->{cache})) {
            die("ERROR: Cache directory ", $config->{dir}, " not found");
        }
    }

    # suppress warnings that the FeatureAdpators spit if using no_slice_cache
    Bio::EnsEMBL::Utils::Exception::verbose(1999) if defined($config->{no_slice_cache});

    # get adaptors
    if(defined($config->{cache})) {

        # try and load adaptors from cache
        if(!&load_dumped_adaptor_cache($config)) {
            &get_adaptors($config);
            &dump_adaptor_cache($config) if defined($config->{write_cache});
        }

        # check cached adaptors match DB params
        else {
            my $dbc = $config->{sa}->{dbc};

            my $ok = 1;

            if($dbc->{_host} ne $config->{host}) {

                # ens-livemirror, useastdb and ensembldb should all have identical DBs
                unless(
                    (
                        $dbc->{_host} eq 'ens-livemirror'
                        || $dbc->{_host} eq 'ensembldb.ensembl.org'
                        || $dbc->{_host} eq 'useastdb.ensembl.org'
                    ) && (
                        $config->{host} eq 'ens-livemirror'
                        || $config->{host} eq 'ensembldb.ensembl.org'
                        || $config->{host} eq 'useastdb.ensembl.org'
                    )
                ) {
                    $ok = 0;
                }

                # but we still need to reconnect
                debug("INFO: Defined host ", $config->{host}, " is different from cached ", $dbc->{_host}, " - reconnecting to host") unless defined($config->{quiet});

                &get_adaptors($config);
            }

            if(!$ok) {
                if(defined($config->{skip_db_check})) {
                    debug("INFO: Defined host ", $config->{host}, " is different from cached ", $dbc->{_host}) unless defined($config->{quiet});
                }
                else {
                    die "ERROR: Defined host ", $config->{host}, " is different from cached ", $dbc->{_host}, ". If you are sure this is OK, rerun with -skip_db_check flag set";
                }
            }
        }
    }
    else {
        &get_adaptors($config);
        &dump_adaptor_cache($config) if defined($config->{write_cache})
    }

    # get terminal width for progress bars
    unless(defined($config->{quiet})) {
        my $width;

        # module may not be installed
        # NOTE(review): "use" executes at compile time, so this eval does not
        # actually guard against a missing Term::ReadKey; "require" plus
        # import would. Left as-is pending confirmation.
        eval {
            use Term::ReadKey;
        };

        if(!$@) {
            my ($w, $h);

            # module may be installed, but e.g. no controlling terminal
            eval {
                ($w, $h) = GetTerminalSize();
            };

            $width = $w if defined $w;
        }

        $width ||= 60;
        $width -= 12;
        $config->{terminal_width} = $width;
    }

    # jump out to build cache if requested
    if(defined($config->{build})) {

        # build the cache
        debug("Building cache for ".$config->{species}) unless defined($config->{quiet});
        &build_full_cache($config, $config->{rebuild});

        # exit script
        debug("Finished building cache") unless defined($config->{quiet});
        exit(0);
    }

    # warn user DB will be used for SIFT/PolyPhen/Condel
    if(defined($config->{cache}) && (defined($config->{sift}) || defined($config->{polyphen}) || defined($config->{condel}) || defined($config->{hgvs}) || defined($config->{regulatory}))) {
        debug("INFO: Database will be accessed for SIFT/PolyPhen/Condel, HGVS and regulatory features") unless defined($config->{quiet});
    }

    # get list of chrs if supplied: expand "1-5,10,MT" into a lookup hash
    if(defined($config->{chr})) {
        my %chrs;

        foreach my $val(split /\,/, $config->{chr}) {
            my @nnn = split /\-/, $val;

            foreach my $chr($nnn[0]..$nnn[-1]) {
                $chrs{$chr} = 1;
            }
        }

        $config->{chr} = \%chrs;
    }

    # get input file handle
    $config->{in_file_handle} = &get_in_file_handle($config);

    # configure output file
    $config->{out_file_handle} = &get_out_file_handle($config);

    return $config;
}
# connects to DBs; in standalone mode this just loads registry module
# Takes $config; returns the registry class name ('Bio::EnsEMBL::Registry').
# Unless running standalone, connects either via a registry file
# ($config->{registry}) or directly to a DB server using host/user/pass/port.
sub connect_to_dbs {
my $config = shift;
# get registry
my $reg = 'Bio::EnsEMBL::Registry';
unless(defined($config->{standalone})) {
# load DB options from registry file if given
if(defined($config->{registry})) {
debug("Loading DB config from registry file ", $config->{registry}) unless defined($config->{quiet});
$reg->load_all(
$config->{registry},
$config->{verbose},
undef,
$config->{no_slice_cache}
);
}
# otherwise manually connect to DB server
else {
$reg->load_registry_from_db(
-host => $config->{host},
-user => $config->{user},
-pass => $config->{password},
-port => $config->{port},
-db_version => $config->{db_version},
# only pass species through when it looks like a binomial name
-species => $config->{species} =~ /^[a-z]+\_[a-z]+/i ? $config->{species} : undef,
-verbose => $config->{verbose},
-no_cache => $config->{no_slice_cache},
);
}
# wrapped in eval so failure is tolerated - presumably for older API
# versions lacking this method (TODO confirm)
eval { $reg->set_reconnect_when_lost() };
if(defined($config->{verbose})) {
# get a meta container adaptors to check version
my $core_mca = $reg->get_adaptor($config->{species}, 'core', 'metacontainer');
my $var_mca = $reg->get_adaptor($config->{species}, 'variation', 'metacontainer');
if($core_mca && $var_mca) {
debug(
"Connected to core version ", $core_mca->get_schema_version, " database ",
"and variation version ", $var_mca->get_schema_version, " database"
);
}
}
}
return $reg;
}
# get adaptors from DB
# Populates $config with the variation and core adaptors the script needs.
# Dies if no registry is present, or if the core DB (slice adaptor) is
# unreachable. Falls back to a fake VariationFeatureAdaptor for species
# without a variation DB.
sub get_adaptors {
    my $config = shift;

    die "ERROR: No registry" unless defined $config->{reg};

    my $reg     = $config->{reg};
    my $species = $config->{species};

    # variation DB adaptors
    $config->{vfa} = $reg->get_adaptor($species, 'variation', 'variationfeature');
    $config->{tva} = $reg->get_adaptor($species, 'variation', 'transcriptvariation');

    if(!defined($config->{vfa})) {
        # no variation DB for this species - use a fake adaptor instead
        $config->{vfa} = Bio::EnsEMBL::Variation::DBSQL::VariationFeatureAdaptor->new_fake($species);
    }
    else {
        # honour the include_failed setting where the adaptor supports it
        $config->{vfa}->db->include_failed_variations($config->{include_failed}) if defined($config->{vfa}->db) && $config->{vfa}->db->can('include_failed_variations');
    }

    # core DB adaptors, keyed by the config slot they land in
    my %core_adaptor_of = (
        sa  => 'slice',
        ga  => 'gene',
        ta  => 'transcript',
        mca => 'metacontainer',
        csa => 'coordsystem',
    );
    $config->{$_} = $reg->get_adaptor($species, 'core', $core_adaptor_of{$_}) for keys %core_adaptor_of;

    # cache schema version
    $config->{mca}->get_schema_version if defined $config->{mca};

    # check we got slice adaptor - can't continue without a core DB
    die("ERROR: Could not connect to core database\n") unless defined $config->{sa};
}
# gets file handle for input
# Returns a FileHandle opened on $config->{input_file} (decompressing .gz
# via $config->{compress}), or the string 'STDIN' when no file is given.
# Dies if the file is missing or unreadable.
sub get_in_file_handle {
    my $config = shift;

    # define the filehandle to read input from (direct method call rather
    # than the indirect-object "new FileHandle" syntax)
    my $in_file_handle = FileHandle->new;

    if(defined($config->{input_file})) {

        # check defined input file exists
        die("ERROR: Could not find input file ", $config->{input_file}, "\n") unless -e $config->{input_file};

        # gzipped input is streamed through the configured decompressor
        if($config->{input_file} =~ /\.gz$/){
            $in_file_handle->open($config->{compress}." ". $config->{input_file} . " | " ) or die("ERROR: Could not read from input file ", $config->{input_file}, "\n");
        }
        else {
            # bug fix: the error message previously interpolated the
            # never-set key $config->{in_file}, printing an empty file name
            $in_file_handle->open( $config->{input_file} ) or die("ERROR: Could not read from input file ", $config->{input_file}, "\n");
        }
    }

    # no file specified - try to read data off command line
    else {
        $in_file_handle = 'STDIN';
        debug("Reading input from STDIN (or maybe you forgot to specify an input file?)...") unless defined $config->{quiet};
    }

    return $in_file_handle;
}
# gets file handle for output and adds header
# Opens $config->{output_file} (or STDOUT), refuses to clobber an existing
# file unless --force_overwrite, prints the "##" metadata header plus the
# column header row, and returns the handle.
sub get_out_file_handle {
    my $config = shift;

    # define filehandle to write to (direct method call rather than the
    # indirect-object "new FileHandle" syntax)
    my $out_file_handle = FileHandle->new;

    # check if file exists
    if(-e $config->{output_file} && !defined($config->{force_overwrite})) {
        die("ERROR: Output file ", $config->{output_file}, " already exists. Specify a different output file with --output_file or overwrite existing file with --force_overwrite\n");
    }

    if($config->{output_file} =~ /stdout/i) {
        $out_file_handle = *STDOUT;
    }
    else {
        $out_file_handle->open(">".$config->{output_file}) or die("ERROR: Could not write to output file ", $config->{output_file}, "\n");
    }

    # make header
    my $time = &get_time;

    # bug fix: "my $x = EXPR if COND" has undefined behaviour in Perl -
    # declare first, then assign conditionally
    my $db_string;
    $db_string = $config->{mca}->dbc->dbname." on ".$config->{mca}->dbc->host if defined $config->{mca};
    $db_string .= "\n## Using cache in ".$config->{dir} if defined($config->{cache});

    my $version_string =
        "Using API version ".$config->{reg}->software_version.
        ", DB version ".(defined $config->{mca} && $config->{mca}->get_schema_version ? $config->{mca}->get_schema_version : '?');

    # typo fix in emitted header: "identifer" -> "identifier"
    my $header =<<HEAD;
## ENSEMBL VARIANT EFFECT PREDICTOR v$VERSION
## Output produced at $time
## Connected to $db_string
## $version_string
## Extra column keys:
## HGNC : HGNC gene identifier
## ENSP : Ensembl protein identifier
## HGVSc : HGVS coding sequence name
## HGVSp : HGVS protein sequence name
## SIFT : SIFT prediction
## PolyPhen : PolyPhen prediction
## Condel : Condel SIFT/PolyPhen consensus prediction
## MATRIX : The source and identifier of a transcription factor binding profile aligned at this position
## HIGH_INF_POS : A flag indicating if the variant falls in a high information position of a transcription factor binding profile
HEAD

    # add headers
    print $out_file_handle $header;

    # add column headers
    print $out_file_handle '#', (join "\t", @OUTPUT_COLS);
    print $out_file_handle "\n";

    return $out_file_handle;
}
# parses a line of input
# Args: $config, $line (one raw input line).
# Returns an arrayref of arrayrefs, each of the form
#   [ chr, start, end, allele_string, strand, var_name ]
# ('non-variant' placeholder rows are emitted for lines carrying no variant).
# Format is taken from $config->{format} or sniffed from the columns:
# pileup first, then VCF, falling back to the native whitespace/comma format.
sub parse_line {
    my $config = shift;
    my $line = shift;

    # bug fix: split the $line argument, not $_ - the old code only worked
    # because callers happened to leave the raw line in $_
    my @data = (split /\s+/, $line);

    # pileup: chr1 60 T A
    if(
       ($config->{format} =~ /pileup/i) ||
       (
            $data[0] =~ /(chr)?\w+/ &&
            $data[1] =~ /\d+/ &&
            $data[2] =~ /^[ACGTN-]+$/ &&
            $data[3] =~ /^[ACGTNRYSWKM*+\/-]+$/
        )
    ) {
        my @return = ();

        if($data[2] ne "*"){
            my $var;

            # bug fix: character class was [A|C|G|T], which also matched '|'
            if($data[3] =~ /^[ACGT]$/) {
                $var = $data[3];
            }
            else {
                # IUPAC ambiguity code: expand, then strip the ref base to
                # leave the alt base(s)
                ($var = unambiguity_code($data[3])) =~ s/$data[2]//ig;
            }
            if(length($var)==1){
                push @return, [$data[0], $data[1], $data[1], $data[2]."/".$var, 1, undef];
            }
            else{
                # het non-ref: emit one variant per remaining alt base
                for my $nt(split //,$var){
                    push @return, [$data[0], $data[1], $data[1], $data[2]."/".$nt, 1, undef];
                }
            }
        }
        else{ #indel
            my @genotype=split /\//,$data[3];
            foreach my $allele(@genotype){
                if(substr($allele,0,1) eq "+") { #ins
                    push @return, [$data[0], $data[1]+1, $data[1], "-/".substr($allele,1), 1, undef];
                }
                elsif(substr($allele,0,1) eq "-"){ #del
                    # NOTE(review): end coord uses length of the *whole*
                    # genotype string minus 4, which assumes a "-NN/-NN"
                    # shaped field - verify against the pileup producer
                    push @return, [$data[0], $data[1], $data[1]+length($data[3])-4, substr($allele,1)."/-", 1, undef];
                }
                elsif($allele ne "*"){
                    warn("WARNING: invalid pileup indel genotype: $line\n") unless defined $config->{quiet};
                    push @return, ['non-variant'];
                }
            }
        }
        return \@return;
    }

    # VCF: 20 14370 rs6054257 G A 29 0 NS=58;DP=258;AF=0.786;DB;H2 GT:GQ:DP:HQ
    elsif(
        ($config->{format} =~ /vcf/i) ||
        (
            $data[0] =~ /(chr)?\w+/ &&
            $data[1] =~ /\d+/ &&
            $data[3] =~ /^[ACGTN-]+$/ &&
            $data[4] =~ /^([\.ACGTN-]+\,?)+$/
        )
    ) {

        # non-variant line in VCF, return dummy line
        if($data[4] eq '.') {
            return [['non-variant']];
        }

        # get relevant data
        my ($chr, $start, $end, $ref, $alt) = ($data[0], $data[1], $data[1], $data[3], $data[4]);

        # --gp mode: take coords from the GP key in the INFO column instead
        if(defined $config->{gp}) {
            $chr = undef;
            $start = undef;

            foreach my $pair(split /\;/, $data[7]) {
                my ($key, $value) = split /\=/, $pair;
                if($key eq 'GP') {
                    ($chr,$start) = split /\:/, $value;
                    $end = $start;
                }
            }

            unless(defined($chr) and defined($start)) {
                warn "No GP flag found in INFO column" unless defined $config->{quiet};
                return [['non-variant']];
            }
        }

        # adjust end coord to cover the full REF allele
        $end += (length($ref) - 1);

        # find out if any of the alt alleles make this an insertion or a deletion
        my ($is_indel, $is_sub, $ins_count, $total_count);
        foreach my $alt_allele(split /\,/, $alt) {
            $is_indel = 1 if $alt_allele =~ /D|I/;
            $is_indel = 1 if length($alt_allele) != length($ref);
            $is_sub = 1 if length($alt_allele) == length($ref);
            $ins_count++ if length($alt_allele) > length($ref);
            $total_count++;
        }

        # multiple alt alleles?
        if($alt =~ /\,/) {
            if($is_indel) {
                my @alts;

                # pre-VCF4 D/I style indels
                if($alt =~ /D|I/) {
                    foreach my $alt_allele(split /\,/, $alt) {
                        # deletion (VCF <4)
                        if($alt_allele =~ /D/) {
                            push @alts, '-';
                        }
                        elsif($alt_allele =~ /I/) {
                            $alt_allele =~ s/^I//g;
                            push @alts, $alt_allele;
                        }
                    }
                }
                else {
                    # VCF 4+: strip the shared leading base from ref and alts
                    $ref = substr($ref, 1);
                    $ref = '-' if $ref eq '';
                    $start++;

                    foreach my $alt_allele(split /\,/, $alt) {
                        $alt_allele = substr($alt_allele, 1);
                        $alt_allele = '-' if $alt_allele eq '';
                        push @alts, $alt_allele;
                    }
                }

                $alt = join "/", @alts;
            }
            else {
                # for substitutions we just need to replace ',' with '/' in $alt
                # bug fix: substitution was missing /g, so three or more alt
                # alleles produced a malformed allele string like "A/C,G"
                $alt =~ s/\,/\//g;
            }
        }

        else {
            if($is_indel) {
                # deletion (VCF <4)
                if($alt =~ /D/) {
                    my $num_deleted = $alt;
                    $num_deleted =~ s/\D+//g;
                    $end += $num_deleted - 1;
                    $alt = "-";
                    # pad the ref out to the deleted length with Ns
                    $ref .= ("N" x ($num_deleted - 1)) unless length($ref) > 1;
                }

                # insertion (VCF <4)
                elsif($alt =~ /I/) {
                    $ref = '-';
                    $alt =~ s/^I//g;
                    $start++;
                }

                # insertion or deletion (VCF 4+)
                else {
                    # chop off first base
                    $ref = substr($ref, 1);
                    $alt = substr($alt, 1);
                    $start++;

                    if($ref eq '') {
                        # make ref '-' if no ref allele left
                        $ref = '-';
                    }

                    # make alt '-' if no alt allele left
                    $alt = '-' if $alt eq '';
                }
            }
        }

        return [[$chr, $start, $end, $ref."/".$alt, 1, ($data[2] eq '.' ? undef : $data[2])]];
    }

    # our format
    else {
        # we allow commas as delimiter so re-split
        # (bug fix: previously re-split $_ rather than $line)
        @data = (split /\s+|\,/, $line);
        return [\@data];
    }
}
# takes a hash of VFs and fetches consequences by pre-fetching overlapping transcripts
# from database and/or cache
# Processes one chromosome at a time: loads transcripts (disk cache first,
# DB as fallback), builds TranscriptVariation objects pairing each transcript
# with its overlapping VFs, prints consequences, then frees that chr's data.
# $vf_hash layout: chr -> chunk -> pos -> [VariationFeatures].
sub whole_genome_fetch {
my $config = shift;
my $vf_hash = shift;
my $transcript_cache = shift;
my $include_regions = shift;
# pad transcript search space by the max up/downstream distance
my $up_down_size = MAX_DISTANCE_FROM_TRANSCRIPT;
my (%vf_done, @finished_vfs, %seen_trs);
# convert regions to cached sizes
# NOTE(review): "my $x = EXPR if COND" has undefined behaviour in Perl -
# should be declared first, then conditionally assigned
my $converted_regions = &convert_regions($config, $include_regions) if defined($config->{cache});
foreach my $chr(sort {$a <=> $b} keys %$vf_hash) {
if(defined($config->{standalone}) && !-e $config->{dir}.'/'.$chr) {
debug("No cache found for chromsome $chr") unless defined($config->{quiet});
next;
}
my $slice_cache;
debug("Analyzing chromosome $chr") unless defined($config->{quiet});
my $use_regions = defined($config->{cache}) ? $converted_regions : $include_regions;
my ($count_from_db, $count_from_cache, $count_duplicates) = (0, 0, 0);
if(!defined($transcript_cache->{$chr})) {
# no regions defined (this probably shouldn't happen)
if(!defined($use_regions->{$chr})) {
# spoof regions covering whole chromosome
my $start = 1;
my $end = $config->{cache_region_size};
my $slice = &get_slice($config, $chr);
if(defined($slice)) {
while($start < $slice->end) {
push @{$use_regions->{$chr}}, $start.'-'.$end;
$start += $config->{cache_region_size};
$end += $config->{cache_region_size};
}
}
}
# check we have defined regions
if(defined($use_regions->{$chr})) {
my $region_count = scalar @{$use_regions->{$chr}};
my $counter;
debug("Reading transcript data from cache and/or database") unless defined($config->{quiet});
# NOTE(review): comparator mixes start-of-$a with END-of-$b
# ((split)[0] vs (split)[1]) - looks like a typo for [0] <=> [0];
# confirm intended sort order before changing
foreach my $region(sort {(split /\-/, $a)[0] <=> (split /\-/, $b)[1]} @{$use_regions->{$chr}}) {
&progress($config, $counter++, $region_count);
# skip regions beyond the end of the chr
next if defined($slice_cache->{$chr}) && (split /\-/, $region)[0] > $slice_cache->{$chr}->length;
# force quiet so other methods don't mess up the progress bar
my $quiet = $config->{quiet};
$config->{quiet} = 1;
# try and load cache from disk if using cache
my $tmp_cache;
if(defined($config->{cache})) {
$tmp_cache = &load_dumped_transcript_cache($config, $chr, $region);
$count_from_cache += scalar @{$tmp_cache->{$chr}} if defined($tmp_cache->{$chr});
}
# no cache found on disk or not using cache
if(!defined($tmp_cache->{$chr})) {
if(defined($config->{standalone})) {
debug("WARNING: Could not find cache for $chr\:$region") unless defined($config->{quiet});
next;
}
# spoof temporary region hash
my $tmp_hash;
push @{$tmp_hash->{$chr}}, $region;
$tmp_cache = &cache_transcripts($config, $tmp_hash);
# make it an empty arrayref that gets cached
# so we don't get confused and reload next time round
$tmp_cache->{$chr} ||= [];
$count_from_db += scalar @{$tmp_cache->{$chr}};
# dump to disk if writing to cache
&dump_transcript_cache($config, $tmp_cache, $chr, $region) if defined($config->{write_cache});
}
# add loaded transcripts to main cache
if(defined($tmp_cache->{$chr})) {
while(my $tr = shift @{$tmp_cache->{$chr}}) {
# track already added transcripts by dbID
my $dbID = $tr->dbID;
if($seen_trs{$dbID}) {
$count_duplicates++;
next;
}
$seen_trs{$dbID} = 1;
push @{$transcript_cache->{$chr}}, $tr;
}
}
undef $tmp_cache;
# restore quiet status
$config->{quiet} = $quiet;
# build slice cache
$slice_cache = &build_slice_cache($config, $transcript_cache) unless defined($slice_cache->{$chr});
}
&end_progress($config);
}
}
# skip chr if no cache
next unless defined($transcript_cache->{$chr});
# copy slice from transcript to slice cache
$slice_cache = &build_slice_cache($config, $transcript_cache) unless defined($slice_cache->{$chr});
my $tr_count = scalar @{$transcript_cache->{$chr}};
debug("Retrieved $tr_count transcripts ($count_from_cache cached, $count_from_db DB, $count_duplicates duplicates)") unless defined($config->{quiet});
debug("Analyzing variants") unless defined($config->{quiet});
my $tr_counter;
while($tr_counter < $tr_count) {
&progress($config, $tr_counter, $tr_count);
my $tr = $transcript_cache->{$chr}->[$tr_counter++];
# do each overlapping VF
my $s = $tr->start - $up_down_size;
my $e = $tr->end + $up_down_size;
# get the chunks this transcript overlaps
my %chunks;
$chunks{$_} = 1 for (int($s/$config->{chunk_size})..int($e/$config->{chunk_size}));
map {delete $chunks{$_} unless defined($vf_hash->{$chr}{$_})} keys %chunks;
foreach my $chunk(keys %chunks) {
foreach my $pos(grep {$_ >= $s && $_ <= $e} keys %{$vf_hash->{$chr}{$chunk}}) {
foreach my $vf(@{$vf_hash->{$chr}{$chunk}{$pos}}) {
# pinch slice from slice cache if we don't already have it
$_->{slice} ||= $slice_cache->{$chr} for @{$vf_hash->{$chr}{$chunk}{$pos}};
my $tv = Bio::EnsEMBL::Variation::TranscriptVariation->new(
-transcript => $tr,
-variation_feature => $vf,
-adaptor => $config->{tva},
-no_ref_check => 1
);
# prefetching stuff here prevents doing loads at the
# end and makes progress reporting more useful
$tv->_prefetch_for_vep;
$vf->add_TranscriptVariation($tv);
}
}
}
}
# sort results into @finished_vfs array
foreach my $chunk(sort {$a <=> $b} keys %{$vf_hash->{$chr}}) {
foreach my $pos(sort {$a <=> $b} keys %{$vf_hash->{$chr}{$chunk}}) {
# pinch slice from slice cache if we don't already have it
$_->{slice} ||= $slice_cache->{$chr} for @{$vf_hash->{$chr}{$chunk}{$pos}};
# add to final array
push @finished_vfs, @{$vf_hash->{$chr}{$chunk}{$pos}};
}
}
&end_progress($config);
debug("Calculating and writing output") unless defined($config->{quiet});
&print_consequences($config, \@finished_vfs);
# clean hash
delete $vf_hash->{$chr};
delete $transcript_cache->{$chr} if defined($config->{cache});
}
}
# gets existing VFs for a vf_hash
# Annotates every VF in $vf_hash (chr -> chunk -> pos -> [VFs]) with
# $var->{existing}: a comma-joined list of co-located known variant names,
# or '-' when none are found. Known variants come from per-region cache
# files when caching is on, otherwise straight from the DB per chunk.
sub check_existing_hash {
my $config = shift;
my $vf_hash = shift;
my $variation_cache;
debug("Checking for existing variations") unless defined($config->{quiet});
# total chunk count drives the progress bar
my ($chunk_count, $counter);
$chunk_count += scalar keys %{$vf_hash->{$_}} for keys %{$vf_hash};
foreach my $chr(keys %{$vf_hash}) {
my %loaded_regions;
foreach my $chunk(keys %{$vf_hash->{$chr}}) {
&progress($config, $counter++, $chunk_count);
# get the VFs for this chunk
my ($start, $end);
# work out start and end using chunk_size
$start = $config->{chunk_size} * $chunk;
$end = $config->{chunk_size} * ($chunk + 1);
# using cache?
if(defined($config->{cache})) {
my $tmp_regions;
push @{$tmp_regions->{$chr}}, $start.'-'.$end;
my $converted_regions = &convert_regions($config, $tmp_regions);
foreach my $region(@{$converted_regions->{$chr}}) {
# %loaded_regions avoids re-reading a cache file shared by chunks
unless($loaded_regions{$region}) {
my $tmp_cache = &load_dumped_variation_cache($config, $chr, $region);
# load from DB if not found in cache
if(!defined($tmp_cache->{$chr})) {
if(defined($config->{standalone})) {
debug("WARNING: Could not find variation cache for $chr\:$region") unless defined($config->{quiet});
next;
}
$tmp_cache->{$chr} = &get_variations_in_region($config, $chr, $region);
&dump_variation_cache($config, $tmp_cache, $chr, $region) if defined($config->{write_cache});
}
# merge tmp_cache with the main cache
foreach my $key(keys %{$tmp_cache->{$chr}}) {
$variation_cache->{$chr}->{$key} = $tmp_cache->{$chr}->{$key};
delete $tmp_cache->{$chr}->{$key};
}
# clear memory
undef $tmp_cache;
# record this region as fetched
$loaded_regions{$region} = 1;
}
}
}
# no cache, get all variations in region from DB
else {
$variation_cache->{$chr} = &get_variations_in_region($config, $chr, $start.'-'.$end);
}
# now compare retrieved vars with vf_hash
foreach my $pos(keys %{$vf_hash->{$chr}->{$chunk}}) {
foreach my $var(@{$vf_hash->{$chr}->{$chunk}->{$pos}}) {
my @found;
if(defined($variation_cache->{$chr})) {
if(my $existing_vars = $variation_cache->{$chr}->{$pos}) {
foreach my $existing_var(@$existing_vars) {
# keep only known variants matching this one
push @found, $existing_var->[0] unless &is_var_novel($config, $existing_var, $var);
}
}
}
$var->{existing} = join ",", @found;
$var->{existing} ||= '-';
}
}
}
# free this chromosome's known-variant cache before moving on
delete $variation_cache->{$chr};
}
&end_progress($config);
}
# gets a slice from the slice adaptor
# Tries the 'chromosome' coord system first (errors tolerated), then falls
# back to any seq region with that name. Returns the slice, or undef when
# there is no adaptor, no name, or no matching region.
sub get_slice {
    my ($config, $chr) = @_;

    # nothing to do without an adaptor or a name
    return undef unless defined($config->{sa}) && defined($chr);

    my $fetched;
    eval { $fetched = $config->{sa}->fetch_by_region('chromosome', $chr); };

    # if that failed, try to get any seq region
    $fetched = $config->{sa}->fetch_by_region(undef, $chr) unless defined $fetched;

    return $fetched;
}
# METHODS THAT DEAL WITH "REGIONS"
##################################
# scans file to get all slice bits we need
# Reads the whole input once, normalises chromosome names, and collects the
# regions each variant touches ({ chr => ['start-end', ...] }, merged).
# Recycles the input filehandle afterwards so the main pass can re-read.
# bug fix: removed the empty prototype "()" - this sub takes $config as an
# argument, and the prototype (which forbids arguments) only went unnoticed
# because callers used the &-call form that bypasses prototype checking.
sub scan_file {
    my $config = shift;

    my $in_file_handle = $config->{in_file_handle};

    my %include_regions;

    debug("Scanning input file") unless defined($config->{quiet});

    while(<$in_file_handle>) {
        chomp;

        # header line?
        next if /^\#/;

        # some lines (pileup) may actually parse out into more than one variant
        foreach my $sub_line(@{&parse_line($config, $_)}) {

            # get the sub-line into named variables
            my ($chr, $start, $end, $allele_string, $strand, $var_name) = @{$sub_line};

            # normalise chromosome names ("chr1" -> "1", "M" -> "MT")
            $chr =~ s/chr//ig unless $chr =~ /^chromosome$/i;
            $chr = 'MT' if $chr eq 'M';

            # skip chromosomes the user hasn't asked for
            next if defined($config->{chr}) && !$config->{chr}->{$chr};

            $include_regions{$chr} ||= [];

            &add_region($start, $end, $include_regions{$chr});
        }
    }

    # close filehandle and recycle
    close $in_file_handle;
    $config->{in_file_handle} = &get_in_file_handle($config);

    # merge regions
    &merge_regions(\%include_regions);

    return \%include_regions;
}
# gets regions from VF hash
# Derives the regions to fetch transcripts for from a parsed VF hash.
# Cache mode: returns fixed cache_region_size buckets covering each variant
# position padded by MAX_DISTANCE_FROM_TRANSCRIPT (matching on-disk layout).
# DB mode: returns the minimal merged spans so we don't over-fetch.
sub regions_from_hash {
    my ($config, $vf_hash) = @_;

    my %include_regions;

    if(defined($config->{cache})) {
        # if using cache we just want the regions of cache_region_size
        # since that's what we'll get from the cache (or DB if no cache found)
        my $bucket = $config->{cache_region_size};

        for my $chr (keys %$vf_hash) {
            my %uniq;

            for my $chunk (keys %{$vf_hash->{$chr}}) {
                for my $pos (keys %{$vf_hash->{$chr}{$chunk}}) {
                    # pad so transcripts within max distance are covered too
                    my $lo_idx = int(($pos - MAX_DISTANCE_FROM_TRANSCRIPT) / $bucket);
                    my $hi_idx = int(($pos + MAX_DISTANCE_FROM_TRANSCRIPT) / $bucket);
                    $uniq{ ($_ * $bucket + 1) . '-' . (($_ + 1) * $bucket) } = 1 for $lo_idx .. $hi_idx;
                }
            }

            $include_regions{$chr} = [ keys %uniq ];
        }
    }
    else {
        # if no cache we don't want to fetch more than is necessary, so find
        # the minimum covered region of the variations in the hash
        for my $chr (keys %$vf_hash) {
            $include_regions{$chr} = [];

            for my $chunk (keys %{$vf_hash->{$chr}}) {
                for my $pos (keys %{$vf_hash->{$chr}{$chunk}}) {
                    &add_region($_->start, $_->end, $include_regions{$chr}) for @{$vf_hash->{$chr}{$chunk}{$pos}};
                }
            }
        }

        # merge regions
        &merge_regions(\%include_regions);
    }

    return \%include_regions;
}
# adds a region to region list, expanding existing one if overlaps
# $region_list holds 'start-end' strings; the span is padded on both sides
# by MAX_DISTANCE_FROM_TRANSCRIPT. Every overlapping entry is widened in
# place; if nothing overlaps, a fresh padded entry is appended.
sub add_region {
    my ($start, $end, $region_list) = @_;

    # fix end for insertions (where end < start)
    $end = $start if $end < $start;

    my $overlapped = 0;

    for my $idx (0 .. $#{$region_list}) {
        my ($r_start, $r_end) = split /\-/, $region_list->[$idx];

        # no overlap with this entry - leave it alone
        next unless $start <= $r_end && $end >= $r_start;

        # widen the existing region to cover the padded span
        my $padded_start = ($start < $end ? $start : $end) - MAX_DISTANCE_FROM_TRANSCRIPT;
        my $padded_end   = ($start > $end ? $start : $end) + MAX_DISTANCE_FROM_TRANSCRIPT;

        $r_start = $padded_start if $padded_start < $r_start;
        $r_end   = $padded_end   if $padded_end   > $r_end;

        $region_list->[$idx] = $r_start.'-'.$r_end;
        $overlapped = 1;
    }

    # nothing overlapped: append a new padded region
    push @{$region_list}, ($start - MAX_DISTANCE_FROM_TRANSCRIPT).'-'.($end + MAX_DISTANCE_FROM_TRANSCRIPT)
        unless $overlapped;
}
# merges overlapping regions from scans
# Takes { chr => ['start-end', ...] }; collapses mutually overlapping entries
# per chromosome with an O(n^2) pairwise scan and replaces each list in
# place. Also bumps $config->{region_count} by the merged region count.
# NOTE(review): $config is not a parameter here - this relies on a
# file-scoped $config being in scope; confirm before refactoring.
sub merge_regions {
my $include_regions = shift;
# now merge overlapping regions
foreach my $chr(keys %$include_regions) {
my $max_index = $#{$include_regions->{$chr}};
my (@new_regions, %skip);
for my $i(0..$max_index) {
next if $skip{$i};
my ($s, $e) = split /\-/, $include_regions->{$chr}[$i];
# absorb every later region that overlaps the current one
for my $j(($i+1)..$max_index) {
next if $skip{$j};
my ($ns, $ne) = split /\-/, $include_regions->{$chr}[$j];
if($s <= $ne && $e >= $ns) {
$s = $ns if $ns < $s;
$e = $ne if $ne > $e;
# mark absorbed region so it isn't emitted on its own
$skip{$j} = 1;
}
}
push @new_regions, $s.'-'.$e;
}
# replace original
$include_regions->{$chr} = \@new_regions;
$config->{region_count} += scalar @new_regions;
}
return $include_regions;
}
# converts regions as determined by scan_file to regions loadable from cache
# Maps arbitrary 'start-end' spans onto the fixed cache_region_size grid,
# returning { chr => ['bucketstart-bucketend', ...] } with duplicates
# removed. Returns undef when given undef.
sub convert_regions {
    my ($config, $regions) = @_;

    return undef unless defined $regions;

    my $size = $config->{cache_region_size};
    my %converted;

    for my $chr (keys %$regions) {
        my %buckets;

        for my $span (@{ $regions->{$chr} }) {
            my ($start, $end) = split /\-/, $span;

            # inclusive range of grid bucket indices covering the span
            my $first = int($start / $size);
            my $last  = int($end / $size);

            for my $idx ($first .. $last) {
                $buckets{ ($idx * $size + 1) . '-' . (($idx + 1) * $size) } = 1;
            }
        }

        @{ $converted{$chr} } = keys %buckets;
    }

    return \%converted;
}
# CACHE METHODS
###############
# get transcripts for slices
# Fetches every gene/transcript overlapping the given regions
# ({ chr => ['start-end', ...] }), transfers them onto the full-chromosome
# slice, optionally prefetches per-transcript data and strips them for
# writing, and returns { chr => [transcripts] }.
sub cache_transcripts {
my $config = shift;
my $include_regions = shift;
my $transcript_cache;
my $i;
debug("Caching transcripts") unless defined($config->{quiet});
foreach my $chr(keys %$include_regions) {
my $slice = &get_slice($config, $chr);
next unless defined $slice;
# prefetch some things
$slice->is_circular;
# trim bumf off the slice
delete $slice->{coord_system}->{adaptor} if defined($config->{write_cache});
# no regions?
if(!scalar @{$include_regions->{$chr}}) {
# spoof regions covering the whole chromosome in cache-sized steps
my $start = 1;
my $end = $config->{cache_region_size};
while($start < $slice->end) {
push @{$include_regions->{$chr}}, $start.'-'.$end;
$start += $config->{cache_region_size};
$end += $config->{cache_region_size};
}
}
my $region_count;
# single-chromosome run: report per-chromosome progress instead
if(scalar keys %$include_regions == 1) {
# NOTE(review): this lexical $chr shadows the outer loop variable
my ($chr) = keys %$include_regions;
$region_count = scalar @{$include_regions->{$chr}};
debug("Caching transcripts for chromosome $chr") unless defined($config->{quiet});
}
foreach my $region(@{$include_regions->{$chr}}) {
&progress($config, $i++, $region_count || $config->{region_count});
my ($s, $e) = split /\-/, $region;
# sanity check start and end
$s = 1 if $s < 1;
$e = $slice->end if $e > $slice->end;
# get sub-slice
my $sub_slice = $slice->sub_Slice($s, $e);
# add transcripts to the cache, via a transfer to the chrom's slice
if(defined($sub_slice)) {
foreach my $gene(@{$sub_slice->get_all_Genes(undef, undef, 1)}) {
my $gene_stable_id = $gene->stable_id;
foreach my $tr(map {$_->transfer($slice)} @{$gene->get_all_Transcripts}) {
# remember the parent gene on the transcript itself
$tr->{_gene_stable_id} = $gene_stable_id;
if(defined($config->{prefetch})) {
# attach gene temporarily so prefetch can read HGNC etc.
$tr->{_gene} = $gene;
&prefetch_transcript_data($config, $tr);
delete $tr->{_gene};
}
# strip some unnecessary data from the transcript object
&clean_transcript($tr) if defined($config->{write_cache});
push @{$transcript_cache->{$chr}}, $tr;
}
}
}
}
}
&end_progress($config);
return $transcript_cache;
}
# gets rid of extra bits of info attached to the transcript that we don't need
# Strips display/external metadata, keeps only 'miRNA' attributes, and
# replaces the analysis object with an empty stub, so the serialised cache
# stays small. Mutates $tr in place.
sub clean_transcript {
    my $tr = shift;

    # drop display/external metadata keys that are present and defined
    delete @{$tr}{ grep { defined $tr->{$_} } qw(
        display_xref external_db external_display_name external_name
        external_status created_date status description edits_enabled
        modified_date
    ) };

    # clean all attributes but miRNA
    if (defined $tr->{attributes}) {
        $tr->{attributes} = [ grep { $_->{code} eq 'miRNA' } @{ $tr->{attributes} } ];
    }

    # replace the analysis object with an empty stub
    $tr->{analysis} = {};
}
# build slice cache from transcript cache
# For each chromosome, borrows the slice from the first cached transcript
# and re-attaches the coord-system adaptor (stripped before serialisation).
# Returns { chr => slice }.
sub build_slice_cache {
    my ($config, $transcript_cache) = @_;

    my %slices;

    for my $chr (keys %$transcript_cache) {
        my $slice = $transcript_cache->{$chr}[0]->slice;

        # reattach adaptor to the coord system
        $slice->{coord_system}{adaptor} ||= $config->{csa};

        $slices{$chr} = $slice;
    }

    return \%slices;
}
# pre-fetches per-transcript data
# Memoises derived data (introns, translateable seq, mapper, peptide, codon
# table, HGNC symbol) on the transcript under _variation_effect_feature_cache
# so later consequence calculation avoids repeated DB/API work.
# Returns the (mutated) transcript.
sub prefetch_transcript_data {
my $config = shift;
my $tran = shift;
# introns, translateable_seq, mapper
$tran->{_variation_effect_feature_cache}->{introns} ||= $tran->get_all_Introns;
$tran->{_variation_effect_feature_cache}->{translateable_seq} ||= $tran->translateable_seq;
$tran->{_variation_effect_feature_cache}->{mapper} ||= $tran->get_TranscriptMapper;
# peptide
unless ($tran->{_variation_effect_feature_cache}->{peptide}) {
my $translation = $tran->translate;
$tran->{_variation_effect_feature_cache}->{peptide} = $translation ? $translation->seq : undef;
}
# codon table
unless ($tran->{_variation_effect_feature_cache}->{codon_table}) {
# for mitochondrial DNA we need to use a different codon table
my $attrib = $tran->slice->get_all_Attributes('codon_table')->[0];
# default to the standard codon table (1) when no attribute is set
$tran->{_variation_effect_feature_cache}->{codon_table} = $attrib ? $attrib->value : 1;
}
# gene HGNC
if(defined $config->{hgnc}) {
# get from gene cache if found already
if(defined($tran->{_gene}->{_hgnc})) {
$tran->{_gene_hgnc} = $tran->{_gene}->{_hgnc};
}
else {
my @entries = grep {$_->database eq 'HGNC'} @{$tran->{_gene}->get_all_DBEntries()};
if(scalar @entries) {
$tran->{_gene_hgnc} = $entries[0]->display_id;
}
# '-' marks "looked up, none found" so we never retry the lookup
$tran->{_gene_hgnc} ||= '-';
# cache it on the gene object too
$tran->{_gene}->{_hgnc} = $tran->{_gene_hgnc};
}
}
return $tran;
}
# dumps out transcript cache to file
# Strips live DB handles/adaptors (so Storable can serialise the data),
# disconnects, then writes the cache gzipped to <dir>/<chr>/<region>.gz.
sub dump_transcript_cache {
    my ($config, $transcript_cache, $chr, $region) = @_;

    debug("Dumping cached transcript data") unless defined($config->{quiet});

    # clean the slice adaptor before storing
    &clean_slice_adaptor($config);

    &strip_transcript_cache($config, $transcript_cache);

    $config->{reg}->disconnect_all;

    my $dir = $config->{dir}.'/'.$chr;
    my $dump_file = $dir.'/'.($region || "dump").'.gz';

    # make directory if it doesn't exist
    # bug fix: list-form system() stops the shell interpreting
    # metacharacters in the configured cache path
    if(!(-e $dir)) {
        system('mkdir', '-p', $dir);
    }

    debug("Writing to $dump_file") unless defined($config->{quiet});

    # storable, gzipped on the way out
    open my $fh, "| gzip -c > ".$dump_file or die "ERROR: Could not write to dump file $dump_file";
    nstore_fd($transcript_cache, $fh);
    close $fh;
}
# loads in dumped transcript cache to memory
# Reads <dir>/<chr>/<region>.gz (written by dump_transcript_cache) through
# the configured decompressor. Returns the cache hashref, or undef when the
# dump is absent or cannot be opened.
sub load_dumped_transcript_cache {
    my ($config, $chr, $region) = @_;

    my $dump_file = join('/', $config->{dir}, $chr, ($region || "dump") . '.gz');

    return undef unless -e $dump_file;

    debug("Reading cached transcript data for chromosome $chr".(defined $region ? "\:$region" : "")." from dumped file") unless defined($config->{quiet});

    # stream the dump through the configured decompressor
    open my $fh, $config->{compress}." ".$dump_file." |" or return undef;
    my $cache = fd_retrieve($fh);
    close $fh;

    return $cache;
}
# strips cache
# Removes DB adaptor references from every transcript, its slice, and its
# exons (and their slices) so the structure can be serialised without
# dragging live DB connections into the dump. Mutates $cache in place.
sub strip_transcript_cache {
    my ($config, $cache) = @_;

    while (my ($chr, $tr_list) = each %$cache) {
        for my $tr (@$tr_list) {
            delete $tr->{adaptor};
            delete $tr->{slice}{adaptor};

            for my $exon (@{ $tr->{_trans_exon_array} }) {
                delete $exon->{adaptor};
                delete $exon->{slice}{adaptor};
            }
        }
    }
}
# cleans slice adaptor before storing in cache
# Empties the slice adaptor's lookup caches and deletes the DB-level caches
# so they aren't serialised along with the adaptor.
sub clean_slice_adaptor {
    my $config = shift;

    my $sa = $config->{sa};

    # reset the per-adaptor lookup caches to empty hashes
    $sa->{$_} = {} for qw(asm_exc_cache sr_name_cache sr_id_cache);

    # remove the DB-level caches entirely
    delete $sa->{db}->{seq_region_cache};
    delete $sa->{db}->{name_cache};
}
# dump adaptors to cache
# Disconnects all DB connections, then serialises $config gzipped to
# <dir>/adaptors.gz for load_dumped_adaptor_cache to restore from.
sub dump_adaptor_cache {
    my $config = shift;

    # drop live DB connections before serialising
    $config->{reg}->disconnect_all;

    my $dir = $config->{dir};
    my $dump_file = $dir.'/adaptors.gz';

    # make directory if it doesn't exist
    # bug fix: list-form system() stops the shell interpreting
    # metacharacters in the configured cache path
    if(!(-e $dir)) {
        system('mkdir', '-p', $dir);
    }

    # NOTE(review): this serialises the whole $config hash, not just the
    # adaptors; load_dumped_adaptor_cache only restores the adaptor slots,
    # so the rest is dead weight in the dump - confirm before slimming down
    open my $fh, "| gzip -c > ".$dump_file or die "ERROR: Could not write to dump file $dump_file";
    nstore_fd($config, $fh);
    close $fh;
}
# load dumped adaptors
# Restores the adaptor slots (sa ga ta vfa tva mca csa) into $config from
# <dir>/adaptors.gz. Returns 1 on success, undef when the dump is absent
# or cannot be opened.
sub load_dumped_adaptor_cache {
    my $config = shift;

    my $dump_file = $config->{dir} . '/adaptors.gz';

    return undef unless -e $dump_file;

    debug("Reading cached adaptor data") unless defined($config->{quiet});

    open my $fh, $config->{compress}." ".$dump_file." |" or return undef;
    my $cached_config = fd_retrieve($fh);
    close $fh;

    # copy only the adaptor slots across; the rest of the cached config
    # is stale options data we don't want
    $config->{$_} = $cached_config->{$_} for qw(sa ga ta vfa tva mca csa);

    return 1;
}
# dumps cached variations to disk
# Writes one space-separated line per known variant to
# <dir>/<chr>/<region>_var.gz; fields holding their default value
# (source 1, end == start, strand 1) are written empty to keep dumps small.
sub dump_variation_cache {
    my ($config, $v_cache, $chr, $region) = @_;

    my $dir = $config->{dir}.'/'.$chr;
    my $dump_file = $dir.'/'.($region || "dump").'_var.gz';

    # make directory if it doesn't exist
    # bug fix: list-form system() stops the shell interpreting
    # metacharacters in the configured cache path
    if(!(-e $dir)) {
        system('mkdir', '-p', $dir);
    }

    # bug fix: lexical filehandle instead of the package-global DUMP
    # bareword, so nested/concurrent use can't clobber another open handle
    open my $fh, "| gzip -c > ".$dump_file or die "ERROR: Could not write to adaptor dump file $dump_file";

    foreach my $pos(keys %{$v_cache->{$chr}}) {
        foreach my $v(@{$v_cache->{$chr}->{$pos}}) {
            my ($name, $source, $start, $end, $as, $strand) = @$v;

            # empty out default-valued fields; the loader restores them
            print $fh join " ", (
                $name,
                $source == 1 ? '' : $source,
                $start,
                $end == $start ? '' : $end,
                $as,
                $strand == 1 ? '' : $strand,
            );
            print $fh "\n";
        }
    }

    close $fh;
}
# loads dumped variation cache
# Reads <dir>/<chr>/<region>_var.gz (written by dump_variation_cache) and
# rebuilds { chr => { start => [ [name, source, start, end, allele_string,
# strand], ... ] } }. Returns undef when the dump is absent or unopenable.
sub load_dumped_variation_cache {
    my ($config, $chr, $region) = @_;

    my $dir = $config->{dir}.'/'.$chr;
    my $dump_file = $dir.'/'.($region || "dump").'_var.gz';

    return undef unless -e $dump_file;

    # bug fix: lexical filehandle instead of the package-global DUMP
    # bareword, so nested/concurrent use can't clobber another open handle
    open my $fh, $config->{compress}." ".$dump_file." |" or return undef;

    my $v_cache;

    while(<$fh>) {
        chomp;
        my ($name, $source, $start, $end, $as, $strand) = split / /, $_;

        # fields written empty at dump time hold their defaults
        $source ||= 1;
        $end ||= $start;
        $strand ||= 1;

        my @v = ($name, $source, $start, $end, $as, $strand);
        push @{$v_cache->{$chr}->{$start}}, \@v;
    }

    close $fh;

    return $v_cache;
}
# builds a full cache for this species
# Walks every requested slice ($config->{build} = 'all' or a chr list/range)
# in cache_region_size steps, dumping transcript and known-variant caches
# for each region. With $rebuild set, transcript data is re-read from the
# existing dumps (re-serialising them) instead of being fetched afresh.
sub build_full_cache {
my $config = shift;
my $rebuild = shift;
my @slices;
if($config->{build} =~ /all/i) {
@slices = @{$config->{sa}->fetch_all('toplevel')};
}
else {
# accept comma-separated values and dash ranges, e.g. "1-5,X"
foreach my $val(split /\,/, $config->{build}) {
my @nnn = split /\-/, $val;
foreach my $chr($nnn[0]..$nnn[-1]) {
my $slice = &get_slice($config, $chr);
push @slices, $slice if defined($slice);
}
}
}
foreach my $slice(@slices) {
my $chr = $slice->seq_region_name;
my $regions;
# for progress
my $region_count = int($slice->end / $config->{cache_region_size}) + 1;
my $counter = 0;
# initial region
my ($start, $end) = (1, $config->{cache_region_size});
debug((defined($config->{rebuild}) ? "Rebuild" : "Creat")."ing cache for chromosome $chr") unless defined($config->{quiet});
while($start < $slice->end) {
&progress($config, $counter++, $region_count);
# store quiet status - inner calls are silenced so the progress bar
# isn't interrupted
my $quiet = $config->{quiet};
$config->{quiet} = 1;
# store transcripts
$regions->{$chr} = [$start.'-'.$end];
my $tmp_cache = ($rebuild ? &load_dumped_transcript_cache($config, $chr, $start.'-'.$end) : &cache_transcripts($config, $regions));
# always dump something, even if empty, so readers see a file
$tmp_cache->{$chr} ||= [];
&dump_transcript_cache($config, $tmp_cache, $chr, $start.'-'.$end);
undef $tmp_cache;
# store variations
my $variation_cache;
$variation_cache->{$chr} = &get_variations_in_region($config, $chr, $start.'-'.$end);
$variation_cache->{$chr} ||= {};
&dump_variation_cache($config, $variation_cache, $chr, $start.'-'.$end);
undef $variation_cache;
# restore quiet status
$config->{quiet} = $quiet;
# increment by cache_region_size to get next region
$start += $config->{cache_region_size};
$end += $config->{cache_region_size};
}
&end_progress($config);
undef $regions;
}
}
# format coords for printing
# Renders a coordinate pair for output: '-' when there is no start,
# just the start when there is no end or start == end, otherwise
# "low-high" with the smaller coordinate first.
sub format_coords {
    my ($start, $end) = @_;

    return '-'    unless defined $start;
    return $start unless defined $end;
    return $start if $start == $end;

    return $start < $end ? "$start-$end" : "$end-$start";
}
# METHODS TO FIND CO-LOCATED / EXISTING VARIATIONS
##################################################
# finds an existing VF in the db
# Looks up variation_feature rows in the attached database that sit at
# exactly the same coordinates as $new_vf, and returns a comma-joined
# string of the names of those that are not novel relative to the input
# (per is_var_novel), or undef when there is no DB or nothing matches.
sub find_existing {
    my ($config, $new_vf) = @_;

    return undef unless defined $new_vf->adaptor->db;

    my $sth = $new_vf->adaptor->db->dbc->prepare(qq{
SELECT variation_name, source_id, seq_region_start, seq_region_end, allele_string, seq_region_strand
FROM variation_feature
WHERE seq_region_id = ?
AND seq_region_start = ?
AND seq_region_end = ?
ORDER BY source_id ASC
});

    $sth->execute($new_vf->slice->get_seq_region_id, $new_vf->start, $new_vf->end);

    my @found;
    while (my @row = $sth->fetchrow_array) {
        push @found, $row[0] unless &is_var_novel($config, \@row, $new_vf);
    }
    $sth->finish();

    return scalar @found ? join(",", @found) : undef;
}
# compare a new vf to one from the cache / DB
# Decides whether $new_var (a VariationFeature-like object) is novel with
# respect to $existing_var, an arrayref laid out as
# [name, source_id, start, end, allele_string, strand].
# Returns 1 (novel) or 0 (already known).
sub is_var_novel {
my $config = shift;
my $existing_var = shift;
my $new_var = shift;
my $is_novel = 1;
# same start and end coordinates => treated as the same variant...
$is_novel = 0 if $existing_var->[2] == $new_var->start && $existing_var->[3] == $new_var->end;
# ...unless allele checking is on, in which case any input allele not
# present in the existing allele string makes it novel again
if(defined($config->{check_alleles})) {
my %existing_alleles;
$existing_alleles{$_} = 1 for split /\//, $existing_var->[4];
my $seen_new = 0;
foreach my $a(split /\//, $new_var->allele_string) {
# reverse_comp mutates $a in place when the strands disagree, so the
# comparison below is always in the existing variant's orientation
reverse_comp(\$a) if $new_var->seq_region_strand ne $existing_var->[5];
$seen_new = 1 unless defined $existing_alleles{$a};
}
$is_novel = 1 if $seen_new;
}
return $is_novel;
}
# gets all variations in a region
# Fetches all variation_feature rows whose start falls inside the given
# region ("start-end") on chromosome $chr, returning a hashref keyed by
# seq_region_start; each value is a list of
# [name, source_id, start, end, allele_string, strand] arrayrefs.
# Returns an empty hashref when no database is attached.
sub get_variations_in_region {
    my ($config, $chr, $region) = @_;

    my ($start, $end) = split /\-/, $region;

    my %variations;

    if (defined $config->{vfa}->db) {
        my $sth = $config->{vfa}->db->dbc->prepare(qq{
SELECT vf.variation_name, vf.source_id, vf.seq_region_start, vf.seq_region_end, vf.allele_string, vf.seq_region_strand
FROM variation_feature vf, seq_region s
WHERE s.seq_region_id = vf.seq_region_id
AND s.name = ?
AND vf.seq_region_start >= ?
AND vf.seq_region_start <= ?
});
        $sth->execute($chr, $start, $end);

        # each row gets its own arrayref copy, keyed by start coordinate
        while (my @row = $sth->fetchrow_array) {
            push @{ $variations{ $row[2] } }, [ @row ];
        }
        $sth->finish();
    }

    return \%variations;
}
# DEBUG AND STATUS METHODS
##########################
# gets time
# Returns the current local time as "YYYY-MM-DD HH:MM:SS".
# The empty prototype on the original declaration served no purpose
# (prototypes are a parsing feature, not argument validation) and the
# manual "0" padding reimplemented sprintf's %02d.
sub get_time {
    my @time = localtime(time());

    # localtime gives (sec, min, hour, mday, mon0, year-1900, ...)
    return sprintf(
        "%04d-%02d-%02d %02d:%02d:%02d",
        $time[5] + 1900,    # year
        $time[4] + 1,       # month is 0-based
        $time[3],           # day of month
        $time[2],           # hour
        $time[1],           # minute
        $time[0],           # second
    );
}
# prints debug output with time
# Prints a timestamped status line to STDOUT; joins all arguments into one
# message and appends a newline unless the message already ends with one.
sub debug {
    my $message = @_ ? join('', @_) : "No message";
    my $stamp   = get_time;
    my $newline = ($message =~ /\n$/) ? "" : "\n";
    print $stamp . " - " . $message . $newline;
}
# update or initiate progress bar
# Draws/updates an in-place terminal progress bar for step $i of $total.
# No-op when quiet mode or no_progress is set.
sub progress {
my ($config, $i, $total) = @_;
return if defined($config->{quiet}) || defined($config->{no_progress});
# assumes $config->{terminal_width} was set at startup -- TODO confirm
my $width = $config->{terminal_width};
my $percent = int(($i/$total) * 100);
# number of '=' characters; -2 leaves room for the '[' and ']' brackets
my $numblobs = (($i/$total) * $width) - 2;
# this ensures we're not writing to the terminal too much
return if(defined($config->{prev_prog})) && $numblobs.'-'.$percent eq $config->{prev_prog};
$config->{prev_prog} = $numblobs.'-'.$percent;
# \r rewinds to the line start; the bar is left-padded to the terminal
# width, with '>' as the moving head ('=' once the bar is full)
printf("\r% -${width}s% 1s% 10s", '['.('=' x $numblobs).($numblobs == $width - 2 ? '=' : '>'), ']', "[ " . $percent . "% ]");
}
# end progress bar
# Finishes the progress bar: forces a final 100% redraw, moves to a new
# line, and clears the saved state so the next bar starts fresh.
sub end_progress {
    my ($config) = @_;

    return if defined $config->{quiet} or defined $config->{no_progress};

    progress($config, 1, 1);
    print "\n";
    delete $config->{prev_prog};
}
# outputs usage message
# Prints the command-line usage/help text to STDOUT.
# Fixes to the user-facing text: "identifer" -> "identifier" (x2),
# "Ouptut" -> "Output" (x2), and the unbalanced quote in the --compress
# description.
sub usage {
    my $usage =<<END;
#----------------------------------#
# ENSEMBL VARIANT EFFECT PREDICTOR #
#----------------------------------#
version $VERSION
By Will McLaren (wm2\@ebi.ac.uk)
http://www.ensembl.org/info/docs/variation/vep/vep_script.html
Usage:
perl variant_effect_predictor.pl [arguments]
Options
=======
--help Display this message and quit
--verbose Display verbose output as the script runs [default: off]
--quiet Suppress status and warning messages [default: off]
--no_progress Suppress progress bars [default: off]
--config Load configuration from file. Any command line options
specified overwrite those in the file [default: off]
-i | --input_file Input file - if not specified, reads from STDIN. Files
may be gzip compressed.
--format Alternative input file format - one of "pileup", "vcf"
-o | --output_file Output file. Write to STDOUT by specifying -o STDOUT - this
will force --quiet [default: "variant_effect_output.txt"]
--force_overwrite Force overwriting of output file [default: quit if file
exists]
-t | --terms Type of consequence terms to output - one of "ensembl", "SO",
"NCBI" [default: ensembl]
--sift=[p|s|b] Add SIFT [p]rediction, [s]core or [b]oth [default: off]
--polyphen=[p|s|b] Add PolyPhen [p]rediction, [s]core or [b]oth [default: off]
--condel=[p|s|b] Add Condel SIFT/PolyPhen consensus [p]rediction, [s]core or
[b]oth [default: off]
NB: SIFT, PolyPhen and Condel predictions are currently available for human only
--regulatory Look for overlaps with regulatory regions. The script can
also call if a variant falls in a high information position
within a transcription factor binding site. Output lines have
a Feature type of RegulatoryFeature or MotifFeature. Requires
database connection. [default: off]
NB: Regulatory consequences are currently available for human and mouse only
--hgnc If specified, HGNC gene identifiers are output alongside the
Ensembl Gene identifier [default: off]
--hgvs Output HGVS identifiers (coding and protein). Requires database
connection [default: off]
--protein Output Ensembl protein identifier [default: off]
--gene Force output of Ensembl gene identifier - disabled by default
unless using --cache or --no_whole_genome [default: off]
--coding_only Only return consequences that fall in the coding region of
transcripts [default: off]
--most_severe Output only the most severe consequence per variation.
Transcript-specific columns will be left blank. [default: off]
--summary Output only a comma-separated list of all consequences per
variation. Transcript-specific columns will be left blank.
[default: off]
--check_ref If specified, checks supplied reference allele against stored
entry in Ensembl Core database [default: off]
--check_existing If specified, checks for existing co-located variations in the
Ensembl Variation database [default: off]
--check_alleles If specified, the alleles of existing co-located variations
are compared to the input; an existing variation will only
be reported if no novel allele is in the input (strand is
accounted for) [default: off]
--chr [list] Select a subset of chromosomes to analyse from your file. Any
data not on this chromosome in the input will be skipped. The
list can be comma separated, with "-" characters representing
an interval [default: off]
--gp If specified, tries to read GRCh37 position from GP field in the
INFO column of a VCF file. Only applies when VCF is the input
format and human is the species [default: off]
--species Species to use [default: "human"]
--host Manually define database host [default: "ensembldb.ensembl.org"]
-u | --user Database username [default: "anonymous"]
--port Database port [default: 5306]
--password Database password [default: no password]
--genomes Sets DB connection params for Ensembl Genomes [default: off]
--registry Registry file to use defines DB connections [default: off]
Defining a registry file overrides above connection settings.
--db_version=[number] Force script to load DBs from a specific Ensembl version. Not
advised due to likely incompatibilities between API and DB
--no_whole_genome Run in old-style, non-whole genome mode [default: off]
--buffer_size Sets the number of variants sent in each batch [default: 5000]
Increasing buffer size can retrieve results more quickly
but requires more memory. Only applies to whole genome mode.
--cache Enables read-only use of cache [default: off]
--dir [directory] Specify the base cache directory to use [default: "\$HOME/.vep/"]
--write_cache Enable writing to cache [default: off]
--build [all|list] Build a complete cache for the selected species. Build for all
chromosomes with --build all, or a list of chromosomes (see
--chr). DO NOT USE WHEN CONNECTED TO PUBLIC DB SERVERS AS THIS
VIOLATES OUR FAIR USAGE POLICY [default: off]
--compress Specify utility to decompress cache files - may be "gzcat" or
"gzip -dc". Only use if default does not work [default: zcat]
--skip_db_check ADVANCED! Force the script to use a cache built from a different
database than specified with --host. Only use this if you are
sure the hosts are compatible (e.g. ensembldb.ensembl.org and
useastdb.ensembl.org) [default: off]
--cache_region_size ADVANCED! The size in base-pairs of the region covered by one
file in the cache. [default: 1MB]
END
    print $usage;
}
| adamsardar/perl-libs-custom | EnsemblAPI/ensembl-variation/scripts/examples/variant_effect_predictor.pl | Perl | apache-2.0 | 91,701 |
=head1 LICENSE
Copyright [1999-2014] Wellcome Trust Sanger Institute and the EMBL-European Bioinformatics Institute
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
=cut
=head1 CONTACT
Please email comments or questions to the public Ensembl
developers list at <http://lists.ensembl.org/mailman/listinfo/dev>.
Questions may also be sent to the Ensembl help desk at
<http://www.ensembl.org/Help/Contact>.
=cut
=head1 NAME
Bio::EnsEMBL::OntologyTerm
=head1 DESCRIPTION
An ontology term object, (most often) created by
Bio::EnsEMBL::DBSQL::GOTermAdaptor and used in querying for
transcripts, genes, and translations using the relevant adaptors and
methods.
=head1 METHODS
=cut
package Bio::EnsEMBL::OntologyTerm;
use strict;
use warnings;
use Bio::EnsEMBL::Utils::Argument qw( rearrange );
use base qw( Bio::EnsEMBL::Storable );
=head2 new
Arg [-ACCESSION] : String
The accession of the ontology term.
Arg [-ONTOLOGY] : String
The ontology that the term belongs to.
Arg [-NAMESPACE] : String
The namespace of the ontology term.
Arg [-NAME] : String
The name of the ontology term.
Arg [-SUBSETS] : (optional) Listref of strings
The subsets within the ontology to which this
term belongs.
Arg [-DEFINITION] : (optional) String
The definition of the ontology term.
Arg [-SYNONYMS] : (optional) Listref of strings
The synonyms of this term.
Arg : Further arguments required for parent class
Bio::EnsEMBL::Storable.
Description : Creates an ontology term object.
Example :
my $term = Bio::EnsEMBL::OntologyTerm->new(
'-accession' => 'GO:0021785',
'-ontology' => 'GO',
'-namespace' => 'biological_process',
'-name' => 'branchiomotor neuron axon guidance',
'-definition' => 'The process in which a branchiomotor '
. 'neuron growth cone is directed to a specific target site. '
. 'Branchiomotor neurons are located in the hindbrain and '
. 'innervate branchial arch-derived muscles that control jaw '
. 'movements, facial expression, the larynx, and the pharynx.',
'-synonyms' => [ 'BMN axon guidance',
'branchial motor axon guidance',
'special visceral motor neuron axon guidance' ]
# ... other arguments required by Bio::EnsEMBL::Storable.
);
Return type : Bio::EnsEMBL::OntologyTerm
=cut
# Constructor for an ontology term; see the POD above for the accepted
# named arguments. Fixes two defects: (1) -SUBSETS is documented as
# optional but the original dereferenced it unconditionally, dying under
# strict refs when it was omitted; (2) the documented -SYNONYMS argument
# was accepted by the POD but silently ignored.
sub new {
    my $proto = shift(@_);

    my $this = $proto->SUPER::new(@_);

    my ( $accession, $ontology, $namespace, $name, $definition,
         $is_root, $is_obsolete, $subsets, $synonyms )
      = rearrange( [ 'ACCESSION', 'ONTOLOGY', 'NAMESPACE', 'NAME',
                     'DEFINITION', 'IS_ROOT', 'IS_OBSOLETE', 'SUBSETS',
                     'SYNONYMS' ],
                   @_ );

    $this->{'accession'}   = $accession;
    $this->{'ontology'}    = $ontology;
    $this->{'namespace'}   = $namespace;
    $this->{'name'}        = $name;
    $this->{'definition'}  = $definition;
    $this->{'is_root'}     = $is_root;
    $this->{'is_obsolete'} = $is_obsolete;

    # -SUBSETS is optional; default to an empty list (copy defensively)
    $this->{'subsets'} = [ @{ $subsets || [] } ];

    # honour the documented -SYNONYMS argument; when absent, leave the key
    # unset so synonyms() lazy-loads from the database as before
    $this->{'synonyms'} = [ @{$synonyms} ] if defined($synonyms);

    $this->{'child_terms_fetched'}  = 0;
    $this->{'parent_terms_fetched'} = 0;

    return $this;
}
=head2 accession
Arg : None
Description : Returns the accession for the ontology term in question.
Example : my $accession = $term->accession();
Return type : String
=cut
# Read-only accessor: the term's accession, e.g. 'GO:0021785'.
sub accession {
    my $self = shift;
    return $self->{'accession'};
}
=head2 ontology
Arg : None
Description : Returns the ontology for the ontology term in question.
Example : my $ontology = $term->ontology();
Return type : String
=cut
# Read-only accessor: the ontology this term belongs to, e.g. 'GO'.
sub ontology {
    my $self = shift;
    return $self->{'ontology'};
}
=head2 namespace
Arg : None
Description : Returns the namespace for the ontology term in question.
Example : my $acc = $term->namespace();
Return type : String
=cut
# Read-only accessor: the term's namespace, e.g. 'biological_process'.
sub namespace {
    my $self = shift;
    return $self->{'namespace'};
}
=head2 name
Arg : None
Description : Returns the name for the ontology term in question.
Example : my $name = $term->name();
Return type : String
=cut
# Read-only accessor: the human-readable name of the term.
sub name {
    my $self = shift;
    return $self->{'name'};
}
=head2 definition
Arg : None
Description : Returns the definition for the ontology term in question.
Example : my $definition = $term->definition();
Return type : String
=cut
# Read-only accessor: the term's free-text definition.
sub definition {
    my $self = shift;
    return $self->{'definition'};
}
=head2 is_root
Arg : None
Description : Returns true if the term is root of its ontology
Example : my $is_root = $term->is_root();
Return type : Boolean (TRUE if it is a root, else FALSE)
=cut
# Read-only accessor: true when this term is the root of its ontology.
sub is_root {
    my $self = shift;
    return $self->{'is_root'};
}
=head2 is_obsolete
Arg : None
Description : Returns true if the term is obsolete
Example : my $is_obsolete = $term->is_obsolete();
Return type : Boolean (TRUE if it is obsolete, else FALSE)
=cut
# Read-only accessor: true when the term has been marked obsolete.
sub is_obsolete {
    my $self = shift;
    return $self->{'is_obsolete'};
}
=head2 synonyms
Arg : None
Description : Returns the list of synonyms defined for this term
(if any).
Example : my @synonyms = @{ $term->synonyms() };
Return type : Listref of strings
=cut
# Returns the term's synonyms as a listref, lazily fetching them from the
# database (via the adaptor) the first time and caching the result.
# Note: the cache test is on key existence, not definedness, so a stored
# undef is respected and never re-fetched.
sub synonyms {
    my $self = shift;

    unless ( exists $self->{'synonyms'} ) {
        $self->{'synonyms'} =
          $self->adaptor()->_fetch_synonyms_by_dbID( $self->dbID() );
    }

    return $self->{'synonyms'};
}
=head2 subsets
Arg : None
Description : Returns a list of subsets that this term is part
of. The list might be empty.
Example : my @subsets = @{ $term->subsets() };
Return type : listref of strings
=cut
# Read-only accessor: listref of subset names this term belongs to
# (possibly empty).
sub subsets {
    my $self = shift;
    return $self->{'subsets'};
}
=head2 children
Arg : (optional) List of strings
The type of relations to retrieve children for.
Description : Returns the children terms of this ontology term.
Example : my @children =
@{ $term->children( 'is_a', 'part_of' ) };
Return type : listref of Bio::EnsEMBL::OntologyTerm
=cut
# Returns the child terms of this term as a listref. With no arguments,
# all direct children are returned; with relation-type names, only the
# children reached via those relations (populated as a side effect of the
# adaptor call) are kept.
sub children {
    my ( $self, @relations ) = @_;

    # fetch everything directly below this term (also fills in
    # $self->{'children'} per relation type)
    my @terms = @{ $self->adaptor()->fetch_all_by_parent_term($self) };

    if (@relations) {
        @terms = ();
        foreach my $relation (@relations) {
            next unless exists $self->{'children'}{$relation};
            push @terms, @{ $self->{'children'}{$relation} };
        }
    }

    return \@terms;
}
=head2 descendants
Arg : None
Description : Returns the complete set of 'is_a' and 'part_of'
descendant terms of this ontology term, down to
and including any leaf terms.
Example : my @descendants = @{ $term->descendants() };
Return type : listref of Bio::EnsEMBL::OntologyTerm
=cut
# Returns every 'is_a'/'part_of' descendant of this term, down to the
# leaves, as a listref (delegates entirely to the adaptor).
sub descendants {
    my $self = shift;
    return $self->adaptor()->fetch_all_by_ancestor_term($self);
}
=head2 parents
Arg : (optional) List of strings
The type of relations to retrieve parents for.
Description : Returns the parent terms of this ontology term.
Example : my @parents =
@{ $term->parents( 'is_a', 'part_of' ) };
Return type : listref of Bio::EnsEMBL::OntologyTerm
=cut
# Returns the parent terms of this term as a listref. With no arguments,
# all direct parents are returned; with relation-type names, only the
# parents reached via those relations (populated as a side effect of the
# adaptor call) are kept.
sub parents {
    my ( $self, @relations ) = @_;

    # fetch everything directly above this term (also fills in
    # $self->{'parents'} per relation type)
    my @terms = @{ $self->adaptor()->fetch_all_by_child_term($self) };

    if (@relations) {
        @terms = ();
        foreach my $relation (@relations) {
            next unless exists $self->{'parents'}{$relation};
            push @terms, @{ $self->{'parents'}{$relation} };
        }
    }

    return \@terms;
}
=head2 ancestors
Arg : None
Description : Returns the complete set of 'is_a' and 'part_of'
ancestor terms of this ontology term, up to and
including the root term.
Example : my @ancestors = @{ $term->ancestors() };
Return type : listref of Bio::EnsEMBL::OntologyTerm
=cut
# Returns every 'is_a'/'part_of' ancestor of this term, up to and
# including the root, as a listref (delegates entirely to the adaptor).
sub ancestors {
    my $self = shift;
    return $self->adaptor()->fetch_all_by_descendant_term($self);
}
1;
| willmclaren/ensembl | modules/Bio/EnsEMBL/OntologyTerm.pm | Perl | apache-2.0 | 8,851 |
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package hardware::devices::safenet::keysecure::snmp::plugin;
use strict;
use warnings;
use base qw(centreon::plugins::script_snmp);
# Plugin constructor: builds on the generic centreon SNMP script class and
# registers the modes this device supports.
sub new {
my ($class, %options) = @_;
my $self = $class->SUPER::new(package => __PACKAGE__, %options);
bless $self, $class;
$self->{version} = '0.1';
# mode name => implementing module; KeySecure appliances reuse the shared
# Ingrian SNMP mode implementations plus the standard interface modes
%{$self->{modes}} = (
'connections' => 'centreon::common::ingrian::snmp::mode::connections',
'cpu' => 'centreon::common::ingrian::snmp::mode::cpu',
'disk' => 'centreon::common::ingrian::snmp::mode::disk',
'interfaces' => 'snmp_standard::mode::interfaces',
'list-interfaces' => 'snmp_standard::mode::listinterfaces',
'memory' => 'centreon::common::ingrian::snmp::mode::memory',
'request-stats' => 'centreon::common::ingrian::snmp::mode::requeststats',
);
return $self;
}
1;
__END__
=head1 PLUGIN DESCRIPTION
Check Safenet Keysecure in SNMP.
=cut
| Sims24/centreon-plugins | hardware/devices/safenet/keysecure/snmp/plugin.pm | Perl | apache-2.0 | 1,869 |
package Paws::Config::DescribeComplianceByResource;
# Request class for the AWS Config DescribeComplianceByResource API call;
# each attribute mirrors one request parameter (documented in the POD below).
use Moose;
has ComplianceTypes => (is => 'ro', isa => 'ArrayRef[Str|Undef]');
has Limit => (is => 'ro', isa => 'Int');
has NextToken => (is => 'ro', isa => 'Str');
has ResourceId => (is => 'ro', isa => 'Str');
has ResourceType => (is => 'ro', isa => 'Str');
use MooseX::ClassAttribute;
# class-level call metadata consumed by the Paws request dispatcher:
# API operation name, response class, and result key (none here)
class_has _api_call => (isa => 'Str', is => 'ro', default => 'DescribeComplianceByResource');
class_has _returns => (isa => 'Str', is => 'ro', default => 'Paws::Config::DescribeComplianceByResourceResponse');
class_has _result_key => (isa => 'Str', is => 'ro');
1;
### main pod documentation begin ###
=head1 NAME
Paws::Config::DescribeComplianceByResource - Arguments for method DescribeComplianceByResource on Paws::Config
=head1 DESCRIPTION
This class represents the parameters used for calling the method DescribeComplianceByResource on the
AWS Config service. Use the attributes of this class
as arguments to method DescribeComplianceByResource.
You shouldn't make instances of this class. Each attribute should be used as a named argument in the call to DescribeComplianceByResource.
As an example:
$service_obj->DescribeComplianceByResource(Att1 => $value1, Att2 => $value2, ...);
Values for attributes that are native types (Int, String, Float, etc) can be passed as-is (scalar values). Values for complex Types (objects) can be passed as a HashRef. The keys and values of the hashref will be used to instantiate the underlying object.
=head1 ATTRIBUTES
=head2 ComplianceTypes => ArrayRef[Str|Undef]
Filters the results by compliance.
The allowed values are C<COMPLIANT>, C<NON_COMPLIANT>, and
C<INSUFFICIENT_DATA>.
=head2 Limit => Int
The maximum number of evaluation results returned on each page. The
default is 10. You cannot specify a limit greater than 100. If you
specify 0, AWS Config uses the default.
=head2 NextToken => Str
The C<NextToken> string returned on a previous page that you use to get
the next page of results in a paginated response.
=head2 ResourceId => Str
The ID of the AWS resource for which you want compliance information.
You can specify only one resource ID. If you specify a resource ID, you
must also specify a type for C<ResourceType>.
=head2 ResourceType => Str
The types of AWS resources for which you want compliance information;
for example, C<AWS::EC2::Instance>. For this action, you can specify
that the resource type is an AWS account by specifying
C<AWS::::Account>.
=head1 SEE ALSO
This class forms part of L<Paws>, documenting arguments for method DescribeComplianceByResource in L<Paws::Config>
=head1 BUGS and CONTRIBUTIONS
The source code is located here: https://github.com/pplu/aws-sdk-perl
Please report bugs to: https://github.com/pplu/aws-sdk-perl/issues
=cut
| ioanrogers/aws-sdk-perl | auto-lib/Paws/Config/DescribeComplianceByResource.pm | Perl | apache-2.0 | 2,819 |
package VMOMI::ClusterProfileConfigServiceCreateSpec;
use parent 'VMOMI::ClusterProfileConfigSpec';

use strict;
use warnings;

# ancestor type names, most-derived first; used by the VMOMI
# (de)serialisation machinery to walk the inheritance chain
our @class_ancestors = (
'ClusterProfileConfigSpec',
'ClusterProfileCreateSpec',
'ProfileCreateSpec',
'DynamicData',
);

# member definitions for this type; per the p5-vmomi convention each entry
# is [name, type, flag, flag] -- NOTE(review): confirm the exact meaning of
# the two trailing flags against the class generator
our @class_members = (
['serviceType', undef, 1, 1],
);

# returns the ancestor type-name list declared above
sub get_class_ancestors {
return @class_ancestors;
}

# returns inherited member definitions followed by this class's own
sub get_class_members {
my $class = shift;
my @super_members = $class->SUPER::get_class_members();
return (@super_members, @class_members);
}

1;
| stumpr/p5-vmomi | lib/VMOMI/ClusterProfileConfigServiceCreateSpec.pm | Perl | apache-2.0 | 544 |
% ----------------------------------------------------------------------------
% Building a string containing the query
% ----------------------------------------------------------------------------
:- pred buildqueries(SQLTermList,SQLString) :: list(sqlterm) * string
# "Builds a SQL string from the list of SQL term @var{SQLTermList}.".
buildqueries(QueriesList,String):-
buildqueries2(QueriesList,[],String).
% last (or only) query: append it and terminate the statement with ';'
buildqueries2([Query],PreviousString,QueryString):-
build_query(Query,QueryStr),
list_concat([PreviousString," ",QueryStr,";"],QueryString).
% more queries remain: join this one to the accumulator with " UNION "
% and recurse on the tail
buildqueries2([Query|Queries],PreviousString,QueryString):-
\+ (Queries = []),
build_query(Query,QueryStr1),
list_concat([PreviousString," ",QueryStr1," UNION "],QueryString1),
buildqueries2(Queries, QueryString1, QueryString).
% ----------------------------------------------------------------------------
:- pred build_query(QueryCode,SQLString).
% a query whose SELECT list is a single aggregate: unwrap and delegate
% to the agg_query clause below
build_query(query([agg_query(Function,Select,From,Where,Group)],_,_),SQLStr):-
% --- ugly rule here: aggregate function only in SELECT Part of query ----
!,
build_query(agg_query(Function,Select,From,Where,Group),SQLStr).
% plain query: SELECT ... FROM ... WHERE ...
build_query(query(Select,From,Where),SQLStr):-
build_clause3("SELECT",Select,",",Str1),
build_clause3("FROM",From,",",Str2),
build_clause3("WHERE",Where,"AND",Str3),
list_concat([Str1," ",Str2," ",Str3],SQLStr).
% aggregate query: adds the aggregation function and a GROUP BY clause
build_query(agg_query(Function,Select,From,Where,Group),SQLStr):-
build_clause4("SELECT",Function,Select,",",Str1),
build_clause3("FROM",From,",",Str2),
build_clause3("WHERE",Where,"AND",Str3),
build_clause3("GROUP BY",Group,",",Str4),
list_concat([Str1," ",Str2," ",Str3," ",Str4],SQLStr).
% negated existential subquery: wraps the inner SELECT in "NOT EXISTS (...)"
build_query(negated_existential_subquery(Select,From,Where),SQLStr):-
build_clause3("SELECT",Select,",",Str1),
build_clause3("FROM",From,",",Str2),
build_clause3("WHERE",Where,"AND",Str3),
list_concat(["NOT EXISTS ","(",Str1," ",Str2," ",Str3,")"],SQLStr).
% ----------------------------------------------------------------------------
:- pred build_clause4(Keyword,Function,ClauseCode,Separator,SQLString)
# "Where
@var{Keyword} is one of SELECT, FROM, WHERE, or GROUP BY,
@var{Function} is an aggregation function, and
@var{ClauseCode} is the code corresponding to the appropriate clause
of an SQL query, and
@var{Separator} indicates the character(s) which separate the items
of a clause from each other (, or AND), and
@var{SQLString} contains the SQL sentence fragment corresponding to the
clause.".
% renders e.g. "SELECT COUNT(t.col)" -- the aggregate wraps a single column
build_clause4(Keyword,Function,[Column],Separator,SQLString):-
build_clause2([Column],Separator,Str),
%% aggregate_functor(Function,FunctionTerm), modified 19-10-98, Ignacio
atom_codes(Function,FunctionString),
list_concat([Keyword," ",FunctionString,"(",Str,")"],SQLString).
% ----------------------------------------------------------------------------
:- pred build_clause3(Keyword,ClauseCode,Separator,SQLString).
% an empty item list yields the empty string (clause omitted entirely)
build_clause3(_Keyword,[],_,"").
% otherwise: keyword followed by the separator-joined items
build_clause3(Keyword,[Column|RestColumns],Separator,SQLString):-
build_clause2([Column|RestColumns],Separator,Str),
list_concat([Keyword," ",Str],SQLString).
% ----------------------------------------------------------------------------
:- pred build_clause2(ClauseCode,Separator,SQLString).
% single item: render it directly
build_clause2([Item],_,SQLString):-
build_column(Item,SQLString).
% several items: render the head and join to the rest with the separator
build_clause2([Item,NextItem|RestItems],Separator,SQLString):-
build_column(Item,Str1),
build_clause2([NextItem|RestItems],Separator,Str2),
list_concat([Str1," ",Separator," ",Str2],SQLString).
% ----------------------------------------------------------------------------
:- pred build_column(ColumnCode,SQLString).
% wildcard column
build_column('*',"*").
% qualified attribute reference: "RangeVar.Attribute"
build_column(att(RangeVar,Attribute),SQLString):-
atom_codes(RangeVar,RangeVarStr),
atom_codes(Attribute,AttributeStr),
list_concat([RangeVarStr,".",AttributeStr],SQLString).
% FROM-list entry: "Relation RangeVar" (table plus alias)
build_column(rel(Relation,RangeVar),SQLString):-
atom_codes(Relation,RelationStr),
atom_codes(RangeVar,RangeVarStr),
list_concat([RelationStr," ",RangeVarStr],SQLString).
% string constant -- NOTE(review): emitted unquoted/unescaped; the
% commented-out writeq below suggests quoting was once considered
build_column('$const$'(StringAtom),String):-
get_type('$const$'(StringAtom),string),
atom_codes(StringAtom,String).
%%% writeq(String). %% Using writeq ^^^^^^^^^^
% numeric constant (any type compatible with number)
build_column('$const$'(Number),String):-
get_type('$const$'(Number),NumType),
check_type_compatible(NumType,number),
number_codes(Number,String).
% comparison: "Left Op Right"
build_column(comp(LeftArg,Operator,RightArg),String):-
build_column(LeftArg,Str1),
atom_codes(Operator,Str2),
build_column(RightArg,Str3),
list_concat([Str1," ",Str2," ",Str3],String).
% arithmetic expressions, one clause per operator
build_column(LeftExpr * RightExpr,String):-
build_column(LeftExpr,Str1),
build_column(RightExpr,Str2),
list_concat([Str1,"*",Str2],String).
build_column(LeftExpr / RightExpr,String):-
build_column(LeftExpr,Str1),
build_column(RightExpr,Str2),
list_concat([Str1,"/",Str2],String).
build_column(LeftExpr + RightExpr,String):-
build_column(LeftExpr,Str1),
build_column(RightExpr,Str2),
list_concat([Str1,"+",Str2],String).
build_column(LeftExpr - RightExpr,String):-
build_column(LeftExpr,Str1),
build_column(RightExpr,Str2),
list_concat([Str1,"-",Str2],String).
% nested aggregate subquery, parenthesised
build_column(agg_query(Function,Select,From,Where,Group),String):-
%% nl,
build_query(agg_query(Function,Select,From,Where,Group),Str),
list_concat(["(",Str,")"],String).
% NOT EXISTS subquery (build_query already adds its own parentheses)
build_column(negated_existential_subquery(Select,From,Where),String):-
build_query(negated_existential_subquery(Select,From,Where),String).
%% 2nd FRAGMENT
% converts a (list of) SQL term(s) into a single SQL statement string;
% succeeds with the string, or reports an error and fails
sqlterm2string(SQLQueryTerm,SQLQueryString) :-
buildqueries(SQLQueryTerm,SQLQueryString),
!,
debug_message(" Translation done ",[]).
% fallback clause: only reached when buildqueries/2 failed above
sqlterm2string(SQLQueryTerm,_) :-
error_message("could not convert to string ~w",[SQLQueryTerm]),
fail.
%% %% Original code fails on some SQLQueryTerms for which printqueries/1 succeeds!
%% %% Doing this serious kludge instead for now:
%% REPAIRED!
%% sqlterm2string(SQLQueryTerm,SQLQueryString) :-
%% telling(O),
%% TMP = '/tmp/sqlqueryfileIgnacio',
%% tell(TMP),
%% write('"'),
%% printqueries(SQLQueryTerm),
%% write('"'),
%% write('.'),
%% nl,
%% told,
%% tell(O),
%% seeing(I),
%% see(TMP),
%% do_the_read(SQLQueryTerm,I,SQLQueryString).
%%
%% do_the_read(_SQLQueryTerm,I,SQLQueryString) :-
%% read(SQLQueryString),
%% seen,
%% see(I),
%% !.
%% do_the_read(SQLQueryTerm,I,_SQLQueryString) :-
%% error_message("could not convert to string ~w",[SQLQueryTerm]),
%% seen,
%% see(I),
%% fail.
%% sqlterm2string(Queries,QueryString) :-
%% queries_dstring(Queries,QueryString,[]),
%% !.
%% sqlterm2string(SQLQueryTerm,_SQLQueryString) :-
%% error_message("could not convert to string ~w",[SQLQueryTerm]).
| leuschel/ecce | www/CiaoDE/ciao/library/persdb_mysql/old/pl2sql_modifications/fragments.pl | Perl | apache-2.0 | 6,850 |
#!/usr/bin/perl -w
# PPath@Cornell
# Surya Saha Feb 25, 2011
use strict;
use warnings;
use Getopt::Long;
eval {
require Bio::SearchIO;
};
use Bio::SearchIO;
=head1 NAME
Blast2HitGFF3.pl - Create GFF file of blast hits from Blast text report
=head1 SYNOPSIS
% Blast2HitGFF3.pl --report blast.out --cutoff <1.0>
=head1 DESCRIPTION
Reads in BLAST report file. Should work for any type of Blast. CHECK!!
=head1 COMMAND-LINE OPTIONS
Command-line options can be abbreviated to single-letter options, e.g. -f instead of --file. Some options
are mandatory (see below).
--report <.out> Blast report in text format (required)
--cutoff <1.0> A float value for maximum e value <1.0>
--source <> Source of seqs in hit blast database (RefSeq,Genbank)
--out <.gff> GFF3 output filename
--connect <0/1> Connect HSPs if hit is on the same subject sequence and in same orientation
=head1 NOTES
HSPs are ordered by evalue and NOT by subject in the report by default. So connections may not be made if parser finds the next hit on another subject sequence.
=head1 AUTHOR
Surya Saha, ss2489@cornell.edu
=cut
# Command-line state; several of these variables are shared with the
# report-parsing loop further down the script, so they stay file-scoped.
my ($rep, $cutoff, $src, $out, $connect, $flag, $in, @temp, $result, $hit, $hsp, $i, $j);

GetOptions (
    'report=s'  => \$rep,
    'cutoff:f'  => \$cutoff,
    'source:s'  => \$src,
    'out:s'     => \$out,
    'connect:i' => \$connect ) or (system('pod2text',$0), exit 1);

# defaults and checks
defined($rep) or (system('pod2text',$0), exit 1);
if (!(-e $rep)){print STDERR "$rep not found: $!\n"; exit 1;}
if ( defined($connect) && $connect != 0 && $connect != 1 ) {
    system( 'pod2text', $0 ), exit 1;
}
$connect ||= 0;
$cutoff  ||= 1.0;
$src     ||= 'RefSeq';
$out     ||= "$rep\.gff";

print STDERR "Using E value cutoff of $cutoff ...\nSource as $src ...\n";

# direct method call instead of the indirect-object syntax
# "new Bio::SearchIO(...)", which Perl can mis-parse
$in = Bio::SearchIO->new(-format => 'blast', -file => $rep);

$flag = 0;
my $counter = 1;
my ($name, $desc, $GFF);
while($result = $in->next_result) {
## $result is a Bio::Search::Result::ResultI compliant object
if($result->no_hits_found()){next;}
#get hit data
if($result->num_hits>0){
if($flag==0){
unless( open $GFF, '>', "$out" ){print "not able to open $out\n\n";exit 1;}
print $GFF "\#\#gff-version 3\n\#\#Generated by Blast2HitGFF3.pl\n\#\#Algorithm: ",
$result->algorithm," Version: ",$result->algorithm_version,"\n";
print $GFF "\#\#DB name: ",$result->database_name," Sequences: ",$result->database_entries,
" Size: ",$result->database_letters,"\n";
$flag=1;
}
# @temp=split(/\|/,$result->query_name);
# print XLS "\nQuery\t",$result->query_name,"\nDesc\t",$result->query_description,"\n";
$name = $result->query_name ? $result->query_name : 'No name';
$name =~ s/;/ /g; #to remove GFF3 notes separator if present
$desc = $result->query_description ? $result->query_description : 'No description';
$desc =~ s/;/ /g; #to remove GFF3 notes separator if present
while($hit = $result->next_hit ) {
## $hit is a Bio::Search::Hit::HitI compliant object
if ($hit->significance < $cutoff){
my($is_first_hsp, $hit_strand, $ID);
$is_first_hsp = 1;
while($hsp = $hit->next_hsp()){
## $hsp is a Bio::Search::HSP::HSPI object
if ( $connect ){
if ( $is_first_hsp ){
$ID = $counter;
$hit_strand = $hsp->strand('hit');
print $GFF $hit->name,"\t$src\tmatch\t",$hsp->start('hit'),"\t",$hsp->end('hit'),"\t",
$hsp->bits(),"\t";
if($hsp->strand('hit') == -1){print $GFF '-';}
elsif($hsp->strand('hit') == 1){print $GFF '+';}
if ( $name ne 'No name' && $desc ne 'No description' ){
print $GFF "\t.\tID=",$ID,";Name=",$name,";Note=$desc Percent_identity ",sprintf("%.2f",$hsp->percent_identity),' Evalue ',
$hsp->evalue(),' Length ',$hsp->length(),"\n";
}
elsif ( $name eq 'No name' && $desc ne 'No description' ){
print $GFF "\t.\tID=",$ID,";Name=",$desc,";Note=Percent_identity ",sprintf("%.2f",$hsp->percent_identity),' Evalue ',
$hsp->evalue(),' Length ',$hsp->length(),"\n";
}
elsif ( $name eq 'No name' && $desc eq 'No description' ){
print $GFF "\t.\tID=",$ID,";Name=NA;Note=Percent_identity ",sprintf("%.2f",$hsp->percent_identity),' Evalue ',
$hsp->evalue(),' Length ',$hsp->length(),"\n";
}
else{
print STDERR "This should not happen\n\n"; exit 1;
}
$is_first_hsp = 0;
}
elsif ( $is_first_hsp != 1 && $hit_strand == $hsp->strand('hit') ){
print $GFF $hit->name,"\t$src\tmatch\t",$hsp->start('hit'),"\t",$hsp->end('hit'),"\t",
$hsp->bits(),"\t";
if($hsp->strand('hit') == -1){print $GFF '-';}
elsif($hsp->strand('hit') == 1){print $GFF '+';}
if ( $name ne 'No name' && $desc ne 'No description' ){
print $GFF "\t.\tID=",$ID,";Name=",$name,";Note=$desc Percent_identity ",sprintf("%.2f",$hsp->percent_identity),' Evalue ',
$hsp->evalue(),' Length ',$hsp->length(),"\n";
}
elsif ( $name eq 'No name' && $desc ne 'No description' ){
print $GFF "\t.\tID=",$ID,";Name=",$desc,";Note=Percent_identity ",sprintf("%.2f",$hsp->percent_identity),' Evalue ',
$hsp->evalue(),' Length ',$hsp->length(),"\n";
}
elsif ( $name eq 'No name' && $desc eq 'No description' ){
print $GFF "\t.\tID=",$ID,";Name=NA;Note=Percent_identity ",sprintf("%.2f",$hsp->percent_identity),' Evalue ',
$hsp->evalue(),' Length ',$hsp->length(),"\n";
}
else{
print STDERR "This should not happen\n\n"; exit 1;
}
}
else{
print $GFF $hit->name,"\t$src\tmatch_part\t",$hsp->start('hit'),"\t",$hsp->end('hit'),"\t",
$hsp->bits(),"\t";
if($hsp->strand('hit') == -1){print $GFF '-';}
elsif($hsp->strand('hit') == 1){print $GFF '+';}
if ( $name ne 'No name' && $desc ne 'No description' ){
print $GFF "\t.\tID=",$counter,";Name=",$name,";Note=$desc Percent_identity ",sprintf("%.2f",$hsp->percent_identity),' Evalue ',
$hsp->evalue(),' Length ',$hsp->length(),"\n";
}
elsif ( $name eq 'No name' && $desc ne 'No description' ){
print $GFF "\t.\tID=",$counter,";Name=",$desc,";Note=Percent_identity ",sprintf("%.2f",$hsp->percent_identity),' Evalue ',
$hsp->evalue(),' Length ',$hsp->length(),"\n";
}
elsif ( $name eq 'No name' && $desc eq 'No description' ){
print $GFF "\t.\tID=",$counter,";Name=NA;Note=Percent_identity ",sprintf("%.2f",$hsp->percent_identity),' Evalue ',
$hsp->evalue(),' Length ',$hsp->length(),"\n";
}
else{
print STDERR "This should not happen\n\n"; exit 1;
}
$counter++;
}
}
else{
print $GFF $hit->name,"\t$src\tmatch_part\t",$hsp->start('hit'),"\t",$hsp->end('hit'),"\t",
$hsp->bits(),"\t";
if($hsp->strand('hit') == -1){print $GFF '-';}
elsif($hsp->strand('hit') == 1){print $GFF '+';}
if ( $name ne 'No name' && $desc ne 'No description' ){
print $GFF "\t.\tID=",$counter,";Name=",$name,";Note=$desc Percent_identity ",sprintf("%.2f",$hsp->percent_identity),' Evalue ',
$hsp->evalue(),' Length ',$hsp->length(),"\n";
}
elsif ( $name eq 'No name' && $desc ne 'No description' ){
print $GFF "\t.\tID=",$counter,";Name=",$desc,";Note=Percent_identity ",sprintf("%.2f",$hsp->percent_identity),' Evalue ',
$hsp->evalue(),' Length ',$hsp->length(),"\n";
}
elsif ( $name eq 'No name' && $desc eq 'No description' ){
print $GFF "\t.\tID=",$counter,";Name=NA;Note=Percent_identity ",sprintf("%.2f",$hsp->percent_identity),' Evalue ',
$hsp->evalue(),' Length ',$hsp->length(),"\n";
}
else{
print STDERR "This should not happen\n\n"; exit 1;
}
$counter++;
}
}
}
}
}
$i=$result;
}
@temp=$i->available_parameters();
print $GFF "\#\#Parameters\n\#\#";
foreach my $j (@temp){print $GFF $j,': ',$i->get_parameter($j),' ';} print $GFF "\n";
@temp=$i->available_statistics();
print $GFF "\#\#Statistics\n\#\#";
foreach my $j (@temp){print $GFF $j,': ',$i->get_statistic($j),' ';} print $GFF "\n";
if($flag==0){print STDERR "\n\nNo hits found!!\n";}
else{close($GFF);}
exit;
| suryasaha/Utils | Blast2HitGFF3.pl | Perl | bsd-2-clause | 8,196 |
package Mojo::Webqq::Recent::Friend;
use strict;
use Mojo::Webqq::Base 'Mojo::Webqq::Model::Base';

# Attributes exposed by this recent-contact model object.
has [qw(id type)];

# Merge values from a hash reference into this object: every key that
# already exists on the object is overwritten when the hash supplies a
# value for it; unknown keys in the hash are ignored.
# Returns the object itself, allowing method chaining.
sub update {
    my ($self, $hash) = @_;
    foreach my $field (keys %$self) {
        next unless exists $hash->{$field};
        $self->{$field} = $hash->{$field};
    }
    return $self;
}

1;
| sjdy521/Mojo-Webqq | lib/Mojo/Webqq/Recent/Friend.pm | Perl | bsd-2-clause | 287 |
#!/usr/bin/perl -w
# (fixed shebang: the original '#/usr/bin/perl' was missing the '!',
# so the kernel could not execute this file directly)
use strict;
$|=1; # flush print commands

# %id2gene maps Ensembl transcript IDs to gene symbols, loaded from
# UCSC's ensemblToGeneName.txt; "N/A" maps to itself.
my %id2gene=();
$id2gene{"N/A"}="N/A";
my $filename = "/home/alkan/c/gulayse/neurod2/mouse_data/ucsc/ensemblToGeneName.txt";
open(my $map_fh, '<', $filename) or die "Can't open ensemblToGeneName.txt";
while(<$map_fh>) {
    chomp;
    my ($id,$genename) = split/\t/;
    $id2gene{$id}=$genename;
}
close($map_fh);

# %hgenelen records, per gene symbol, the length of its longest
# transcript (from UCSC ensGene.txt).
my $mousepath = "/home/alkan/c/gulayse/neurod2/mouse_data/ucsc/";
$filename = "ensGene.txt"; # reassign: a second 'my' here masked the earlier declaration
open(my $gene_fh, '<', $mousepath.$filename) or die "Can't open $filename";
my %hgenelen=();
while(<$gene_fh>) {
    chomp;
    my ($chr,$txstart,$txend,$id,$a,$sense,@unused) = split/\t/;
    my $l = $txend-$txstart;
    unless (defined $hgenelen{$id2gene{$id}} && $hgenelen{$id2gene{$id}} > $l) {
        $hgenelen{$id2gene{$id}} = $l; # pick max length tx for this gene name
    }
}
close($gene_fh);
print STDERR "finished reading $filename.\n";
# Per-gene scores split by context (in-gene / upstream-of-TSS /
# downstream) and raw hit counts.
my %hscore=();
my %hitcount=();
my $margin = 10000;
my $fmargin = 10000.0;
# NOTE(review): $tightmargin is only referenced from commented-out
# experiments below; it is currently unused.
my $tightmargin = 1000;
my $cnt = 0;
# Read one tab-separated context record per line from stdin/ARGV.
while (<>){ # read context_ens.txt
$cnt += 1;
# Progress dot every 100 records.
print STDERR "." if ($cnt % 100 == 0);
chomp;
my ($id,$score,$ingene,$inlen,$uptssgene,$uptsslen,$downtssgene,$downtsslen,$downgene,$downlen,$ensliststring) = split/\t/;
# NOTE(review): @enslist is built but never used below.
my @enslist = split/,/,$ensliststring;
# Hit falls inside a gene: weight inversely by gene length so long
# genes are not over-counted.
# NOTE(review): the second condition '$inlen =~ /[0-9]*/' matches the
# empty string and is therefore always true — presumably /[0-9]+/ was
# intended; confirm before changing.
if ($ingene !~ /N\/A/ && $inlen =~ /[0-9]*/) {
$hscore{$ingene}{in} += $fmargin/($fmargin+$hgenelen{$ingene});
# $hscore{$ingene}{in} += 1
#	if ($downtsslen <= $tightmargin);
$hitcount{$ingene} += 1;
}
# Hit within $margin bp upstream of a TSS ($uptsslen is negative).
if (-$uptsslen <= $margin) { # $uplen is negative
$hscore{$uptssgene}{up} += 1; # unless (defined $h{$uptssgene} && $h{$uptssgene} > 1);
# $hscore{$uptssgene}{up} += 1
#	if (-$uptsslen <= $tightmargin);
# if (defined $hscore{$uptssgene}{down} and $hscore{$uptssgene}{down} > $hscore{$uptssgene}{up});
$hitcount{$uptssgene} += 1;
}
# Hit within $margin bp downstream of a gene end.
if ($downlen <= $margin) { # $uplen is negative
$hscore{$downgene}{down} += 1; # unless (defined $h{$downgene} && $h{$downgene} > 1);
# $hscore{$downgene}{down} += 1
#	if ($downlen <= $tightmargin);
# if (defined $hscore{$downgene}{up} and $hscore{$downgene}{up} > $hscore{$downgene}{down});
$hitcount{$downgene} += 1;
}
}
# Total score per gene = in + up + down components; report genes with
# more than one hit and gene length > 1 kb, sorted by descending score.
my %htotal = ();
for my $x (keys %hscore) {
$htotal{$x} = $hscore{$x}{in} + $hscore{$x}{up} + $hscore{$x}{down};
# $htotal{$x} *= 2 if ($hscore{$x}{up} > 0 && $hscore{$x}{down} > 0);
}
for my $x (reverse sort {$htotal{$a}<=>$htotal{$b}} keys %htotal) {
print "$x\t$htotal{$x}\t$hgenelen{$x}\t$hitcount{$x}\t$hscore{$x}{in}\t$hscore{$x}{up}\t$hscore{$x}{down}\n" if ($hitcount{$x} > 1 && $hgenelen{$x}>1000);
}
| akabakcioglu/neurod2-analyze | high_activity_uniqcnt.pl | Perl | mit | 2,594 |
/* Part of XPCE --- The SWI-Prolog GUI toolkit
Author: Jan Wielemaker and Anjo Anjewierden
E-mail: jan@swi.psy.uva.nl
WWW: http://www.swi.psy.uva.nl/projects/xpce/
Copyright (c) 2002-2011, University of Amsterdam
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
:- module(pce_print_graphics, []).
:- use_module(library(pce)).
:- use_module(library(pce_template)).
:- use_module(library(pce_shell)).
:- pce_autoload(finder, library(find_file)).
:- pce_global(@finder, new(finder)).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Public methods:
->print
Prints the content of the Window as a single page
->save_postscript: [file], [directory]
Save content of the Window as PostScript
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
:- pce_begin_class(print_graphics, template,
"Template defining ->print").
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Print the image to the default printer. Also this method should be
extended by requesting additional parameters from the user.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
%   ->print
%
%   XPCE method: print the contents of Canvas on the default printer
%   by delegating to the helper predicate print_canvas/1 below.
print(Canvas) :->
	"Send to default printer"::
	print_canvas(Canvas).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
There are two routes to print. On MS-Windows printing is achieved by
drawing on a GDI representing a printer, after which the Windows printer
driver creates printer-codes and sends them to the printer. The standard
Windows print dialog is shown by win_printer->setup. Next we need some
calculation effort to place our diagram reasonably on the page.
In the Unix world, things go different. In general you make a PostScript
file and hand this to the print-spooler, which will translate the
device-independant PostScript to whatever the printer needs.
XPCE doesn't (yet) try to hide the difference between these two
approaches.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
%   print_canvas(+Canvas)
%
%   First clause: MS-Windows route.  The win_printer class only exists
%   on Windows builds of XPCE, so its convertibility acts as the
%   platform test.  The drawing is scaled to fit the printable page
%   with 1-inch margins, preserving aspect ratio (isotropic mapping).
print_canvas(Canvas) :-				% MS-Windows
	get(@pce, convert, win_printer, class, _),
	!,
	% Use the canvas' default file name as the spooler job name when
	% available.
	(   send(Canvas, has_get_method, default_file),
	    get(Canvas, default_file, Job)
	->  true
	;   Job = '<unknown job>'
	),
	new(Prt, win_printer(Job)),
	send(Prt, setup, Canvas),	% shows the standard print dialog
	send(Prt, open),
	% Size of the drawing in inches, from screen resolution.
	get(Canvas, bounding_box, area(X, Y, W, H)),
	get(@display, dots_per_inch, size(DX, DY)),
	InchW is W/DX,
	InchH is H/DY,
	% Printable area in printer dots and inches.
	get(Prt, size, size(PW0, PH0)),
	get(Prt, dots_per_inch, size(RX, RY)),
	MarX is RX,			% default 1 inch margins
	MarY is RY,
	PrInchW is (PW0-MarX*2)/RX,
	PrInchH is (PH0-MarY*2)/RY,
	send(Prt, map_mode, isotropic),
	(   InchW < PrInchW,
	    InchH < PrInchH		% it fits on the page
	->  % Centre horizontally at natural size.
	    OX is MarX + ((PrInchW-InchW)/2)*RX,
	    send(Prt, window, area(X, Y, DX, DY)),
	    send(Prt, viewport, area(OX, MarY, RX, RY))
	;   % Too large: shrink uniformly so both dimensions fit.
	    Aspect is min(PrInchW/InchW, PrInchH/InchH),
	    ARX is integer(Aspect*RX),
	    ARY is integer(Aspect*RY),
	    send(Prt, window, area(X, Y, DX, DY)),
	    send(Prt, viewport, area(MarX, MarY, ARX, ARY))
	),
	send(Prt, draw_in, Canvas?graphicals),
	send(Prt, close),
	free(Prt).
%   Second clause: Unix route.  Render the canvas to a temporary
%   PostScript file and hand it to the user-confirmed print command
%   (see print_command below) via the shell.
print_canvas(Canvas) :-				% Unix/PostScript
	get(Canvas, print_command, Command),
	new(PsFile, file),		% anonymous temporary file
	send(PsFile, open, write),
	send(PsFile, append, Canvas?postscript),
	send(PsFile, append, 'showpage\n'),
	send(PsFile, close),
	get(PsFile, absolute_path, File),
	get(string('%s "%s"', Command, File), value, ShellCommand),
	pce_shell_command('/bin/sh'('-c', ShellCommand)),
	send(PsFile, remove),
	send(PsFile, done),
	send(Canvas, report, status, 'Sent to printer').
%   <-print_command
%
%   Ask the user to confirm (or edit) the shell command used to print,
%   pre-filled from the template and the default printer.  Fails if the
%   user cancels the dialog.
print_command(Canvas, Command:name) :<-
	"Get name of the printer"::
	get(Canvas, frame, Frame),
	default_printer(DefPrinter),
	get(Canvas, print_command_template, CmdTempl),
	print_cmd(CmdTempl, DefPrinter, Cmd),
	new(D, dialog(print_command?label_name)),
	send(D, append, new(P, text_item(print_command, Cmd))),
	send(D, append, button(cancel, message(D, return, @nil))),
	send(D, append, button(ok, message(D, return, P?selection))),
	send(D, default_button, ok),
	send(D, transient_for, Frame),
	send(D, modal, transient),
	% Pop up centred over the canvas' frame and wait for an answer.
	get(D, confirm_centered, Canvas?frame?area?center, Answer),
	send(D, destroy),
	Answer \== @nil,		% fail (abort printing) on cancel
	Command = Answer.
%   default_printer(-Printer)
%
%   Printer is the value of the PRINTER environment variable when set,
%   otherwise the symbolic default `postscript`.
default_printer(Printer) :-
	get(@pce, environment_variable, 'PRINTER', Printer),
	!.
default_printer(postscript).
%   <-print_job_name
%
%   Default name for the printer job; classes using this template may
%   redefine it.
print_job_name(_, Job) :<-
	"Default name of the printer job"::
	Job = 'XPCE/SWI-Prolog'.
%   <-print_command_template
%
%   Default shell command template; '%p' is replaced by the printer
%   name (see print_cmd/3).
print_command_template(_, Command) :<-
	"Default command to send a job to the printer"::
	Command = 'lpr -P%p'.
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
print_cmd(+Template, +Printer, -Command) determines the shell
command to execute in order to print on `Printer' using the
given template. The substitutions are handled by a regex object.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
%   print_cmd(+Template, +Printer, -Cmd)
%
%   Instantiate the command template by substituting '%p' with the
%   printer name; the temporary XPCE string is freed afterwards.
print_cmd(Template, Printer, Cmd) :-
	new(S, string('%s', Template)),
	substitute(S, '%p', Printer),
	get(S, value, Cmd),
	free(S).
%   substitute(+S, +F, +T)
%
%   Destructively replace every occurrence of pattern F in the XPCE
%   string S with T, using an XPCE regex object.
substitute(S, F, T) :-
	new(R, regex(F)),
	send(R, for_all, S,
	     message(@arg1, replace, @arg2, T)),
	free(R).
/*******************************
* POSTSCRIPT *
*******************************/
%   ->save_postscript: File:[file], Directory:[directory]
%
%   Save the canvas content as PostScript.  When File is @default the
%   user is prompted with a file-save dialog (filtered to .ps/.eps),
%   optionally starting in Directory.
save_postscript(Canvas, File:file=[file], Directory:directory=[directory]) :->
	"Save content as PostScript to File"::
	(   File == @default
	->  get(@finder, file, save,
		chain(tuple('PostScript', ps),
		      tuple('Encapsulated PostScript', eps)),
		Directory,
		FileName)
	;   FileName = File
	),
	new(PsFile, file(FileName)),
	send(PsFile, open, write),
	send(PsFile, append, Canvas?postscript),
	send(PsFile, append, 'showpage\n'),	% make the page actually print
	send(PsFile, close),
	send(Canvas, report, status, 'Saved PostScript to %s', PsFile).
:- pce_end_class(print_graphics).
| TeamSPoon/logicmoo_workspace | docker/rootfs/usr/local/lib/swipl/xpce/prolog/lib/print_graphics.pl | Perl | mit | 7,692 |
#!/usr/bin/perl
# Written by Harris H. Wang, Church Lab, HMS
# File name: optMAGE.pl
# Created: 5/04/2009
# Last modified: 5/04/2009
#
# Description: Designs oligo and primers for MAGE experiments
use Bio::SeqIO;
use Bio::Perl;
use Bio::Location::Simple;
use POSIX;
# open(INFILE2, "INPUTtarg_verif.txt") or die "Can't open file: $!";
# open(OUTFILE2, ">>OUTprimers.txt") or die "Can't open output: $!";
# print "Option 1: generate oligo file only\nOption 2: generate PCR & sequencing verification file only\nOption 3: generate all files\n";
# print "Enter Option: ";
# $option = <>;
# Program banner.
print "OptMAGE 0.9beta\n";
print "By Harris Wang\n";
print "Copyright (C) 2009\n";
print "Harvard Medical School\n";
print "Boston, MA 02115, USA\n";
print "\nloading genome sequence...\n";
# Load the reference genome (single FASTA record expected in
# genome.fasta); $seq_obj is used later for subsequence extraction.
$seqio_obj = Bio::SeqIO->new(-file => "genome.fasta", -format => "fasta");
$seq_obj = $seqio_obj->next_seq;
# Read the run parameters (one tab-separated line) from INPUTparam.txt.
# Fields: oligo size, dGss threshold, default mismatch offset, max
# shift, number of phosphorothioate bonds and (optionally) the
# calc-replichore flag.
print "loading INPUTparam.txt...\n";
open(INPARAM, "INPUTparam.txt") or die "Error! Can't open file: $!";
foreach (<INPARAM>) {
    # The original regex captured only five fields, so $calcreplic
    # (read from $6) was always undef; a sixth, optional capture with a
    # default of 0 fixes that while still accepting five-column input.
    if ($_ =~ /^(\d+)\t(\S+)\t(\d+)\t(\d+)\t(\d+)(?:\t(\d+))?/) {
        $oligosize = $1;
        $dGssthresh = $2;  # dG threshold; below it the oligo is shifted for optimization
        $mloc_dft = $3;    # distance (bps) of mismatch to the 3' end of the oligo
        $mloc_max = $4;    # max basepair shift (was terminated by ',' instead of ';')
        $cmod = $5;        # number of terminal 5' phosphorothioate bonds
        $calcreplic = defined $6 ? $6 : 0; # auto replichore calculation: 0 = no, 1 = yes
        $dGssmin_dft = -200;
    }
}
close(INPARAM); # was close(INPPARAM): typo closing a non-existent handle
# E. coli replication landmarks (genomic coordinates): origin (OriC)
# and terminus (dif) regions, used to assign targets to a replichore.
my @OriC = (3932974,3933205);
my @dif = (1597981,1598008);
print "loading INPUTtarg.txt...\n";
open(INTARG, "INPUTtarg.txt") or die "Error! Can't open file: $!";
# Each target line: ID, strand, replichore, start, end, mutation type,
# mutation sequence.
foreach (<INTARG>) {
if ($_ =~ /^(\w+)\t(\S+)\t(\d+)\t(\d+)\t(\d+)\t(\w+)\t(\w+)/) {
push(@id,"$1");
push(@strand,"$2");
# Take the replichore from the file, or derive it from the target
# midpoint relative to OriC/dif when calcreplic is enabled.
if ($calcreplic == 0) { push(@rep,"$3"); }
else {
push(@rep,"1") if (($5-$4)/2+$4 < $dif[0] || ($5-$4)/2+$4 > $OriC[1]);
push(@rep,"2") if (($5-$4)/2+$4 > $dif[1] && ($5-$4)/2+$4 < $OriC[0]);
}
push(@start,"$4"-1);##hack to make the indexes inclusive
push(@end,"$5"+1);##also to go from exclusive to inclusive
push(@mut,"$6");
push(@mutseq,"$7");
# Sanity checks: coordinates ordered, mutation fits within the
# allowed shift window.
if ($4>$5) { die "Error! ID: $1 - start coord ($4) is greater than end coord ($5)"; }
if (length($7)/2 > ($oligosize/2-$mloc_max)) { die "Error! ID: $1 - mutation length for $$7 exceeds mloc_max ($mloc_max) constraint"; }
}
}
close(INTARG);
# Write the header line of the oligo output table.
open(OUTOLIGOS, ">>OUToligos.txt") or die "Error! Can't open output: $!";
print OUTOLIGOS "ID\tSTART\tEND\tSTRAND\tREP\tMUT\tMSHIFT\tdGss\tOLIGOSIZE\tMM_COUNT\tINS_COUNT\tDEL_COUNT\tPRED_RE\tOLIGO SEQ\n";
close(OUTOLIGOS);
# Main design loop: build one MAGE oligo per target.
for ($i = 0; $i <= $#id; $i++) {
### generate info about mutation sequence
# For non-deletion targets, precompute the mutation sequence and its
# reverse complement; $Msize is the inserted/replacing length.
if ($mut[$i] ne 'D') {
$mut_obj = Bio::Seq->new(-seq => $mutseq[$i], -alphabet => 'dna' );
$rc_mut_obj = $mut_obj->revcom;
$curr_mutseq = $mut_obj->seq;
$curr_rc_mutseq = $rc_mut_obj->seq;
$Msize = length($mutseq[$i]);
}
else { $Msize=0; }
# $Dsize: number of reference bases replaced/removed between the
# (inclusive-adjusted) start and end coordinates.
$Dsize = $end[$i]-$start[$i]-1;
### generate info about type of mutation
# Classify into mismatch/insertion/deletion counts for RE prediction.
if ($Msize >= $Dsize) {
$ins_ct = $Msize-$Dsize;
$mm_ct = $Dsize;
$del_ct = 0;
}
else {
$ins_ct = 0;
$mm_ct = $Msize;
$del_ct = $Dsize-$Msize;
}
### calculate homology shift information
# Split the non-mutated part of the oligo into two homology arms.
$Hsize = $oligosize - $Msize;
$H1size = floor($Hsize/2);
$H2size = ceil($Hsize/2);
if ($mut[$i] ne 'D') {
if (length($mutseq[$i])<$mloc_max) { $mloc_max_tp = $mloc_max; }
else { $mloc_max_tp = length($mutseq[$i]); }
$Mshift = $H1size-$mloc_max_tp;
}
else { $Mshift = $H1size-$mloc_max; }
### generate homology blocks
# Replichore 1 targets are built on the minus strand.
# NOTE(review): '=~ "1"' is a substring match, not equality; fine for
# the expected values "1"/"2" but fragile for anything else.
if ($rep[$i] =~ "1") {
$block1 = Bio::Location::Simple->new(-start=>($start[$i]-$H1size+1), -end=>($start[$i]), -strand=>"-1");
$block2 = Bio::Location::Simple->new(-start=>($end[$i]), -end=>($end[$i]+$H2size-1+$Mshift), -strand=>"-1");
$block1_seq = $seq_obj->subseq($block1);
$block2_seq = $seq_obj->subseq($block2);
if ($mut[$i] ne 'D') {
if ($strand[$i] =~ m/\+/) { $block = $block2_seq.$curr_rc_mutseq.$block1_seq; }
if ($strand[$i] =~ m/\-/) { $block = $block2_seq.$curr_mutseq.$block1_seq; }
}
else { $block = $block2_seq.$block1_seq; }
}
# Replichore 2 targets are built on the plus strand.
if ($rep[$i] =~ "2") {
$block1 = Bio::Location::Simple->new(-start=>($start[$i]-$H2size+1-$Mshift), -end=>($start[$i]), -strand=>"+1");
$block2 = Bio::Location::Simple->new(-start=>($end[$i]), -end=>($end[$i]+$H1size-1), -strand=>"+1");
$block1_seq = $seq_obj->subseq($block1);
$block2_seq = $seq_obj->subseq($block2);
if ($mut[$i] ne 'D') {
if ($strand[$i] =~ m/\+/) { $block = $block1_seq.$curr_mutseq.$block2_seq; }
if ($strand[$i] =~ m/\-/) { $block = $block1_seq.$curr_rc_mutseq.$block2_seq; }
}
else { $block = $block1_seq.$block2_seq; }
}
# Dump the raw block geometry for debugging/inspection.
open(OUTALLDUMP, ">>OUTalldump.txt") or die "Error! Can't open output: $!";
printf OUTALLDUMP "%s\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%s\n", $id[$i], $H1size, $H2size, $Mshift, $block1->start, $block1->end, $block2->start, $block2->end, $block;
printf "%s\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%s\n", $id[$i], $H1size, $H2size, $Mshift, $block1->start, $block1->end, $block2->start, $block2->end, $block;
### dGss optimize block sequence
# Slide an $oligosize window through the block, scoring secondary
# structure with the external UNAFold tool hybrid-ss-min, until the
# folding energy clears $dGssthresh or the search is exhausted.
# NOTE(review): $mloc is set here but never read afterwards.
$eosearch = 0;
$mloc = $mloc_dft;
$dGssmin = $dGssmin_dft;
$string = substr $block, $Mshift, $oligosize;
$dGss = `hybrid-ss-min --NA=DNA --energyOnly -q $string`;
$dGss = substr $dGss, 0, -1;
if ($dGss > $dGssthresh) { $Mshift_min = 0; }
else {
$Mshift_tp = $Mshift;
$Mshift_min = $Mshift;
do {
$string = substr $block, $Mshift_tp, $oligosize;
$dGss = `hybrid-ss-min --NA=DNA --energyOnly -q $string`;
$dGss = substr $dGss, 0, -1;
# Keep the shift with the best (largest) dGss seen so far.
if ($dGss > $dGssmin) { $dGssmin = $dGss; $Mshift_min = $Mshift-$Mshift_tp; }
if ($eosearch == 1) { $eosearch = 2; }
if ($Mshift_tp == 0 && $eosearch !=2) { $Mshift_tp = $Mshift-$Mshift_min+1; $eosearch = 1; }
$temp = $Mshift-$Mshift_tp;
print OUTALLDUMP "\t\t$dGss\t$temp\t\t\t\t\t$string\n";
# printf "\t\t$dGss\t$temp\t\t\t\t\t$string\n";
$Mshift_tp = $Mshift_tp-1;
} while ($eosearch < 2 && $dGss < $dGssthresh);
}
print OUTALLDUMP "Optimized: \t\t$dGss\t$Mshift_min\t\t\t\t\t$string\n";
# print "Optimized: \t\t$dGss\t$Mshift_min\t\t\t\t\t\t$string\n";
close(OUTALLDUMP);
#$dGhyb = `hybrid-min --NA=DNA --energyOnly -q $string $wtstring`;
### Predict oligo replacement efficiency
$RE = PredictRE($dGss,$cmod,$oligosize,$mm_ct,$ins_ct,$del_ct);
$RE = sprintf("%.2f", $RE);
### add phosphorothioate modifications
# Insert '*' markers between the first $cmod 5' bases.
for ($j=1;$j<$cmod*2;$j+=2) { substr($string, $j, 0) = '*'; }
open(OUTOLIGOS, ">>OUToligos.txt") or die "Error! Can't open output: $!";
print OUTOLIGOS "$id[$i]\t$start[$i]\t$end[$i]\t$strand[$i]\t$rep[$i]\t$mut[$i]\t$Mshift_min\t$dGss\t$oligosize\t$mm_ct\t$ins_ct\t$del_ct\t$RE\t$string\n";
close(OUTOLIGOS);
}
# PredictRE($dGss, $cmod, $oligosize, $mm_ct, $ins_ct, $del_ct)
# Predict the oligo-mediated allelic replacement efficiency (RE) from
# empirical fits: a combined factor (folding energy, phosphorothioate
# count, oligo length polynomials) scaled by exponential penalties for
# mismatches, insertions and deletions.
# Fixes vs. the original: the empty '()' prototype is removed (the sub
# takes six arguments), the duplicate 'my $del_ct_idl' declaration is
# gone, work variables are lexical rather than global, and the unused
# power() helper is replaced by Perl's ** operator.
sub PredictRE {
    my ($dGss, $cmod, $oligosize, $mm_ct, $ins_ct, $del_ct) = @_;

    # "Ideal" reference values the empirical fits are normalised to.
    my $dGss_idl   = -5;
    my $mm_ct_idl  = 1;
    my $ins_ct_idl = 1;
    my $del_ct_idl = 1;

    # Combined factor: folding-energy term * phosphorothioate cubic fit
    # * oligo-length quartic fit.
    my $comfact = (32 - 0.991*($dGss_idl - $dGss))
        * (0.00126*$cmod**3 - 0.0342*$cmod**2 + 0.264*$cmod + 0.408)
        * (-0.000000139*$oligosize**4 + 0.0000269*$oligosize**3
           - 0.0015*$oligosize**2 + 0.0301*$oligosize);

    # Exponential penalties relative to the ideal counts.
    my $mmfact  = exp(-0.135*($mm_ct  - $mm_ct_idl));
    my $insfact = exp(-0.075*($ins_ct - $ins_ct_idl));
    my $delfact;
    if ($del_ct <= 30) { $delfact = exp(-0.0579*($del_ct - $del_ct_idl)); }
    else               { $delfact = exp(-1.37*log($del_ct)/log(10)); }

    # Apply each penalty only when that mutation class is present.
    my $RE = $comfact;
    $RE *= $mmfact  if $mm_ct  != 0;
    $RE *= $insfact if $ins_ct != 0;
    $RE *= $delfact if $del_ct != 0;
    return ($RE);
}
# power($n, $p)
# Integer exponentiation: return $n raised to the $p-th power.
# The original hand-rolled loop returned $n (not 1) for $p == 0;
# delegating to Perl's builtin ** operator fixes that edge case while
# preserving results for all $p >= 1 (the only exponents used above).
sub power {
    my ($n, $p) = @_;
    return $n ** $p;
}
| CIDARLAB/magelet | WebContent/optMage_1/optMAGEv0.9.pl | Perl | bsd-3-clause | 8,884 |
# OS/2 flavour of file:// URI handling; behaves like the Win32 variant
# except for authority extraction and backslash normalisation.
package URI::file::OS2;

use strict;
use warnings;

use parent 'URI::file::Win32';

our $VERSION = '1.76';

# The Win32 version translates k:/foo to file://k:/foo (?!)
# We add an empty host
# _file_extract_authority($class, $path)
# Destructively strips and returns the authority part of $path.
# $_[0] is modified in place (it aliases the caller's variable).
# Returns the UNC host, '' for drive-letter paths, or undef.
sub _file_extract_authority
{
    my $class = shift;
    return $1 if $_[0] =~ s,^\\\\([^\\]+),,;  # UNC
    return $1 if $_[0] =~ s,^//([^/]+),,;     # UNC too?

    if ($_[0] =~ m#^[a-zA-Z]{1,2}:#) {	      # allow for ab: drives
	return "";
    }
    return;
}

# file(...)
# Convert the URI to a local file path.  The '&' call without
# parentheses deliberately forwards this sub's @_ unchanged to the
# Win32 implementation; the result then has backslashes normalised
# to forward slashes.
sub file {
    my $p = &URI::file::Win32::file;
    return unless defined $p;
    $p =~ s,\\,/,g;
    $p;
}

1;
| operepo/ope | client_tools/svc/rc/usr/share/perl5/vendor_perl/URI/file/OS2.pm | Perl | mit | 561 |
package TestAPR::pool;
use strict;
use warnings FATAL => 'all';
use Apache::Test;
use Apache::TestUtil;
use Apache::TestTrace;
use Apache2::RequestRec ();
use APR::Pool ();
use APR::Table ();
use Apache2::Const -compile => 'OK';
use TestAPRlib::pool;
# mod_perl response handler exercising APR::Pool behaviour on the
# *native* request pool ($r->pool): native pools must survive both an
# explicit ->destroy and the implicit DESTROY when the Perl wrapper
# goes out of scope (their registered cleanups must NOT run).
# Finishes by running the shared TestAPRlib::pool test suite.
sub handler {
    my $r = shift;

    plan $r, tests => 4 + TestAPRlib::pool::num_of_tests();

    ### native pools ###

    # explicit destroy shouldn't destroy native pools
    {
        my $p = $r->pool;
        # The request pool should sit at least two levels deep in the
        # pool ancestry (server/connection above it).
        my $count = TestAPRlib::pool::ancestry_count($p);
        t_debug "\$r->pool has 2 or more ancestors (found $count)";
        ok $count >= 2;

        $p->cleanup_register(\&set_cleanup, [$r, 'native destroy']);

        $p->destroy;

        # The cleanup must not have fired, so no 'cleanup' notes exist.
        my @notes = $r->notes->get('cleanup');
        ok t_cmp(scalar(@notes), 0, "should be 0 notes");
        $r->notes->clear;
    }

    # implicit DESTROY shouldn't destroy native pools
    {
        {
            my $p = $r->pool;
            my $count = TestAPRlib::pool::ancestry_count($p);
            t_debug "\$r->pool has 2 or more ancestors (found $count)";
            ok $count >= 2;
            $p->cleanup_register(\&set_cleanup, [$r, 'native scoped']);
            # $p's wrapper is DESTROYed here as it leaves scope.
        }

        my @notes = $r->notes->get('cleanup');
        ok t_cmp(scalar(@notes), 0, "should be 0 notes");
        $r->notes->clear;
    }

    # shared APR::Pool tests (custom pool create/destroy, cleanups...).
    TestAPRlib::pool::test();

    Apache2::Const::OK;
}
# Pool cleanup callback: records that it ran by storing its label in
# the request's notes table under the 'cleanup' key.  The single
# argument is an array ref of [$request, $label].
sub set_cleanup {
    my ($args) = @_;
    my ($r, $label) = @$args;
    debug "setting cleanup note: $label";
    $r->notes->set(cleanup => $label);
    return 1;
}
1;
| gitpan/mod_perl | t/response/TestAPR/pool.pm | Perl | apache-2.0 | 1,541 |
=pod
=head1 NAME
SSL_set_fd - connect the SSL object with a file descriptor
=head1 SYNOPSIS
#include <openssl/ssl.h>
int SSL_set_fd(SSL *ssl, int fd);
int SSL_set_rfd(SSL *ssl, int fd);
int SSL_set_wfd(SSL *ssl, int fd);
=head1 DESCRIPTION
SSL_set_fd() sets the file descriptor B<fd> as the input/output facility
for the TLS/SSL (encrypted) side of B<ssl>. B<fd> will typically be the
socket file descriptor of a network connection.
When performing the operation, a B<socket BIO> is automatically created to
interface between the B<ssl> and B<fd>. The BIO and hence the SSL engine
inherit the behaviour of B<fd>. If B<fd> is non-blocking, the B<ssl> will
also have non-blocking behaviour.
If there was already a BIO connected to B<ssl>, BIO_free() will be called
(for both the reading and writing side, if different).
SSL_set_rfd() and SSL_set_wfd() perform the respective action, but only
for the read channel or the write channel, which can be set independently.
=head1 RETURN VALUES
The following return values can occur:
=over 4
=item C<0>
The operation failed. Check the error stack to find out why.
=item C<1>
The operation succeeded.
=back
=head1 SEE ALSO
L<SSL_get_fd(3)|SSL_get_fd(3)>, L<SSL_set_bio(3)|SSL_set_bio(3)>,
L<SSL_connect(3)|SSL_connect(3)>, L<SSL_accept(3)|SSL_accept(3)>,
L<SSL_shutdown(3)|SSL_shutdown(3)>, L<ssl(3)|ssl(3)> , L<bio(3)|bio(3)>
=cut
| GaloisInc/hacrypto | src/C/libssl/HEAD/src/doc/ssl/SSL_set_fd.pod | Perl | bsd-3-clause | 1,397 |
/* Part of SWI-Prolog
Author: Markus Triska
E-mail: triska@metalevel.at
WWW: http://www.swi-prolog.org
Copyright (C): 2014-2018 Markus Triska
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
CLP(B): Constraint Logic Programming over Boolean variables.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
:- module(clpb, [
op(300, fy, ~),
op(500, yfx, #),
sat/1,
taut/2,
labeling/1,
sat_count/2,
weighted_maximum/3,
random_labeling/2
]).
:- use_module(library(error)).
:- use_module(library(assoc)).
:- use_module(library(apply_macros)).
:- create_prolog_flag(clpb_monotonic, false, []).
:- create_prolog_flag(clpb_residuals, default, []).
/** <module> CLP(B): Constraint Logic Programming over Boolean Variables
## Introduction {#clpb-intro}
This library provides CLP(B), Constraint Logic Programming over
Boolean variables. It can be used to model and solve combinatorial
problems such as verification, allocation and covering tasks.
CLP(B) is an instance of the general [CLP(_X_) scheme](<#clp>),
extending logic programming with reasoning over specialised domains.
The implementation is based on reduced and ordered Binary Decision
Diagrams (BDDs).
Benchmarks and usage examples of this library are available from:
[__https://www.metalevel.at/clpb/__](https://www.metalevel.at/clpb/)
We recommend the following references for citing this library in
scientific publications:
==
@inproceedings{Triska2016,
author = "Markus Triska",
title = "The {Boolean} Constraint Solver of {SWI-Prolog}:
System Description",
booktitle = "FLOPS",
series = "LNCS",
volume = 9613,
year = 2016,
pages = "45--61"
}
@article{Triska2018,
title = "Boolean constraints in {SWI-Prolog}:
A comprehensive system description",
journal = "Science of Computer Programming",
volume = "164",
pages = "98 - 115",
year = "2018",
note = "Special issue of selected papers from FLOPS 2016",
issn = "0167-6423",
doi = "https://doi.org/10.1016/j.scico.2018.02.001",
url = "http://www.sciencedirect.com/science/article/pii/S0167642318300273",
author = "Markus Triska",
keywords = "CLP(B), Boolean unification, Decision diagrams, BDD"
}
==
These papers are available from
[https://www.metalevel.at/swiclpb.pdf](https://www.metalevel.at/swiclpb.pdf)
and
[https://www.metalevel.at/boolean.pdf](https://www.metalevel.at/boolean.pdf)
respectively.
## Boolean expressions {#clpb-exprs}
A _Boolean expression_ is one of:
| `0` | false |
| `1` | true |
| _variable_ | unknown truth value |
| _atom_ | universally quantified variable |
| ~ _Expr_ | logical NOT |
| _Expr_ + _Expr_ | logical OR |
| _Expr_ * _Expr_ | logical AND |
| _Expr_ # _Expr_ | exclusive OR |
| _Var_ ^ _Expr_ | existential quantification |
| _Expr_ =:= _Expr_ | equality |
| _Expr_ =\= _Expr_ | disequality (same as #) |
| _Expr_ =< _Expr_ | less or equal (implication) |
| _Expr_ >= _Expr_ | greater or equal |
| _Expr_ < _Expr_ | less than |
| _Expr_ > _Expr_ | greater than |
| card(Is,Exprs) | cardinality constraint (_see below_) |
| `+(Exprs)` | n-fold disjunction (_see below_) |
| `*(Exprs)` | n-fold conjunction (_see below_) |
where _Expr_ again denotes a Boolean expression.
The Boolean expression card(Is,Exprs) is true iff the number of true
expressions in the list `Exprs` is a member of the list `Is` of
integers and integer ranges of the form `From-To`. For example, to
state that precisely two of the three variables `X`, `Y` and `Z` are
`true`, you can use `sat(card([2],[X,Y,Z]))`.
`+(Exprs)` and `*(Exprs)` denote, respectively, the disjunction and
conjunction of all elements in the list `Exprs` of Boolean
expressions.
Atoms denote parametric values that are universally quantified. All
universal quantifiers appear implicitly in front of the entire
expression. In residual goals, universally quantified variables always
appear on the right-hand side of equations. Therefore, they can be
used to express functional dependencies on input variables.
## Interface predicates {#clpb-interface}
The most frequently used CLP(B) predicates are:
* sat(+Expr)
True iff the Boolean expression Expr is satisfiable.
* taut(+Expr, -T)
If Expr is a tautology with respect to the posted constraints, succeeds
with *T = 1*. If Expr cannot be satisfied, succeeds with *T = 0*.
Otherwise, it fails.
* labeling(+Vs)
Assigns truth values to the variables Vs such that all constraints
are satisfied.
The unification of a CLP(B) variable _X_ with a term _T_ is equivalent
to posting the constraint sat(X=:=T).
## Examples {#clpb-examples}
Here is an example session with a few queries and their answers:
==
?- use_module(library(clpb)).
true.
?- sat(X*Y).
X = Y, Y = 1.
?- sat(X * ~X).
false.
?- taut(X * ~X, T).
T = 0,
sat(X=:=X).
?- sat(X^Y^(X+Y)).
sat(X=:=X),
sat(Y=:=Y).
?- sat(X*Y + X*Z), labeling([X,Y,Z]).
X = Z, Z = 1, Y = 0 ;
X = Y, Y = 1, Z = 0 ;
X = Y, Y = Z, Z = 1.
?- sat(X =< Y), sat(Y =< Z), taut(X =< Z, T).
T = 1,
sat(X=:=X*Y),
sat(Y=:=Y*Z).
?- sat(1#X#a#b).
sat(X=:=a#b).
==
The pending residual goals constrain remaining variables to Boolean
expressions and are declaratively equivalent to the original query.
The last example illustrates that when applicable, remaining variables
are expressed as functions of universally quantified variables.
## Obtaining BDDs {#clpb-residual-goals}
By default, CLP(B) residual goals appear in (approximately) algebraic
normal form (ANF). This projection is often computationally expensive.
You can set the Prolog flag `clpb_residuals` to the value `bdd` to see
the BDD representation of all constraints. This results in faster
projection to residual goals, and is also useful for learning more
about BDDs. For example:
==
?- set_prolog_flag(clpb_residuals, bdd).
true.
?- sat(X#Y).
node(3)- (v(X, 0)->node(2);node(1)),
node(1)- (v(Y, 1)->true;false),
node(2)- (v(Y, 1)->false;true).
==
Note that this representation cannot be pasted back on the toplevel,
and its details are subject to change. Use copy_term/3 to obtain
such answers as Prolog terms.
The variable order of the BDD is determined by the order in which the
variables first appear in constraints. To obtain different orders,
you can for example use:
==
?- sat(+[1,Y,X]), sat(X#Y).
node(3)- (v(Y, 0)->node(2);node(1)),
node(1)- (v(X, 1)->true;false),
node(2)- (v(X, 1)->false;true).
==
## Enabling monotonic CLP(B) {#clpb-monotonic}
In the default execution mode, CLP(B) constraints are _not_ monotonic.
This means that _adding_ constraints can yield new solutions. For
example:
==
?- sat(X=:=1), X = 1+0.
false.
?- X = 1+0, sat(X=:=1), X = 1+0.
X = 1+0.
==
This behaviour is highly problematic from a logical point of view, and
it may render [**declarative
debugging**](https://www.metalevel.at/prolog/debugging)
techniques inapplicable.
Set the flag `clpb_monotonic` to `true` to make CLP(B) *monotonic*. If
this mode is enabled, then you must wrap CLP(B) variables with the
functor `v/1`. For example:
==
?- set_prolog_flag(clpb_monotonic, true).
true.
?- sat(v(X)=:=1#1).
X = 0.
==
@author [Markus Triska](https://www.metalevel.at)
*/
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Each CLP(B) variable belongs to exactly one BDD. Each CLP(B)
variable gets an attribute (in module "clpb") of the form:
index_root(Index,Root)
where Index is the variable's unique integer index, and Root is the
root of the BDD that the variable belongs to.
Each CLP(B) variable also gets an attribute in module clpb_hash: an
association table node(LID,HID) -> Node, to keep the BDD reduced.
The association table of each variable must be rebuilt on occasion
to remove nodes that are no longer reachable. We rebuild the
association tables of involved variables after BDDs are merged to
build a new root. This only serves to reclaim memory: Keeping a
node in a local table even when it no longer occurs in any BDD does
not affect the solver's correctness. However, apply_shortcut/4
relies on the invariant that every node that occurs in the relevant
BDDs is also registered in the table of its branching variable.
A root is a logical variable with a single attribute ("clpb_bdd")
of the form:
Sat-BDD
where Sat is the SAT formula (in original form) that corresponds to
BDD. Sat is necessary to rebuild the BDD after variable aliasing,
and to project all remaining constraints to a list of sat/1 goals.
Finally, a BDD is either:
*) The integers 0 or 1, denoting false and true, respectively, or
*) A node of the form
node(ID, Var, Low, High, Aux)
Where ID is the node's unique integer ID, Var is the
node's branching variable, and Low and High are the
node's low (Var = 0) and high (Var = 1) children. Aux
is a free variable, one for each node, that can be used
to attach attributes and store intermediate results.
Variable aliasing is treated as a conjunction of corresponding SAT
formulae.
You should think of CLP(B) as a potentially vast collection of BDDs
that can range from small to gigantic in size, and which can merge.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Type checking.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% is_sat(+Expr) is semidet.
%
% Type check: succeeds iff Expr is a well-formed Boolean expression as
% described in the module documentation. Clause order and the cuts in
% the first two clauses matter: an unbound variable is accepted only
% after the monotonicity check, and v(V) with V unbound is accepted
% without further inspection.
is_sat(V) :- var(V), !, non_monotonic(V).
is_sat(v(V)) :- var(V), !.
is_sat(v(I)) :- integer(I), between(0, 1, I).
is_sat(I) :- integer(I), between(0, 1, I).
is_sat(A) :- atom(A).
is_sat(~A) :- is_sat(A).
is_sat(A*B) :- is_sat(A), is_sat(B).
is_sat(A+B) :- is_sat(A), is_sat(B).
is_sat(A#B) :- is_sat(A), is_sat(B).
is_sat(A=:=B) :- is_sat(A), is_sat(B).
is_sat(A=\=B) :- is_sat(A), is_sat(B).
is_sat(A=<B) :- is_sat(A), is_sat(B).
is_sat(A>=B) :- is_sat(A), is_sat(B).
is_sat(A<B) :- is_sat(A), is_sat(B).
is_sat(A>B) :- is_sat(A), is_sat(B).
is_sat(+(Ls)) :- must_be(list, Ls), maplist(is_sat, Ls).
is_sat(*(Ls)) :- must_be(list, Ls), maplist(is_sat, Ls).
is_sat(X^F) :- var(X), is_sat(F).
is_sat(card(Is,Fs)) :-
must_be(list(ground), Is),
must_be(list, Fs),
maplist(is_sat, Fs).
% non_monotonic(+X)
%
% In monotonic mode (flag clpb_monotonic = true), a plain unbound
% variable in an expression is an instantiation error, unless it is
% already a known CLP(B) variable (i.e. it has an index attribute).
non_monotonic(X) :-
( var_index(X, _) ->
% OK: already constrained to a CLP(B) variable
true
; current_prolog_flag(clpb_monotonic, true) ->
instantiation_error(X)
; true
).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Rewriting to canonical expressions.
Atoms are converted to variables with a special attribute.
A global lookup table maintains the correspondence between atoms and
their variables throughout different sat/1 goals.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% sat_rewrite(+Expr0, -Expr)
%
% Rewrite a Boolean expression to canonical form, in which only the
% connectives *, +, #, existential quantification (^) and card/2
% remain. Atoms are replaced by their associated variables via
% clpb_atom_var/2. The cuts in the first clauses commit on variables,
% integers and atoms before the structural clauses are tried.
% elementary
sat_rewrite(V, V) :- var(V), !.
sat_rewrite(I, I) :- integer(I), !.
sat_rewrite(A, V) :- atom(A), !, clpb_atom_var(A, V).
sat_rewrite(v(V), V).
sat_rewrite(P0*Q0, P*Q) :- sat_rewrite(P0, P), sat_rewrite(Q0, Q).
sat_rewrite(P0+Q0, P+Q) :- sat_rewrite(P0, P), sat_rewrite(Q0, Q).
sat_rewrite(P0#Q0, P#Q) :- sat_rewrite(P0, P), sat_rewrite(Q0, Q).
sat_rewrite(X^F0, X^F) :- sat_rewrite(F0, F).
sat_rewrite(card(Is,Fs0), card(Is,Fs)) :-
maplist(sat_rewrite, Fs0, Fs).
% synonyms: the remaining connectives are defined in terms of the
% canonical ones. E.g., ~P is 1#P, and P =< Q (implication) is ~P + Q.
sat_rewrite(~P, R) :- sat_rewrite(1 # P, R).
sat_rewrite(P =:= Q, R) :- sat_rewrite(~P # Q, R).
sat_rewrite(P =\= Q, R) :- sat_rewrite(P # Q, R).
sat_rewrite(P =< Q, R) :- sat_rewrite(~P + Q, R).
sat_rewrite(P >= Q, R) :- sat_rewrite(Q =< P, R).
sat_rewrite(P < Q, R) :- sat_rewrite(~P * Q, R).
sat_rewrite(P > Q, R) :- sat_rewrite(Q < P, R).
sat_rewrite(+(Ls), R) :- foldl(or, Ls, 0, F), sat_rewrite(F, R).
sat_rewrite(*(Ls), R) :- foldl(and, Ls, 1, F), sat_rewrite(F, R).
% Fold helpers used by sat_rewrite/2 to reduce +(Ls) and *(Ls) to
% nested binary connectives. The accumulator becomes the LEFT operand,
% so e.g. +[A,B,C] folded from 0 yields ((0+A)+B)+C.
or(Expr, Acc, Acc + Expr).
and(Expr, Acc, Acc * Expr).
% must_be_sat(+Sat)
%
% Throws a type/domain error unless Sat is an acyclic, well-formed
% Boolean expression. Used to validate user input before rewriting.
must_be_sat(Sat) :-
must_be(acyclic, Sat),
( is_sat(Sat) -> true
; no_truth_value(Sat)
).
% no_truth_value(+Term): raise a domain error for invalid expressions.
no_truth_value(Term) :- domain_error(clpb_expr, Term).
% parse_sat(+Sat0, -Sat)
%
% Validate Sat0, rewrite it to canonical form, and make sure every
% variable occurring in it is registered as a CLP(B) variable.
parse_sat(Sat0, Sat) :-
must_be_sat(Sat0),
sat_rewrite(Sat0, Sat),
term_variables(Sat, Vs),
maplist(enumerate_variable, Vs).
% enumerate_variable(+V)
%
% Give V a unique integer index and an (as yet unbound) BDD root via
% the clpb attribute, plus an empty node hash table. Idempotent: a
% variable that already has an index is left untouched.
enumerate_variable(V) :-
( var_index_root(V, _, _) -> true
; clpb_next_id('$clpb_next_var', Index),
put_attr(V, clpb, index_root(Index,_)),
put_empty_hash(V)
).
% var_index(+V, -I): I is the unique index of CLP(B) variable V.
var_index(V, I) :- var_index_root(V, I, _).
% var_index_root(+V, -I, -Root): fetch both index and BDD root of V.
var_index_root(V, I, Root) :- get_attr(V, clpb, index_root(I,Root)).
% put_empty_hash(+V): initialise V's node table (attribute clpb_hash).
put_empty_hash(V) :-
empty_assoc(H0),
put_attr(V, clpb_hash, H0).
% sat_roots(+Sat, -Roots)
%
% Roots are the distinct BDD roots of all variables in Sat. Using
% term_variables/2 on the collected roots removes duplicates (roots
% are themselves unbound variables).
sat_roots(Sat, Roots) :-
term_variables(Sat, Vs),
maplist(var_index_root, Vs, _, Roots0),
term_variables(Roots0, Roots).
%% sat(+Expr) is semidet.
%
% True iff Expr is a satisfiable Boolean expression.
%
% A top-level conjunction is first split into its conjuncts and each
% is posted separately (see the comment below about node tables).
% Otherwise, the expression's BDD is built, conjoined with the BDDs
% of all involved roots, and all those roots are unified into a
% single new root holding the combined formula and BDD.
sat(Sat0) :-
( phrase(sat_ands(Sat0), Ands), Ands = [_,_|_] ->
maplist(sat, Ands)
; parse_sat(Sat0, Sat),
sat_bdd(Sat, BDD),
sat_roots(Sat, Roots),
roots_and(Roots, Sat0-BDD, And-BDD1),
maplist(del_bdd, Roots),
maplist(=(Root), Roots),
root_put_formula_bdd(Root, And, BDD1),
is_bdd(BDD1),
satisfiable_bdd(BDD1)
).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Posting many small sat/1 constraints is better than posting a huge
conjunction (or negated disjunction), because unneeded nodes are
removed from node tables after BDDs are merged. This is not
possible in sat_bdd/2 because the nodes may occur in other BDDs. A
better version of sat_bdd/2 or a proper implementation of a unique
table including garbage collection would make this obsolete and
also improve taut/2 and sat_count/2 in such cases.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% sat_ands(+Expr)//
%
% DCG: split a (possibly nested) conjunction into a list of
% conjuncts. A negated disjunction ~(A+B) is split into ~A and ~B
% (De Morgan). Unbound variables are emitted as-is.
sat_ands(X) -->
( { var(X) } -> [X]
; { X = (A*B) } -> sat_ands(A), sat_ands(B)
; { X = *(Ls) } -> sat_ands_(Ls)
; { X = ~Y } -> not_ors(Y)
; [X]
).
sat_ands_([]) --> [].
sat_ands_([L|Ls]) --> [L], sat_ands_(Ls).
% not_ors(+Expr)//: emit the negation of each disjunct of Expr.
not_ors(X) -->
( { var(X) } -> [~X]
; { X = (A+B) } -> not_ors(A), not_ors(B)
; { X = +(Ls) } -> not_ors_(Ls)
; [~X]
).
not_ors_([]) --> [].
not_ors_([L|Ls]) --> [~L], not_ors_(Ls).
% del_bdd(+Root): remove the formula/BDD attribute from a root.
del_bdd(Root) :- del_attr(Root, clpb_bdd).
% Accessors for the Sat-BDD pair stored on a BDD root variable.
root_get_formula_bdd(Root, F, BDD) :- get_attr(Root, clpb_bdd, F-BDD).
root_put_formula_bdd(Root, F, BDD) :- put_attr(Root, clpb_bdd, F-BDD).
% roots_and(+Roots, +Sat0-BDD0, -Sat-BDD)
%
% Conjoin the formulas and BDDs of all Roots with Sat0-BDD0. The
% node hash tables are rebuilt afterwards, to drop entries for nodes
% that became unreachable during merging (memory reclamation only;
% see the invariant notes in the design comment above).
roots_and(Roots, Sat0-BDD0, Sat-BDD) :-
foldl(root_and, Roots, Sat0-BDD0, Sat-BDD),
rebuild_hashes(BDD).
% root_and(+Root, +Sat0-BDD0, -Sat-BDD): fold one root into the pair.
% A root without a stored formula/BDD contributes nothing.
root_and(Root, Sat0-BDD0, Sat-BDD) :-
( root_get_formula_bdd(Root, F, B) ->
Sat = F*Sat0,
bdd_and(B, BDD0, BDD)
; Sat = Sat0,
BDD = BDD0
).
% bdd_and(+NA, +NB, -And): BDD conjunction, validated by is_bdd/1.
bdd_and(NA, NB, And) :-
apply(*, NA, NB, And),
is_bdd(And).
%% taut(+Expr, -T) is semidet
%
% Tautology check. Succeeds with T = 0 if the Boolean expression Expr
% cannot be satisfied, and with T = 1 if Expr is always true with
% respect to the current constraints. Fails otherwise.
%
% Note that T is first unified (with 0, then 1) BEFORE the check, so
% taut/2 also works when T is already instantiated by the caller.
taut(Sat0, T) :-
parse_sat(Sat0, Sat),
( T = 0, \+ sat(Sat) -> true
; T = 1, tautology(Sat) -> true
; false
).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
The algebraic equivalence: tautology(F) <=> \+ sat(~F) does NOT
hold in CLP(B) because the quantifiers of universally quantified
variables always implicitly appear in front of the *entire*
expression. Thus we have for example: X+a is not a tautology, but
~(X+a), meaning forall(a, ~(X+a)), is unsatisfiable:
sat(~(X+a)) = sat(~X * ~a) = sat(~X), sat(~a) = X=0, false
The actual negation of X+a, namely ~forall(A,X+A), in terms of
CLP(B): ~ ~exists(A, ~(X+A)), is of course satisfiable:
?- sat(~ ~A^ ~(X+A)).
%@ X = 0,
%@ sat(A=:=A).
Instead of such rewriting, we test whether the BDD of the negated
formula is 0. Critically, this avoids constraint propagation.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% tautology(+Sat) is semidet.
%
% True iff Sat holds under all admissible assignments, given the
% currently posted constraints. The check conjoins the BDD of 1#Sat
% (i.e. ~Sat, see the comment above about universal quantifiers) with
% the BDDs of all involved roots and tests for 0. The catch/throw of
% the atom `tautology` is a deliberate trick: throwing out of the
% catch/3 goal undoes all attribute changes made during the check, so
% the constraint store is left untouched on success.
tautology(Sat) :-
( phrase(sat_ands(Sat), Ands), Ands = [_,_|_] ->
maplist(tautology, Ands)
; catch((sat_roots(Sat, Roots),
roots_and(Roots, _-1, _-Ands),
sat_bdd(1#Sat, BDD),
bdd_and(BDD, Ands, B),
B == 0,
% reset all attributes
throw(tautology)),
tautology,
true)
).
% satisfiable_bdd(+BDD) is semidet.
%
% Fails iff BDD is 0. For a non-trivial BDD, classify its variables
% and propagate all forced assignments (V=0/V=1) and detected
% aliasings (X=Y) at once. Propagation is skipped entirely if any
% branching variable is already instantiated.
satisfiable_bdd(BDD) :-
( BDD == 0 -> false
; BDD == 1 -> true
; ( bdd_nodes(var_unbound, BDD, Nodes) ->
bdd_variables_classification(BDD, Nodes, Classes),
partition(var_class, Classes, Eqs, Bs, Ds),
domain_consistency(Eqs, Goal),
aliasing_consistency(Bs, Ds, Goals),
maplist(unification, [Goal|Goals])
; % if any variable is instantiated, we do not perform
% any propagation for now
true
)
).
% Ordering predicate for partition/6: classifications sort into
% equations (<), further-branching (=) and negative-decisive (>).
var_class(_=_, <).
var_class(further_branching(_,_), =).
var_class(negative_decisive(_), >).
% unification(+Goal): execute a propagation goal (true or A=B).
unification(true).
unification(A=B) :- A = B. % safe_goal/1 detects safety of this call
% var_unbound(+Node): the node's branching variable is still unbound.
var_unbound(Node) :-
node_var_low_high(Node, Var, _, _),
var(Var).
% universal_var(+Var): Var stems from an atom (universally quantified).
universal_var(Var) :- get_attr(Var, clpb_atom, _).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
By aliasing consistency, we mean that all unifications X=Y, where
taut(X=:=Y, 1) holds, are posted.
To detect this, we distinguish two kinds of variables among those
variables that are not skipped in any branch: further-branching and
negative-decisive. X is negative-decisive iff every node where X
appears as a branching variable has 0 as one of its children. X is
further-branching iff 1 is not a direct child of any node where X
appears as a branching variable.
Any potential aliasing must involve one further-branching, and one
negative-decisive variable. X=Y must hold if, for each low branch
of nodes with X as branching variable, Y has high branch 0, and for
each high branch of nodes involving X, Y has low branch 0.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% aliasing_consistency(+Bs, +Ds, -Goals)
%
% Detect entailed unifications between further-branching variables
% (Bs) and negative-decisive variables (Ds); see the design comment
% above for the criterion. Goals is the list of DA=BA unifications.
aliasing_consistency(Bs, Ds, Goals) :-
phrase(aliasings(Bs, Ds), Goals).
aliasings([], _) --> [].
aliasings([further_branching(B,Nodes)|Bs], Ds) -->
{ var_index(B, BI) },
aliasings_(Ds, B, BI, Nodes),
aliasings(Bs, Ds).
% For each negative-decisive D occurring BELOW B (DI > BI), emit
% DA=BA iff D's opposite branch is always 0 along both of B's
% branch directions.
aliasings_([], _, _, _) --> [].
aliasings_([negative_decisive(D)|Ds], B, BI, Nodes) -->
{ var_index(D, DI) },
( { DI > BI,
always_false(high, DI, Nodes),
always_false(low, DI, Nodes),
var_or_atom(D, DA), var_or_atom(B, BA) } ->
[DA=BA]
; []
),
aliasings_(Ds, B, BI, Nodes).
% var_or_atom(+Var, -VA): for universally quantified variables, use
% the original atom in residual goals; otherwise the variable itself.
var_or_atom(Var, VA) :-
( get_attr(Var, clpb_atom, VA) -> true
; VA = Var
).
% always_false(+Which, +DI, +Nodes) is semidet.
%
% Walking from the Which-children of Nodes, the variable with index
% DI always has 0 as its opposite child. Visited marks are placed
% during the walk and removed afterwards via the collected list.
always_false(Which, DI, Nodes) :-
phrase(nodes_always_false(Nodes, Which, DI), Opposites),
maplist(with_aux(unvisit), Opposites).
nodes_always_false([], _, _) --> [].
nodes_always_false([Node|Nodes], Which, DI) -->
{ which_node_child(Which, Node, Child),
opposite(Which, Opposite) },
opposite_always_false(Opposite, DI, Child),
nodes_always_false(Nodes, Which, DI).
% which_node_child(?Which, +Node, -Child): select low or high child.
which_node_child(low, Node, Child) :-
node_var_low_high(Node, _, Child, _).
which_node_child(high, Node, Child) :-
node_var_low_high(Node, _, _, Child).
opposite(low, high).
opposite(high, low).
% opposite_always_false(+Opposite, +DI, +Node)//
%
% DCG over visited nodes: descend until the variable with index DI
% is reached, and check its Opposite child is 0. Emits each newly
% visited node so the caller can unvisit them.
opposite_always_false(Opposite, DI, Node) -->
( { node_visited(Node) } -> []
; { node_var_low_high(Node, Var, Low, High),
with_aux(put_visited, Node),
var_index(Var, VI) },
[Node],
( { VI =:= DI } ->
{ which_node_child(Opposite, Node, Child),
Child == 0 }
; opposite_always_false(Opposite, DI, Low),
opposite_always_false(Opposite, DI, High)
)
).
% further_branching(+Node): neither child of Node is the terminal 1.
further_branching(Node) :-
node_var_low_high(Node, _, Low, High),
Low \== 1,
High \== 1.
% negative_decisive(+Node): some child of Node is the terminal 0.
negative_decisive(Node) :-
node_var_low_high(Node, _, Low, High),
( Low == 0 -> true
; High == 0 -> true
; false
).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Instantiate all variables that only admit a single Boolean value.
This is the case if: The variable is not skipped in any branch
leading to 1 (its being skipped means that it may be assigned
either 0 or 1 and can thus not be fixed yet), and all nodes where
it occurs as a branching variable have either lower or upper child
fixed to 0 consistently.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% domain_consistency(+Eqs, -Goal)
%
% Turn a list of V=B equations into a single simultaneous list
% unification Vs = Values, so all forced assignments propagate in
% one step (each binding may trigger attr_unify_hook/2).
domain_consistency(Eqs, Goal) :-
maplist(eq_a_b, Eqs, Vs, Values),
Goal = (Vs = Values). % propagate all assignments at once
% eq_a_b(?Eq, ?A, ?B): decompose/compose an A=B equation.
eq_a_b(A=B, A, B).
% consistently_false_(+Which, +Node): Node's Which-child is 0.
consistently_false_(Which, Node) :-
which_node_child(Which, Node, Child),
Child == 0.
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
In essentially one sweep of the BDD, all variables can be classified:
Unification with 0 or 1, further branching and/or negative decisive.
Strategy: Breadth-first traversal of the BDD, failing (and thus
clearing all attributes) if the variable is skipped in some branch,
and moving the frontier along each time.
A formula is only satisfiable if it is a tautology after all (also
implicitly) existentially quantified variables are projected away.
However, we only need to check this explicitly if at least one
universally quantified variable appears. Otherwise, we know that
the formula is satisfiable at this point, because its BDD is not 0.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% bdd_variables_classification(+BDD, +Nodes, -Classes)
%
% Classify every variable of BDD in essentially one sweep (see the
% strategy comment above): Classes contains V=0/V=1 equations,
% further_branching(V,Nodes) and negative_decisive(V) terms. If any
% universally quantified variable occurs, additionally require that
% projecting away the existential variables yields the tautology 1.
bdd_variables_classification(BDD, Nodes, Classes) :-
nodes_variables(Nodes, Vs0),
variables_in_index_order(Vs0, Vs),
( partition(universal_var, Vs, [_|_], Es) ->
foldl(existential, Es, BDD, 1)
; true
),
phrase(variables_classification(Vs, [BDD]), Classes),
maplist(with_aux(unvisit), Nodes).
% variables_classification(+Vs, +Frontier)//
%
% Breadth-first traversal: Frontier is the current set of nodes. If
% V is skipped in some branch, nodes_with_variable//2 fails and V is
% simply passed over with the frontier unchanged (the failure also
% clears the visited marks placed so far in that attempt).
variables_classification([], _) --> [].
variables_classification([V|Vs], Nodes0) -->
{ var_index(V, Index) },
( { phrase(nodes_with_variable(Nodes0, Index), Nodes) } ->
( { maplist(consistently_false_(low), Nodes) } -> [V=1]
; { maplist(consistently_false_(high), Nodes) } -> [V=0]
; []
),
( { maplist(further_branching, Nodes) } ->
[further_branching(V, Nodes)]
; []
),
( { maplist(negative_decisive, Nodes) } ->
[negative_decisive(V)]
; []
),
{ maplist(with_aux(unvisit), Nodes) },
variables_classification(Vs, Nodes)
; variables_classification(Vs, Nodes0)
).
% nodes_with_variable(+Nodes, +VI)//
%
% Collect all nodes whose branching variable has index VI, starting
% from Nodes. Fails if the variable is skipped in some branch (a
% node with a LARGER index is reached first: OVI =< VI fails), or if
% the terminal 1 is reached before VI.
nodes_with_variable([], _) --> [].
nodes_with_variable([Node|Nodes], VI) -->
{ Node \== 1 },
( { node_visited(Node) } -> nodes_with_variable(Nodes, VI)
; { with_aux(put_visited, Node),
node_var_low_high(Node, OVar, Low, High),
var_index(OVar, OVI) },
{ OVI =< VI },
( { OVI =:= VI } -> [Node]
; nodes_with_variable([Low,High], VI)
),
nodes_with_variable(Nodes, VI)
).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Node management. Always use an existing node, if there is one.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% make_node(+Var, +Low, +High, -Node)
%
% Create or reuse a BDD node. Two reductions keep the BDD canonical:
% equal children collapse to the child itself, and an existing node
% with the same variable and children is reused via the variable's
% hash table. Fresh nodes get a unique ID and a free Aux slot.
make_node(Var, Low, High, Node) :-
( Low == High -> Node = Low
; low_high_key(Low, High, Key),
( lookup_node(Var, Key, Node) -> true
; clpb_next_id('$clpb_next_node', ID),
Node = node(ID,Var,Low,High,_Aux),
register_node(Var, Key, Node)
)
).
make_node(Var, Low, High, Node) -->
% make it conveniently usable within DCGs
{ make_node(Var, Low, High, Node) }.
% The key of a node for hashing is determined by the IDs of its
% children.
low_high_key(Low, High, node(LID,HID)) :-
node_id(Low, LID),
node_id(High, HID).
% rebuild_hashes(+BDD)
%
% Rebuild the node tables of all variables occurring in BDD, so that
% entries for unreachable nodes are dropped (memory reclamation).
rebuild_hashes(BDD) :-
bdd_nodes(nodevar_put_empty_hash, BDD, Nodes),
maplist(re_register_node, Nodes).
% Reset the branching variable's table; called once per inner node
% by bdd_nodes/3, which is harmless since resetting is idempotent.
nodevar_put_empty_hash(Node) :-
node_var_low_high(Node, Var, _, _),
empty_assoc(H0),
put_attr(Var, clpb_hash, H0).
re_register_node(Node) :-
node_var_low_high(Node, Var, Low, High),
low_high_key(Low, High, Key),
register_node(Var, Key, Node).
% register_node(+Var, +Key, +Node): add Key->Node to Var's table.
register_node(Var, Key, Node) :-
get_attr(Var, clpb_hash, H0),
put_assoc(Key, H0, Node, H),
put_attr(Var, clpb_hash, H).
% lookup_node(+Var, +Key, -Node) is semidet.
lookup_node(Var, Key, Node) :-
get_attr(Var, clpb_hash, H0),
get_assoc(Key, H0, Node).
% Identifier of a BDD for hashing: the terminals 0 and 1 map to the
% atoms `false` and `true`; inner nodes carry their unique integer ID
% in the first argument of node/5.
node_id(0, false).
node_id(1, true).
node_id(node(NodeID,_,_,_,_), NodeID).

% Auxiliary (5th) slot of a node, obtained by head unification.
node_aux(node(_,_,_,_,Aux), Aux).

% Decompose an inner node into branching variable and children.
node_var_low_high(node(_,Var,Low,High,_), Var, Low, High).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
sat_bdd/2 converts a SAT formula in canonical form to an ordered
and reduced BDD.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% sat_bdd(+Sat, -Node)
%
% Convert a SAT formula in canonical form (see sat_rewrite/2) to an
% ordered and reduced BDD. A lone variable becomes the node
% (V -> 1 ; 0); binary connectives are built with apply/4.
sat_bdd(V, Node) :- var(V), !, make_node(V, 0, 1, Node).
sat_bdd(I, I) :- integer(I), !.
sat_bdd(V^Sat, Node) :- !, sat_bdd(Sat, BDD), existential(V, BDD, Node).
sat_bdd(card(Is,Fs), Node) :- !, counter_network(Is, Fs, Node).
sat_bdd(Sat, Node) :- !,
Sat =.. [F,A,B],
sat_bdd(A, NA),
sat_bdd(B, NB),
apply(F, NA, NB, Node).
% existential(+V, +BDD, -Node)
%
% Existentially quantify V away: Node = BDD[V/0] + BDD[V/1].
existential(V, BDD, Node) :-
var_index(V, Index),
bdd_restriction(BDD, Index, 0, NA),
bdd_restriction(BDD, Index, 1, NB),
apply(+, NA, NB, Node).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Counter network for card(Is,Fs).
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% counter_network(+Cs, +Fs, -Node)
%
% Build the BDD of the cardinality constraint card(Cs,Fs): the
% number of true expressions in Fs must be a member of Cs (integers
% and From-To ranges). Indicators has one entry per possible count
% (0..length(Fs)); entry K is 1 iff count K is admissible.
counter_network(Cs, Fs, Node) :-
same_length([_|Fs], Indicators),
fill_indicators(Indicators, 0, Cs),
phrase(formulas_variables(Fs, Vars0), ExBDDs),
maplist(unvisit, Vars0),
% The counter network is built bottom-up, so variables with
% highest index must be processed first.
variables_in_index_order(Vars0, Vars1),
reverse(Vars1, Vars),
counter_network_(Vars, Indicators, Node0),
foldl(existential_and, ExBDDs, Node0, Node).
% Introduce fresh variables for expressions that are not variables.
% These variables are later existentially quantified to remove them.
% Also, new variables are introduced for variables that are used more
% than once, as in card([0,1],[X,X,Y]), to keep the BDD ordered.
formulas_variables([], []) --> [].
formulas_variables([F|Fs], [V|Vs]) -->
( { var(F), \+ is_visited(F) } ->
{ V = F,
put_visited(F) }
; { enumerate_variable(V),
sat_rewrite(V =:= F, Sat),
sat_bdd(Sat, BDD) },
[V-BDD]
),
formulas_variables(Fs, Vs).
% counter_network_(+Vars, +Indicators, -Node)
%
% One layer per variable: pairing adjacent indicators I_k ("count is
% k without Var") and I_{k+1} ("count is k+1") via make_node/4
% yields the indicators of the network including Var.
counter_network_([], [Node], Node).
counter_network_([Var|Vars], [I|Is0], Node) :-
foldl(indicators_pairing(Var), Is0, Is, I, _),
counter_network_(Vars, Is, Node).
indicators_pairing(Var, I, Node, Prev, I) :- make_node(Var, Prev, I, Node).
% fill_indicators(+Indicators, +Index0, +Cs)
%
% Indicator at position K is 1 iff K occurs in Cs directly or falls
% inside some A-B range of Cs; 0 otherwise.
fill_indicators([], _, _).
fill_indicators([I|Is], Index0, Cs) :-
( memberchk(Index0, Cs) -> I = 1
; member(A-B, Cs), between(A, B, Index0) -> I = 1
; I = 0
),
Index1 is Index0 + 1,
fill_indicators(Is, Index1, Cs).
% existential_and(+Ex-BDD, +Node0, -Node)
%
% Conjoin the defining BDD of auxiliary variable Ex, then quantify
% Ex away and strip its attributes.
existential_and(Ex-BDD, Node0, Node) :-
bdd_and(BDD, Node0, Node1),
existential(Ex, Node1, Node),
% remove attributes to avoid residual goals for variables that
% are only used as substitutes for formulas
del_attrs(Ex).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Compute F(NA, NB).
We use a DCG to thread through an implicit argument G0, an
association table F(IDA,IDB) -> Node, used for memoization.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% apply(+F, +NA, +NB, -Node)
%
% Node is the BDD of F(NA, NB), for F one of +, * and #. The DCG
% version threads an assoc table F(IDA,IDB) -> Node through the
% computation for memoization (see the comment above).
apply(F, NA, NB, Node) :-
empty_assoc(G0),
phrase(apply(F, NA, NB, Node), [G0], _).
% Order of cases matters: terminal/terminal via the truth table,
% then algebraic shortcuts, then memo lookup, then recursion.
apply(F, NA, NB, Node) -->
( { integer(NA), integer(NB) } -> { once(bool_op(F, NA, NB, Node)) }
; { apply_shortcut(F, NA, NB, Node) } -> []
; { node_id(NA, IDA), node_id(NB, IDB), Key =.. [F,IDA,IDB] },
( state(G0), { get_assoc(Key, G0, Node) } -> []
; apply_(F, NA, NB, Node),
state(G0, G),
{ put_assoc(Key, G0, Node, G) }
)
).
% apply_shortcut(+F, +NA, +NB, -Node) is semidet.
%
% Identities that avoid recursion when one operand is a terminal:
% 0+X=X, 1+X=1, 0*X=0, 1*X=X (and symmetrically). No shortcut
% exists for #, so only + and * have clauses.
apply_shortcut(+, NA, NB, Node) :-
( NA == 0 -> Node = NB
; NA == 1 -> Node = 1
; NB == 0 -> Node = NA
; NB == 1 -> Node = 1
; false
).
apply_shortcut(*, NA, NB, Node) :-
( NA == 0 -> Node = 0
; NA == 1 -> Node = NB
; NB == 0 -> Node = 0
; NB == 1 -> Node = NA
; false
).
% apply_(+F, +NA, +NB, -Node)//
%
% Shannon expansion on the smaller branching variable (standard BDD
% apply): recurse on children, then rebuild with make_node//4, which
% keeps the result reduced.
apply_(F, NA, NB, Node) -->
{ var_less_than(NA, NB),
!,
node_var_low_high(NA, VA, LA, HA) },
apply(F, LA, NB, Low),
apply(F, HA, NB, High),
make_node(VA, Low, High, Node).
apply_(F, NA, NB, Node) -->
{ node_var_low_high(NA, VA, LA, HA),
node_var_low_high(NB, VB, LB, HB),
VA == VB },
!,
apply(F, LA, LB, Low),
apply(F, HA, HB, High),
make_node(VA, Low, High, Node).
apply_(F, NA, NB, Node) --> % NB < NA
{ node_var_low_high(NB, VB, LB, HB) },
apply(F, NA, LB, Low),
apply(F, NA, HB, High),
make_node(VB, Low, High, Node).
% node_varindex(+Node, -VI): index of Node's branching variable.
node_varindex(Node, VI) :-
node_var_low_high(Node, V, _, _),
var_index(V, VI).
% var_less_than(+NA, +NB): NA's variable precedes NB's in the BDD
% order (a terminal NB counts as "after" every variable).
var_less_than(NA, NB) :-
( integer(NB) -> true
; node_varindex(NA, VAI),
node_varindex(NB, VBI),
VAI < VBI
).
% Truth tables for the three canonical connectives.
bool_op(+, 0, 0, 0).
bool_op(+, 0, 1, 1).
bool_op(+, 1, 0, 1).
bool_op(+, 1, 1, 1).
bool_op(*, 0, 0, 0).
bool_op(*, 0, 1, 0).
bool_op(*, 1, 0, 0).
bool_op(*, 1, 1, 1).
bool_op(#, 0, 0, 0).
bool_op(#, 0, 1, 1).
bool_op(#, 1, 0, 1).
bool_op(#, 1, 1, 0).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Access implicit state in DCGs.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% Access implicit DCG state: state(S) reads it, state(S0, S) replaces
% S0 with S. The state is carried as the single element of the DCG
% list (semicontext notation).
state(S) --> state(S, S).
state(S0, S), [S] --> [S0].
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Unification. X = Expr is equivalent to sat(X =:= Expr).
Current limitation:
===================
The current interface of attributed variables is not general enough
to express what we need. For example,
?- sat(A + B), A = A + 1.
should be equivalent to
?- sat(A + B), sat(A =:= A + 1).
However, attr_unify_hook/2 is only called *after* the unification
of A with A + 1 has already taken place and turned A into a cyclic
ground term, raised an error or failed (depending on the flag
occurs_check), making it impossible to reason about the variable A
in the unification hook. Therefore, a more general interface for
attributed variables should replace the current one. In particular,
unification filters should be able to reason about terms before
they are unified with anything.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% attr_unify_hook(+Attr, +Other)
%
% Called when a CLP(B) variable with attribute index_root(I,Root) is
% unified with Other. Three cases:
%   1. Other is 0 or 1: restrict the root's BDD accordingly.
%   2. Other is an atom (universally quantified): re-parse the
%      stored formula so the atom's variable takes effect, then
%      rebuild the BDD.
%   3. Other is a term or another CLP(B) variable: the conjunction
%      Sat0*OtherSat is formed, and (since aliasing may have made
%      BDDs unordered) BDDs are rebuilt and all involved roots are
%      merged into one new root.
attr_unify_hook(index_root(I,Root), Other) :-
( integer(Other) ->
( between(0, 1, Other) ->
root_get_formula_bdd(Root, Sat, BDD0),
bdd_restriction(BDD0, I, Other, BDD),
root_put_formula_bdd(Root, Sat, BDD),
satisfiable_bdd(BDD)
; no_truth_value(Other)
)
; atom(Other) ->
root_get_formula_bdd(Root, Sat0, _),
parse_sat(Sat0, Sat),
sat_bdd(Sat, BDD),
root_put_formula_bdd(Root, Sat0, BDD),
is_bdd(BDD),
satisfiable_bdd(BDD)
; % due to variable aliasing, any BDDs may now be unordered,
% so we need to rebuild the new BDD from the conjunction.
root_get_formula_bdd(Root, Sat0, _),
Sat = Sat0*OtherSat,
( var(Other), var_index_root(Other, _, OtherRoot),
OtherRoot \== Root ->
root_get_formula_bdd(OtherRoot, OtherSat, _),
parse_sat(Sat, Sat1),
sat_bdd(Sat1, BDD1),
And = Sat,
sat_roots(Sat, Roots)
; parse_sat(Other, OtherSat),
sat_roots(Sat, Roots),
maplist(root_rebuild_bdd, Roots),
roots_and(Roots, 1-1, And-BDD1)
),
maplist(del_bdd, Roots),
maplist(=(NewRoot), Roots),
root_put_formula_bdd(NewRoot, And, BDD1),
is_bdd(BDD1),
satisfiable_bdd(BDD1)
).
% root_rebuild_bdd(+Root): rebuild Root's BDD from its stored
% formula (used after aliasing may have perturbed variable order).
root_rebuild_bdd(Root) :-
( root_get_formula_bdd(Root, F0, _) ->
parse_sat(F0, Sat),
sat_bdd(Sat, BDD),
root_put_formula_bdd(Root, F0, BDD)
; true
).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Support for project_attributes/2.
This is called by the toplevel as
project_attributes(+QueryVars, +AttrVars)
in order to project all remaining constraints onto QueryVars.
All CLP(B) variables that do not occur in QueryVars or AttrVars
need to be existentially quantified, so that they do not occur in
residual goals. This is very easy to do in the case of CLP(B).
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% project_attributes(+QueryVars0, +AttrVars)
%
% Toplevel hook: project all remaining constraints onto the query
% variables by existentially quantifying away, per root, every
% CLP(B) variable that appears in neither list (see comment above).
project_attributes(QueryVars0, AttrVars) :-
append(QueryVars0, AttrVars, QueryVars1),
include(clpb_variable, QueryVars1, QueryVars),
maplist(var_index_root, QueryVars, _, Roots0),
sort(Roots0, Roots),
maplist(remove_hidden_variables(QueryVars), Roots).
% clpb_variable(+Var): Var is a CLP(B) variable (has an index).
clpb_variable(Var) :- var_index(Var, _).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
All CLP(B) variables occurring in BDDs but not in query variables
become existentially quantified. This must also be reflected in the
formula. In addition, an attribute is attached to these variables
to suppress superfluous sat(V=:=V) goals.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% remove_hidden_variables(+QueryVars, +Root)
%
% Existentially quantify away all variables of Root's BDD that do
% not occur in QueryVars (and are not universally quantified), in
% both the BDD and the stored formula, so they do not show up in
% residual goals. QueryVars are temporarily marked as visited so
% bdd_variables/2 skips them when collecting the hidden variables.
remove_hidden_variables(QueryVars, Root) :-
root_get_formula_bdd(Root, Formula, BDD0),
maplist(put_visited, QueryVars),
bdd_variables(BDD0, HiddenVars0),
exclude(universal_var, HiddenVars0, HiddenVars),
maplist(unvisit, QueryVars),
foldl(existential, HiddenVars, BDD0, BDD),
foldl(quantify_existentially, HiddenVars, Formula, ExFormula),
root_put_formula_bdd(Root, ExFormula, BDD).
% quantify_existentially(+E, +E0, -E^E0)
%
% Wrap the formula in an existential quantifier for E, and mark E so
% the superfluous residual goal sat(E=:=E) is suppressed.
% (Fixed spelling of the former `quantify_existantially` — a private
% helper, so no external interface changes.)
quantify_existentially(E, E0, E^E0) :- put_attr(E, clpb_omit_boolean, true).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
BDD restriction.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% bdd_restriction(+Node, +VI, +Value, -Res)
%
% Res is the BDD obtained from Node by fixing the variable with
% index VI to Value (0 or 1). Memoized on node IDs via an assoc
% table threaded through the DCG state.
bdd_restriction(Node, VI, Value, Res) :-
empty_assoc(G0),
phrase(bdd_restriction_(Node, VI, Value, Res), [G0], _),
is_bdd(Res).
% bdd_restriction_(+Node, +VI, +Value, -Res)//
%
% An already-instantiated branching variable (0 or 1) short-circuits
% to the corresponding child. Once a node with index > VI is seen,
% the subtree cannot contain VI (BDD is ordered) and is kept as-is.
bdd_restriction_(Node, VI, Value, Res) -->
( { integer(Node) } -> { Res = Node }
; { node_var_low_high(Node, Var, Low, High) } ->
( { integer(Var) } ->
( { Var =:= 0 } -> bdd_restriction_(Low, VI, Value, Res)
; { Var =:= 1 } -> bdd_restriction_(High, VI, Value, Res)
; { no_truth_value(Var) }
)
; { var_index(Var, I0),
node_id(Node, ID) },
( { I0 =:= VI } ->
( { Value =:= 0 } -> { Res = Low }
; { Value =:= 1 } -> { Res = High }
)
; { I0 > VI } -> { Res = Node }
; state(G0), { get_assoc(ID, G0, Res) } -> []
; bdd_restriction_(Low, VI, Value, LRes),
bdd_restriction_(High, VI, Value, HRes),
make_node(Var, LRes, HRes, Res),
state(G0, G),
{ put_assoc(ID, G0, Res, G) }
)
)
; { domain_error(node, Node) }
).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Relating a BDD to its elements (nodes and variables).
Note that BDDs can become quite big (easily millions of nodes), and
memory space is a major bottleneck for many problems. If possible,
we therefore do not duplicate the entire BDD in memory (as in
bdd_ites/2), but only extract its features as needed.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% bdd_nodes(+BDD, -Ns): all inner nodes of BDD, no per-node action.
bdd_nodes(BDD, Ns) :- bdd_nodes(ignore_node, BDD, Ns).
ignore_node(_).
% VPred is a unary predicate that is called for each node that has a
% branching variable (= each inner node).
bdd_nodes(VPred, BDD, Ns) :-
phrase(bdd_nodes_(VPred, BDD), Ns),
maplist(with_aux(unvisit), Ns).
% bdd_nodes_(+VPred, +Node)//
%
% Depth-first enumeration of unvisited inner nodes, marking each via
% its Aux slot; terminals (integers) count as visited.
bdd_nodes_(VPred, Node) -->
( { node_visited(Node) } -> []
; { call(VPred, Node),
with_aux(put_visited, Node),
node_var_low_high(Node, _, Low, High) },
[Node],
bdd_nodes_(VPred, Low),
bdd_nodes_(VPred, High)
).
% A node counts as visited if it is a terminal or its Aux is marked.
node_visited(Node) :- integer(Node).
node_visited(Node) :- with_aux(is_visited, Node).
% bdd_variables(+BDD, -Vs): distinct branching variables of BDD.
bdd_variables(BDD, Vs) :-
bdd_nodes(BDD, Nodes),
nodes_variables(Nodes, Vs).
% nodes_variables(+Nodes, -Vs): distinct branching variables of the
% given nodes, deduplicated with visited marks on the variables.
nodes_variables(Nodes, Vs) :-
phrase(nodes_variables_(Nodes), Vs),
maplist(unvisit, Vs).
nodes_variables_([]) --> [].
nodes_variables_([Node|Nodes]) -->
{ node_var_low_high(Node, Var, _, _) },
( { integer(Var) } -> []
; { is_visited(Var) } -> []
; { put_visited(Var) },
[Var]
),
nodes_variables_(Nodes).
unvisit(V) :- del_attr(V, clpb_visited).
is_visited(V) :- get_attr(V, clpb_visited, true).
put_visited(V) :- put_attr(V, clpb_visited, true).
with_aux(Pred, Node) :-
node_aux(Node, Aux),
call(Pred, Aux).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Internal consistency checks.
To enable these checks, set the flag clpb_validation to true.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% is_bdd(+BDD): succeeds trivially unless the clpb_validation flag is
% true, in which case it checks that the BDD is reduced (no two nodes
% with the same ite/3 structure), that every node is registered in
% its variable's lookup table, and that branching variable indices
% strictly increase from each node to its children.  Violations are
% reported via domain_error/2.
is_bdd(BDD) :-
( current_prolog_flag(clpb_validation, true) ->
bdd_ites(BDD, ITEs),
pairs_values(ITEs, Ls0),
sort(Ls0, Ls1),
( same_length(Ls0, Ls1) -> true
; domain_error(reduced_ites, (ITEs,Ls0,Ls1))
),
( member(ITE, ITEs), \+ registered_node(ITE) ->
domain_error(registered_node, ITE)
; true
),
( member(I, ITEs), \+ ordered(I) ->
domain_error(ordered_node, I)
; true
)
; true
).
% A node is ordered if both children have strictly greater variable
% indices (or are leaves / bound variables).
ordered(_-ite(Var,High,Low)) :-
( var_index(Var, VI) ->
greater_varindex_than(High, VI),
greater_varindex_than(Low, VI)
; true
).
greater_varindex_than(Node, VI) :-
( integer(Node) -> true
; node_var_low_high(Node, Var, _, _),
( var_index(Var, OI) ->
OI > VI
; true
)
).
% A node is registered if looking up its (Low,High) key in its
% variable's table yields the node itself.
registered_node(Node-ite(Var,High,Low)) :-
( var(Var) ->
low_high_key(Low, High, Key),
lookup_node(Var, Key, Node0),
Node == Node0
; true
).
% Duplicate the BDD as a list of Node-ite(Var,High,Low) pairs.
% (Memory-heavy; used for validation only — see note above.)
bdd_ites(BDD, ITEs) :-
bdd_nodes(BDD, Nodes),
maplist(node_ite, Nodes, ITEs).
node_ite(Node, Node-ite(Var,High,Low)) :-
node_var_low_high(Node, Var, Low, High).
%% labeling(+Vs) is multi.
%
% Enumerate concrete solutions. Assigns truth values to the Boolean
% variables Vs such that all stated constraints are satisfied.
labeling(Vs0) :-
must_be(list, Vs0),
maplist(labeling_var, Vs0),
variables_in_index_order(Vs0, Vs),
maplist(indomain, Vs).
% Each element must be an unbound variable or already 0/1.
labeling_var(V) :- var(V), !.
labeling_var(V) :- V == 0, !.
labeling_var(V) :- V == 1, !.
labeling_var(V) :- domain_error(clpb_variable, V).
% Sort variables by their CLP(B) index; variables without an index
% (not involved in any constraint) sort first with key 0.
variables_in_index_order(Vs0, Vs) :-
maplist(var_with_index, Vs0, IVs0),
keysort(IVs0, IVs),
pairs_values(IVs, Vs).
var_with_index(V, I-V) :-
( var_index_root(V, I, _) -> true
; I = 0
).
% Choice point: try 0, then 1, for each variable.
indomain(0).
indomain(1).
%% sat_count(+Expr, -Count) is det.
%
% Count the number of admissible assignments. Count is the number of
% different assignments of truth values to the variables in the
% Boolean expression Expr, such that Expr is true and all posted
% constraints are satisfiable.
%
% A common form of invocation is `sat_count(+[1|Vs], Count)`: This
% counts the number of admissible assignments to `Vs` without imposing
% any further constraints.
%
% Examples:
%
% ==
% ?- sat(A =< B), Vs = [A,B], sat_count(+[1|Vs], Count).
% Vs = [A, B],
% Count = 3,
% sat(A=:=A*B).
%
% ?- length(Vs, 120),
% sat_count(+Vs, CountOr),
% sat_count(*(Vs), CountAnd).
% Vs = [...],
% CountOr = 1329227995784915872903807060280344575,
% CountAnd = 1.
% ==
% Implementation note: the whole computation runs inside catch/3 and
% the result is communicated by throwing count(N0).  Backtracking out
% of the thrown ball undoes all attribute modifications (renumbered
% variable indices, visited marks, memoised counts) made while
% counting, so the constraint store is left untouched.
sat_count(Sat0, N) :-
catch((parse_sat(Sat0, Sat),
sat_bdd(Sat, BDD),
sat_roots(Sat, Roots),
roots_and(Roots, _-BDD, _-BDD1),
% we mark variables that occur in Sat0 as visited ...
term_variables(Sat0, Vs),
maplist(put_visited, Vs),
% ... so that they do not appear in Vs1 ...
bdd_variables(BDD1, Vs1),
partition(universal_var, Vs1, Univs, Exis),
% ... and then remove remaining variables:
foldl(universal, Univs, BDD1, BDD2),
foldl(existential, Exis, BDD2, BDD3),
variables_in_index_order(Vs, IVs),
foldl(renumber_variable, IVs, 1, VNum),
bdd_count(BDD3, VNum, Count0),
var_u(BDD3, VNum, P),
% Do not unify N directly, because we are not prepared
% for propagation here in case N is a CLP(B) variable.
N0 is 2^(P - 1)*Count0,
% reset all attributes and Aux variables
throw(count(N0))),
count(N0),
N = N0).
% Universally quantify V away: conjoin the restrictions of BDD with
% V=0 and with V=1.
universal(V, BDD, Node) :-
var_index(V, Index),
bdd_restriction(BDD, Index, 0, NA),
bdd_restriction(BDD, Index, 1, NB),
apply(*, NA, NB, Node).
% Give V the next consecutive index I0 (folded over a counter), so
% the remaining variables are numbered 1..VNum-1 before counting.
renumber_variable(V, I0, I) :-
put_attr(V, clpb, index_root(I0,_)),
I is I0 + 1.
% bdd_count(+Node, +VNum, -Count): number of satisfying assignments
% of the sub-BDD rooted at Node.  Counts are memoised in each node's
% Aux variable; index gaps between a node and its children are
% compensated by powers of two (bdd_pow/4).
bdd_count(Node, VNum, Count) :-
( integer(Node) -> Count = Node
; node_aux(Node, Count),
( integer(Count) -> true
; node_var_low_high(Node, V, Low, High),
bdd_count(Low, VNum, LCount),
bdd_count(High, VNum, HCount),
bdd_pow(Low, V, VNum, LPow),
bdd_pow(High, V, VNum, HPow),
Count is LPow*LCount + HPow*HCount
)
).
% 2^(number of variable indices skipped between V and Node).
bdd_pow(Node, V, VNum, Pow) :-
var_index(V, Index),
var_u(Node, VNum, P),
Pow is 2^(P - Index - 1).
% Branching variable index of Node, or VNum for the 0/1 leaves.
var_u(Node, VNum, Index) :-
( integer(Node) -> Index = VNum
; node_varindex(Node, Index)
).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Pick a solution in such a way that each solution is equally likely.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
%% random_labeling(+Seed, +Vs) is det.
%
% Select a single random solution. An admissible assignment of truth
% values to the Boolean variables in Vs is chosen in such a way that
% each admissible assignment is equally likely. Seed is an integer,
% used as the initial seed for the random number generator.
% Capture all of Vars0 in one BDD by posting the tautology +[1|Vars].
% monotonic_variable/2 wraps each variable as v(_) when the
% clpb_monotonic flag is set.
single_bdd(Vars0) :-
maplist(monotonic_variable, Vars0, Vars),
% capture all variables with a single BDD
sat(+[1|Vars]).
random_labeling(Seed, Vars) :-
must_be(list, Vars),
set_random(seed(Seed)),
( ground(Vars) -> true
% As in sat_count/2, the work happens inside catch/3, and the
% chosen bindings Bs escape via throw(randsol(...)) so that all
% attribute modifications are undone before they are applied.
; catch((single_bdd(Vars),
once((member(Var, Vars),var(Var))),
var_index_root(Var, _, Root),
root_get_formula_bdd(Root, _, BDD),
bdd_variables(BDD, Vs),
variables_in_index_order(Vs, IVs),
foldl(renumber_variable, IVs, 1, VNum),
phrase(random_bindings(VNum, BDD), Bs),
maplist(del_attrs, Vs),
% reset all attribute modifications
throw(randsol(Vars, Bs))),
randsol(Vars, Bs),
true),
maplist(call, Bs),
% set remaining variables to 0 or 1 with equal probability
include(var, Vars, Remaining),
maplist(maybe_zero, Remaining)
).
% maybe/0 succeeds with probability 1/2.
maybe_zero(Var) :-
( maybe -> Var = 0
; Var = 1
).
% Walk the BDD from the root, choosing the low branch with
% probability LCount/Total (via maybe/2), so every solution of the
% BDD is equally likely.  Emits Var=Value goals to be called later.
random_bindings(_, Node) --> { Node == 1 }, !.
random_bindings(VNum, Node) -->
{ node_var_low_high(Node, Var, Low, High),
bdd_count(Node, VNum, Total),
bdd_count(Low, VNum, LCount) },
( { maybe(LCount, Total) } ->
[Var=0], random_bindings(VNum, Low)
; [Var=1], random_bindings(VNum, High)
).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Find solutions with maximum weight.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
%% weighted_maximum(+Weights, +Vs, -Maximum) is multi.
%
% Enumerate weighted optima over admissible assignments. Maximize a
% linear objective function over Boolean variables Vs with integer
% coefficients Weights. This predicate assigns 0 and 1 to the
% variables in Vs such that all stated constraints are satisfied, and
% Maximum is the maximum of sum(Weight_i*V_i) over all admissible
% assignments. On backtracking, all admissible assignments that
% attain the optimum are generated.
%
% This predicate can also be used to _minimize_ a linear Boolean
% program, since negative integers can appear in Weights.
%
% Example:
%
% ==
% ?- sat(A#B), weighted_maximum([1,2,1], [A,B,C], Maximum).
% A = 0, B = 1, C = 1, Maximum = 3.
% ==
weighted_maximum(Ws, Vars, Max) :-
must_be(list(integer), Ws),
must_be(list(var), Vars),
single_bdd(Vars),
Vars = [Var|_],
var_index_root(Var, _, Root),
root_get_formula_bdd(Root, _, BDD0),
bdd_variables(BDD0, Vs),
% existentially quantify variables that are not considered
maplist(put_visited, Vars),
exclude(is_visited, Vs, Unvisited),
maplist(unvisit, Vars),
foldl(existential, Unvisited, BDD0, BDD),
maplist(var_with_index, Vars, IVs),
pairs_keys_values(Pairs0, IVs, Ws),
keysort(Pairs0, Pairs1),
pairs_keys_values(Pairs1, IVs1, WeightsIndexOrder),
pairs_values(IVs1, VarsIndexOrder),
% Pairs is a list of Var-Weight terms, in index order of Vars
pairs_keys_values(Pairs, VarsIndexOrder, WeightsIndexOrder),
bdd_maximum(BDD, Pairs, Max),
max_labeling(BDD, Pairs).
% Walk the BDD along the directions stored by bdd_maximum/3 (in the
% clpb_max attribute of each node's Aux variable), binding the
% branching variables accordingly and labeling skipped variables.
max_labeling(1, Pairs) :- max_upto(Pairs, _, _).
max_labeling(node(_,Var,Low,High,Aux), Pairs0) :-
max_upto(Pairs0, Var, Pairs),
get_attr(Aux, clpb_max, max(_,Dir)),
direction_labeling(Dir, Var, Low, High, Pairs).
% Label all variables preceding Var in index order: weight 0 leaves a
% choice point, negative weights are set to 0, positive weights to 1
% (consistent with the weight accounting in between_weights/3).
max_upto([], _, _).
max_upto([Var0-Weight|VWs0], Var, VWs) :-
( Var == Var0 -> VWs = VWs0
; Weight =:= 0 ->
( Var0 = 0 ; Var0 = 1 ),
max_upto(VWs0, Var, VWs)
; Weight < 0 -> Var0 = 0, max_upto(VWs0, Var, VWs)
; Var0 = 1, max_upto(VWs0, Var, VWs)
).
direction_labeling(low, 0, Low, _, Pairs) :- max_labeling(Low, Pairs).
direction_labeling(high, 1, _, High, Pairs) :- max_labeling(High, Pairs).
% bdd_maximum(+BDD, +Pairs, -Max)
%
% Max is the maximum achievable weight over all paths of the BDD.
% Pairs is a Var-Weight list in variable index order.  For each inner
% node, max(Max,Dir) is memoised in the clpb_max attribute of its Aux
% variable, where Dir (low/high) records which branch attains the
% maximum; max_labeling/2 follows these directions afterwards.
% At the 1-leaf, the best completion takes every remaining variable
% with positive weight.
bdd_maximum(1, Pairs, Max) :-
pairs_values(Pairs, Weights0),
include(<(0), Weights0, Weights),
sum_list(Weights, Max).
bdd_maximum(node(_,Var,Low,High,Aux), Pairs0, Max) :-
( get_attr(Aux, clpb_max, max(Max,_)) -> true
; ( skip_to_var(Var, Weight, Pairs0, Pairs),
( Low == 0 ->
% only the high branch can reach 1; Var must be 1
bdd_maximum_(High, Pairs, MaxHigh, MaxToHigh),
Max is MaxToHigh + MaxHigh + Weight,
Dir = high
; High == 0 ->
bdd_maximum_(Low, Pairs, MaxLow, MaxToLow),
Max is MaxToLow + MaxLow,
Dir = low
; bdd_maximum_(Low, Pairs, MaxLow, MaxToLow),
bdd_maximum_(High, Pairs, MaxHigh, MaxToHigh),
Max0 is MaxToLow + MaxLow,
Max1 is MaxToHigh + MaxHigh + Weight,
Max is max(Max0,Max1),
( Max0 =:= Max1 -> Dir = _Any
; Max0 < Max1 -> Dir = high
; Dir = low
)
),
store_maximum(Aux, Max, Dir)
)
).
% Maximum of the child plus the best contribution of the variables
% skipped between the current node and that child.
bdd_maximum_(Node, Pairs, Max, MaxTo) :-
bdd_maximum(Node, Pairs, Max),
between_weights(Node, Pairs, MaxTo).
store_maximum(Aux, Max, Dir) :- put_attr(Aux, clpb_max, max(Max,Dir)).
% Sum of the positive weights of variables strictly between the
% current position and Node's branching variable.
between_weights(Node, Pairs0, MaxTo) :-
( Node == 1 -> MaxTo = 0
; node_var_low_high(Node, Var, _, _),
phrase(skip_to_var_(Var, _, Pairs0, _), Weights0),
include(<(0), Weights0, Weights),
sum_list(Weights, MaxTo)
).
% Advance Pairs0 to Var, yielding its Weight and the remaining pairs.
skip_to_var(Var, Weight, Pairs0, Pairs) :-
phrase(skip_to_var_(Var, Weight, Pairs0, Pairs), _).
% DCG: emits the positive weights of the variables skipped over.
skip_to_var_(Var, Weight, [Var0-Weight0|VWs0], VWs) -->
( { Var == Var0 } ->
{ Weight = Weight0, VWs0 = VWs }
; ( { Weight0 =< 0 } -> []
; [Weight0]
),
skip_to_var_(Var, Weight, VWs0, VWs)
).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Projection to residual goals.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% Residual goals for a CLP(B) variable.  Depending on the
% clpb_residuals flag, either the raw BDD nodes are emitted
% (clpb:'$clpb_bdd'/1) or the remaining formulas are projected to
% sat/1 goals in (mostly) algebraic normal form.
attribute_goals(Var) -->
{ var_index_root(Var, _, Root) },
( { root_get_formula_bdd(Root, Formula, BDD) } ->
{ del_bdd(Root) },
( { current_prolog_flag(clpb_residuals, bdd) } ->
{ bdd_nodes(BDD, Nodes),
phrase(nodes(Nodes), Ns) },
[clpb:'$clpb_bdd'(Ns)]
; { prepare_global_variables(BDD),
phrase(sat_ands(Formula), Ands0),
ands_fusion(Ands0, Ands),
maplist(formula_anf, Ands, ANFs0),
sort(ANFs0, ANFs1),
exclude(eq_1, ANFs1, ANFs2),
variables_separation(ANFs2, ANFs) },
sats(ANFs)
),
% a variable standing for a universally quantified atom is
% related to it explicitly
( { get_attr(Var, clpb_atom, Atom) } ->
[clpb:sat(Var=:=Atom)]
; []
),
% formula variables not occurring in the BDD should be booleans
{ bdd_variables(BDD, Vs),
maplist(del_clpb, Vs),
term_variables(Formula, RestVs0),
include(clpb_variable, RestVs0, RestVs) },
booleans(RestVs)
; boolean(Var) % the variable may have occurred only in taut/2
).
del_clpb(Var) :- del_attr(Var, clpb).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
To make residual projection work with recorded constraints, the
global counters must be adjusted so that new variables and nodes
also get new IDs. Also, clpb_next_id/2 is used to actually create
these counters, because creating them with b_setval/2 would make
them [] on backtracking, which is quite unfortunate in itself.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% Bump '$clpb_next_var' / '$clpb_next_node' past the largest variable
% index and node ID occurring in BDD.  b_setval/2 is used, so the
% adjustment is undone on backtracking.
prepare_global_variables(BDD) :-
clpb_next_id('$clpb_next_var', V0),
clpb_next_id('$clpb_next_node', N0),
bdd_nodes(BDD, Nodes),
foldl(max_variable_node, Nodes, V0-N0, MaxV0-MaxN0),
MaxV is MaxV0 + 1,
MaxN is MaxN0 + 1,
b_setval('$clpb_next_var', MaxV),
b_setval('$clpb_next_node', MaxN).
% Fold state V-N keeps the running maxima of variable indices and
% node IDs.
max_variable_node(Node, V0-N0, V-N) :-
node_id(Node, N1),
node_varindex(Node, V1),
N is max(N0,N1),
V is max(V0,V1).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Fuse formulas that share the same variables into single conjunctions.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% Group the formulas by their (index-ordered) variable lists and
% conjoin each group into one formula.
ands_fusion(Ands0, Ands) :-
maplist(with_variables, Ands0, Pairs0),
keysort(Pairs0, Pairs),
group_pairs_by_key(Pairs, Groups),
pairs_values(Groups, Andss),
maplist(list_to_conjunction, Andss, Ands).
% Key each formula by its variables, sorted by CLP(B) index so that
% formulas over the same variables get identical keys.
with_variables(F, Vs-F) :-
term_variables(F, Vs0),
variables_in_index_order(Vs0, Vs).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
If possible, separate variables into different sat/1 goals.
A formula F can be split in two if for two of its variables A and B,
taut((A^F)*(B^F) =:= F, 1) holds. In the first conjunct, A does not
occur, and in the second, B does not occur. We separate variables
until that is no longer possible. There may be a better way to do this.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
variables_separation(Fs0, Fs) :- separation_fixpoint(Fs0, [], Fs).
% Repeatedly try to split the pending formulas Fs0; Ds0 accumulates
% formulas that are known to be unsplittable (wrapped as done/1 by
% the worker below).  Iterates until no splittable formula remains.
separation_fixpoint(Fs0, Ds0, Fs) :-
phrase(variables_separation_(Fs0, Ds0, Rest), Fs1),
partition(anf_done, Fs1, Ds1, Fs2),
maplist(arg(1), Ds1, Ds2),
maplist(arg(1), Fs2, Fs3),
append(Ds0, Ds2, Ds3),
append(Rest, Fs3, Fs4),
sort(Fs4, Fs5),
sort(Ds3, Ds4),
( Fs5 == [] -> Fs = Ds4
; separation_fixpoint(Fs5, Ds4, Fs)
).
anf_done(done(_)).
% For each formula: build its BDD, existentially quantify away each
% variable in turn, and check which pairs of quantified BDDs conjoin
% back to the original BDD (separate_pairs//3).  If any separation is
% found, the resulting ANFs replace the formula; otherwise the
% formula is emitted wrapped as done/1.
variables_separation_([], _, []) --> [].
variables_separation_([F0|Fs0], Ds, Rest) -->
( { member(Done, Ds), F0 == Done } ->
variables_separation_(Fs0, Ds, Rest)
; { sat_rewrite(F0, F),
sat_bdd(F, BDD),
bdd_variables(BDD, Vs0),
exclude(universal_var, Vs0, Vs),
maplist(existential_(BDD), Vs, Nodes),
phrase(pairs(Nodes), Pairs),
group_pairs_by_key(Pairs, Groups),
phrase(groups_separation(Groups, BDD), ANFs) },
( { ANFs = [_|_] } ->
list(ANFs),
{ Rest = Fs0 }
; [done(F0)],
variables_separation_(Fs0, Ds, Rest)
)
).
% existential/3 with the arguments reordered for maplist/3
existential_(BDD, V, Node) :- existential(V, BDD, Node).
groups_separation([], _) --> [].
groups_separation([BDD1-BDDs|Groups], OrigBDD) -->
{ phrase(separate_pairs(BDDs, BDD1, OrigBDD), Nodes) },
( { Nodes = [_|_] } ->
nodes_anfs([BDD1|Nodes])
; []
),
groups_separation(Groups, OrigBDD).
% BDD2 separates from BDD1 if their conjunction is the original BDD.
separate_pairs([], _, _) --> [].
separate_pairs([BDD2|Ps], BDD1, OrigBDD) -->
( { apply(*, BDD1, BDD2, And),
And == OrigBDD } ->
[BDD2]
; []
),
separate_pairs(Ps, BDD1, OrigBDD).
nodes_anfs([]) --> [].
nodes_anfs([N|Ns]) --> { node_anf(N, ANF) }, [anf(ANF)], nodes_anfs(Ns).
% All unordered pairs of a list, as Key-Value terms.
pairs([]) --> [].
pairs([V|Vs]) --> pairs_(Vs, V), pairs(Vs).
pairs_([], _) --> [].
pairs_([B|Bs], A) --> [A-B], pairs_(Bs, A).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Set the Prolog flag clpb_residuals to bdd to obtain the BDD nodes
as residuals. Note that they cannot be used as regular goals.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% Project each node to ID-(v(Var,Index) -> HighID ; LowID), the form
% shown in '$clpb_bdd'/1 residuals.
nodes([]) --> [].
nodes([Node|Nodes]) -->
{ node_var_low_high(Node, Var0, Low, High),
var_or_atom(Var0, Var),
maplist(node_projection, [Node,High,Low], [ID,HID,LID]),
var_index(Var0, VI) },
[ID-(v(Var,VI) -> HID ; LID)],
nodes(Nodes).
% Leaves have integer IDs and are shown as node(ID) to distinguish
% them from inner-node IDs.
node_projection(Node, Projection) :-
node_id(Node, ID),
( integer(ID) -> Projection = node(ID)
; Projection = ID
).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
By default, residual goals are sat/1 calls of the remaining formulas,
using (mostly) algebraic normal form.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
sats([]) --> [].
sats([A|As]) --> [clpb:sat(A)], sats(As).
booleans([]) --> [].
booleans([B|Bs]) --> boolean(B), { del_clpb(B) }, booleans(Bs).
% Emit sat(V =:= V) to state that V is Boolean, unless the variable
% carries the clpb_omit_boolean attribute.
boolean(Var) -->
( { get_attr(Var, clpb_omit_boolean, true) } -> []
; [clpb:sat(Var =:= Var)]
).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Relate a formula to its algebraic normal form (ANF).
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
formula_anf(Formula0, ANF) :-
parse_sat(Formula0, Formula),
sat_bdd(Formula, Node),
node_anf(Node, ANF).
% Convert the BDD rooted at Node to an ANF formula: an XOR (#) of
% conjunctions.  When the ANF starts with a single variable (possibly
% preceded by the constant 1), it is rendered as Var =\= Rest or
% Var =:= Rest, respectively, which reads better as a residual goal.
node_anf(Node, ANF) :-
node_xors(Node, Xors0),
maplist(maplist(monotonic_variable), Xors0, Xors),
maplist(list_to_conjunction, Xors, Conjs),
( Conjs = [Var,C|Rest], clpb_var(Var) ->
foldl(xor, Rest, C, RANF),
ANF = (Var =\= RANF)
; Conjs = [One,Var,C|Rest], One == 1, clpb_var(Var) ->
foldl(xor, Rest, C, RANF),
ANF = (Var =:= RANF)
; Conjs = [C|Cs],
foldl(xor, Cs, C, ANF)
).
% Wrap variables as v(V) when the clpb_monotonic flag is set.
monotonic_variable(Var0, Var) :-
( var(Var0), current_prolog_flag(clpb_monotonic, true) ->
Var = v(Var0)
; Var = Var0
).
clpb_var(Var) :- var(Var), !.
clpb_var(v(_)).
list_to_conjunction([], 1).
list_to_conjunction([L|Ls], Conj) :- foldl(and, Ls, L, Conj).
xor(A, B, B # A).
eq_1(V) :- V == 1.
% node_xors(+Node, -Xors): Xors is a list of conjunction lists whose
% XOR equals the BDD rooted at Node.
node_xors(Node, Xors) :-
phrase(xors(Node), Xors0),
% we remove elements that occur an even number of times (A#A --> 0)
maplist(sort, Xors0, Xors1),
pairs_keys_values(Pairs0, Xors1, _),
keysort(Pairs0, Pairs),
group_pairs_by_key(Pairs, Groups),
exclude(even_occurrences, Groups, Odds),
pairs_keys(Odds, Xors2),
maplist(exclude(eq_1), Xors2, Xors).
even_occurrences(_-Ls) :- length(Ls, L), L mod 2 =:= 0.
% DCG over the raw XOR terms of a node: a node with variable Var
% contributes the xors of Low, of Var*Low and of Var*High (each "with
% Var" list is the corresponding list with Var prepended).
xors(Node) -->
( { Node == 0 } -> []
; { Node == 1 } -> [[1]]
; { node_var_low_high(Node, Var0, Low, High),
var_or_atom(Var0, Var),
node_xors(Low, Ls0),
node_xors(High, Hs0),
maplist(with_var(Var), Ls0, Ls),
maplist(with_var(Var), Hs0, Hs) },
list(Ls0),
list(Ls),
list(Hs)
).
list([]) --> [].
list([L|Ls]) --> [L], list(Ls).
with_var(Var, Ls, [Var|Ls]).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Global variables for unique node and variable IDs and atoms.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% Lazily created global counters/tables; see the exception hook below.
make_clpb_var('$clpb_next_var') :- nb_setval('$clpb_next_var', 0).
make_clpb_var('$clpb_next_node') :- nb_setval('$clpb_next_node', 0).
make_clpb_var('$clpb_atoms') :-
empty_assoc(E),
nb_setval('$clpb_atoms', E).
% Create the global variables on first access instead of at load
% time, via SWI-Prolog's undefined_global_variable exception hook.
:- multifile user:exception/3.
user:exception(undefined_global_variable, Name, retry) :-
make_clpb_var(Name), !.
% Fetch the current counter value and advance it.  b_getval/b_setval
% make the increment backtrackable.
clpb_next_id(Var, ID) :-
b_getval(Var, ID),
Next is ID + 1,
b_setval(Var, Next).
% Map each atom to a unique variable (memoised in '$clpb_atoms').
% The variable carries the atom in its clpb_atom attribute and omits
% the "is Boolean" residual goal.
clpb_atom_var(Atom, Var) :-
b_getval('$clpb_atoms', A0),
( get_assoc(Atom, A0, Var) -> true
; put_attr(Var, clpb_atom, Atom),
put_attr(Var, clpb_omit_boolean, true),
put_assoc(Atom, A0, Var, A),
b_setval('$clpb_atoms', A)
).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
The variable attributes below are not used as constraints by this
library. Project remaining attributes to empty lists of residuals.
Because accessing these hooks is basically a cross-module call, we
must declare them public.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
:- public
clpb_hash:attr_unify_hook/2,
clpb_bdd:attribute_goals//1,
clpb_hash:attribute_goals//1,
clpb_omit_boolean:attr_unify_hook/2,
clpb_omit_boolean:attribute_goals//1,
clpb_atom:attr_unify_hook/2,
clpb_atom:attribute_goals//1.
clpb_hash:attr_unify_hook(_,_). % this unification is always admissible
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
If a universally quantified variable is unified to a Boolean value,
it indicates that the formula does not hold for the other value, so
it is false.
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
clpb_atom:attr_unify_hook(_, _) :- false.
clpb_omit_boolean:attr_unify_hook(_,_).
% None of these attributes contribute residual goals of their own.
clpb_bdd:attribute_goals(_) --> [].
clpb_hash:attribute_goals(_) --> [].
clpb_omit_boolean:attribute_goals(_) --> [].
clpb_atom:attribute_goals(_) --> [].
% Debugging aid (disabled): show each variable's node hash table.
% clpb_hash:attribute_goals(Var) -->
% { get_attr(Var, clpb_hash, Assoc),
% assoc_to_list(Assoc, List0),
% maplist(node_portray, List0, List) }, [Var-List].
% node_portray(Key-Node, Key-Node-ite(Var,High,Low)) :-
% node_var_low_high(Node, Var, Low, High).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Messages
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
:- multifile prolog:message//1.
prolog:message(clpb(bounded)) -->
['Using CLP(B) with bounded arithmetic may yield wrong results.'-[]].
% Solution counts can exceed the machine word size, so unbounded
% integer arithmetic is effectively required; warn otherwise.
warn_if_bounded_arithmetic :-
( current_prolog_flag(bounded, true) ->
print_message(warning, clpb(bounded))
; true
).
:- initialization(warn_if_bounded_arithmetic).
/* - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
Sandbox declarations
- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - */
% Declare the library's global variables and the clpb_residuals flag
% safe for use in sandboxed (e.g. SWISH) environments.
:- multifile
sandbox:safe_global_variable/1,
sandbox:safe_primitive/1.
sandbox:safe_global_variable('$clpb_next_var').
sandbox:safe_global_variable('$clpb_next_node').
sandbox:safe_global_variable('$clpb_atoms').
sandbox:safe_primitive(set_prolog_flag(clpb_residuals, _)).
| josd/eye | eye-wasm/swipl-wasm/home/library/clp/clpb.pl | Perl | mit | 66,052 |
package Module::Plan::Lite;
=pod
=head1 NAME
Module::Plan::Lite - Lite installation scripts for third-party modules
=head1 SYNOPSIS
The following is the contents of your default.pip file.
Module::Plan::Lite
# Everything in the plan file is installed in order
# Supported file forms
Install-This-First-1.00.tar.gz
Install-This-Second.1.31.tar.gz
extensions/This-This-0.02.tar.gz
/absolute/Module-Location-4.12.tar.gz
# Supported URI types
ftp://foo.com/pip-0.13.tar.gz
http://foo.com/pip-0.13.tar.gz
# Support for PAR installation and conventions
http://foo.com/DBI-1.37-MSWin32-5.8.0.par
http://foo.com/DBI-1.37
cpan://SMUELLER/PAR-Packer-0.975
=cut
use strict;
use warnings;

# Carp is loaded for error reporting (new() below calls Carp::croak);
# URI and the base class are loaded without importing anything.
use Carp ();
use URI ();
use Module::Plan::Base ();

# "our" replaces the obsolete "use vars" + BEGIN idiom.
our $VERSION = '1.19';
our @ISA     = 'Module::Plan::Base';
#####################################################################
# Constructor
# new ( @args )
#
# Construct a plan object via Module::Plan::Base, then parse each
# non-blank, non-comment line of the plan file as a URI relative to
# the plan file's own location and register it with add_uri().
#
# Dies (via Carp::croak) when a line cannot be resolved to a URI.
#
# Bug fix: the original called croak() although Carp was never loaded
# or imported (all "use" statements here have empty import lists), so
# a failed URI resolution died with "Undefined subroutine ... croak"
# instead of the intended message.  Carp is now loaded on demand and
# called fully qualified, which is self-contained.
sub new {
	my $class = shift;
	my $self  = $class->SUPER::new(@_);

	# Parsing here isn't the best, but this is Lite after all
	foreach ( $self->lines ) {
		# Strip whitespace and comments
		next if /^\s*(?:\#|$)/;

		# Resolve the entry relative to the plan file location
		my $uri = URI->new_abs( $_, $self->p5i_uri );
		unless ( $uri ) {
			require Carp;
			Carp::croak("Failed to get the URI for $_");
		}

		# Add the uri
		$self->add_uri( $uri );
	}

	return $self;
}
# fetch ()
#
# Download each named distribution in plan order, skipping any that
# is already present in the local dists cache.  Always returns true.
sub fetch {
	my $self = shift;

	for my $dist ( $self->names ) {
		$self->_fetch_uri($dist) unless $self->{dists}->{$dist};
	}

	return 1;
}
# run ()
#
# Execute the plan.  Names ending in ".par" or in a version-like run
# of digits and dots are installed with PAR::Dist; every other name
# is a plain distribution that is fetched first (unless cached) and
# then injected into the CPAN client and installed from there.
# Always returns true.
sub run {
	my $self = shift;

	# Shared pattern for PAR-style entries (see POD for examples)
	my $par_like = qr/(\.par|[\d.]+)$/;

	# First pass: download the plain distributions
	for my $name ( $self->names ) {
		next if $name =~ $par_like;
		$self->_fetch_uri($name) unless $self->{dists}->{$name};
	}

	# Second pass: install every entry with the appropriate backend
	for my $name ( $self->names ) {
		if ( $name =~ $par_like ) {
			# Install via PAR::Dist
			$self->_par_install($name);
		}
		else {
			# Install via CPAN.pm
			$self->_cpan_inject($name);
			$self->_cpan_install($name);
		}
	}

	return 1;
}
1;
=pod
=head1 SUPPORT
See the main L<pip> module for support information.
=head1 AUTHORS
Adam Kennedy E<lt>adamk@cpan.orgE<gt>
=head1 SEE ALSO
L<pip>, L<Module::Plan>
=head1 COPYRIGHT
Copyright 2006 - 2010 Adam Kennedy.
This program is free software; you can redistribute
it and/or modify it under the same terms as Perl itself.
The full text of the license can be found in the
LICENSE file included with this module.
=cut
| Dokaponteam/ITF_Project | xampp/perl/vendor/lib/Module/Plan/Lite.pm | Perl | mit | 2,460 |
=head1 NAME
XML::DOM::Node - Super class of all nodes in XML::DOM
=head1 DESCRIPTION
XML::DOM::Node is the super class of all nodes in an XML::DOM document.
This means that all nodes that subclass XML::DOM::Node also inherit all
the methods that XML::DOM::Node implements.
=head2 GLOBAL VARIABLES
=over 4
=item @NodeNames
The variable @XML::DOM::Node::NodeNames maps the node type constants to strings.
It is used by XML::DOM::Node::getNodeTypeName.
=back
=head2 METHODS
=over 4
=item getNodeType
Return an integer indicating the node type. See XML::DOM constants.
=item getNodeName
Return a property or a hardcoded string, depending on the node type.
Here are the corresponding functions or values:
Attr getName
AttDef getName
AttlistDecl getName
CDATASection "#cdata-section"
Comment "#comment"
Document "#document"
DocumentType getNodeName
DocumentFragment "#document-fragment"
Element getTagName
ElementDecl getName
EntityReference getEntityName
Entity getNotationName
Notation getName
ProcessingInstruction getTarget
Text "#text"
XMLDecl "#xml-declaration"
B<Not In DOM Spec>: AttDef, AttlistDecl, ElementDecl and XMLDecl were added for
completeness.
=item getNodeValue and setNodeValue (value)
Returns a string or undef, depending on the node type. This method is provided
for completeness. In other languages it saves the programmer an upcast.
The value is either available thru some other method defined in the subclass, or
else undef is returned. Here are the corresponding methods:
Attr::getValue, Text::getData, CDATASection::getData, Comment::getData,
ProcessingInstruction::getData.
=item getParentNode and setParentNode (parentNode)
The parent of this node. All nodes, except Document,
DocumentFragment, and Attr may have a parent. However, if a
node has just been created and not yet added to the tree, or
if it has been removed from the tree, this is undef.
=item getChildNodes
A NodeList that contains all children of this node. If there
are no children, this is a NodeList containing no nodes. The
content of the returned NodeList is "live" in the sense that,
for instance, changes to the children of the node object that
it was created from are immediately reflected in the nodes
returned by the NodeList accessors; it is not a static
snapshot of the content of the node. This is true for every
NodeList, including the ones returned by the
getElementsByTagName method.
NOTE: this implementation does not return a "live" NodeList for
getElementsByTagName. See L<CAVEATS>.
When this method is called in a list context, it returns a regular perl list
containing the child nodes. Note that this list is not "live". E.g.
@list = $node->getChildNodes; # returns a perl list
$nodelist = $node->getChildNodes; # returns a NodeList (object reference)
for my $kid ($node->getChildNodes) # iterate over the children of $node
=item getFirstChild
The first child of this node. If there is no such node, this returns undef.
=item getLastChild
The last child of this node. If there is no such node, this returns undef.
=item getPreviousSibling
The node immediately preceding this node. If there is no such
node, this returns undef.
=item getNextSibling
The node immediately following this node. If there is no such node, this returns
undef.
=item getAttributes
A NamedNodeMap containing the attributes (Attr nodes) of this node
(if it is an Element) or undef otherwise.
Note that adding/removing attributes from the returned object, also adds/removes
attributes from the Element node that the NamedNodeMap came from.
=item getOwnerDocument
The Document object associated with this node. This is also
the Document object used to create new nodes. When this node
is a Document this is undef.
=item insertBefore (newChild, refChild)
Inserts the node newChild before the existing child node
refChild. If refChild is undef, insert newChild at the end of
the list of children.
If newChild is a DocumentFragment object, all of its children
are inserted, in the same order, before refChild. If the
newChild is already in the tree, it is first removed.
Return Value: The node being inserted.
DOMExceptions:
=over 4
=item * HIERARCHY_REQUEST_ERR
Raised if this node is of a type that does not allow children of the type of
the newChild node, or if the node to insert is one of this node's ancestors.
=item * WRONG_DOCUMENT_ERR
Raised if newChild was created from a different document than the one that
created this node.
=item * NO_MODIFICATION_ALLOWED_ERR
Raised if this node is readonly.
=item * NOT_FOUND_ERR
Raised if refChild is not a child of this node.
=back
=item replaceChild (newChild, oldChild)
Replaces the child node oldChild with newChild in the list of
children, and returns the oldChild node. If the newChild is
already in the tree, it is first removed.
Return Value: The node replaced.
DOMExceptions:
=over 4
=item * HIERARCHY_REQUEST_ERR
Raised if this node is of a type that does not allow children of the type of
the newChild node, or it the node to put in is one of this node's ancestors.
=item * WRONG_DOCUMENT_ERR
Raised if newChild was created from a different document than the one that
created this node.
=item * NO_MODIFICATION_ALLOWED_ERR
Raised if this node is readonly.
=item * NOT_FOUND_ERR
Raised if oldChild is not a child of this node.
=back
=item removeChild (oldChild)
Removes the child node indicated by oldChild from the list of
children, and returns it.
Return Value: The node removed.
DOMExceptions:
=over 4
=item * NO_MODIFICATION_ALLOWED_ERR
Raised if this node is readonly.
=item * NOT_FOUND_ERR
Raised if oldChild is not a child of this node.
=back
=item appendChild (newChild)
Adds the node newChild to the end of the list of children of
this node. If the newChild is already in the tree, it is
first removed. If it is a DocumentFragment object, the entire contents of
the document fragment are moved into the child list of this node
Return Value: The node added.
DOMExceptions:
=over 4
=item * HIERARCHY_REQUEST_ERR
Raised if this node is of a type that does not allow children of the type of
the newChild node, or if the node to append is one of this node's ancestors.
=item * WRONG_DOCUMENT_ERR
Raised if newChild was created from a different document than the one that
created this node.
=item * NO_MODIFICATION_ALLOWED_ERR
Raised if this node is readonly.
=back
=item hasChildNodes
This is a convenience method to allow easy determination of
whether a node has any children.
Return Value: 1 if the node has any children, 0 otherwise.
=item cloneNode (deep)
Returns a duplicate of this node, i.e., serves as a generic
copy constructor for nodes. The duplicate node has no parent
(parentNode returns undef.).
Cloning an Element copies all attributes and their values,
including those generated by the XML processor to represent
defaulted attributes, but this method does not copy any text
it contains unless it is a deep clone, since the text is
contained in a child Text node. Cloning any other type of
node simply returns a copy of this node.
Parameters:
I<deep> If true, recursively clone the subtree under the specified node.
If false, clone only the node itself (and its attributes, if it is an Element).
Return Value: The duplicate node.
=item normalize
Puts all Text nodes in the full depth of the sub-tree
underneath this Element into a "normal" form where only
markup (e.g., tags, comments, processing instructions, CDATA
sections, and entity references) separates Text nodes, i.e.,
there are no adjacent Text nodes. This can be used to ensure
that the DOM view of a document is the same as if it were
saved and re-loaded, and is useful when operations (such as
XPointer lookups) that depend on a particular document tree
structure are to be used.
B<Not In DOM Spec>: In the DOM Spec this method is defined in the Element and
Document class interfaces only, but it doesn't hurt to have it here...
=item getElementsByTagName (name [, recurse])
Returns a NodeList of all descendant elements with a given
tag name, in the order in which they would be encountered in
a preorder traversal of the Element tree.
Parameters:
I<name> The name of the tag to match on. The special value "*" matches all tags.
I<recurse> Whether it should return only direct child nodes (0) or any descendant that matches the tag name (1). This argument is optional and defaults to 1. It is not part of the DOM spec.
Return Value: A list of matching Element nodes.
NOTE: this implementation does not return a "live" NodeList for
getElementsByTagName. See L<CAVEATS>.
When this method is called in a list context, it returns a regular perl list
containing the result nodes. E.g.
@list = $node->getElementsByTagName("tag"); # returns a perl list
$nodelist = $node->getElementsByTagName("tag"); # returns a NodeList (object ref.)
for my $elem ($node->getElementsByTagName("tag")) # iterate over the result nodes
=back
=head2 Additional methods not in the DOM Spec
=over 4
=item getNodeTypeName
Return the string describing the node type.
E.g. returns "ELEMENT_NODE" if getNodeType returns ELEMENT_NODE.
It uses @XML::DOM::Node::NodeNames.
=item toString
Returns the entire subtree as a string.
=item printToFile (filename)
Prints the entire subtree to the file with the specified filename.
Croaks: if the file could not be opened for writing.
=item printToFileHandle (handle)
Prints the entire subtree to the file handle.
E.g. to print to STDOUT:
$node->printToFileHandle (\*STDOUT);
=item print (obj)
Prints the entire subtree using the object's print method. E.g to print to a
FileHandle object:
$f = new FileHandle ("file.out", "w");
$node->print ($f);
=item getChildIndex (child)
Returns the index of the child node in the list returned by getChildNodes.
Return Value: the index or -1 if the node is not found.
=item getChildAtIndex (index)
Returns the child node at the specified index or undef.
=item addText (text)
Appends the specified string to the last child if it is a Text node, or else
appends a new Text node (with the specified text.)
Return Value: the last child if it was a Text node or else the new Text node.
=item dispose
Removes all circular references in this node and its descendants so the
objects can be claimed for garbage collection. The objects should not be used
afterwards.
=item setOwnerDocument (doc)
Sets the ownerDocument property of this node and all its children (and
attributes etc.) to the specified document.
This allows the user to cut and paste document subtrees between different
XML::DOM::Documents. The node should be removed from the original document
first, before calling setOwnerDocument.
This method does nothing when called on a Document node.
=item isAncestor (parent)
Returns 1 if parent is an ancestor of this node or if it is this node itself.
=item expandEntityRefs (str)
Expands all the entity references in the string and returns the result.
The entity references can be character references (e.g. "{" or "ῂ"),
default entity references (""", ">", "<", "'" and "&") or
entity references defined in Entity objects as part of the DocumentType of
the owning Document. Character references are expanded into UTF-8.
Parameter entity references (e.g. %ent;) are not expanded.
=item to_sax ( %HANDLERS )
E.g.
$node->to_sax (DocumentHandler => $my_handler,
Handler => $handler2 );
%HANDLERS may contain the following handlers:
=over 4
=item * DocumentHandler
=item * DTDHandler
=item * EntityResolver
=item * Handler
Default handler when one of the above is not specified
=back
Each XML::DOM::Node generates the appropriate SAX callbacks (for the
appropriate SAX handler.) Different SAX handlers can be plugged in to
accomplish different things, e.g. L<XML::Checker> would check the node
(currently only Document and Element nodes are supported), L<XML::Handler::BuildDOM>
would create a new DOM subtree (thereby, in essence, copying the Node)
and in the near future, XML::Writer could print the node.
All Perl SAX related work is still in flux, so this interface may change a
little.
See PerlSAX for the description of the SAX interface.
=item check ( [$checker] )
See descriptions for check() in L<XML::DOM::Document> and L<XML::DOM::Element>.
=item xql ( @XQL_OPTIONS )
To use the xql method, you must first I<use> L<XML::XQL> and L<XML::XQL::DOM>.
This method is basically a shortcut for:
$query = new XML::XQL::Query ( @XQL_OPTIONS );
return $query->solve ($node);
If the first parameter in @XQL_OPTIONS is the XQL expression, you can leave off
the 'Expr' keyword, so:
$node->xql ("doc//elem1[@attr]", @other_options);
is identical to:
$node->xql (Expr => "doc//elem1[@attr]", @other_options);
See L<XML::XQL::Query> for other available XQL_OPTIONS.
See L<XML::XQL> and L<XML::XQL::Tutorial> for more info.
=item isHidden ()
Whether the node is hidden.
See L<Hidden Nodes|XML::DOM/_Hidden_Nodes_> for details.
=back
| electric-cloud/EC-EC2 | src/main/resources/project/lib/XML/DOM/Node.pod | Perl | apache-2.0 | 13,079 |
If you read this file _as_is_, just ignore the funny characters you
see. It is written in the POD format (see pod/perlpod.pod) which is
specifically designed to be readable as is.
=head1 NAME
README.riscos - Perl version 5 for RISC OS
=head1 DESCRIPTION
This document gives instructions for building Perl for RISC OS. It is
complicated by the need to cross compile. There is a binary version of
perl available from L<http://www.cp15.org/perl/> which you may wish to
use instead of trying to compile it yourself.
=head1 BUILD
You need an installed and working gccsdk cross compiler
L<http://gccsdk.riscos.info/> and REXEN
L<http://www.cp15.org/programming/>
Firstly, copy the source and build a native copy of perl for your host
system.
Then, in the source to be cross compiled:
=over 4
=item
$ ./Configure
=item
Select the riscos hint file. The default answers for the rest of the
questions are usually sufficient.
Note that, if you wish to run Configure non-interactively (see the INSTALL
document for details), to have it select the correct hint file, you'll
need to provide the argument -Dhintfile=riscos on the Configure
command-line.
=item
$ make miniperl
=item
This should build miniperl and then fail when it tries to run it.
=item
Copy the miniperl executable from the native build done earlier to
replace the cross compiled miniperl.
=item
$ make
=item
This will use miniperl to complete the rest of the build.
=back
=head1 AUTHOR
Alex Waugh <alex@alexwaugh.com>
| leighpauls/k2cro4 | third_party/cygwin/lib/perl5/5.10/pods/perlriscos.pod | Perl | bsd-3-clause | 1,511 |
=pod
=head1 NAME
SwishSpiderConfig.pl - Sample swish-e spider configuration
=head1 DESCRIPTION
This is a sample configuration file for the spider.pl program provided
with the swish-e distribution.
A spider.pl configuration file is not required as spider.pl has reasonable
defaults. In fact, it's recommended that you only use a spider.pl
configuration file *after* successfully indexing with spider.pl's default
settings. To use the default settings run the spider using the special magical
word "default" as the first parameter:
spider.pl default <URL> [...]
If no parameters are passed to spider.pl then spider.pl will look for a file
called F<SwishSpiderConfig.pl> in the current directory.
A spider.pl config file is useful when you need to change the default
behavior of the way spider.pl operates. For example, you may wish to index
just part of your site, or tell the spider that example.com,
www.example.com and web.example.com are all the same site.
The configuration file is actually Perl code. This makes it possible to do
reasonably complicated things directly within the config file. For example,
parse HTML content into sections and index each section as a separate "document"
allowing searches to be targeted.
The spider.pl config file must set an array called "@servers".
The "@servers" array holds one or more descriptions of a server
to index. In other words, you may define multiple configurations to index
different servers (or different parts of the same server) and group them
together in the @servers array.
Each server description is contained in a single Perl hash.
For example, to index two sites define two Perl hashes:
my %main_site = (
base_url => 'http://example.com',
same_hosts => 'www.example.com',
email => 'admin@example.com',
);
my %news_site = (
base_url => 'http://news.example.com',
email => 'admin@example.com',
);
@servers = ( \%main_site, \%news_site );
1;
The above defines two Perl hashes (%main_site and %news_site) and then places
a *reference* (the backslash before the name of the hash) to each of those
hashes in the @servers array. The "1;" at the end is required at the end
of the file (Perl must see a true value at the end of the file).
Let's start out with a simple example. As of Swish-e 2.4.3 there's a new option
that allow you to merge your config file with the default config file used when
you specify "default" as the first parameter to F<spider.pl>.
So, say you only wanted to change the limit the number of files
indexed.
@servers = (
{
use_default_config => 1, # same as using 'default'
max_files => 100,
},
);
1;
That last number one is important, by the way. It keeps Perl happy.
Below are two example configurations, but included in the same @servers
array (as anonymous Perl hashes). They both have the skip flag set which
disables their use (this is just an example after all).
The first is a simple example of a few parameters, and shows the use of
a "test_url" function to limit what files are fetched from the server (in
this example only .html files are fetched).
The second example is slightly more complex and makes use the the
SWISH::Filter module to filter documents (such as PDF and MS Word).
Note: The examples below are outside "pod" documentation -- if you are reading
this with the "perldoc" command you will not see the examples below.
=cut
# @servers is a list of hashes -- so you can spider more than one site
# in one run (or different parts of the same tree)
# The main program expects to use this array (@SwishSpiderConfig::servers).
### Please do not spider these examples -- spider your own servers, with permission ####
#=============================================================================
# This is a simple example, that includes a few limits
# Only files ending in .html will be spidered (probably a bit too restrictive)
@servers = (
    {
        skip       => 1,    # disabled -- example only, do not spider
        base_url   => 'http://www.swish-e.org/index.html',
        same_hosts => [qw/swish-e.org/],
        agent      => 'swish-e spider http://swish-e.org/',
        email      => 'swish@domain.invalid',

        # Fetch only URLs whose path looks like an HTML page.
        test_url => sub { $_[0]->path =~ /\.html?$/ },

        delay_sec   => 2,      # seconds to wait between requests
        max_time    => 10,     # stop spidering after this many minutes
        max_files   => 100,    # stop after this many unique URLs
        max_indexed => 20,     # stop after sending this many docs to swish
        keep_alive  => 1,      # use HTTP keep-alive connections
    },
);

1;
#=============================================================================
# Example 2: a fuller configuration.  It uses the SWISH::Filter module
# (via spider.pl's swish_filter() helper) so documents such as PDF and
# MS Word can be converted and indexed.  Cookies are enabled because
# some sites require them, and MD5 checksums catch duplicate pages
# (i.e. if / and /index.html point to the same content).
# Only the "docs" sub-tree of the swish-e site is indexed -- test_url()
# below restricts the URL path.
#
# swish_filter() is supplied by spider.pl at run time.  It returns code
# refs that are wired into the test_response and filter_content hooks.
my ($filter_sub, $response_sub) = swish_filter();
@servers = ({
    skip => 0, # Flag to disable spidering this host.
    base_url => 'http://swish-e.org/current/docs/',
    same_hosts => [ qw/www.swish-e.org/ ],
    agent => 'swish-e spider http://swish-e.org/',
    email => 'swish@domain.invalid',
    keep_alive => 1, # Try to keep the connection open
    max_time => 10, # Max time to spider in minutes
    max_files => 20, # Max files to spider
    # NOTE(review): example 1 above spells this option "delay_sec" --
    # confirm which spelling this version of spider.pl honors.
    delay_secs => 2, # Delay in seconds between requests
    ignore_robots_file => 0, # Don't set that to one, unless you are sure.
    use_cookies => 1, # True will keep cookie jar
    # Some sites require cookies
    # Requires HTTP::Cookies
    use_md5 => 1, # If true, this will use the Digest::MD5
    # module to create checksums on content
    # This will very likely catch files
    # with different URLs that are the same
    # content. Will trap / and /index.html,
    # for example.
    # This will generate A LOT of debugging information to STDOUT
    debug => DEBUG_URL | DEBUG_SKIPPED | DEBUG_HEADERS,
    # Here are hooks to callback routines to validate urls and responses
    # Probably a good idea to use them so you don't try to index
    # Binary data. Look at content-type headers!
    test_url => \&test_url,
    test_response => $response_sub,
    filter_content => $filter_sub,
} );
1;
#---------------------- Public Functions ------------------------------
# Here are some examples of callback functions
#
#
# Use these to adjust skip/ignore based on filename/content-type
# Or to filter content (pdf -> text, for example)
#
# Remember to include the code references in the config as above.
#
#----------------------------------------------------------------------
# Callback: decide whether a URL should be fetched at all.  spider.pl
# calls this with the URI object and the server config hash before the
# document is requested.  Return true to fetch/spider, false to skip.
sub test_url {
    my ( $uri, $server ) = @_;

    # Skip common image files.  (The original pattern had a stray "?"
    # after the extension group, making it optional -- so any path
    # ending in a bare "." was also skipped.)
    return if $uri->path =~ /\.(gif|jpg|jpeg|png)$/;

    # Only spider the docs sub-tree.
    return $uri->path =~ m[^/current/docs/];
}
## Here's an example of a "test_response" callback. You would
# add it to your config like:
#
# test_response => \&test_response_sub,
#
# This routine is called when the *first* block of data comes back
# from the server. If you return false no more content will be read
# from the server. $response is a HTTP::Response object.
# It's useful for checking the content type of documents.
#
# For example, say we have a lot of audio files linked on our site that we
# do not want to index. But we also have a lot of image files that we want
# to index the path name only.
# Callback: inspect the first block of the HTTP response.
# $response is an HTTP::Response object; returning false aborts the
# download.  Per the comments above: audio files are skipped entirely,
# and image files are indexed by path name only.
sub test_response_sub {
    my ( $uri, $server, $response ) = @_;

    # Don't index (or spider into) audio files at all.
    return if $response->content_type =~ m[^audio/];

    # Index image files by path name only: set "no_contents" so swish
    # skips the body.  (The original used "unless", which flagged every
    # NON-image document -- the opposite of the stated intent.)
    $server->{no_contents}++ if $response->content_type =~ m[^image/];

    return 1;    # ok to index and spider
}

# Don't forget to return a true value at the end...
1;
| uddhab/swish-e | swish-e-2.4.7/prog-bin/SwishSpiderConfig.pl | Perl | mit | 8,976 |
# FuseBead::From::PNG - turn a PNG image into fuse-bead (perler/hama)
# build plans.  See the POD at the bottom of this file for details.
package FuseBead::From::PNG;

use strict;
use warnings;

BEGIN {
    $FuseBead::From::PNG::VERSION = '0.03';
}

use Image::PNG::Libpng qw(:all);
use Image::PNG::Const qw(:all);
use FuseBead::From::PNG::Const qw(:all);    # bead color/diameter constants
use FuseBead::From::PNG::Bead;              # one bead (color + position meta)
use Data::Debug;
use Memoize;

# _find_bead_color is deterministic per RGB tuple, so memoize it and
# install the cached version under the name _find_bead_color_fast.
memoize('_find_bead_color', INSTALL => '_find_bead_color_fast');
# Constructor.  Accepts either a flat option list or a single hashref:
#   filename  - path of the PNG to process
#   unit_size - pixels per bead unit (default 1)
#   mirror    - mirror plans relative to the image (default 1/true)
#   whitelist - arrayref of allowed entries (kept only if non-empty)
#   blacklist - arrayref of forbidden entries (kept only if non-empty)
sub new {
    my $class = shift;
    my %args  = ref $_[0] eq 'HASH' ? %{ $_[0] } : @_;

    my $self = bless {}, ref($class) || $class;

    $self->{'filename'}  = $args{'filename'};
    $self->{'unit_size'} = $args{'unit_size'} || 1;

    # Plans are mirrored by default; normalize any supplied value to 0/1.
    if ( defined $args{'mirror'} ) {
        $self->{'mirror'} = $args{'mirror'} ? 1 : 0;
    }
    else {
        $self->{'mirror'} = 1;
    }

    # Keep white/black lists only when given as non-empty arrayrefs.
    for my $list_name (qw(whitelist blacklist)) {
        my $list = $args{$list_name};
        $self->{$list_name} =
            ( $list && ref($list) eq 'ARRAY' && @{$list} > 0 ) ? $list : undef;
    }

    return $self;
}
# Bead physical dimensions, cached on the object.  Returns
#   { metric   => { bead_diameter => <mm> },
#     imperial => { bead_diameter => <inches> } }
sub bead_dimensions {
    my $self = shift;

    $self->{'bead_dimensions'} ||= do {
        my %dim;
        for my $system (qw/imperial metric/) {
            my $diameter = FuseBead::From::PNG::Const->BEAD_DIAMETER;
            $diameter *= FuseBead::From::PNG::Const->MILLIMETER_TO_INCH
                if $system eq 'imperial';
            $dim{$system} = { bead_diameter => $diameter };
        }
        \%dim;
    };

    return $self->{'bead_dimensions'};
}
# Consolidated bead color table, keyed by color id (e.g. 'BLACK'):
#   { cid, name, hex_color, rgb_color => [r, g, b] }
# Values come from FuseBead::From::PNG::Const constants whose names are
# derived from the color id by suffix.  Built once and cached.
sub bead_colors {
    my $self = shift;

    return $self->{'bead_colors'} ||= do {
        my $hash = {};
        for my $color ( BEAD_COLORS ) {
            # Constant names follow a <COLOR>_<SUFFIX> convention.
            my ($n_key, $hex_key, $r_key, $g_key, $b_key) = (
                $color . '_NAME',
                $color . '_HEX_COLOR',
                $color . '_RGB_COLOR_RED',
                $color . '_RGB_COLOR_GREEN',
                $color . '_RGB_COLOR_BLUE',
            );

            # NOTE(review): calling ->$method on a string is legal under
            # strict; this "no strict 'refs'" is presumably defensive.
            no strict 'refs';
            $hash->{ $color } = {
                'cid' => $color,
                'name' => FuseBead::From::PNG::Const->$n_key,
                'hex_color' => FuseBead::From::PNG::Const->$hex_key,
                'rgb_color' => [
                    FuseBead::From::PNG::Const->$r_key,
                    FuseBead::From::PNG::Const->$g_key,
                    FuseBead::From::PNG::Const->$b_key,
                ],
            };
        }

        $hash;
    };
}
# All possible beads, keyed by bead identifier: one
# FuseBead::From::PNG::Bead object per known color.  Built lazily.
sub beads {
    my $self = shift;

    unless ( $self->{'beads'} ) {
        my %by_id;
        for my $color ( BEAD_COLORS ) {
            my $bead = FuseBead::From::PNG::Bead->new( color => $color );
            $by_id{ $bead->identifier } = $bead;
        }
        $self->{'beads'} = \%by_id;
    }

    return $self->{'beads'};
}
# Lazily load the PNG via Image::PNG::Libpng.  The alpha channel is
# stripped at load time so each pixel is a plain RGB triple.
sub png {
    my ($self) = @_;

    unless ( $self->{'png'} ) {
        $self->{'png'} = read_png_file(
            $self->{'filename'},
            transforms => PNG_TRANSFORM_STRIP_ALPHA,
        );
    }

    return $self->{'png'};
}
# IHDR header info (width, height, bit depth, ...) for the loaded PNG,
# fetched once and cached on the object.
sub png_info {
    my ($self) = @_;
    unless ( $self->{'png_info'} ) {
        $self->{'png_info'} = $self->png->get_IHDR;
    }
    return $self->{'png_info'};
}
# Plan width in beads: image width divided by unit_size.  Cached.
sub bead_row_length {
    my ($self) = @_;
    unless ( $self->{'bead_row_length'} ) {
        $self->{'bead_row_length'} =
            $self->png_info->{'width'} / $self->{'unit_size'};
    }
    return $self->{'bead_row_length'};
}
# Plan height in beads: image height divided by unit_size.  Cached.
sub bead_col_height {
    my ($self) = @_;
    unless ( $self->{'bead_col_height'} ) {
        $self->{'bead_col_height'} =
            $self->png_info->{'height'} / $self->{'unit_size'};
    }
    return $self->{'bead_col_height'};
}
# Main entry point: turn the configured PNG into a build plan.
# Returns a hashref:
#   plan  - arrayref of flattened beads in placement order
#   beads - per-identifier summary, each with a 'quantity' count
#   info  - dimension summary from _plan_info()
# With view => 'JSON' | 'HTML', the tally is instead passed through the
# matching FuseBead::From::PNG::View class's print() method.
sub process {
    my $self = shift;
    my %args = ref $_[0] eq 'HASH' ? %{$_[0]} : @_;

    my $tally = {
        beads => {},
        plan => [],
    };

    # Without a filename there is nothing to do; return the empty tally.
    if ($self->{'filename'}) {
        # Pipeline: pixels -> rgb blocks -> bead color ids -> positioned beads.
        my @blocks = $self->_png_blocks_of_color;
        my @units = $self->_approximate_bead_colors( blocks => \@blocks );
        my @beads = $self->_generate_bead_list(units => \@units);

        $tally->{'plan'} = [ map { $_->flatten } @beads ];

        # Tally quantities per distinct bead identifier.
        my %list;
        for my $bead (@beads) {
            if(! exists $list{ $bead->identifier }) {
                $list{ $bead->identifier } = $bead->flatten;
                delete $list{ $bead->identifier }{'meta'}; # No need for meta in bead list
                $list{ $bead->identifier }{'quantity'} = 1;
            }
            else {
                $list{ $bead->identifier }{'quantity'}++;
            }
        }
        $tally->{'beads'} = \%list;

        $tally->{'info'} = $self->_plan_info();
    }

    # Optionally format the tally through a view module, loaded on demand.
    if ($args{'view'}) {
        my $view = $args{'view'};
        my $module = "FuseBead::From::PNG::View::$view";
        $tally = eval {
            (my $file = $module) =~ s|::|/|g;
            require $file . '.pm';
            $module->new($self)->print($tally);
        };
        die "Failed to format as a view ($view). $@" if $@;
    }

    return $tally;
}
# Accessor/mutator for the mirror flag.  Any defined argument is
# normalized to 1 (true) or 0 (false).  Returns the current setting.
sub mirror {
    my ( $self, $value ) = @_;
    $self->{'mirror'} = ( $value ? 1 : 0 ) if defined $value;
    return $self->{'mirror'};
}
# Read-only accessor: the configured whitelist (arrayref or undef).
sub whitelist {
    my ($self) = @_;
    return $self->{'whitelist'};
}
# Count whitelist entries that match any applicable filter (optionally
# restricted via $allowed).  Callers use this as a boolean: 0 means
# "no usable whitelist".
sub has_whitelist {
    my ( $self, $allowed ) = @_;

    my @entries = @{ $self->whitelist || [] };
    my $matches = 0;
    for my $filter ( values %{ $self->_list_filters($allowed) } ) {
        $matches += grep { /$filter/ } @entries;
    }
    return $matches;
}
# Whether $val passes the whitelist.  $allowed optionally restricts
# which filters (e.g. 'color') are consulted.  When no whitelist is
# configured, everything passes.
sub is_whitelisted {
    my $self = shift;
    my $val = shift;
    my $allowed = shift; # arrayref (or scalar) naming filters we may use

    return 1 if ! $self->has_whitelist($allowed); # return true if there is no whitelist

    for my $entry ( @{ $self->whitelist || [] } ) {
        for my $filter( values %{ $self->_list_filters($allowed) } ) {
            next unless $entry =~ /$filter/; # entry must look like something this filter understands
            # Reduce the entry to the filter's captured portion.  With the
            # current anchored patterns the capture is the whole entry.
            my $capture = $entry;
            $capture =~ s/$filter/$1/;
            return 1 if $val eq $capture;
        }
    }
    return 0; # value is not in whitelist
}
# Read-only accessor: the configured blacklist (arrayref or undef).
sub blacklist {
    my ($self) = @_;
    return $self->{'blacklist'};
}
# Count blacklist entries that match any applicable filter (optionally
# restricted via $allowed).  0 means "no usable blacklist".
sub has_blacklist {
    my ( $self, $allowed ) = @_;

    my @entries = @{ $self->blacklist || [] };
    my $matches = 0;
    for my $filter ( values %{ $self->_list_filters($allowed) } ) {
        $matches += grep { /$filter/ } @entries;
    }
    return $matches;
}
# Whether $val is blocked by the blacklist.  $allowed optionally
# restricts which filters (color/bead) are consulted.  Returns 0 when
# no blacklist is configured.
sub is_blacklisted {
    my $self = shift;
    my $val = shift;
    my $allowed = shift; # optional filter restriction

    return 0 if ! $self->has_blacklist($allowed); # no blacklist blocks nothing

    for my $entry ( @{ $self->blacklist || [] } ) {
        for my $filter ( values %{ $self->_list_filters($allowed) } ) {
            next unless $entry =~ /$filter/;
            # Reduce the entry to the filter's captured portion, exactly
            # as is_whitelisted() does.  (Previously this used
            # "$1 || $entry", relying on the capture left over from the
            # match above -- same result, but inconsistent and fragile.)
            my $capture = $entry;
            $capture =~ s/$filter/$1/;
            return 1 if $val eq $capture;
        }
    }
    return 0; # value is not in blacklist
}
# Read the PNG row by row and reduce it to a flat, row-major list of
# color blocks, one per unit_size x unit_size cell.  Each element is a
# hashref { r =>, g =>, b => } sampled from the first pixel of the cell
# (the image is assumed to be pre-pixelated, so one sample suffices).
sub _png_blocks_of_color {
    my $self = shift;
    my %args = ref $_[0] eq 'HASH' ? %{$_[0]} : @_;

    my @blocks;
    return @blocks unless $self->{'filename'}; # No file, no blocks

    # Alpha is stripped at load time (PNG_TRANSFORM_STRIP_ALPHA), so
    # every pixel is exactly 3 bytes: R, G, B.
    my $pixel_bytecount = 3;
    my $y = -1;
    for my $pixel_row ( @{$self->png->get_rows} ) {
        $y++;
        # Sample only every unit_size-th pixel row.
        next unless ($y % $self->{'unit_size'}) == 0;
        my $row = $y / $self->{'unit_size'}; # get actual row of blocks we are current on
        my @values = unpack 'C*', $pixel_row;
        # Width of this row measured in blocks (cells).
        my $row_width = ( scalar(@values) / $pixel_bytecount ) / $self->{'unit_size'};
        for (my $col = 0; $col < $row_width; $col++) {
            # First pixel of the cell within the raw byte stream.
            my ($r, $g, $b) = (
                $values[ ($self->{'unit_size'} * $pixel_bytecount * $col) ],
                $values[ ($self->{'unit_size'} * $pixel_bytecount * $col) + 1 ],
                $values[ ($self->{'unit_size'} * $pixel_bytecount * $col) + 2 ]
            );
            # Store row-major: index = row * row_width + col.
            $blocks[ ($row * $row_width) + $col ] = {
                r => $r,
                g => $g,
                b => $b,
            };
        }
    }
    return @blocks;
}
# Manhattan distance between two RGB triples (arrayrefs of [r, g, b]).
# Lower scores mean more similar colors.
sub _color_score {
    my ( $self, $rgb_a, $rgb_b ) = @_;

    my $score = 0;
    for my $channel ( 0 .. 2 ) {
        $score += abs( $rgb_a->[$channel] - $rgb_b->[$channel] );
    }
    return $score;
}
# Given an (r, g, b) triple, pick the closest bead color id.
# All known colors are ranked by _color_score (Manhattan distance) and
# the best-scoring color that passes the whitelist/blacklist wins.
# Returns undef if every color is filtered out.
# Memoized at load time; call the cached version, _find_bead_color_fast.
sub _find_bead_color {
    my $self = shift;
    my $rgb = [ @_ ];

    # All color ids, best (lowest score) first.
    my @optimal_color =
        map { $_->{'cid'} }
        sort { $a->{'score'} <=> $b->{'score'} }
        map {
            +{
                cid => $_->{'cid'},
                score => $self->_color_score($rgb, $_->{'rgb_color'}),
            };
        }
        values %{ $self->bead_colors };

    my ($optimal_color) = grep {
        $self->is_whitelisted( $_, 'color' )
        && ! $self->is_blacklisted( $_, 'color' )
    } @optimal_color; # first color in list that passes whitelist and blacklist should be the optimal color for tested block

    return $optimal_color;
}
# Map each RGB block to its closest allowed bead color id.
# Dies unless 'blocks' is an arrayref of { r =>, g =>, b => } hashes.
sub _approximate_bead_colors {
    my $self = shift;
    my %args = ref $_[0] eq 'HASH' ? %{ $_[0] } : @_;

    my $blocks = $args{'blocks'};
    die 'blocks not valid' unless $blocks && ref($blocks) eq 'ARRAY';

    return map {
        $self->_find_bead_color_fast( $_->{'r'}, $_->{'g'}, $_->{'b'} )
    } @{$blocks};
}
# Validate the 'units' argument and delegate to _bead_list(), which
# turns flat color ids into positioned Bead objects.
sub _generate_bead_list {
    my $self = shift;
    my %args = ref $_[0] eq 'HASH' ? %{ $_[0] } : @_;

    my $units = $args{'units'};
    die 'units not valid' unless $units && ref($units) eq 'ARRAY';

    return $self->_bead_list($units);
}
# Turn a flat, row-major list of bead color ids into positioned Bead
# objects.  Each bead carries meta { x, y, ref } where ref is a simple
# sequential id.  When mirroring is on, every row is reversed so that
# the ironed side of the finished piece matches the source image.
sub _bead_list {
    my $self = shift;
    my @units = ref($_[0]) eq 'ARRAY' ? @{ $_[0] } : @_;

    my $unit_count = scalar(@units);
    my $row_width = $self->bead_row_length;
    my $bead_ref = -1; # artificial auto-incremented id

    my @beads;
    for (my $y = 0; $y < ($unit_count / $row_width); $y++) {
        # Consume one row's worth of color ids from the front.
        my @row = splice @units, 0, $row_width;
        my $x = 0;

        # mirror each row as it is set in the plan if we are mirroring the output
        @row = reverse @row if $self->mirror;

        for my $color ( @row ) {
            push @beads, FuseBead::From::PNG::Bead->new(
                color => $color,
                meta => {
                    x => $x,
                    y => $y,
                    ref => ++$bead_ref,
                },
            );
            $x++;
        }
    }
    return @beads;
}
# The known whitelist/blacklist filters, keyed by name.  An optional
# restriction (a single name or an arrayref of names) limits which
# filters are returned.  Both current filters accept a bare
# identifier made of letters/underscores (case-insensitive).
sub _list_filters {
    my $self     = shift;
    my $restrict = shift;

    my @allowed = ref($restrict) eq 'ARRAY' ? @{$restrict}
                : $restrict                 ? ($restrict)
                :                             ();

    my %filters = (
        color => qr{^([A-Z_]+)$}i,
        bead  => qr{^([A-Z_]+)$}i,
    );

    return { %filters } unless @allowed;
    return { map { $_ => $filters{$_} } @allowed };
}
# Summary info for a finished plan: physical length/height in both unit
# systems, plus the bead counts per row and column.
sub _plan_info {
    my $self = shift;

    my %info = (
        rows => $self->bead_row_length,
        cols => $self->bead_col_height,
    );

    for my $system (qw/metric imperial/) {
        my $diameter = $self->bead_dimensions->{$system}{'bead_diameter'};
        $info{$system} = {
            length => $self->bead_row_length * $diameter,
            height => $self->bead_col_height * $diameter,
        };
    }

    return \%info;
}
=pod
=head1 NAME
FuseBead::From::PNG - Convert PNGs into plans to build a two dimensional fuse bead replica.
=head1 SYNOPSIS
use FuseBead::From::PNG;
my $object = FuseBead::From::PNG;
$object->bead_tally();
=head1 DESCRIPTION
Convert a PNG into a block list and plans to build a fuse bead replica of the PNG. This is for projects that use fuse bead such as perler or hama.
The RGB values where obtained from Bead List with RGB Values (https://docs.google.com/spreadsheets/d/1f988o68HDvk335xXllJD16vxLBuRcmm3vg6U9lVaYpA/edit#gid=0).
Which was posted in the bead color subreddit beadsprites (https://www.reddit.com/r/beadsprites) under this post Bead List with RGB Values (https://www.reddit.com/r/beadsprites/comments/291495/bead_list_with_rgb_values/).
The generate_instructions.pl script under bin/ has been setup to optimally be used the 22k bucket of beads from Perler. (http://www.perler.com/22000-beads-multi-mix-_17000/17000.html)
$hash->{'filename'} = $args{'filename'};
$hash->{'unit_size'} = $args{'unit_size'} || 1;
# White list default
$hash->{'whitelist'} = ($args{'whitelist'} && ref($args{'whitelist'}) eq 'ARRAY' && scalar(@{$args{'whitelist'}}) > 0) ? $args{'whitelist'} : undef;
# Black list default
$hash->{'blacklist'} = ($args{'blacklist'} && ref($args{'blacklist'}) eq 'ARRAY' && scalar(@{$args{'blacklist'}}) > 0) ? $args{'blacklist'} : undef;
=head1 USAGE
=head2 new
Usage : ->new()
Purpose : Returns FuseBead::From::PNG object
Returns : FuseBead::From::PNG object
Argument :
filename - Optional. The file name of the PNG to process. Optional but if not provided, can't process the png.
e.g. filename => '/location/of/the.png'
unit_size - Optional. The size of pixels squared to determine a single unit of a bead. Defaults to 1.
e.g. unit_size => 2 # pixelated colors are 2x2 in size
whitelist - Optional. Array ref of colors, dimensions or color and dimensions that are allowed in the final plan output.
e.g. whitelist => [ 'BLACK', 'WHITE', '1x1x1', '1x2x1', '1x4x1', 'BLACK_1x6x1' ]
blacklist - Optional. Array ref of colors, dimensions or color and dimensions that are not allowed in the final plan output.
e.g. blacklist => [ 'RED', '1x10x1', '1x12x1', '1x16x1', 'BLUE_1x8x1' ]
Throws :
Comment :
See Also :
=head2 bead_dimensions
Usage : ->bead_dimensions()
Purpose : returns a hashref with bead dimension information in millimeters (metric) or inches (imperial)
Returns : hashref with bead dimension information, millimeters is default
Argument : $type - if set to imperial then dimension information is returned in inches
Throws :
Comment :
See Also :
=head2 bead_colors
Usage : ->bead_colors()
Purpose : returns bead color constants consolidated as a hash.
Returns : hashref with color constants keyed by the official color name in key form.
Argument :
Throws :
Comment :
See Also :
=head2 beads
Usage : ->beads()
Purpose : Returns a list of all possible bead beads
Returns : Hash ref with L<FuseBead::From::PNG::Bead> objects keyed by their identifier
Argument :
Throws :
Comment :
See Also :
=head2 png
Usage : ->png()
Purpose : Returns Image::PNG::Libpng object.
Returns : Returns Image::PNG::Libpng object. See L<Image::PNG::Libpng> for more details.
Argument :
Throws :
Comment :
See Also :
=head2 png_info
Usage : ->png_info()
Purpose : Returns png IHDR info from the Image::PNG::Libpng object
Returns : A hash of values containing information abou the png such as width and height. See get_IHDR in L<Image::PNG::Libpng> for more details.
Argument : filename => the PNG to load and part
unit_size => the pixel width and height of one unit, blocks are generally identified as Nx1 blocks where N is the number of units of the same color
Throws :
Comment :
See Also :
=head2 bead_row_length
Usage : ->bead_row_length()
Purpose : Return the width of one row of beads. Since a bead list is a single dimension array this is useful to figure out whict row a bead is on.
Returns : The length of a row of beads (image width / unit size)
Argument :
Throws :
Comment :
See Also :
=head2 bead_col_height
Usage : ->bead_col_height()
Purpose : Return the height in beads.
Returns : The height of a col of beads (image height / unit size)
Argument :
Throws :
Comment :
See Also :
=head2 process
Usage : ->process()
Purpose : Convert a provided PNG into a list of bead blocks that will allow building of a two dimensional bead replica.
Returns : Hashref containing information about particular bead beads found to be needed based on the provided PNG.
Also included is the build order for those beads.
Argument : view => 'a view' - optionally format the return data. options include: JSON and HTML
Throws :
Comment :
See Also :
=head2 mirror
Usage : ->mirror()
Purpose : Getter / Setter for the mirror option. Set to 1 (true) by default. This option will mirror the image when displaying it as plans. The reason is then the mirror of the image is what is placed on the peg board so that side can be ironed and, when turned over, the image is represented in it's proper orientation.
Returns : Either 1 or 0
Argument : a true or false value that will set whether the plans are mirrored to the image or not
Throws :
Comment :
See Also :
=head2 whitelist
Usage : ->whitelist()
Purpose : return any whitelist settings stored in this object
Returns : an arrayref of whitelisted colors and/or blocks, or undef
Argument :
Throws :
Comment :
See Also :
=head2 has_whitelist
Usage : ->has_whitelist(), ->has_whitelist($filter)
Purpose : return a true value if there is a whitelist with at least one entry in it based on the allowed filters, otherwise a false value is returned
Returns : 1 or 0
Argument : $filter - optional scalar containing the filter to restrict test to
Throws :
Comment :
See Also :
=head2 is_whitelisted
Usage : ->is_whitelisted($value), ->is_whitelisted($value, $filter)
Purpose : return a true if the value is whitelisted, otherwise false is returned
Returns : 1 or 0
Argument : $value - the value to test, $filter - optional scalar containing the filter to restrict test to
Throws :
Comment :
See Also :
=head2 blacklist
Usage : ->blacklist
Purpose : return any blacklist settings stored in this object
Returns : an arrayref of blacklisted colors and/or blocks, or undef
Argument :
Throws :
Comment :
See Also :
=head2 has_blacklist
Usage : ->has_blacklist(), ->has_whitelist($filter)
Purpose : return a true value if there is a blacklist with at least one entry in it based on the allowed filters, otherwise a false value is returned
Returns : 1 or 0
Argument : $filter - optional scalar containing the filter to restrict test to
Throws :
Comment :
See Also :
=head2 is_blacklisted
Usage : ->is_blacklisted($value), ->is_whitelisted($value, $filter)
Purpose : return a true if the value is blacklisted, otherwise false is returned
Returns : 1 or 0
Argument : $value - the value to test, $filter - optional scalar containing the filter to restrict test to
Throws :
Comment :
See Also :
=head2 _png_blocks_of_color
Usage : ->_png_blocks_of_color()
Purpose : Convert a provided PNG into a list of rgb values based on [row][color]. Size of blocks are determined by 'unit_size'
Returns : A list of hashes contain r, g and b values. e.g. ( { r => #, g => #, b => # }, { ... }, ... )
Argument :
Throws :
Comment :
See Also :
=head2 _color_score
Usage : ->_color_score()
Purpose : returns a score indicating the likeness of one color to another. The lower the number the closer the colors are to each other.
Returns : returns a positive integer score
Argument : $c1 - array ref with rgb color values in that order
$c2 - array ref with rgb color values in that order
Throws :
Comment :
See Also :
=head2 _find_bead_color
Usage : ->_find_bead_color
Purpose : given an rgb params, finds the optimal bead color
Returns : A bead color common name key that can then reference bead color information using L<FuseBead::From::PNG::bead_colors>
Argument : $r - the red value of a color
$g - the green value of a color
$b - the blue value of a color
Throws :
Comment : this subroutine is memoized as the name _find_bead_color_fast
See Also :
=head2 _approximate_bead_colors
Usage : ->_approximate_bead_colors()
Purpose : Generate a list of bead colors based on a list of blocks ( array of hashes containing rgb values )
Returns : A list of bead color common name keys that can then reference bead color information using L<FuseBead::From::PNG::bead_colors>
Argument :
Throws :
Comment :
See Also :
=head2 _generate_bead_list
Usage : ->_generate_bead_list()
Purpose : Validate the unit list and generate positioned beads from a list of bead color units (delegates to _bead_list)
Returns : A list of bead color common name keys that can then reference bead color information using L<FuseBead::From::PNG::bead_colors>
Argument :
Throws :
Comment :
See Also :
=head2 _bead_list
Usage : ->_bead_list()
Purpose : Generate a list of bead colors based on a list of blocks ( array of hashes containing rgb values ) for knob up orientation
Returns : A list of bead color common name keys that can then reference bead color information using L<FuseBead::From::PNG::bead_colors>
Argument :
Throws :
Comment :
See Also :
=head2 _list_filters
Usage : ->_list_filters()
Purpose : return whitelist/blacklist filters
Returns : an hashref of filters
Argument : an optional filter restriction to limit set of filters returned to just one
Throws :
Comment :
See Also :
=head1 BUGS
=head1 SUPPORT
=head1 AUTHOR
Travis Chase
CPAN ID: GAUDEON
gaudeon@cpan.org
https://github.com/gaudeon/FuseBead-From-Png
=head1 COPYRIGHT
This program is free software licensed under the...
The MIT License
The full text of the license can be found in the
LICENSE file included with this module.
=head1 SEE ALSO
perl(1).
=cut
1;
| gaudeon/FuseBead-From-PNG | lib/FuseBead/From/PNG.pm | Perl | mit | 21,924 |
package IsKernel::Infrastructure::FileUtilities;
use warnings;
use strict;
use v5.14;
use File::Spec::Functions;
use IsKernel::Infrastructure::StringHelper;
=pod
Description:
Creates a new object
Parameters:
None
Returns:
A reference to the object
=cut
# Construct an empty FileUtilities object.
# The first argument is the class name (standard method invocation);
# no other parameters are accepted or stored.
sub new
{
    my ($class) = @_;
    return bless {}, $class;
}
=pod
Description:
Verifies if a file is modifiable
Parameters:
path - the path to the file
Returns:
-1 - internal default only; in practice never returned (a plain file yields 1 or 0, anything else yields 2)
1 - the file is ordinary, writable and readable
0 - the file is ordinary, but is not writable or not readable
2 - the file is not ordinary
=cut
# Classify a path for modifiability.
#   $path - path to check
# Returns:
#   1 - plain file, both writable and readable
#   0 - plain file, but not writable or not readable
#   2 - not a plain file (missing, directory, special file, ...)
sub is_ordinary_file
{
    my ($self, $path) = @_;

    # Anything that is not a plain file (including a nonexistent path).
    return 2 unless -f $path;

    # Plain file: report whether we hold both read and write access.
    return ((-w $path) && (-r $path)) ? 1 : 0;
}
=pod
Description:
Generates a unique filename in regards to the files in the specified directory
Parameters:
length - the length of the new filename
directory_path - the path to the directory
Returns:
The full path of a unique filename within the specified directory
=cut
# Draw random filenames until one does not collide with an existing
# entry in the target directory.
#   $length         - length of the random name to generate
#   $directory_path - directory the name must be unique within
# Returns the full path (directory + random name) of the free slot.
sub create_random_filename
{
    my ($self, $length, $directory_path) = @_;

    # Random-string source, seeded with the requested name length.
    my $generator = IsKernel::Infrastructure::StringHelper->new($length);

    while (1) {
        my $candidate = catfile($directory_path, $generator->generate_random($length));
        # First non-existing candidate wins.
        return $candidate unless -e $candidate;
    }
}
1; | iskernel/candy-antivirus | lib/IsKernel/Infrastructure/FileUtilities.pm | Perl | mit | 1,548 |
package SGN::Controller::List;
use Moose;
use URI::FromHash 'uri';
use Data::Dumper;
BEGIN { extends 'Catalyst::Controller'; }
# Catalyst action: GET /list/details?list_id=NNN
# Renders the detail page for a single stored list.  Anonymous users are
# redirected to the login page, with the original URL preserved so they
# return here after logging in.
sub list_details :Path('/list/details') :Args(0) {
my $self = shift;
my $c = shift;
# id of the list to display, taken from the query string
my $list_id = $c->req->param('list_id');
# require a login before showing any list details
if (! $c->user()) {
$c->res->redirect( uri( path => '/user/login', query => { goto_url => $c->req->uri->path_query } ) );
return;
}
# NOTE(review): CXGN::List is not use()d in this file - presumably it is
# loaded elsewhere by the application; confirm, or add an explicit use.
my $list = CXGN::List->new( { dbh => $c->dbc->dbh, list_id=>$list_id });
# expose the list's metadata to the Mason template
$c->stash->{list_id} = $list_id;
$c->stash->{list_name} = $list->name;
$c->stash->{list_description} = $list->description;
$c->stash->{list_type} = $list->type;
$c->stash->{list_size} = $list->list_size;
$c->stash->{template} = '/list/list_details.mas';
}
1;
| solgenomics/sgn | lib/SGN/Controller/List.pm | Perl | mit | 785 |
#!/usr/bin/env perl
# This pings each of several instances, and should be run over a long period of
# time, to see if/when they crash. Start the instances on separate ports.
# Change the @instances table below to define which instances you want to ping,
# and how often.
use strict;
use warnings;
local $| = 1; # autoflush stdout
use open ':encoding(utf8)';
binmode STDOUT, ':utf8';
use LWP::UserAgent;
use HTTP::Request::Common qw{ POST };
use Getopt::Long;
use Data::Dumper;
my $ua = LWP::UserAgent->new();
$ua->timeout(10);   # seconds; a hung instance surfaces as a timeout error
# This table defines which instances we'll ping, and how often (in seconds)
my @instances = (
{ port => 16001, freq => 30, },
#{ port => 16002, freq => 30, },
);
# NOTE(review): $start_time is assigned but never used below.
my $start_time = time();
# Poll forever: once per second, ping every instance whose last ping is
# older than its freq, and report success/failure with a timestamp.
while(1) {
sleep(1);
my $time_now = time();
foreach my $inst (@instances) {
# time of the last ping; -1 forces an immediate first ping
my $last = exists($inst->{last}) ? $inst->{last} : -1;
my $freq = $inst->{freq};
if ($time_now - $last > $freq) {
my $port = $inst->{port};
my $url = "http://localhost:$port/?type=tex\&q=n^2";
print "$time_now: $url: ";
my $response = $ua->get($url);
my $status = $response->is_success ? "success" : "error: " . $response->status_line;
print "$status\n";
$inst->{last} = $time_now;
}
}
}
| ncbi/render-math | examples/ping-test.pl | Perl | mit | 1,332 |
#! ./perl
#
# tests the Windows Script Host functionality
use strict;
use warnings;

# Windows Script Host sample scripts (with any arguments) to run in
# order via the console host, CScript.
my @list = (
    'ADOSample1.wsf',
    'ADOSample2.wsf',
    'args.wsf "ok 1" "ok 2"',
    'helloworld.wsf',
    'notepad.wsf',
    'showenv.wsf',
    'specialfolder.wsf',
);

# Run each script, warning (but continuing) when CScript reports
# failure, and pause for the user between scripts.
for my $item (@list) {
    system("CScript $item") == 0
        or warn "CScript $item failed: $?\n";
    print "Press [ ENTER ] to continue\n";
    <STDIN>;
}
| amidoimidazol/bio_info | Beginning Perl for Bioinformatics/eg/Windows Script Host/test.pl | Perl | mit | 339 |
# Copyrights 2001-2008 by Mark Overmeer.
# For other contributors see ChangeLog.
# See the manual pages for details on the licensing terms.
# Pod stripped from pm file by OODoc 1.04.
use strict;
use warnings;
package Mail::Box::Collection;
use vars '$VERSION';
$VERSION = '2.082';
# A collection of Mail::Box::Identity objects (folders grouped below a
# common parent).  Inherits the collection behaviour from
# User::Identity::Collection and logging/error reporting from
# Mail::Reporter.
use base qw/User::Identity::Collection Mail::Reporter/;
use Mail::Box::Identity;
use Scalar::Util qw/weaken/;
# Constructor.  A lone leading positional argument is taken as the
# collection name; everything is forwarded to Mail::Reporter::new().
sub new(@)
{ my $class = shift;
unshift @_,'name' if @_ % 2;
$class->Mail::Reporter::new(@_);
}
# Initialize both parent classes; items default to Mail::Box::Identity.
# The back-reference to the manager is weakened to break the reference
# cycle between the manager and its folder collections.
sub init($)
{ my ($self, $args) = @_;
$args->{item_type} ||= 'Mail::Box::Identity';
$self->Mail::Reporter::init($args);
$self->User::Identity::Collection::init($args);
weaken($self->{MBC_manager})
if $self->{MBC_manager} = delete $args->{manager};
$self->{MBC_ftype} = delete $args->{folder_type};
$self;
}
# Collections of this class hold folders.
sub type() { 'folders' }
#------------------------------------------
# The Mail::Box::Manager for this collection: the locally stored one if
# set, otherwise inherited from the parent (undef at the root).
sub manager()
{ my $self = shift;
return $self->{MBC_manager}
if defined $self->{MBC_manager};
my $parent = $self->parent;
defined $parent ? $self->parent->manager : undef;
}
#------------------------------------------
# Get/set the default folder class.  When unset here, the value is
# looked up from (and cached off) the parent collection.
sub folderType()
{ my $self = shift;
return($self->{MBC_ftype} = shift) if @_;
return $self->{MBC_ftype} if exists $self->{MBC_ftype};
if(my $parent = $self->parent)
{ return $self->{MBC_ftype} = $parent->folderType;
}
undef;
}
#------------------------------------------
1;
| carlgao/lenga | images/lenny64-peon/usr/share/perl5/Mail/Box/Collection.pm | Perl | mit | 1,646 |
:- module(conf_prefixes, []).
:- use_module(library(semweb/rdf_db)).
/** <module> Configure prefixes (namespaces)
Register additional prefixes. Registering a prefix serves three
purposes:
* It can be used in code, e.g., rdf(X, rdf:type, rdfs:'Class')
* It can be used in the same fashion from the toplevel
* It is used by the web-services to present resources more compact.
@see rdf_register_ns/2 and rdf_register_ns/3
*/
% Template module: no prefixes are registered by default.  Uncomment the
% example below (or add similar rdf_register_ns/2 calls) to add yours.
% :- rdf_register_ns(cornetto, 'http://purl.org/vocabularies/cornetto/').
# toExMS(S)cli.pl
#
# Arranges information in Mascot-exported CVS files into the text files necessary for
# ExMS. Assumes that scan title includes the retention time in the for "RT:$time".
# Made to work with Mascot as run by the IBBR as of 2014-07-18. Mascot claims last update 2010-03-30.
#
# Based on the pepinfo.m function written as part of the ExMS program by ZhongYuan Kan et. al. at the University of Pennsylvania.
#
###############################################################################
# Required Export options:
# (If it isn't listed, this doesn't care about it.)
#
# Protein Hit Information (*)
#
# Peptide Match Information (*)
# Experimental Charge (*)
# Calculated Mr (Da) (*)
# Mass error (Da) (*)
# Start (*)
# End (*)
# Score (*)
# Sequence (*)
# Query title (*)
#
# Sample line follows:
# prot_hit_num,prot_acc,pep_query,pep_rank,pep_isbold,pep_isunique,pep_exp_mz,pep_exp_z,pep_calc_mr,pep_delta,pep_start,pep_end,pep_score,pep_res_before,pep_seq,pep_res_after,pep_scan_title
# 1,NLGT,581,1,1,1,520.2807,1,519.2727,0.0007,130,134,31.09,A,SVVCL,L,File:06262014-Fab-MSMS-1.mzXML Scans:801 RT:4.6696min Charge:1+ Fragmentation:cid
###############################################################################
use strict;
use warnings;
use Math::BigFloat;
#open (DEBUG, ">log.txt");
# Binomial probability mass function, computed with Math::BigFloat so
# large trial counts do not overflow native floating point.
#   $targets - array ref of success counts to evaluate
#   $n       - number of trials
#   $p       - per-trial success probability
# Returns an array ref of Math::BigFloat probabilities, one per target:
#   C(n,k) * p^k * (1-p)^(n-k)
sub binopdf
{
    my ($targets, $n, $p) = @_;

    my $trials = Math::BigFloat->new($n);
    my $p_hit  = Math::BigFloat->new($p);
    my $p_miss = Math::BigFloat->new(1)->bsub($p_hit);   # q = 1 - p

    my @probs = map {
        my $k = $_;
        # binomial coefficient times the success/failure powers
        Math::BigFloat->new($trials->copy()->bnok($k))
            ->bmul($p_hit ** $k)
            ->bmul($p_miss ** ($trials - $k))
    } @{$targets};

    return \@probs;
}
# Discrete 1-D convolution of a kernel with a data series (same contract
# as MATLAB's conv()): result[i] = sum over j of kernel[j] * data[i-j],
# with a result length of |kernel| + |data| - 1.
#   $kernel_ref - array ref (kernel)
#   $data_ref   - array ref (data)
# Returns an array ref holding the convolved series.
sub conv
{
    my ($kernel_ref, $data_ref) = @_;

    my $klen = scalar @{$kernel_ref};
    my $dlen = scalar @{$data_ref};
    my $rlen = $klen + $dlen - 1;

    # Accumulate every kernel/data product into its output slot; this is
    # the same pairing the classic i/j double loop performs.
    my @result = (0) x $rlen;
    for my $j (0 .. $klen - 1) {
        for my $k (0 .. $dlen - 1) {
            $result[$j + $k] += $kernel_ref->[$j] * $data_ref->[$k];
        }
    }
    return \@result;
}
# pepinfo
#   $_[0] = scalar or array ref: sequence of amino acids (one-letter codes)
#   $_[1] = scalar X: exclude the N-terminal X residues from exchange (default 2)
# Returns an array ref R:
#   R[0] = peptide monoisotopic mass (residues + water)
#   R[1] = array ref distND: natural-abundance isotopic distribution
#   R[2] = scalar maxND: last isotope peak above the observability threshold
#   R[3] = scalar maxD:  maximum possible deuterium uptake
sub pepinfo
{
    my @R;
    my $subSeq;
    if (scalar(@_) < 1) {die "No input to parse.\n"}
    if (ref($_[0]) eq '')
    {
        if ($_[0] !~ /^[AC-Z]+$/) {die ('No amino acid sequence passed.\n')}
        else {$subSeq = $_[0];}
    }
    elsif (ref ($_[0]) eq 'ARRAY')
    {
        $subSeq = join ('', @{$_[0]});
    }
    my $peptideMass = 0;
    my $C = 0;
    my $N = 0;
    my $O = 0;
    my $S = 0;
    my $Fe = 0;
    my $X = 2;
    my (@distND, $maxND, $maxD, $index);
    # BUGFIX: the original tested scalar(@_) > 2, so the two-argument call
    # pepinfo($seq, $x) could never override the default X of 2.
    if (scalar(@_) > 1)
    {$X = $_[1];}
    # following values taken from http://en.wikipedia.org/wiki/Proteinogenic_amino_acid
    # heme(C34H31N4O4Fe) from http://www.lfd.uci.edu/~gohlke/molmass/?q=C34H31N4O4Fe
    # acylation(-COCH3)
    my @AAshort = ('A', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'Y', '*', ']');
    my @AAcarbonNum = ( 3, 3, 4, 5, 9, 2, 6, 6, 6, 6, 5, 4, 12, 5, 5, 6, 3, 4, 3, 5, 11, 9, 34, 2);
    my @AAnitrogenNum = ( 1, 1, 1, 1, 1, 1, 3, 1, 2, 1, 1, 2, 3, 1, 2, 4, 1, 1, 1, 1, 2, 1, 4, 0);
    my @AAoxygenNum = ( 1, 1, 3, 3, 1, 1, 1, 1, 1, 1, 1, 2, 3, 1, 2, 1, 2, 2, 1, 1, 1, 2, 4, 1);
    my @AAsulfurNum = ( 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
    my @AAironNum = ( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0);
    my @AAmonoMass = (71.03711, 103.00919, 115.02694, 129.04259, 147.06841, 57.02146, 137.05891, 113.08406, 128.09496, 113.08406, 131.04049, 114.04293, 255.15829, 97.05276, 128.05858, 156.10111, 87.03203, 101.04768, 150.95364, 99.06841, 186.07931, 163.06333, 613.1741, 42.0106);
    # Sum up residue mass and elemental composition.
    for (my $i = 0; $i < length($subSeq); $i ++)
    {
        my $aa = substr($subSeq, $i, 1);
        # BUGFIX: the original first ran an unbounded "++$index until ..."
        # search, which walks past the end of @AAshort forever on any
        # unknown residue code (then repeated the search with the bounded
        # loop below).  Keep only the bounded search and fail loudly.
        for ($index = 0; $index < scalar @AAshort && $AAshort[$index] ne $aa; $index ++) {}
        die "Unknown amino acid code '$aa' in sequence.\n" if $index >= scalar @AAshort;
        $peptideMass = $peptideMass + $AAmonoMass[$index];
        $C = $C + $AAcarbonNum[$index];
        $N = $N + $AAnitrogenNum[$index];
        $O = $O + $AAoxygenNum[$index];
        $S = $S + $AAsulfurNum[$index];
        $Fe = $Fe + $AAironNum[$index];
    }
    $peptideMass = $peptideMass + (1.007825 * 2 + 15.994915); # peptide mass = residue masses + water
    ### calculate maxND (natural-abundance isotopic envelope):
    my $obsCThreshold = 1e-3; # observability threshold
    my $pC13 = 0.0111; # natural richness of C13
    my @CA = (0 .. $C);
    my @distC = @{binopdf(\@CA, $C, $pC13)};
    my $pN15 = 0.00364; # natural richness of N15
    my @NA = (0 .. $N);
    my @distN = @{binopdf(\@NA, $N, $pN15)};
    my $pO18 = 0.00205; # natural richness of O18
    my @OA = (0 .. $O);
    my @dist = @{binopdf(\@OA, $O, $pO18)};
    # O18 is two mass units up, so spread the oxygen distribution onto
    # even mass offsets.
    my @distO = (0) x ($O * 2);
    for (my $i = 0; $i < $O; $i ++)
    {
        $distO[($i + 1)*2 - 2] = $dist[$i];
    }
    # pS33=0.00762; natural richness of S33 [ignored here]
    my $pS34 = 0.04293; # natural richness of S34 (two mass units up)
    my @distS = (0) x ($S * 2);
    if ($S > 0)
    {
        my @SA = (0 .. $S);
        @dist = @{binopdf(\@SA, $S, $pS34)};
        for (my $i = 0; $i < $S; $i++)
        {
            $index = ($i + 1) * 2 - 2;
            $distS[$index] = $dist[$i];
        }
    }
    else {@distS = 1;}
    my $pFe56 = 0.91754; # natural richness of Fe56
    my $pFe57 = 0.02119; # natural richness of Fe57
    my @distFe = (0) x ($Fe * 2);
    if ($Fe > 0)
    {
        my @FA = (0 .. $Fe);
        @dist = @{binopdf(\@FA, $Fe, $pFe56)}; # this calc is considering from Fe54 (natural richness 0.05845)
        for (my $i = 1; $i < $Fe; $i++)
        {
            $distFe[($i + 1)*2 - 2] = $dist[$i];
        }
        @distFe = @{conv(\@distFe, binopdf(\@FA, $Fe, $pFe57))};
    }
    else
    {
        @distFe = 1;
    }
    # Combined envelope = convolution of the per-element distributions.
    my @interDist = @{conv(\@distC, \@distN)};
    @interDist = @{conv(\@distO, \@interDist)};
    @interDist = @{conv(\@distS, \@interDist)};
    @interDist = @{conv(\@distFe, \@interDist)};
    my @finalDist = @interDist;
    $maxND = (scalar @finalDist) - 1;
    # Truncate the envelope at the first pair of consecutive peaks below
    # the observability threshold (mirrors the pepinfo.m loop).
    for (my $i = 2; $i < $maxND; $i++)
    {
        if ($finalDist[$i] < $obsCThreshold && $finalDist[$i - 1] < $obsCThreshold && $finalDist[$i - 2] >= $obsCThreshold)
        {
            $maxND = $i - 2;
            last;
        }
    }
    for (my $i = 0; $i <= $maxND; $i++)
    {
        $distND[$i] = $finalDist[$i];
    }
    ### calculate maxD: exchangeable amides = residues minus the first X
    ### residues minus prolines (which have no amide hydrogen).
    $maxD = length ($subSeq) - $X;
    for (my $i = $X; $i < length ($subSeq); $i ++)
    {
        # BUGFIX: substr with no length argument returned the whole
        # remaining tail, so interior prolines were never matched and
        # never excluded; compare exactly one residue.
        if (substr($subSeq, $i, 1) eq 'P') # exclude Proline
        {
            $maxD--;
        }
    }
    @R = ($peptideMass, \@distND, $maxND, $maxD);
    return \@R;
}
# Main driver: prompt for a Mascot CSV export, locate the needed columns
# from the header row, convert each peptide row into an ExMS text row
# (start, end, charge, m/z, maxND, maxD, RT, score, delta, distND...),
# pad all rows to a common column count, and write the result.
my $infilename = "";
my $outfilename = "";
#get $infilename
print "Please enter the input file:\t";
$infilename = <STDIN>;
chomp $infilename;
#print $DEBUG "Input file is $infilename.\n";
if (-e $infilename && -r $infilename)
{
open (IFH, '<', $infilename) or die "Could not open input file $infilename.\n";
#get $outputfilename
print "Enter desired output filename. If none is entered, file will be `out.txt'.\t";
$outfilename = <STDIN>;
chomp $outfilename;
if ($outfilename eq "") {$outfilename = "out.txt";}
#print $DEBUG "Output file will be $outfilename.\n";
open (OFH, '>', $outfilename) or die "Could not open output file $outfilename.\n";
if (-w $outfilename)
{
#do all the things to ifh
# column indices discovered from the CSV header row
my ($start, $end, $z, $mz, $score, $delta, $seq, $scan, @row, @AoR);
my $mode = 0;   # 1 = header seen, data rows may follow
my $max = 0;    # widest row (tab count) seen so far, for padding
#my $i = 0;
while (<IFH>)
{
#do all the things to $_
if ($_ =~ m/^prot_*+/) #header row
{
#assign locations to variables
$mode = 1;
#print DEBUG $_;
@row = split(/,/, $_);
for (my $i = 0; $i < scalar @row; $i++)
{
#ID fields
if ($row[$i] eq "pep_exp_mz") {$mz = $i;}
elsif ($row[$i] eq "pep_exp_z") {$z = $i;}
elsif ($row[$i] eq "pep_delta") {$delta = $i;}
elsif ($row[$i] eq "pep_score") {$score = $i;}
elsif ($row[$i] eq "pep_start") {$start = $i;}
elsif ($row[$i] eq "pep_end") {$end = $i;}
elsif ($row[$i] eq "pep_seq") {$seq = $i;}
elsif ($row[$i] eq "pep_scan_title\n") {$scan = $i;}
else {}
}
}
elsif ($_ =~ m/^\d.+$/ && $mode == 1) #as long as $_ has data
{
#print "Acquiring data row #$i.\n";
#$i++;
my $rt;
@row = split(/,/, $_);
#call pepinfo
my @pepinfo = @{pepinfo($row[$seq], 2)};
# retention time is embedded in the scan title as "RT:<minutes>"
if ($row[$scan] =~ /.*RT:(\d+(?:\.\d+)).*/) {$rt = $1;}
chomp($row[$scan]);
my $R = $row[$start] . "\t" . $row[$end] . "\t" . $row[$z] . "\t" . $row[$mz] . "\t" . $pepinfo[2] . "\t" . $pepinfo[3] . "\t" . $rt . "\t" . $row[$score] . "\t" . $row[$delta] . "\t";
# append the isotopic distribution values
foreach (@{$pepinfo[1]})
{
$R = $R . $_ . "\t";
}
my $columns = $R =~ tr/\t//;
if ($columns > $max) {$max = $columns;}
push @AoR, $R;
}
else {$mode = 0;} #no more data to read
}
#$i = 0;
foreach (@AoR)
{
#print "Outputting row #$i.\n";
#$i++;
#pad each row to length
my $columns = $_ =~ tr/\t//;
for (my $i = 0; $i < ($max - $columns); $i++)
{
$_ = $_ . "0\t";
}
chop ($_);
$_ = $_ . "\n";
#print @AoR to output here:
print OFH $_;
}
}
else {die "Output file $outfilename is not writeable.\n";}
}
else
{
if (!-e $infilename) {die "Input file $infilename does not exist.\n";}
elsif (!-r $infilename) {die "Input file $infilename is not readable.\n";}
else {die "This should be unreachable.\n"}
}
#close DEBUG;
#!/usr/bin/perl
use strict;
use warnings;
use feature qw(say);
use JSON;
use LWP::UserAgent;
use HTTP::Request::Common;
# List all known clusters with their queuemanagers
# The queue manager name is the single required command-line argument.
my $qmgr = shift;
die("Please pass me the name of a queuemanager as argument")
unless defined($qmgr);
my $json = JSON->new;
# "Inquire Cluster Queue Manager" parameters: wildcard both fields so
# every cluster / queue-manager pair known to $qmgr is returned.
my %input = (
'ClusterQMgrName' => '*',
'ClusterName' => '*'
);
my $content = $json->encode(\%input);
my $ua = LWP::UserAgent->new;
# MQWeb endpoint; assumes an MQWeb daemon on localhost:8081.
my $req = POST 'http://localhost:8081/api/clusqmgr/inquire/' . $qmgr;
$req->header(
'Content-Type' => 'application/json',
'Content-length' => length($content)
);
$req->content($content);
my $res = $ua->request($req);
my $mqweb = $json->decode($res->content());
if ( exists($mqweb->{error}) ) {
# MQ-level failure: show the reason code and its description.
say 'An MQ error occurred while inquiring queues.';
say 'Reason Code: '
, $mqweb->{error}->{reason}->{code}
, ' - '
, $mqweb->{error}->{reason}->{desc};
}
else {
# Group the returned queue managers by cluster name...
my %clusters;
foreach my $clusqmgr(@{$mqweb->{data}}) {
my $clusterName = $clusqmgr->{ClusterName}->{value};
push(@{$clusters{$clusterName}}, $clusqmgr->{QMgrName}->{value});
}
# ...then print each cluster as an underlined heading with its
# queue managers indented below it.
foreach my $cluster(keys(%clusters))
{
say $cluster;
say '-' x length($cluster);
foreach my $qmgr(@{$clusters{$cluster}})
{
say ' ', $qmgr;
}
}
}
| fbraem/mqweb | samples/perl/clusqmgr_inq.pl | Perl | mit | 1,272 |
#!/msrc/apps/bin/perl
#
# latex_pretty_table.pl
# Hugh L Taylor, Battelle / Pacific Northwest Laboratory
# hl_taylor@pnl.gov
# Sun Jun 25 1995
# $Log: not supported by cvs2svn $
# Revision 1.7 1995/06/27 05:04:39 pg511
# Cleanup. Comments, variable names, help option.
#;;
#;; the following is an elisp keyboard macro to call this function from emacs
#;; place it in your .emacs file
#;;
#;; temporary keyboard macro form of latex-pretty-table
#;;
#(fset 'latex-pretty-table
# [19 101 110 100 123 116 97 98 117 108 97 114 1 4194336 18 103 105 110 123 116 97 98 117 108 14 1 24 24 21 escape 124 108 97 116 101 120 95 112 114 101 116 116 121 95 116 97 98 108 101 46 112 108 return])
#
#
# Main block: read a LaTeX tabular body from stdin/args, measure the
# widest entry of every column (pass 1), then re-emit the rows with each
# field padded to its column width (pass 2).
{ require 'newgetopt.pl';
#
# Handle the options (not used for the moment)
#
&init_options;
&NGetOpt(@all_opts);
&handle_options;
#
$iline = -1;
#
# read lines into array, saving the maximum length field for each column
# of the table
#
while (<>) {
chop;
$lines[++$iline] = " ".$_; # add lead space
$lines[$iline] =~ s/^\s+/ /g; # exactly one lead space
$lines[$iline] =~ s/&/ & /g; # spaces around ampersands
@fields = split(/&/,$_);
#
# last field might include \\ or that and following \hline(s)
# last field is then split in two
#
@end_fields = split(/\\\\/,$fields[$#fields]);
pop(@fields);
push(@fields,($end_fields[0]));
$multicolumn_skip = 0;
for ( $jfield=0; $jfield <=$#fields; $jfield++) { #
$fields[$jfield] =~ s/^ +/ /g; #
$fields[$jfield] =~ s/ +$/ /g; # <= 1 leading or trailing space
$temp_len = length($fields[$jfield])
+ &number_braces_in_string($fields[$jfield]);
if ($fields[$jfield] =~ /\\multicolumn\{(\d+)/) { # multicolumn entry
$mc_columns = $1;
# remember (first column, last column, text length) for later
push(@multicolumn_list,($jfield+$multicolumn_skip,
$jfield+$multicolumn_skip+$mc_columns-1,
$temp_len));
$multicolumn_skip += $mc_columns-1;
} else {
$max_field_lengths[$jfield] = $temp_len
if $max_field_lengths[$jfield] < $temp_len;
}
}
}
#
# if a \multicolumn is longer than the combined lengths of the columns
# it covers, add to the length of the rightmost column
#
$num_mc = ($#multicolumn_list+1)/3 - 1 ;
for ( $jfield=0; $jfield <=$#max_field_lengths; $jfield++) {
$max_field_lengths[$jfield] += 4; # a little extra room
}
# widen the rightmost covered column for each oversized \multicolumn
for ($ii = 0; $ii <= $num_mc; $ii++) {
$left_column = shift(@multicolumn_list);
$right_column = shift(@multicolumn_list);
$mc_length = shift(@multicolumn_list);
$tot_length = 0;
for ($jj = $left_column; $jj <= $right_column; $jj++) {
$tot_length += $max_field_lengths[$jj];
}
if ($mc_length > $tot_length ) {
$max_field_lengths[$right_column] += $mc_length - $tot_length;
}
}
#
# loop over lines, rewriting them
#
for ( $iline=0; $iline <= $#lines; $iline++ ) { # last line starting from 0
$#fields = 0;
@fields = split(/&/,$lines[$iline]);
#
# last field might include \\ or that and following \hline
# field is then split in two
#
@end_fields = split(/\\\\/,$fields[$#fields]);
pop(@fields);
push(@fields,($end_fields[0]));
$delim = " & ";
$multicolumn_skip = 0; # running total of reduction in no. of columns
$mc_width = 0;
for ( $jfield=0; $jfield <=$#max_field_lengths; $jfield++) {
$fields[$jfield] =~ s/^ +/ /; #
$fields[$jfield] =~ s/ +$/ /; # 1 leading or trailing space
$width = $max_field_lengths[$jfield+$multicolumn_skip];
if ($fields[$jfield] =~ /\\multicolumn\{(\d+)/) { # multicolumn field
$mc_width = $1;
# a multicolumn gets the combined width of the columns it spans
for ($ii = 1; $ii < $mc_width; $ii++) {
$width = $width + $max_field_lengths[$ii+$multicolumn_skip+$jfield];
}
$multicolumn_skip += $mc_width-1;
}
$format_string = "%-".$width."s";
# last field of the row gets the \\ terminator instead of &
$delim = "\\\\" if ($jfield == $#max_field_lengths-$multicolumn_skip);
$delim = " " if ($jfield > $#max_field_lengths-$multicolumn_skip);
$outfield = sprintf("$format_string %s", $fields[$jfield], $delim);
#
# sprintf sometimes yields too many chars (e.g. braces aren't counted)
#
$extrachar = length($outfield) - $width;
while ($extrachar-- > 0) {$outfield =~ s/ / /};
printf("$format_string",$outfield);
}
# re-attach anything that followed \\ (e.g. \hline)
printf(" %s",$end_fields[1]) if $#end_fields == 1;
print " \n";
}
exit;
}
#
#
#
#
# Define the recognized command-line options, kept in the package
# globals @gen_opts / @all_opts for NGetOpt and handle_options.
#
sub init_options {
    @gen_opts = ( 'h', 'help' );
    @all_opts = @gen_opts;
}
#
#
#
# Act on the parsed command-line options (NGetOpt fills the $opt_*
# globals): -h / --help prints the usage text and exits.
sub handle_options {
&help && exit if ( $opt_h || $opt_help );
# $verbose = $opt_v || $opt_verbose ? 1 : 0;
}
#
#
#
# Print a short usage summary to STDOUT; returns the (true) result of
# the final print so "&help && exit" in handle_options works.
sub help {
print "Program: latex_pretty_table.pl A Perl script\n";
print "Purpose: Reformats latex table contents into nice columns\n";
print " for easy editing and reading.\n";
print "Usage: latex_pretty_table.pl < infile > outfile\n";
print " Meant to be called from emacs C-u M-|; \n";
print " shell command on region, with output replacing region.\n";
}
#
# Count the brace characters ('{' and '}') in a string.  Used above to
# widen sprintf fields, since braces occupy source columns but are not
# typeset.
#
# BUGFIX: the original counted split(/[{}]/) fields minus one, which
# undercounts whenever the string ends in a brace because split discards
# trailing empty fields.  tr/// counts every brace directly.
#
sub number_braces_in_string { # $string
local ($string) = @_;
$number_braces_in_string = ($string =~ tr/{}//);
}
| rangsimanketkaew/NWChem | contrib/latex_pretty_table.pl | Perl | mit | 5,133 |
% vim: set syntax=prolog
:- consult('helpers/run').
:- use_module(library(pairs)).
:- use_module(library(regex)).
:- use_module('helpers/io').
:- use_module('helpers/lists').
:- dynamic(weight/2).
% Advent of Code 2017 day 7 part 2: find the one node whose weight makes
% its subtree unbalanced and print the corrected weight.
main :-
read_input(Nodes),
node_with_wrong_weight(Nodes, Node, Siblings),
Name - Weight - _ = Node,
new_weight(Node, Siblings, NewWeight),
put_node_weight(Nodes, Name, NewWeight, UpdatedNodes),
% sanity check: with the new weight the whole tree balances
\+ node_with_wrong_weight(UpdatedNodes, _, _),
format("~s: ~d -> ~d\n", [Name, Weight, NewWeight]).
% Parse stdin into a tree of Name - Weight - Children terms, rooted at
% the node that never appears as anyone's child.
read_input(NodeTree) :-
current_input(S),
read_lines(S, Lines),
maplist(parse, Lines, NodePairs),
root(NodePairs, Root),
list_to_assoc(NodePairs, Nodes),
tree(Nodes, Root, NodeTree),
!.
% One input line "name (weight)" or "name (weight) -> a, b, c" becomes
% Name - (Weight - ChildNames).
parse(Line, Node) :-
regex("(\\w+) \\((\\d+)\\)( -> ([a-z, ]+))?", [], Line, Captures),
(
[ACodes, WeightString, _, StringOfBs] = Captures,
string_codes(A, ACodes),
number_string(Weight, WeightString),
split_string(StringOfBs, ", ", "", SplitBs),
exclude(=(""), SplitBs, Bs),
Node = A - (Weight - Bs)
;
[ACodes, WeightString] = Captures,
string_codes(A, ACodes),
number_string(Weight, WeightString),
Node = A - (Weight - [])
).
% The root is a parent that is never listed as a child.
root(NodePairs, Root) :-
edges(NodePairs, Edges),
pairs_keys_values(Edges, As, Bs),
member(Root, As),
\+ member(Root, Bs).
% All parent-child edges implied by the node list.
edges(NodePairs, Edges) :-
maplist(node_edge, NodePairs, NestedEdges),
flatten(NestedEdges, Edges).
% Edges from one node: pair the parent name with each child name.
node_edge(A - (_ - Bs), Edges) :-
repeated(A, As),
zip(As, Bs, Edges).
% Materialize the tree below NodeName from the name->node assoc.
tree(Nodes, NodeName, Tree) :-
get_assoc(NodeName, Nodes, Weight - ChildrenNames),
maplist(tree(Nodes), ChildrenNames, Children),
Tree = NodeName - Weight - Children.
% A node is "wrong" when its total weight differs from every sibling's.
node_with_wrong_weight(Node, OutputNode, Siblings) :- node_with_wrong_weight(Node, [], OutputNode, Siblings).
node_with_wrong_weight(Node, Siblings, OutputNode, OutputSiblings) :-
Siblings \= [],
forall(member(Sibling, Siblings), (
weight(Node, NodeWeight),
weight(Sibling, SiblingWeight),
NodeWeight \= SiblingWeight
)),
OutputNode = Node,
OutputSiblings = Siblings.
% Otherwise recurse: try each child against its own siblings.
node_with_wrong_weight(_ - _ - Children, _, OutputNode, OutputSiblings) :-
select(Child, Children, OtherChildren),
node_with_wrong_weight(Child, OtherChildren, OutputNode, OutputSiblings).
% Replace the weight of the named node anywhere in the tree.
put_node_weight(Name - _ - Children, Name, NewWeight, Name - NewWeight - Children).
put_node_weight(Name - Weight - Children, NodeName, NewWeight, Updated) :-
maplist(reordered_put_node_weight(NodeName, NewWeight), Children, UpdatedChildren),
Updated = Name - Weight - UpdatedChildren.
reordered_put_node_weight(Name, NewWeight, Node, Updated) :-
put_node_weight(Node, Name, NewWeight, Updated).
% The corrected own-weight: shift by the difference to a sibling's total.
new_weight(Node, Siblings, NewWeight) :-
member(Sibling, Siblings),
_ - NodeWeight - _ = Node,
weight(Node, NodePlusChildrenWeight),
weight(Sibling, SiblingPlusChildrenWeight),
Change is SiblingPlusChildrenWeight - NodePlusChildrenWeight,
NewWeight = NodeWeight + Change.
% Total weight of a subtree; memoized via the asserted dynamic clause.
weight(Node, Weight) :-
_ - NodeWeight - Children = Node,
maplist(weight, Children, ChildrenWeights),
sum_list(ChildrenWeights, TotalChildrenWeight),
Weight is NodeWeight + TotalChildrenWeight,
asserta( (weight(Node, Weight) :- !) ).
| SamirTalwar/advent-of-code | 2017/AOC_07_2.pl | Perl | mit | 3,191 |
package Spoon::Hub;
use Spoon::Base -Base;   # Spiffy-style base: implicit $self, const/field helpers
# The hub is the application's central object broker: each class_id
# (config, registry, hooks, ...) becomes a lazily-initialized accessor
# whose object is created on first use via load_class().
const class_id => 'hub';
field action => '_default_';
field main => -weak;
field config_files => [];
field all_hooks => [];
# Construct the hub, run init, and publish the singleton so
# Spoon::Base-derived objects can find their hub.
sub new {
$self = super;
$self->init;
$Spoon::Base::HUB = $self;
}
our $AUTOLOAD;
# Any unknown method name is treated as a class_id: a lazily-initialized
# field is declared for it, backed by load_class().
sub AUTOLOAD {
$AUTOLOAD =~ /.*::(.*)/
or die "Can't AUTOLOAD '$AUTOLOAD'";
my $class_id = $1;
return if $class_id eq 'DESTROY';
field $class_id =>
-init => "\$self->load_class('$class_id')";
$self->$class_id(@_);
}
# Overridable lifecycle no-ops.
sub pre_process {}
sub post_process {}
# Dispatch the current action to the plugin registered for it.
sub process {
$self->preload;
my $action = $self->action;
die "No plugin for action '$action'"
unless defined $self->registry->lookup->action->{$action};
my ($class_id, $method) =
@{$self->registry->lookup->action->{$action}};
$method ||= $action;
return $self->$class_id->$method;
}
# Load every class registered for preloading, highest priority first.
sub preload {
my $preload = $self->registry->lookup->preload;
map {
$self->load_class($_->[0])
} sort {
$b->[1] <=> $a->[1]
} map {
my %hash = @{$preload->{$_}}[1..$#{$preload->{$_}}];
[$_, $hash{priority} || 0];
} keys %$preload;
return $self;
}
# Resolve a class_id to a class name (config override first, then the
# registry), require it if needed, instantiate it, cache it on the hub
# as a field, and init the new object.
sub load_class {
my $class_id = shift;
return $self if $class_id eq 'hub';
return $self->$class_id
if $self->can($class_id) and defined $self->{$class_id};
my $class_class = $class_id . '_class';
my $class_name = $self->config->can($class_class)
? $self->config->$class_class
: $self->registry_loaded
? $self->registry->lookup->classes->{$class_id}
: Carp::confess "Can't find a class for class_id '$class_id'";
Carp::confess "No class defined for class_id '$class_id'"
unless $class_name;
unless ($class_name->can('new')) {
eval "require $class_name";
die $@ if $@;
}
$self->add_hooks
unless $class_id eq 'hooks';
my $object = $class_name->new
or die "Can't create new '$class_name' object";
$class_id ||= $object->class_id;
die "No class_id defined for class: '$class_name'\n"
unless $class_id;
field $class_id =>
-init => "\$self->load_class('$class_id')";
$self->$class_id($object);
$object->init;
return $object;
}
# Install all registered hooks whose target classes are now loaded;
# entries are removed from the registry lookup once installed.
sub add_hooks {
return unless $self->registry_loaded;
my $hooks = $self->registry->lookup->{hook}
or return;
for my $class_name (keys %$hooks) {
next unless $class_name->can('new');
$self->add_hook(@$_) for @{$hooks->{$class_name} || []};
delete $hooks->{$class_name};
}
delete $self->registry->lookup->{hook}
if not keys %$hooks;
}
# Install one hook and remember it so it can be removed at teardown.
sub add_hook {
my $hooks = $self->all_hooks;
push @$hooks, $self->hooks->add(@_);
return $hooks->[-1];
}
# Unhook everything installed through add_hook, in reverse order.
sub remove_hooks {
my $hooks = $self->all_hooks;
while (@$hooks) {
pop(@$hooks)->unhook;
}
}
# True once the registry object and its lookup table both exist.
sub registry_loaded {
defined $self->{registry} &&
defined $self->{registry}{lookup};
}
# Tear down hooks when the hub is garbage collected.
sub DESTROY {
$self->remove_hooks;
}
__END__
=head1 NAME
Spoon::Hub - Spoon Hub Base Class
=head1 SYNOPSIS
=head1 DESCRIPTION
=head1 AUTHOR
Brian Ingerson <INGY@cpan.org>
=head1 COPYRIGHT
Copyright (c) 2004. Brian Ingerson. All rights reserved.
This program is free software; you can redistribute it and/or modify it
under the same terms as Perl itself.
See http://www.perl.com/perl/misc/Artistic.html
=cut
| carlgao/lenga | images/lenny64-peon/usr/share/perl5/Spoon/Hub.pm | Perl | mit | 3,410 |
#!/usr/bin/env perl
# Create matrix consists needed number of lines containg 5 random elements in each line. Sum of elements in each line
# will be the requested.
use Getopt::Std;
use strict;
our ($opt_h);
# -h prints usage.  NOTE(review): 'h:' declares -h as taking a value,
# yet $opt_h is only used as a flag; 'h' alone looks intended - confirm.
USAGE() unless( getopts( 'h:' ));
USAGE() if ( $opt_h );
# First positional arg: how many random draws per line (the line's sum);
# second: how many lines to emit.
my $N = shift;
my $E = shift;
for(my $e = 0; $e < $E; $e++) {
my @cnt;
push @cnt, (0) x (5); #Initialize array to avoid errors in 5 columns
# Distribute the N draws over the 5 buckets with probabilities
# 0.20 / 0.20 / 0.30 / 0.15 / 0.15.
for(my $i = 0; $i < $N; $i++) {
my $rn = rand();
if ($rn <= 0.2) {
$cnt[0]++;
} elsif ($rn <= 0.4) {
$cnt[1]++;
} elsif ($rn <= 0.7) {
$cnt[2]++;
} elsif ($rn <= 0.85) {
$cnt[3]++;
} else {
$cnt[4]++;
}
}
# One tab-separated row of 5 counts per requested line.
print join("\t", @cnt), "\n";
}
# Print the usage/help text to STDERR and exit with status 0.
sub USAGE {
print STDERR <<USAGE;
$0 sum_of_elements lines
The program will create matrix consists needed number of lines containg 5 random elements in each line.
Sum of elements in each line will be the requested.
Arguments are:
- sum_of_elements: sum of random elements in each line.
- lines: number of lines
AUTHOR
Written by Zhongwu Lai, AstraZeneca, Boston, USA
REPORTING BUGS
Report bugs to zhongwu\@yahoo.com
COPYRIGHT
This is free software: you are free to change and redistribute it. There is NO WARRANTY, to the extent permitted by law.
USAGE
exit(0);
}
| AstraZeneca-NGS/Seq2C | testrand.pl | Perl | mit | 1,317 |
#!/usr/bin/perl
use strict;
use warnings;
# AoC 2015 day 14 part 2 race length, in seconds.
my $SECONDS = 2503;
# name => { fly => { speed, duration }, rest } parsed from the puzzle input
my $deer_info = {};
while (<>) {
chomp;
if ($_ =~ /^(.+) can fly (\d+) .* for (\d+) seconds, but then must rest for (\d+) seconds.$/) {
$deer_info->{$1} = {
fly => {
speed => int $2,
duration => int $3,
},
rest => int $4,
};
}
}
# points accumulated per reindeer (one point per second in the lead)
my $leaderboard = {};
for my $deer (keys %{$deer_info}) {
$leaderboard->{$deer} = 0;
}
# For every second of the race, recompute each reindeer's position from
# scratch and award a point to every reindeer tied for the lead.
# (O(SECONDS^2) per reindeer, but fast enough for 2503 seconds.)
for my $i (1..$SECONDS) {
my $results = {};
for my $deer (keys %{$deer_info}) {
$results->{$deer} = calc_flight($deer, $i);
}
my $lead = calc_lead($results);
for my $deer (keys %{$lead}) {
$leaderboard->{$deer} += 1;
}
}
use Data::Dumper;
print Dumper $leaderboard;
# calc_flight($deer, $duration): simulate $deer (looked up in the global
# %$deer_info table) for $duration seconds and return a hashref with the
# total distance covered and whether the deer is resting at that instant.
sub calc_flight {
    my ($deer, $duration) = @_;
    my $stats      = $deer_info->{$deer};
    my $elapsed    = 0;
    my $travelled  = 0;
    my $is_resting = 0;
    # Alternate fly/rest phases one second at a time until the requested
    # duration has elapsed.
    OUTER: while ($elapsed < $duration) {
        for (1 .. $stats->{fly}->{duration}) {
            $is_resting = 0;
            $travelled += $stats->{fly}->{speed};
            $elapsed++;
            last OUTER if $elapsed == $duration;
        }
        for (1 .. $stats->{rest}) {
            $is_resting = 1;
            $elapsed++;
            last OUTER if $elapsed == $duration;
        }
    }
    return {
        distance => $travelled,
        resting  => $is_resting,
    };
}
# calc_lead($distances): given { deer => { distance => N, ... }, ... },
# return a hashref of every deer tied at the maximum distance, each mapped
# to that maximum.
sub calc_lead {
    my $results_by_deer = shift;
    # Find the farthest distance travelled so far.
    my $best = 0;
    foreach my $info (values %{$results_by_deer}) {
        $best = $info->{distance} if $info->{distance} > $best;
    }
    # Collect every deer currently sharing that distance.
    my %leaders = map  { $_ => $best }
                  grep { $results_by_deer->{$_}->{distance} == $best }
                  keys %{$results_by_deer};
    return \%leaders;
}
| KenMGJ/advent-of-code | 2015/14/day14-2.pl | Perl | mit | 1,906 |
package MsEvent;
use strict;
use warnings;
use Digest::MD5 qw(md5_hex);
use Log;
use Time::ParseDate;
use POSIX qw(strftime);
use Data::Dumper;
use JSON qw( encode_json decode_json );
my $IMP_SOURCE = 0;
my $IMP_STATS = {};
# init_import_stats($source_id): remember the source being imported and
# reset the per-run counters. Dies on a missing id or extra arguments.
sub init_import_stats {
    my ($source_id, @extra) = @_;
    die("init_import_stats: Need source id") unless $source_id;
    die("init_import_stats: Too many parameters") if $extra[0];
    $IMP_SOURCE = $source_id;
    $IMP_STATS  = {};
}
# save_import_stats(): log a summary of the counters collected by
# save_event() for the current source. The database persistence below is
# commented out, so the function currently only writes to the log.
sub save_import_stats {
    #
    # my $dbh = shift;
    # Everything that was neither counted as new nor as updated is an error.
    my $errors = ($IMP_STATS->{total}||0) - ($IMP_STATS->{new}||0) - ($IMP_STATS->{updated}||0);
    # my $sth = $dbh->prepare( '
    # INSERT INTO event_import_stats( location_source_id, found_events, new_events, updated_events, error_events, error_message )
    # values ( ?,?,?,?,?,? )'
    # );
    # $sth->execute( $IMP_SOURCE, $IMP_STATS->{total} ||0,$IMP_STATS->{new}||0, $IMP_STATS->{updated}||0,
    # $errors, $IMP_STATS->{errors} ? Dumper( $IMP_STATS->{errors}): undef );
    # $sth->finish();
    #
    # # delete future events that have changed / were cancelled
    # $sth = $dbh->prepare( 'DELETE FROM event WHERE location_source_id = ? AND event_datetime > now() AND date(last_modified) <> date(now()) ');
    # $sth->execute( $IMP_SOURCE );
    # $sth->finish();
    #
    log_info( "<-------- import source ".$IMP_SOURCE." --------->");
    log_info( "found events:", $IMP_STATS->{total} );
    # A source yielding zero events almost always means a broken scraper.
    log_error("no events found! That's probably wrong!" ) unless ( $IMP_STATS->{total} );
    log_info( "new: ", $IMP_STATS->{new} );
    log_info( "updated: ", $IMP_STATS->{updated} );
    log_info( "errors: ", $errors );
}
# save_event($event): normalize, validate and emit one scraped event as a
# JSON object on STDOUT (followed by ",\n" so the stream can later be
# wrapped into a JSON array). Returns 1 on success or a negative error code
# (see event_error calls below). Updates the module-level import counters.
sub save_event {
    my $event = shift;
    $IMP_STATS->{total} ++;
    # Strip HTML/URLs and collapse whitespace in the free-text fields.
    $event->{description} = cleanup( $event->{description}) if ( $event->{description} );
    $event->{title} = cleanup( $event->{title}) if ( $event->{title} );
    # Events are deduplicated via an md5 key; default to hashing the link.
    if ( !$event->{md5} ) {
        $event->{md5} = md5_hex( $event->{link} );
    }
    if ( !$event->{title} ) {
        return event_error( -5,"EVENT TITLE IS MISSING", $event );
    }
    if ( !$event->{datetime} ) {
        # No explicit datetime: try to extract German-style "dd.mm.[yyyy]"
        # dates from the free-form 'parsedate' string.
        if ($event->{parsedate} ) {
            my @dates = ();
            my ($sec,$min,$hour,$day,$month,$yr19,@rest) = localtime(time);
            my $current_year = $yr19 +1900;
            while ( $event->{parsedate} =~ /(\d\d?)\.(\d\d?)\.(\d{4})?/g ) {
                my $checkday = $1;
                my $checkmonth = $2;
                my $checkyear = $3;
                if ( !$checkyear ) {
                    # Year omitted: assume the current year, or next year if
                    # the month already passed.
                    # NOTE(review): $month from localtime() is 0-based
                    # (0..11) while $checkmonth is 1-based, so this
                    # comparison looks off by one -- verify intent.
                    $checkyear = $current_year;
                    if ( $checkmonth < $month ) {
                        $checkyear ++;
                    }
                }
                my $date = ( $checkyear ).'-'.sprintf('%02d',$checkmonth ).'-'.sprintf('%02d',$checkday );
                push @dates, $date;
            }
            # First match is the start date; exactly two matches are treated
            # as a start/end range.
            $event->{datetime} = $dates[0];
            $event->{enddate} = $dates[1] if ( scalar @dates == 2);
            # still got no date? then try to parse with parsedate()
            unless ( $event->{datetime} ) {
                my $seconds_since_jan1_1970 = parsedate($event->{parsedate} );
                $event->{datetime} = strftime("%Y-%m-%d %H:%M:%S" , localtime( $seconds_since_jan1_1970 ) );
            }
        }
        # Optional time of day, e.g. "20:30", "20.30" or "20 Uhr" (German).
        if ( $event->{parsetime} ) {
            my $ptime = $event->{parsetime};
            if ($ptime =~ /([012]?\d)[.:](\d\d)/ ) {
                $event->{datetime} .= ' ' . sprintf( '%02d', $1 ) . ':' . $2 ;
            } elsif ( $ptime =~ /([012]?\d)\s*Uhr/ ) {
                $event->{datetime} .= ' ' . sprintf( '%02d', $1 ) . ':00' ;
            }
        }
        if ( !$event->{datetime} ) {
            return event_error( -4, "EVENT datetime IS MISSING", $event );
        }
        # Require ISO-like "YYYY-MM-DD[ HH:MM]".
        if ( $event->{datetime} !~ /^\d{4}-\d{2}-\d\d(\s\d\d:\d\d)?/ ) {
            return event_error( -3,"WRONG DATE FORMAT ON 'datetime'", $event );
        }
    }
    # Guess a category from keywords in the title (first match wins), then
    # fall back to the scraper-provided default type.
    if ( !$event->{type} ) {
        $event->{type} = 'theater' if ($event->{title} =~ /theater/i );
        $event->{type} = 'konzert' if ( (!$event->{type}) && ($event->{title} =~ /konzert/i ) );
        $event->{type} = 'disco' if ( (!$event->{type}) && ($event->{title} =~ /disco/i ) );
        $event->{type} = 'party' if ( (!$event->{type}) && ($event->{title} =~ /party/i ) );
        $event->{type} = $event->{default_type} unless $event->{type};
        return event_error( -2, "EVENT TYPE IS MISSING", $event ) unless ( $event->{type} );
    }
    if ( !( $event->{link} || $event->{description} ) ) {
        return event_error( -1,"EVENT NEEDS LINK OR DESCRIPTION", $event );
    }
    # The duplicate lookup below is disabled, so $result stays 1 and every
    # event is counted as "new".
    my $result = 1;
    # my $sth = $dbh->prepare( 'SELECT event_id FROM event WHERE event_md5 = ?' );
    # $sth->execute( $event->{md5} );
    # my $row = $sth->fetchrow_hashref();
    # $result = 2 if( $row->{event_id});
    my $itype = ($result == 2) ? "updated" : "new";
    $IMP_STATS->{$itype} ++;
    log_debug( "==> event", $event->{type}, "|", $event->{datetime}, "-", $event->{enddate}, "|", $event->{title} );
    # $sth = $dbh->prepare( 'INSERT INTO event( location_id, location_source_id, event_title, event_datetime, event_enddate, event_md5, event_link, event_description, event_image, event_type )
    # values( ?,?,?, ?, ?,?, ?,?,?, ? )
    # on duplicate key update event_title = values( event_title), event_link = values( event_link), event_enddate = values( event_enddate ),
    # event_datetime = values( event_datetime),event_description = values( event_description),
    # location_source_id = values( location_source_id ), last_modified = now()
    #
    # ' );
    # $sth->execute( $location_id, $IMP_SOURCE, $event->{title},$event->{datetime}, $event->{enddate}, $event->{md5}, $event->{link}, $event->{description}, $event->{image}, $event->{type} );
    # my $event_schema_org = {
    # "\@context" => "http://schema.org",
    # "\@type" => "Event",
    # "name" => $event->{title},
    # "description" => $event->{description},
    # "location" => {
    # "\@type" => "Place",
    # "address" => {
    # "\@type" => "PostalAddress",
    # "streetAddress" => $event->{location}->{streetAddress},
    # "addressLocality" => $event->{location}->{addressLocality},
    # "postalCode" => $event->{location}->{postalCode}
    # },
    # "geo" => {
    # "\@type" => "GeoCoordinates",
    # "latitude" => $event->{location}->{latitude},
    # "longitude" => $event->{location}->{longitude}
    # }
    # },
    # "startDate" => $event->{datetime},
    # "url" => $event->{link},
    # "x_image" => $event->{image},
    # "x_type" => $event->{type},
    # "x_location" => $event->{location},
    # "geo_point2" => ($event->{location}->{longitude}, $event->{location}->{latitude}),
    # "geo_point3" => $event->{location}->{longitude}+", "+$event->{location}->{latitude}
    # "geo_point2" => [$event->{location}->{longitude}, $event->{location}->{latitude}]
    # };
    # if ( defined $event->{location}->{latitude} ) {
    # $event_schema_org->{geo_point3} => {
    # "lat" => $event->{location}->{latitude},
    # "lon" => $event->{location}->{longitude}
    # }
    # };
    # Emit the (raw) event hash as JSON, one object per line.
    my $json = encode_json( $event );
    print STDOUT $json . ",\n";
    return $result;
}
# event_error($code, $message, $event): log the problem, bump the
# per-message error counter used by the import summary, and hand the
# numeric error code back so callers can simply "return event_error(...)".
sub event_error {
    my ($code, $message, $event) = @_;
    log_error( $message, $event );
    $IMP_STATS->{errors}{$message}++;
    return $code;
}
# cleanup($text): sanitize scraped HTML fragments -- preserve explicit line
# breaks, strip tags and bare URLs, and collapse whitespace. Returns the
# cleaned string; undef/empty input is returned unchanged.
sub cleanup {
    my $content = shift;
    # FIX: running s/// on undef raised "uninitialized value" warnings;
    # an undef or empty value has nothing to clean anyway.
    return $content unless defined $content && length $content;
    # Protect explicit line breaks (<br> tags and newlines) with a
    # placeholder so they survive the whitespace collapsing below.
    $content =~ s|<br[^>]*>|§~~§|g;
    $content =~ s|\n|§~~§|g;
    # Strip remaining HTML tags, collapse whitespace runs, trim the ends.
    $content =~ s|<.+?>| |g;
    $content =~ s/\s+/ /g;    # FIX: original lacked /g and only collapsed the first run
    $content =~ s/^\s+//;
    $content =~ s/\s+$//;
    # Drop bare URLs, then turn placeholder runs back into single newlines.
    $content =~ s|https?://[^\s"']+| |gi;
    $content =~ s|(?:§~~§\s*)+|\n|g;
    return $content;
}
1;
| codeformuenster/open-events | scripts/scrapers-muenster/lib/MsEvent.pm | Perl | mit | 7,083 |
# Japanese (ja) localization dictionary for the FastestPreview plugin.
package MT::Plugin::FastestPreview::L10N::ja;
use strict;
use warnings;
use base 'MT::Plugin::FastestPreview::L10N';
# Phrase translation table consulted by MT's L10N framework.
# Currently empty, so every string falls back to the base L10N class.
# FIX: replaced the obsolete 'use vars' pragma with the equivalent 'our'
# declaration (the package variable %Lexicon is unchanged).
our %Lexicon;
%Lexicon = (
);
1;
#
# Copyright 2021 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package os::linux::local::mode::storage;
use base qw(centreon::plugins::templates::counter);
use strict;
use warnings;
# Emit the usage perfdata for one storage. By default the 'used' byte value
# is reported; with --free the 'free' value is reported instead.
sub custom_usage_perfdata {
    my ($self, %options) = @_;
    my $label = 'used';
    my $value_perf = $self->{result_values}->{used};
    if (defined($self->{instance_mode}->{option_results}->{free})) {
        $label = 'free';
        $value_perf = $self->{result_values}->{free};
    }
    # When thresholds are expressed in percent, pass the total so the
    # framework converts them back to absolute (integer) byte values.
    my %total_options = ();
    if ($self->{instance_mode}->{option_results}->{units} eq '%') {
        $total_options{total} = $self->{result_values}->{total};
        $total_options{cast_int} = 1;
    }
    $self->{output}->perfdata_add(
        label => $label, unit => 'B',
        # Append the storage name only when several instances are checked.
        instances => $self->use_instances(extra_instance => $options{extra_instance}) ? $self->{result_values}->{display} : undef,
        value => $value_perf,
        warning => $self->{perfdata}->get_perfdata_for_output(label => 'warning-' . $self->{thlabel}, %total_options),
        critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical-' . $self->{thlabel}, %total_options),
        min => 0, max => $self->{result_values}->{total}
    );
}
# Pick the value matching the configured unit (--units) and direction
# (--free) and run it through the framework's warning/critical check.
sub custom_usage_threshold {
    my ($self, %options) = @_;
    my $opts     = $self->{instance_mode}->{option_results};
    my $use_free = defined($opts->{free});
    my $checked_value;
    if ($opts->{units} eq '%') {
        $checked_value = $use_free ? $self->{result_values}->{prct_free} : $self->{result_values}->{prct_used};
    } else {
        $checked_value = $use_free ? $self->{result_values}->{free} : $self->{result_values}->{used};
    }
    return $self->{perfdata}->threshold_check(
        value => $checked_value,
        threshold => [
            { label => 'critical-' . $self->{thlabel}, exit_litteral => 'critical' },
            { label => 'warning-' . $self->{thlabel}, exit_litteral => 'warning' }
        ]
    );
}
# Build the human-readable usage line, converting raw byte counts into
# scaled values ("12.00 GB" etc.) via the framework helper.
sub custom_usage_output {
    my ($self, %options) = @_;
    my @human;
    foreach my $field ('total', 'used', 'free') {
        my ($value, $unit) = $self->{perfdata}->change_bytes(value => $self->{result_values}->{$field});
        push @human, $value . " " . $unit;
    }
    return sprintf(
        'Usage Total: %s Used: %s (%.2f%%) Free: %s (%.2f%%)',
        $human[0],
        $human[1], $self->{result_values}->{prct_used},
        $human[2], $self->{result_values}->{prct_free}
    );
}
# Compute used/free percentages for one storage from the collected df data.
# Returns 0 on success, -2 (with error_msg set) when the values cannot
# produce a meaningful percentage.
sub custom_usage_calc {
    my ($self, %options) = @_;
    if ($options{new_datas}->{$self->{instance} . '_total'} == 0) {
        $self->{error_msg} = "total size is 0";
        return -2;
    }
    $self->{result_values}->{display} = $options{new_datas}->{$self->{instance} . '_display'};
    $self->{result_values}->{total} = $options{new_datas}->{$self->{instance} . '_total'};
    $self->{result_values}->{used} = $options{new_datas}->{$self->{instance} . '_used'};
    $self->{result_values}->{free} = $options{new_datas}->{$self->{instance} . '_free'};
    # Percentages are computed against used+free (the space available to
    # users), excluding reserved blocks.
    # FIX: guard the denominator -- df can report used == free == 0 for a
    # filesystem whose blocks are entirely reserved, which previously
    # caused a division-by-zero crash.
    my $usable = $self->{result_values}->{used} + $self->{result_values}->{free};
    if ($usable <= 0) {
        $self->{error_msg} = "usable size is 0";
        return -2;
    }
    $self->{result_values}->{prct_used} = $self->{result_values}->{used} * 100 / $usable;
    $self->{result_values}->{prct_free} = 100 - $self->{result_values}->{prct_used};
    return 0;
}
# Declare the counter layout: one instanced group 'disks' (keyed by mount
# point) carrying a single 'usage' counter wired to the custom
# calc/output/perfdata/threshold callbacks defined above.
sub set_counters {
    my ($self, %options) = @_;
    $self->{maps_counters_type} = [
        { name => 'disks', type => 1, cb_prefix_output => 'prefix_disks_output', message_multiple => 'All storages are ok' }
    ];
    $self->{maps_counters}->{disks} = [
        { label => 'usage', set => {
                key_values => [ { name => 'display' }, { name => 'used' }, { name => 'free' }, { name => 'total' } ],
                closure_custom_calc => $self->can('custom_usage_calc'),
                closure_custom_output => $self->can('custom_usage_output'),
                closure_custom_perfdata => $self->can('custom_usage_perfdata'),
                closure_custom_threshold_check => $self->can('custom_usage_threshold')
            }
        }
    ];
}
# Prefix each counter output line with the storage's mount point.
sub prefix_disks_output {
    my ($self, %options) = @_;
    return sprintf("Storage '%s' ", $options{instance_value}->{display});
}
# Constructor: register the mode-specific command-line options -- regex
# filters on fs name/type/mount point, the threshold unit ('%' or bytes)
# and the free-space reporting switch.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;
    $options{options}->add_options(arguments => {
        'filter-type:s' => { name => 'filter_type', },
        'filter-fs:s' => { name => 'filter_fs', },
        'filter-mountpoint:s' => { name => 'filter_mountpoint' },
        'units:s' => { name => 'units', default => '%' },
        'free' => { name => 'free' }
    });
    return $self;
}
# Collect the storages to monitor by running 'df' remotely/locally and
# parsing its POSIX-format output; populates $self->{disks}.
sub manage_selection {
    my ($self, %options) = @_;
    # -P: POSIX format, -k: 1K blocks, -T: include the fs type.
    # no_quit lets us report a failure ourselves instead of aborting.
    my ($stdout, $exit_code) = $options{custom}->execute_command(
        command => 'df',
        command_options => '-P -k -T 2>&1',
        no_quit => 1
    );
    $self->{disks} = {};
    my @lines = split /\n/, $stdout;
    foreach my $line (@lines) {
        # Expected columns: fs, type, 1K-blocks, used, available, use%, mount.
        next if ($line !~ /^(\S+)\s+(\S+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\S+)\s+(.*)/);
        my ($fs, $type, $size, $used, $available, $percent, $mount) = ($1, $2, $3, $4, $5, $6, $7);
        # Apply the user-supplied regex filters.
        next if (defined($self->{option_results}->{filter_fs}) && $self->{option_results}->{filter_fs} ne '' &&
            $fs !~ /$self->{option_results}->{filter_fs}/);
        next if (defined($self->{option_results}->{filter_type}) && $self->{option_results}->{filter_type} ne '' &&
            $type !~ /$self->{option_results}->{filter_type}/);
        next if (defined($self->{option_results}->{filter_mountpoint}) && $self->{option_results}->{filter_mountpoint} ne '' &&
            $mount !~ /$self->{option_results}->{filter_mountpoint}/);
        # df reports 1K blocks; store byte values.
        $self->{disks}->{$mount} = {
            display => $mount,
            fs => $fs,
            type => $type,
            total => $size * 1024,
            used => $used * 1024,
            free => $available * 1024
        };
    }
    if (scalar(keys %{$self->{disks}}) <= 0) {
        if ($exit_code != 0) {
            $self->{output}->output_add(long_msg => "command output:" . $stdout);
        }
        $self->{output}->add_option_msg(short_msg => "No storage found (filters or command issue)");
        $self->{output}->option_exit();
    }
}
1;
__END__
=head1 MODE
Check storage usages.
Command used: df -P -k -T 2>&1
=over 8
=item B<--warning-usage>
Warning threshold for storage usage.
=item B<--critical-usage>
Critical threshold for storage usage.
=item B<--units>
Units of thresholds (Default: '%') ('%', 'B').
=item B<--free>
Thresholds are on free space left.
=item B<--filter-mountpoint>
Filter filesystem mount point (regexp can be used).
=item B<--filter-type>
Filter filesystem type (regexp can be used).
=item B<--filter-fs>
Filter filesystem (regexp can be used).
=back
=cut
| Tpo76/centreon-plugins | os/linux/local/mode/storage.pm | Perl | apache-2.0 | 7,843 |
#
# Copyright 2019 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package database::postgres::mode::backends;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
# Constructor: register the mode options -- warning/critical thresholds
# (percent of max_connections), a database-name regex and the switch that
# excludes idle backends from the count.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;
    $options{options}->add_options(arguments =>
                                {
                                  "warning:s" => { name => 'warning', },
                                  "critical:s" => { name => 'critical', },
                                  "exclude:s" => { name => 'exclude', },
                                  "noidle" => { name => 'noidle', },
                                });
    return $self;
}
# Validate the warning/critical threshold expressions after generic option
# parsing; exits with an explicit message on a malformed threshold.
sub check_options {
    my ($self, %options) = @_;
    $self->SUPER::init(%options);
    foreach my $label ('warning', 'critical') {
        if (($self->{perfdata}->threshold_validate(label => $label, value => $self->{option_results}->{$label})) == 0) {
            $self->{output}->add_option_msg(short_msg => "Wrong $label threshold '" . $self->{option_results}->{$label} . "'.");
            $self->{output}->option_exit();
        }
    }
}
# Execute the check: count client backends per database, compare the usage
# against max_connections in percent, and emit status + perfdata.
sub run {
    my ($self, %options) = @_;
    # $options{sql} = sqlmode object
    $self->{sql} = $options{sql};
    $self->{sql}->connect();
    # Optionally ignore idle backends; the column carrying the query state
    # was renamed in PostgreSQL 9.2 (current_query -> state).
    my $noidle = '';
    if (defined($self->{option_results}->{noidle})) {
        if ($self->{sql}->is_version_minimum(version => '9.2')) {
            $noidle = " AND state <> 'idle'";
        } else {
            $noidle = " AND current_query <> '<IDLE>'";
        }
    }
    # One row per database: backend count plus the server-wide
    # max_connections setting (same value repeated on every row).
    my $query = "SELECT COUNT(datid) AS current,
                (SELECT setting AS mc FROM pg_settings WHERE name = 'max_connections') AS mc,
                d.datname
            FROM pg_database d
            LEFT JOIN pg_stat_activity s ON (s.datid = d.oid $noidle)
            GROUP BY d.datname
            ORDER BY d.datname";
    $self->{sql}->query(query => $query);
    $self->{output}->output_add(severity => 'OK',
                                short_msg => "All client database connections are ok.");
    my $database_check = 0;
    my $result = $self->{sql}->fetchall_arrayref();
    foreach my $row (@{$result}) {
        # NOTE(review): a database is skipped when it does NOT match the
        # --exclude regex, so the option actually behaves as an include
        # (whitelist) pattern -- verify against the intended semantics.
        if (defined($self->{option_results}->{exclude}) && $$row[2] !~ /$self->{option_results}->{exclude}/) {
            $self->{output}->output_add(long_msg => "Skipping database '" . $$row[2] . '"');
            next;
        }
        $database_check++;
        my $used = $$row[0];
        my $max_connections = $$row[1];
        my $database_name = $$row[2];
        # Percentage of the server-wide connection limit used by this database.
        my $prct_used = ($used * 100) / $max_connections;
        my $exit_code = $self->{perfdata}->threshold_check(value => $prct_used, threshold => [ { label => 'critical', 'exit_litteral' => 'critical' }, { label => 'warning', exit_litteral => 'warning' } ]);
        $self->{output}->output_add(long_msg => sprintf("Database '%s': %.2f%% client connections limit reached (%d of max. %d)",
                                    $database_name, $prct_used, $used, $max_connections));
        if (!$self->{output}->is_status(value => $exit_code, compare => 'ok', litteral => 1)) {
            $self->{output}->output_add(severity => $exit_code,
                                        short_msg => sprintf("Database '%s': %.2f%% client connections limit reached (%d of max. %d)",
                                                    $database_name, $prct_used, $used, $max_connections));
        }
        $self->{output}->perfdata_add(label => 'connections_' . $database_name,
                                      value => $used,
                                      warning => $self->{perfdata}->get_perfdata_for_output(label => 'warning', total => $max_connections, cast_int => 1),
                                      critical => $self->{perfdata}->get_perfdata_for_output(label => 'critical', total => $max_connections, cast_int => 1),
                                      min => 0, max => $max_connections);
    }
    # Nothing matched: either a permission problem or an over-strict filter.
    if ($database_check == 0) {
        $self->{output}->output_add(severity => 'UNKNOWN',
                                    short_msg => 'No database checked. (permission or a wrong exclude filter)');
    }
    $self->{output}->display();
    $self->{output}->exit();
}
1;
__END__
=head1 MODE
Check the current number of connections for one or more databases
=over 8
=item B<--warning>
Warning threshold in percent of max_connections.
=item B<--critical>
Critical threshold in percent of max_connections.
=item B<--exclude>
Filter databases.
=item B<--noidle>
Idle connections are not counted.
=back
=cut
| Sims24/centreon-plugins | database/postgres/mode/backends.pm | Perl | apache-2.0 | 5,593 |
#
# Copyright 2015 Electric Cloud, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -------------------------------------------------------------------------
# Includes
# -------------------------------------------------------------------------
use Cwd;
use Carp;
use strict;
use Data::Dumper;
use utf8;
use Encode;
use warnings;
use diagnostics;
use open IO => ':encoding(utf8)';
use File::Basename;
use ElectricCommander;
use ElectricCommander::PropDB;
use ElectricCommander::PropMod qw(/myProject/libs);
use ChefHelper;
$| = 1;
# -------------------------------------------------------------------------
# Main functions
# -------------------------------------------------------------------------
###########################################################################
=head2 main
Title : main
Usage : main();
Function : Performs a Chef run
Returns : none
Args : named arguments: none
=cut
###########################################################################
sub main {
    my $ec = ElectricCommander->new();
    # Read parameters with error-aborting disabled so a missing optional
    # property does not kill the step immediately.
    $ec->abortOnError(0);
    # -------------------------------------------------------------------------
    # Parameters
    # -------------------------------------------------------------------------
    my $knife_path =
      ( $ec->getProperty("knife_path") )->findvalue('//value')->string_value;
    my $role_name =
      ( $ec->getProperty("role_name") )->findvalue('//value')->string_value;
    my $description =
      ( $ec->getProperty("description") )->findvalue('//value')->string_value;
    my $additional_options =
      ( $ec->getProperty("additional_options") )->findvalue('//value')
      ->string_value;
    $ec->abortOnError(1);
    #Variable that stores the command to be executed
    my $command = $knife_path . " role create";
    my @cmd;      # NOTE(review): unused
    my %props;    # NOTE(review): unused
    #Prints procedure and parameters information
    my $pluginKey = 'EC-Chef';
    my $xpath = $ec->getPlugin($pluginKey);
    # NOTE(review): this variable holds the plugin *version* (read from
    # //pluginVersion); the name $pluginName is misleading.
    my $pluginName = $xpath->findvalue('//pluginVersion')->value;
    print "Using plugin $pluginKey version $pluginName\n";
    print "Running procedure CreateRole\n";
    #Parameters are checked to see which should be included
    if ( $role_name && $role_name ne '' ) {
        $command = $command . " " . $role_name;
    }
    # NOTE(review): $description is not shell-quoted, so a description
    # containing spaces is split into separate knife arguments -- confirm
    # and quote if multi-word descriptions must be supported.
    if ( $description && $description ne '' ) {
        $command = $command . " --description " . $description;
    }
    if ( $additional_options && $additional_options ne '' ) {
        $command = $command . " " . $additional_options;
    }
    # -d flag appended last; presumably knife's --disable-editing -- confirm.
    $command = $command . " -d";
    #Print out the command to be executed
    print "\nCommand to be executed: \n$command \n\n";
    # SECURITY(review): the command line is assembled by concatenating
    # user-supplied properties and executed through the shell; malicious
    # input could inject arbitrary commands. Prefer the list form:
    # system($knife_path, 'role', 'create', ...).
    #Execute the command
    system("$command");
    # To get exit code of process shift right by 8
    my $exitCode = $? >> 8;
    # Set outcome
    setOutcomeFromExitCode($ec, $exitCode);
}
main();
| electric-cloud/EC-Chef | src/main/resources/project/drivers/CreateRoleDriver.pl | Perl | apache-2.0 | 3,389 |
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package database::mssql::plugin;
use strict;
use warnings;
use base qw(centreon::plugins::script_sql);
# Constructor: declare the plugin version, the map of --mode names to
# their implementing classes, and the MSSQL-specific DBI wrapper used for
# the 'dbi' SQL backend.
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;
    $self->{version} = '0.1';
    $self->{modes} = {
        'backup-age' => 'database::mssql::mode::backupage',
        'blocked-processes' => 'database::mssql::mode::blockedprocesses',
        'cache-hitratio' => 'database::mssql::mode::cachehitratio',
        'collection' => 'centreon::common::protocols::sql::mode::collection',
        'connected-users' => 'database::mssql::mode::connectedusers',
        'connection-time' => 'centreon::common::protocols::sql::mode::connectiontime',
        'dead-locks' => 'database::mssql::mode::deadlocks',
        'databases-size' => 'database::mssql::mode::databasessize',
        'failed-jobs' => 'database::mssql::mode::failedjobs',
        'list-databases' => 'database::mssql::mode::listdatabases',
        'locks-waits' => 'database::mssql::mode::lockswaits',
        'page-life-expectancy' => 'database::mssql::mode::pagelifeexpectancy',
        'sql' => 'centreon::common::protocols::sql::mode::sql',
        'sql-string' => 'centreon::common::protocols::sql::mode::sqlstring',
        'tables' => 'database::mssql::mode::tables',
        'transactions' => 'database::mssql::mode::transactions'
    };
    $self->{sql_modes}->{dbi} = 'database::mssql::dbi';
    return $self;
}
# Parse connection options and build one DBI (DBD::Sybase) data_source per
# target. --hostname/--port and --server are repeatable for multi-host
# checks; --database is appended to every data_source when given.
sub init {
    my ($self, %options) = @_;
    $self->{options}->add_options(
        arguments => {
            'hostname:s@' => { name => 'hostname' },
            'port:s@' => { name => 'port' },
            'server:s@' => { name => 'server' },
            'database:s' => { name => 'database' },
        }
    );
    $self->{options}->parse_options();
    my $options_result = $self->{options}->get_options();
    $self->{options}->clean();
    # --server: a freetds.conf server alias.
    if (defined($options_result->{server})) {
        @{$self->{sqldefault}->{dbi}} = ();
        for (my $i = 0; $i < scalar(@{$options_result->{server}}); $i++) {
            $self->{sqldefault}->{dbi}[$i] = { data_source => 'Sybase:server=' . $options_result->{server}[$i] };
            if ((defined($options_result->{database})) && ($options_result->{database} ne '')) {
                $self->{sqldefault}->{dbi}[$i]->{data_source} .= ';database=' . $options_result->{database};
            }
        }
    }
    # --hostname/--port: direct host connection. Note this rebuilds the
    # data_source list, so it takes precedence when --server is also given.
    if (defined($options_result->{hostname})) {
        @{$self->{sqldefault}->{dbi}} = ();
        for (my $i = 0; $i < scalar(@{$options_result->{hostname}}); $i++) {
            $self->{sqldefault}->{dbi}[$i] = { data_source => 'Sybase:host=' . $options_result->{hostname}[$i] };
            if (defined($options_result->{port}[$i])) {
                $self->{sqldefault}->{dbi}[$i]->{data_source} .= ';port=' . $options_result->{port}[$i];
            }
            if ((defined($options_result->{database})) && ($options_result->{database} ne '')) {
                $self->{sqldefault}->{dbi}[$i]->{data_source} .= ';database=' . $options_result->{database};
            }
        }
    }
    $self->SUPER::init(%options);
}
1;
__END__
=head1 PLUGIN DESCRIPTION
Check MSSQL Server.
=over 8
=item B<--hostname>
Hostname to query.
=item B<--port>
Database Server Port.
=item B<--server>
An alternative to hostname+port. <server> will be looked up in the file freetds.conf.
=item B<--database>
Select the database.
=back
=cut
| centreon/centreon-plugins | database/mssql/plugin.pm | Perl | apache-2.0 | 4,406 |
#
# Copyright 2022 Centreon (http://www.centreon.com/)
#
# Centreon is a full-fledged industry-strength solution that meets
# the needs in IT infrastructure and application monitoring for
# service performance.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
package apps::monitoring::loggly::restapi::custom::api;
use base qw(centreon::plugins::mode);
use strict;
use warnings;
use centreon::plugins::http;
use JSON::XS;
sub new {
    my ($class, %options) = @_;
    my $self = $class->SUPER::new(package => __PACKAGE__, %options);
    bless $self, $class;

    # An output object is mandatory; without it we cannot even report errors.
    if (!defined($options{output})) {
        print "Class Custom: Need to specify 'output' argument.\n";
        exit 3;
    }
    if (!defined($options{options})) {
        $options{output}->add_option_msg(short_msg => "Class Custom: Need to specify 'options' argument.");
        $options{output}->option_exit();
    }

    # Register the REST API command-line options unless the caller opts out.
    unless (defined($options{noptions})) {
        $options{options}->add_options(arguments => {
            'hostname:s'             => { name => 'hostname' },
            'port:s'                 => { name => 'port' },
            'proto:s'                => { name => 'proto' },
            'api-password:s'         => { name => 'api_password' },
            'timeout:s'              => { name => 'timeout', default => 30 },
            'unknown-http-status:s'  => { name => 'unknown_http_status' },
            'warning-http-status:s'  => { name => 'warning_http_status' },
            'critical-http-status:s' => { name => 'critical_http_status' }
        });
    }
    $options{options}->add_help(package => __PACKAGE__, sections => 'REST API OPTIONS', once => 1);

    $self->{output} = $options{output};
    $self->{http}   = centreon::plugins::http->new(%options);

    return $self;
}
# Store the parsed command-line options; check_options() normalizes them later.
sub set_options {
    my ($self, %opts) = @_;

    $self->{option_results} = $opts{option_results};
}
# Intentionally empty: presumably a hook required by the centreon::plugins::mode
# base class — this backend has no mode-level defaults to inject (TODO confirm).
sub set_defaults {}
# Normalize the parsed CLI options onto $self, applying defaults for anything
# unset, then enforce the two mandatory options (--hostname, --api-password).
# Returns 0 on success; exits through the output object on a missing option.
sub check_options {
    my ($self, %options) = @_;

    my $opts = $self->{option_results};
    $self->{hostname}     = $opts->{hostname} // '';
    $self->{port}         = $opts->{port} // 443;
    $self->{proto}        = $opts->{proto} // 'https';
    $self->{timeout}      = $opts->{timeout} // 30;
    # NOTE(review): no --ssl-opt option is registered in new(); presumably this
    # is populated by the framework or always undef — confirm.
    $self->{ssl_opt}      = $opts->{ssl_opt};
    $self->{api_password} = $opts->{api_password} // '';
    $self->{unknown_http_status}  = $opts->{unknown_http_status} // '%{http_code} < 200 or %{http_code} >= 300';
    $self->{warning_http_status}  = $opts->{warning_http_status} // '';
    $self->{critical_http_status} = $opts->{critical_http_status} // '';

    if ($self->{hostname} eq '') {
        $self->{output}->add_option_msg(short_msg => 'Need to specify --hostname option.');
        $self->{output}->option_exit();
    }
    if ($self->{api_password} eq '') {
        $self->{output}->add_option_msg(short_msg => 'Need to specify --api-password option.');
        $self->{output}->option_exit();
    }

    return 0;
}
# Mirror the normalized connection settings back into option_results, which
# is the hash centreon::plugins::http->set_options() consumes in settings().
sub build_options_for_httplib {
    my ($self, %options) = @_;

    $self->{option_results}->{$_} = $self->{$_}
        for qw(hostname port proto ssl_opt timeout);
}
# Prepare the shared HTTP client before each request: push the normalized
# connection settings into option_results and install the JSON content type
# and the Loggly bearer-token header.
sub settings {
    my ($self, %options) = @_;
    $self->build_options_for_httplib();
    $self->{http}->add_header(key => 'Content-Type', value => 'application/json;charset=UTF-8');
    # NOTE(review): the token is read from the raw option_results rather than
    # the normalized $self->{api_password}; values are identical once
    # check_options() has enforced that the option is set.
    $self->{http}->add_header(key => 'Authorization', value => 'bearer ' . $self->{option_results}->{api_password});
    $self->{http}->set_options(%{$self->{option_results}});
}
# Perform one authenticated HTTP call against the Loggly REST API and return
# the decoded JSON payload. Any HTTP-status violation is handled by the http
# object via the thresholds resolved in check_options(); a JSON parse failure
# or empty payload exits through the output object.
sub request_api {
    my ($self, %options) = @_;

    $self->settings();

    my $response = $self->{http}->request(
        %options,
        unknown_status  => $self->{unknown_http_status},
        warning_status  => $self->{warning_http_status},
        critical_status => $self->{critical_http_status}
    );

    my $payload = eval { JSON::XS->new->utf8->decode($response) };
    if ($@) {
        $self->{output}->add_option_msg(short_msg => "Cannot decode json response: $@");
        $self->{output}->option_exit();
    }
    if (!defined($payload)) {
        $self->{output}->add_option_msg(short_msg => "Error while retrieving data (add --debug option for detailed message)");
        $self->{output}->option_exit();
    }

    return $payload;
}
# Start a Loggly search over the last time_period minutes and return the
# rsid handle; the handle is later exchanged for events via /apiv2/events.
sub internal_search {
    my ($self, %options) = @_;

    my @params = (
        'size=1',
        'from=-' . $options{time_period} . 'm',
        'q=' . $options{query}
    );
    my $response = $self->request_api(
        method    => 'GET',
        url_path  => '/apiv2/search',
        get_param => \@params
    );

    return $response->{rsid}->{id};
}
# Fetch the events attached to a previously created search handle (rsid)
# and return the raw decoded response.
sub internal_events {
    my ($self, %options) = @_;

    return $self->request_api(
        method    => 'GET',
        url_path  => '/apiv2/events',
        get_param => ['rsid=' . $options{id}]
    );
}
# Run the two-step Loggly event search (create search -> fetch events) and
# return the response hash with a single human-readable 'message' extracted
# from the first event by walking the dotted output_field path
# (e.g. 'json.message'); the bulky 'events' and 'page' entries are dropped.
sub api_events {
    my ($self, %options) = @_;

    my $id = $self->internal_search(
        time_period => $options{time_period},
        query       => $options{query}
    );
    my $status = $self->internal_events(id => $id);

    my $message = '';
    # Fixed: the original tested scalar($status->{events}), which is the truth
    # of the reference itself (true even for an empty arrayref), not the
    # element count; it also warned on an undef output_field.
    if (defined($options{output_field}) && length($options{output_field})
        && ref($status->{events}) eq 'ARRAY' && scalar(@{$status->{events}})
        && defined($status->{events}->[0]->{event})) {
        $message = $status->{events}->[0]->{event};
        for (split /\./, $options{output_field}) {
            # Fixed: guard the hash dereference so a scalar intermediate value
            # in the path cannot die under strict refs.
            if (ref($message) eq 'HASH' && defined($message->{$_})) {
                $message = $message->{$_};
            } else {
                $message = '';
                last;
            }
        }
        # If the path stopped on a structure rather than a scalar, don't leak
        # a stringified reference (e.g. 'HASH(0x...)') to the user.
        $message = '' if ref($message);
    }

    # Message may be messed-up with wrongly encoded characters, let's force some cleanup
    $message =~ s/[\r\n]//g;
    $message =~ s/^\s+|\s+$//g;

    # Clean returned hash
    $status->{message} = $message;
    delete $status->{events};
    delete $status->{page};

    return $status;
}
# Run a facet query on a single field over the last time_period minutes.
# The facet_size ceiling of 300 is imposed by the API:
# https://documentation.solarwinds.com/en/Success_Center/loggly/Content/admin/api-retrieving-data.htm
sub internal_fields {
    my ($self, %options) = @_;

    my @params = (
        'facet_size=300',
        'from=-' . $options{time_period} . 'm',
        'q=' . $options{query}
    );
    return $self->request_api(
        method    => 'GET',
        url_path  => '/apiv2/fields/' . $options{field} . '/',
        get_param => \@params
    );
}
# Fetch the facet values for one field and sanitize each returned term
# (strip CR/LF and surrounding whitespace) before handing the response back.
sub api_fields {
    my ($self, %options) = @_;

    my $status = $self->internal_fields(
        time_period => $options{time_period},
        field       => $options{field},
        query       => $options{query}
    );

    # Fields may be messed-up with wrongly encoded characters, let's force some cleanup.
    # Fixed: the original dereferenced $status->{field} unconditionally, dying
    # with an undef-deref when the API answer lacks the facet key, and ran the
    # substitutions on possibly-undef terms.
    my $terms = $status->{ $options{field} };
    if (ref($terms) eq 'ARRAY') {
        for my $entry (@$terms) {
            next if (!defined($entry->{term}));
            $entry->{term} =~ s/[\r\n]//g;
            $entry->{term} =~ s/^\s+|\s+$//g;
        }
    }

    return $status;
}
1;
__END__
=head1 NAME
Loggly Rest API
=head1 REST API OPTIONS
=over 8
=item B<--hostname>
Set hostname of the Loggly server (<subdomain>.loggly.com).
=item B<--port>
Set Loggly Port (Default: '443').
=item B<--proto>
Specify http if needed (Default: 'https').
=item B<--api-password>
Set Loggly API token.
=item B<--timeout>
Threshold for HTTP timeout (Default: '30').
=back
=cut
| centreon/centreon-plugins | apps/monitoring/loggly/restapi/custom/api.pm | Perl | apache-2.0 | 8,494 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.