code stringlengths 3 1.01M | repo_name stringlengths 5 116 | path stringlengths 3 311 | language stringclasses 30 values | license stringclasses 15 values | size int64 3 1.01M |
|---|---|---|---|---|---|
package net.iharding.modules.meta.dao.impl;
import net.iharding.modules.meta.dao.OwnerDao;
import net.iharding.modules.meta.model.Owner;
import org.guess.core.orm.hibernate.HibernateDao;
import org.guess.sys.model.User;
import org.springframework.stereotype.Repository;
/**
*
* @ClassName: Owner
* @Description: OwnerdaoImpl
* @author Joe.zhang
* @date 2016-5-18 14:17:31
*
*/
@Repository
public class OwnerDaoImpl extends HibernateDao<Owner,Long> implements OwnerDao {

	/** HQL used to look up the single ownership row for a user/object pair. */
	private static final String HQL_GET_OWNER =
			" from Owner where user=? and objectType=? and objectId=? ";

	/** HQL used to count ownership rows attached to a given object. */
	private static final String HQL_COUNT_OWNERS =
			"select count(1) from Owner where objectType=? and objectId=? ";

	/**
	 * Looks up the unique ownership record for the given user and object.
	 *
	 * @param user       the owning user
	 * @param objectType discriminator for the kind of owned object
	 * @param objectId   id of the owned object
	 * @return the matching {@link Owner}, or {@code null} if none exists
	 */
	@Override
	public Owner getOwner(User user, Integer objectType, Long objectId) {
		return findUnique(HQL_GET_OWNER, user, objectType, objectId);
	}

	/**
	 * Counts how many ownership records exist for the given object.
	 *
	 * @param objectType discriminator for the kind of owned object
	 * @param objectId   id of the owned object
	 * @return number of matching Owner rows
	 */
	@Override
	public long getOwnerNum(int objectType, Long objectId) {
		return countHqlResult(HQL_COUNT_OWNERS, objectType, objectId);
	}
}
| joezxh/DATAX-UI | eshbase-commons/src/main/java/net/iharding/modules/meta/dao/impl/OwnerDaoImpl.java | Java | gpl-2.0 | 867 |
<?php
// Render the WP_ContentProtect GUI (metabox) on the edit post/page/custom post screen.
// Expects $post, $meta_protected_until and $meta_wpcp to be in scope from the caller.

// Plugin is in protected state?
if( $post -> post_status == 'publish' && $meta_protected_until > current_time('timestamp') ){
	// Still protected: show who locked it and until when, plus the "alter protection" form.
	echo '<div class="WPCP_row">',
			$this->minVer('3.8')? '<i class="dashicons dashicons-lock"></i>':NULL,
			'<p>',
				sprintf( __('Protected by %s for %s until %s.', 'WP_ContentProtect_Textdomain'),
					'<b>'.$meta_wpcp['last_edited_by'].'</b>',
					'<b>'.human_time_diff($meta_protected_until, current_time('timestamp')).'</b>',
					'<b>'.date($this -> settings['time_format'], $meta_protected_until).'</b>'
				),
			'</p>',
		'</div>';
	include 'protect_alter_form.php';
}else if( $post -> post_status == 'publish' && $meta_protected_until && $meta_protected_until < current_time('timestamp') ){
	// Protection has expired: show since when content is public, plus the "protect again" form.
	echo '<div class="WPCP_row">',
			$this->minVer('3.8')? '<i class="dashicons dashicons-info"></i>':NULL,
			'<p>',
				sprintf( __('Content is publicly available since %s', 'WP_ContentProtect_Textdomain'),
					// Fix: a stray unclosed '<em>' tag used to be appended after the
					// closing '</b>' here, producing malformed HTML in the metabox.
					'<b>'.date($this -> settings['time_format'], $meta_protected_until).'</b>'
				),
			'</p>',
		'</div>';
	include 'protect_again_form.php';
}else{
	// Never protected (or not published yet): show the initial protection setup form.
	include 'protect_setup_form.php';
}
| ColorfullyMe/wp-content-protect | admin/control_metabox.php | PHP | gpl-2.0 | 1,427 |
/*
* Copyright (C) 2000-2002 the xine project
*
* This file is part of xine, a free video player.
*
* xine is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* xine is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
*
*
* John McCutchan
* FLAC demuxer (http://flac.sf.net)
*
* TODO: Skip id3v2 tags.
*
*/
#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sched.h>
#include <string.h>
#include <stdlib.h>
#include <FLAC/seekable_stream_decoder.h>
#include "xine_internal.h"
#include "xineutils.h"
#include "../demuxers/demux.h"
/*
#define LOG 1
*/
#include "demux_flac.h"
/* FLAC Demuxer plugin */
/* Per-stream demuxer instance state. */
typedef struct demux_flac_s {
  demux_plugin_t    demux_plugin;  /* must be first: object is cast to demux_plugin_t* */

  xine_stream_t    *stream;
  fifo_buffer_t    *audio_fifo;
  fifo_buffer_t    *video_fifo;    /* unused for audio-only FLAC, kept for symmetry */
  input_plugin_t   *input;
  int               status;        /* DEMUX_OK while running, DEMUX_FINISHED on EOF/error */
  int               seek_flag;     /* set after a seek; makes send_chunk emit a newpts */

  off_t             data_start;    /* byte offset where audio frames begin */
  off_t             data_size;     /* total input length in bytes */

  /* FLAC Stuff: libFLAC decoder used only for metadata parsing and seeking,
   * plus the STREAMINFO fields captured by the metadata callback. */
  FLAC__SeekableStreamDecoder *flac_decoder;

  uint64_t          total_samples;
  uint64_t          bits_per_sample;
  uint64_t          channels;
  uint64_t          sample_rate;
  uint64_t          length_in_msec;  /* duration derived in flac_metadata_callback */
} demux_flac_t ;
/* FLAC Demuxer class (one per plugin load, shared by all instances). */
typedef struct demux_flac_class_s {
  demux_class_t     demux_class;  /* must be first: object is cast to demux_class_t* */

  xine_t           *xine;
  config_values_t  *config;
} demux_flac_class_t;
/* FLAC Callbacks */
/* libFLAC read callback: pull bytes from the xine input plugin.
 * Only called while flac parses the metadata of the stream.
 * On a short read the demuxer status is flipped to DEMUX_FINISHED and an
 * error status is returned so libFLAC stops. */
static FLAC__SeekableStreamDecoderReadStatus
flac_read_callback (const FLAC__SeekableStreamDecoder *decoder,
                    FLAC__byte buffer[],
                    unsigned *bytes,
                    void *client_data)
{
  demux_flac_t   *this  = (demux_flac_t *)client_data;
  input_plugin_t *input = this->input;
  off_t requested = *bytes;
  off_t got;

#ifdef LOG
  printf("demux_flac: flac_read_callback\n");
#endif

  got = input->read (input, buffer, requested);

#ifdef LOG
  printf("demux_flac: Read %lld / %lld bytes into buffer\n", got, requested);
#endif

  /* read() may return -1 on error; never report a negative count back. */
  *bytes = (got > 0) ? got : 0;

  /* A short read is how xine input plugins signal EOF.  (The original code
   * compared the result against *bytes only AFTER overwriting *bytes with
   * the result, so the mismatch could never be detected.) */
  if ( (got != requested) && (requested != 0) )
  {
#ifdef LOG
    printf("demux_flac: Marking EOF\n");
#endif
    this->status = DEMUX_FINISHED;
    return FLAC__SEEKABLE_STREAM_DECODER_READ_STATUS_ERROR;
  }
  else
  {
#ifdef LOG
    printf("demux_flac: Read was perfect\n");
#endif
    return FLAC__SEEKABLE_STREAM_DECODER_READ_STATUS_OK;
  }
}
/* libFLAC seek callback: forward the decoder's absolute seek request to
 * the xine input plugin and translate the result code. */
static FLAC__SeekableStreamDecoderSeekStatus
flac_seek_callback (const FLAC__SeekableStreamDecoder *decoder,
                    FLAC__uint64 absolute_byte_offset,
                    void *client_data)
{
  input_plugin_t *input = ((demux_flac_t *)client_data)->input;
  off_t result;

#ifdef LOG
  printf("demux_flac: flac_seek_callback\n");
#endif

  result = input->seek (input, absolute_byte_offset, SEEK_SET);

  return (result == -1) ? FLAC__SEEKABLE_STREAM_DECODER_SEEK_STATUS_ERROR
                        : FLAC__SEEKABLE_STREAM_DECODER_SEEK_STATUS_OK;
}
/* libFLAC tell callback: report the input plugin's current byte position. */
static FLAC__SeekableStreamDecoderTellStatus
flac_tell_callback (const FLAC__SeekableStreamDecoder *decoder,
                    FLAC__uint64 *absolute_byte_offset,
                    void *client_data)
{
  input_plugin_t *input = ((demux_flac_t *)client_data)->input;

#ifdef LOG
  printf("demux_flac: flac_tell_callback\n");
#endif

  *absolute_byte_offset = input->get_current_pos (input);

  return FLAC__SEEKABLE_STREAM_DECODER_TELL_STATUS_OK;
}
/* libFLAC length callback: report the total stream length in bytes.
 * Returns an error status when the input cannot report its length
 * (get_length() == -1, e.g. non-seekable streams). */
static FLAC__SeekableStreamDecoderLengthStatus
flac_length_callback (const FLAC__SeekableStreamDecoder *decoder,
                      FLAC__uint64 *stream_length,
                      void *client_data)
{
  input_plugin_t *input = ((demux_flac_t *)client_data)->input;
  off_t length;

#ifdef LOG
  printf("demux_flac: flac_length_callback\n");
#endif

  length = input->get_length (input);

  /* The original code computed the length but never stored it, leaving
   * *stream_length uninitialized; it also passed -1 through unchecked. */
  if (length == -1)
    return FLAC__SEEKABLE_STREAM_DECODER_LENGTH_STATUS_ERROR;

  *stream_length = length;
  return FLAC__SEEKABLE_STREAM_DECODER_LENGTH_STATUS_OK;
}
/* libFLAC EOF callback.  The read callback flips the demuxer status to
 * DEMUX_FINISHED on a short read; simply report that state back. */
static FLAC__bool
flac_eof_callback (const FLAC__SeekableStreamDecoder *decoder,
                   void *client_data)
{
  demux_flac_t *this = (demux_flac_t *)client_data;
  FLAC__bool at_eof = (this->status == DEMUX_FINISHED);

#ifdef LOG
  printf("demux_flac: flac_eof_callback\n");
  printf(at_eof ? "demux_flac: flac_eof_callback: True!\n"
                : "demux_flac: flac_eof_callback: False!\n");
#endif

  return at_eof;
}
/* libFLAC write callback.
 * This should never be called: the demuxer uses libFLAC only for metadata
 * parsing and seeking; actual frame decoding happens in the audio decoder
 * plugin.  Abort so a stray decode is noticed immediately. */
static FLAC__StreamDecoderWriteStatus
flac_write_callback (const FLAC__SeekableStreamDecoder *decoder,
                     const FLAC__Frame *frame,
                     const FLAC__int32 * const buffer[],
                     void *client_data)
{
#ifdef LOG
  printf("demux_flac: Error: Write callback was called!\n");
#endif

  return FLAC__STREAM_DECODER_WRITE_STATUS_ABORT;
}
/* libFLAC metadata callback: called while the stream header is parsed.
 * Captures the STREAMINFO fields (sample count, rate, depth, channels)
 * that send_headers later publishes to the xine engine. */
static void
flac_metadata_callback (const FLAC__SeekableStreamDecoder *decoder,
                        const FLAC__StreamMetadata *metadata,
                        void *client_data)
{
  demux_flac_t *this = (demux_flac_t *)client_data;

#ifdef LOG
  printf("demux_flac: IN: Metadata callback\n");
#endif

  if (metadata->type == FLAC__METADATA_TYPE_STREAMINFO) {
#ifdef LOG
    printf("demux_flac: Got METADATA!\n");
#endif
    this->total_samples   = metadata->data.stream_info.total_samples;
    this->bits_per_sample = metadata->data.stream_info.bits_per_sample;
    this->channels        = metadata->data.stream_info.channels;
    this->sample_rate     = metadata->data.stream_info.sample_rate;

    /* Guard against a zero/bogus sample rate: the original expression
     * divided by (sample_rate / 100), which is 0 for any rate below
     * 100 Hz and would trigger an integer division by zero.  The
     * original scaling of the result is preserved otherwise. */
    if (this->sample_rate >= 100)
      this->length_in_msec = (this->total_samples * 10 /
                              (this->sample_rate / 100))/1000;
    else
      this->length_in_msec = 0;
  }

  return;
}
/* libFLAC error callback: called when libFLAC hits corrupt data while
 * seeking or parsing.  Logs the cause and stops the demuxer. */
static void
flac_error_callback (const FLAC__SeekableStreamDecoder *decoder,
                     FLAC__StreamDecoderErrorStatus status,
                     void *client_data)
{
  demux_flac_t *this = (demux_flac_t *)client_data;

  printf("demux_flac: flac_error_callback\n");

  /* Switch over the status enum instead of an if/else-if chain;
   * also fixes the "encounted" typo in the logged message. */
  switch (status) {
  case FLAC__STREAM_DECODER_ERROR_STATUS_LOST_SYNC:
    printf("demux_flac: Decoder lost synchronization.\n");
    break;
  case FLAC__STREAM_DECODER_ERROR_STATUS_BAD_HEADER:
    printf("demux_flac: Decoder encountered a corrupted frame header.\n");
    break;
  case FLAC__STREAM_DECODER_ERROR_STATUS_FRAME_CRC_MISMATCH:
    printf("demux_flac: Frame's data did not match the CRC in the footer.\n");
    break;
  default:
    printf("demux_flac: unknown error.\n");
    break;
  }

  this->status = DEMUX_FINISHED;

  return;
}
/* FLAC Demuxer plugin */
/* Push one 2048-byte chunk of raw FLAC data to the audio fifo.
 * Returns the demuxer status (DEMUX_OK until EOF or an error). */
static int
demux_flac_send_chunk (demux_plugin_t *this_gen) {
  demux_flac_t *this = (demux_flac_t *) this_gen;
  buf_element_t *buf = NULL;
  off_t current_file_pos;
  int64_t current_pts;
  unsigned int remaining_sample_bytes = 0;

  /* Fixed-size raw chunk; the FLAC audio decoder reassembles frames. */
  remaining_sample_bytes = 2048;

  /* Position relative to the start of the audio data. */
  current_file_pos = this->input->get_current_pos (this->input)
    - this->data_start;

  /* NOTE(review): pts is derived from the BYTE position scaled by
   * 90000/sample_rate, i.e. it treats file bytes as samples.  The value
   * is only used for input_time and the post-seek newpts below; confirm
   * before relying on it as a real presentation timestamp. */
  current_pts  = current_file_pos;
  current_pts *= 90000;

  if (this->sample_rate != 0)
  {
    current_pts /= this->sample_rate;
  }

  /* After a seek, resynchronize the engine clock exactly once. */
  if (this->seek_flag) {
    xine_demux_control_newpts (this->stream, current_pts, 0);
    this->seek_flag = 0;
  }

  while (remaining_sample_bytes)
  {
    if(!this->audio_fifo) {
      this->status = DEMUX_FINISHED;
      break;
    }

    buf = this->audio_fifo->buffer_pool_alloc (this->audio_fifo);
    buf->type = BUF_AUDIO_FLAC;
    buf->extra_info->input_pos    = current_file_pos;
    buf->extra_info->input_length = this->data_size;
    buf->extra_info->input_time   = current_pts / 90;  /* 90 kHz pts -> msec */
    //buf->pts = current_pts;

    /* Clamp the chunk to the fifo buffer capacity. */
    if (remaining_sample_bytes > buf->max_size)
      buf->size = buf->max_size;
    else
      buf->size = remaining_sample_bytes;
    remaining_sample_bytes -= buf->size;

    /* A short read means end of stream: release the unsent buffer. */
    if (this->input->read (this->input,buf->content,buf->size)!=buf->size) {
#ifdef LOG
      printf("demux_flac: buf->size != input->read()\n");
#endif
      buf->free_buffer (buf);
      this->status = DEMUX_FINISHED;
      break;
    }
    /*
    if (!remaining_sample_bytes)
    {
        buf->decoder_flags |= BUF_FLAG_FRAME_END;
    }*/

    this->audio_fifo->put (this->audio_fifo, buf);
  }

  return this->status;
}
/* Publish stream properties and send the decoder header buffer.
 * Called once by the engine before chunks are demuxed. */
static void
demux_flac_send_headers (demux_plugin_t *this_gen) {
  demux_flac_t *this = (demux_flac_t *) this_gen;
  buf_element_t *buf;

#ifdef LOG
  printf("demux_flac: demux_flac_send_headers\n");
#endif

  this->video_fifo = this->stream->video_fifo;
  this->audio_fifo = this->stream->audio_fifo;

  this->status = DEMUX_OK;

  /* Expose the STREAMINFO values gathered by the metadata callback. */
  this->stream->stream_info[XINE_STREAM_INFO_HAS_VIDEO] = 0;
  this->stream->stream_info[XINE_STREAM_INFO_HAS_AUDIO] = 1;
  this->stream->stream_info[XINE_STREAM_INFO_AUDIO_CHANNELS] = this->channels;
  this->stream->stream_info[XINE_STREAM_INFO_AUDIO_SAMPLERATE] = this->sample_rate;
  this->stream->stream_info[XINE_STREAM_INFO_AUDIO_BITS] = this->bits_per_sample;

  xine_demux_control_start (this->stream);

  /* Hand the audio decoder its header buffer: sample rate, bit depth and
   * channel count travel in decoder_info[1..3]. */
  if (this->audio_fifo) {
    buf = this->audio_fifo->buffer_pool_alloc (this->audio_fifo);
    buf->type = BUF_AUDIO_FLAC;
    buf->decoder_flags   = BUF_FLAG_HEADER;
    buf->decoder_info[0] = 0;
    buf->decoder_info[1] = this->sample_rate;
    buf->decoder_info[2] = this->bits_per_sample;
    buf->decoder_info[3] = this->channels;
    buf->size = 0;
    this->audio_fifo->put (this->audio_fifo, buf);
  }
}
/* Tear down the demuxer instance: release the libFLAC decoder, then
 * the instance struct itself. */
static void
demux_flac_dispose (demux_plugin_t *this_gen) {
  demux_flac_t *this = (demux_flac_t *) this_gen;

#ifdef LOG
  printf("demux_flac: demux_flac_dispose\n");
#endif

  if (this->flac_decoder != NULL)
    FLAC__seekable_stream_decoder_delete (this->flac_decoder);

  free (this);
}
/* Report the current demuxer state (DEMUX_OK or DEMUX_FINISHED). */
static int
demux_flac_get_status (demux_plugin_t *this_gen) {
  demux_flac_t *this = (demux_flac_t *) this_gen;

#ifdef LOG
  printf("demux_flac: demux_flac_get_status\n");
#endif

  return this->status;
}
/* Seek either to an absolute byte position (start_pos) or to a time
 * offset (start_time, same unit as length_in_msec), then flush the
 * engine.  Returns the resulting demuxer status. */
static int
demux_flac_seek (demux_plugin_t *this_gen, off_t start_pos, int start_time) {
  demux_flac_t *this = (demux_flac_t *) this_gen;

#ifdef LOG
  printf("demux_flac: demux_flac_seek\n");
#endif

  if (start_pos || !start_time) {
    /* Byte-based seek: position the input directly. */
    this->input->seek (this->input, start_pos, SEEK_SET);
#ifdef LOG
    printf ("Seek to position: %lld\n", start_pos);
#endif
  } else {
    /* Time-based seek: convert start_time into a fraction of the total
     * duration, then into an absolute sample number for libFLAC.
     * (The original computed target_sample from the RAW start_time
     * before normalizing by length_in_msec, yielding a target sample
     * far beyond the end of the stream.) */
    double distance = (double)start_time;
    uint64_t target_sample;
    FLAC__bool s = false;

    if (this->length_in_msec != 0)
    {
      distance /= (double)this->length_in_msec;
    }
    target_sample = (uint64_t)(distance * this->total_samples);

    s = FLAC__seekable_stream_decoder_seek_absolute (this->flac_decoder,
                                                     target_sample);

    if (s) {
#ifdef LOG
      printf ("Seek to: %d successfull!\n", start_time);
#endif
    } else
      this->status = DEMUX_FINISHED;
  }

  xine_demux_flush_engine (this->stream);

  return this->status;
}
/* Stream duration as derived from STREAMINFO, or 0 before the libFLAC
 * decoder has been created. */
static int
demux_flac_get_stream_length (demux_plugin_t *this_gen) {
  demux_flac_t *this = (demux_flac_t *) this_gen;

#ifdef LOG
  printf("demux_flac: demux_flac_get_stream_length\n");
#endif

  return this->flac_decoder ? this->length_in_msec : 0;
}
/* This demuxer advertises no optional capabilities (no chapters etc.). */
static uint32_t
demux_flac_get_capabilities (demux_plugin_t *this_gen) {
#ifdef LOG
  printf("demux_flac: demux_flac_get_capabilities\n");
#endif

  return DEMUX_CAP_NOCAP;
}
/* No optional data is provided by this demuxer. */
static int
demux_flac_get_optional_data (demux_plugin_t *this_gen, void *data, int dtype) {
#ifdef LOG
  printf("demux_flac: demux_flac_get_optional_data\n");
#endif

  return DEMUX_OPTIONAL_UNSUPPORTED;
}
/* Instantiate the demuxer for a stream: detect FLAC content (by preview
 * bytes or by the ".flac" extension), allocate the instance, wire up the
 * libFLAC seekable decoder callbacks, and parse the stream metadata.
 * Returns NULL when the input is rejected. */
static demux_plugin_t *
open_plugin (demux_class_t *class_gen,
             xine_stream_t *stream,
             input_plugin_t *input) {
  demux_flac_t *this;

#ifdef LOG
  printf("demux_flac: open_plugin\n");
#endif

  switch (stream->content_detection_method) {
  case METHOD_BY_CONTENT:
  {
    uint8_t buf[MAX_PREVIEW_SIZE];
    int len;

    /*
     * try to get a preview of the data
     */
    len = input->get_optional_data (input, buf, INPUT_OPTIONAL_DATA_PREVIEW);
    if (len == INPUT_OPTIONAL_UNSUPPORTED) {
      if (input->get_capabilities (input) & INPUT_CAP_SEEKABLE) {
        /* No preview support: read the head of the stream directly,
         * then rewind so decoding later starts from byte 0. */
        input->seek (input, 0, SEEK_SET);
        if ( (len=input->read (input, buf, 1024)) <= 0)
          return NULL;
        input->seek (input, 0, SEEK_SET);
      } else
        return NULL;
    }

    /* FIXME: Skip id3v2 tag */
    /* Look for fLaC tag at the beginning of file */
    if ( (buf[0] != 'f') || (buf[1] != 'L') ||
         (buf[2] != 'a') || (buf[3] != 'C') )
      return NULL;
  }
  break;
  case METHOD_BY_EXTENSION: {
    char *ending, *mrl;

    /* Accept only MRLs ending in ".flac" (case-insensitive). */
    mrl = input->get_mrl (input);
    ending = strrchr (mrl, '.');

    if (!ending || (strlen (ending) < 5))
      return NULL;
    if (strncasecmp (ending, ".flac", 5))
      return NULL;
  }
  break;
  case METHOD_EXPLICIT:
  break;
  default:
    return NULL;
  break;
  }

  /*
   * if we reach this point, the input has been accepted.
   */
  this = xine_xmalloc (sizeof (demux_flac_t));
  this->stream = stream;
  this->input = input;

  /* Fill in the demux_plugin_t vtable. */
  this->demux_plugin.send_headers      = demux_flac_send_headers;
  this->demux_plugin.send_chunk        = demux_flac_send_chunk;
  this->demux_plugin.seek              = demux_flac_seek;
  this->demux_plugin.dispose           = demux_flac_dispose;
  this->demux_plugin.get_status        = demux_flac_get_status;
  this->demux_plugin.get_stream_length = demux_flac_get_stream_length;
  this->demux_plugin.get_video_frame   = NULL;
  this->demux_plugin.got_video_frame_cb= NULL;
  this->demux_plugin.get_capabilities  = demux_flac_get_capabilities;
  this->demux_plugin.get_optional_data = demux_flac_get_optional_data;
  this->demux_plugin.demux_class       = class_gen;

  this->seek_flag = 0;

  /* Get a new FLAC decoder and hook up callbacks */
  this->flac_decoder = FLAC__seekable_stream_decoder_new();
#ifdef LOG
  printf("demux_flac: this->flac_decoder: %p\n", this->flac_decoder);
#endif

  /* Configure the decoder before init: callbacks must all be set first. */
  FLAC__seekable_stream_decoder_set_md5_checking (this->flac_decoder, false);
  FLAC__seekable_stream_decoder_set_read_callback (this->flac_decoder,
                                                   flac_read_callback);
  FLAC__seekable_stream_decoder_set_seek_callback (this->flac_decoder,
                                                   flac_seek_callback);
  FLAC__seekable_stream_decoder_set_tell_callback (this->flac_decoder,
                                                   flac_tell_callback);
  FLAC__seekable_stream_decoder_set_length_callback (this->flac_decoder,
                                                     flac_length_callback);
  FLAC__seekable_stream_decoder_set_eof_callback (this->flac_decoder,
                                                  flac_eof_callback);
  FLAC__seekable_stream_decoder_set_metadata_callback (this->flac_decoder,
                                                       flac_metadata_callback);
  FLAC__seekable_stream_decoder_set_write_callback (this->flac_decoder,
                                                    flac_write_callback);
  FLAC__seekable_stream_decoder_set_error_callback (this->flac_decoder,
                                                    flac_error_callback);
  FLAC__seekable_stream_decoder_set_client_data (this->flac_decoder,
                                                 this);

  FLAC__seekable_stream_decoder_init (this->flac_decoder);

  /* Get some stream info */
  this->data_size  = this->input->get_length (this->input);
  this->data_start = this->input->get_current_pos (this->input);

  /* This will cause FLAC to give us the rest of the information on
   * this flac stream (fills the STREAMINFO fields via the metadata
   * callback and leaves the input positioned after the metadata).
   */
  this->status = DEMUX_OK;
  FLAC__seekable_stream_decoder_process_until_end_of_metadata (this->flac_decoder);
#ifdef LOG
  printf("demux_flac: Processed file until end of metadata\n");
#endif

  return &this->demux_plugin;
}
/* FLAC Demuxer class */
/* Human-readable plugin description shown by the engine. */
static char *
get_description (demux_class_t *this_gen) {
  return "FLAC demux plugin";
}
/* Short identifier used in logs and the plugin catalog. */
static char *
get_identifier (demux_class_t *this_gen) {
  return "FLAC";
}
/* File extensions handled by this demuxer. */
static char *
get_extensions (demux_class_t *this_gen) {
  return "flac";
}
/* MIME types handled, in xine's "type: ext: description;" format. */
static char *
get_mimetypes (demux_class_t *this_gen) {
  return "application/x-flac: flac: FLAC Audio;";
}
/* Release the plugin class object allocated in demux_flac_init_class. */
static void
class_dispose (demux_class_t *this_gen) {
#ifdef LOG
  printf("demux_flac: class_dispose\n");
#endif

  free ((demux_flac_class_t *) this_gen);
}
/* Plugin entry point: allocate and populate the demuxer class
 * descriptor that xine uses to create demuxer instances. */
void *
demux_flac_init_class (xine_t *xine, void *data) {
  demux_flac_class_t *flac_class;

#ifdef LOG
  printf("demux_flac: demux_flac_init_class\n");
#endif

  flac_class = xine_xmalloc (sizeof (demux_flac_class_t));

  flac_class->xine   = xine;
  flac_class->config = xine->config;

  flac_class->demux_class.open_plugin     = open_plugin;
  flac_class->demux_class.get_description = get_description;
  flac_class->demux_class.get_identifier  = get_identifier;
  flac_class->demux_class.get_mimetypes   = get_mimetypes;
  flac_class->demux_class.get_extensions  = get_extensions;
  flac_class->demux_class.dispose         = class_dispose;

  return flac_class;
}
| OS2World/MM-SOUND-xine | src/libflac/demux_flac.c | C | gpl-2.0 | 18,964 |
<?
class_load ('Customer');
class_load ('ActionType');
class_load ('Ticket');
/**
* Class for handling syncronisation of the Keyos database with the ERP database.
*
* The ErpSync objects themselves are not actually stored to the database,
* they are only instantiated in order to establish the connection to the
* ERP database.
*
*/
class ErpSync extends Base
{
/** Specifies if the connection with the ERP database has been established
* @var bool */
var $db_connected = false;
	/** Class constructor. Establishes a connection to the ERP database
	 * @param bool $do_connect Specifies if the connection to the ERP database should be established or not.
	 */
	function ErpSync ($do_connect = true)
	{
		// Connection may be deferred (e.g. when the caller only needs the class loaded).
		if ($do_connect) $this->connect_to_erp ();
	}
	/** Overloading of the load_data() method. It does nothing, since there
	 * is no data to be loaded from database: ErpSync objects are never persisted.
	 */
	function load_data () {}

	/** Overloading of the save_data() method. It does nothing, since there
	 * is no data to be saved to database: ErpSync objects are never persisted.
	 */
	function save_data () {}

	/** Overloading of the delete() method. It does nothing, since there
	 * is no data to be deleted from the database: ErpSync objects are never persisted.
	 */
	function delete () {}
	/** Creates the connection to the ERP (MS SQL Server) database.
	 * Sets $this->db_connected to true on success; on failure an error
	 * message is reported via error_msg() and the flag stays false.
	 */
	function connect_to_erp ()
	{
		// '@' suppresses the native warning; failures are reported via error_msg() below.
		$conn = @mssql_connect (ERP_DB_HOST, ERP_DB_USER, ERP_DB_PWD);
		if ($conn)
		{
			$db_selected = @mssql_select_db (ERP_DB_NAME);
			if ($db_selected) $this->db_connected = true;
			else error_msg ('Failed opening the ERP database.');
		}
		else
		{
			error_msg ('Failed connecting to the ERP database. '.ERP_DB_HOST);
		}
	}
	/** Returns the customers from the ERP database
	 * @return array Array of generic objects, containing customer info from the ERP
	 *               database. Besides the ERP fields, each object has a special
	 *               field called 'sync_stat', specifying the syncronisation status
	 *               for that item - see $GLOBALS['ERP_SYNC_STATS']
	 */
	function get_erp_customers ()
	{
		$ret = array ();
		if ($this->db_connected)
		{
			// Fetch all ERP customers; ERP columns are aliased to the Keyos field names.
			$q = 'SELECT c_nom as erp_name, c_id as erp_id, c_adresse, c_adresse2, c_codep, c_ville, c_pays, c_cle2, ';
			$q.= 'c_tarif, c_cat1, c_cat2 ';
			$q.= 'FROM cli ORDER BY c_nom';
			$h = mssql_query ($q);
			while ($c = mssql_fetch_object ($h)) $ret[] = $c;

			// Get the list of ERP IDs already defined in Keyos
			$q = 'SELECT erp_id, id FROM '.TBL_CUSTOMERS.' WHERE erp_id<>""';
			$ks_erp_ids = $this->db_fetch_list ($q);

			// ERP IDs seen in this pass; used later to find Keyos-only customers.
			$selected_erp_ids = array ();
			for ($i=0; $i<count($ret); $i++)
			{
				$r = &$ret[$i];
				$r->erp_id = trim($r->erp_id);
				$r->c_cat1 = trim($r->c_cat1);
				$r->c_cat2 = trim($r->c_cat2);
				// Translate the codes
				$r->contract_type = $GLOBALS['ERP_CONTRACT_TYPES'][$r->c_cat1];
				$r->contract_sub_type = $GLOBALS['ERP_CUST_SUBTYPES'][$r->c_cat2];
				$r->price_type = $GLOBALS['ERP_CUST_PRICETYPES'][$r->c_tarif];
				// Make sure that undefined values in ERP are presented as zeros
				if (!$r->contract_type) $r->contract_type = 0;
				if (!$r->contract_sub_type) $r->contract_sub_type = 0;
				if (!$r->price_type) $r->price_type = 0;
				//debug($ks_erp_ids[trim($r->erp_id)]);
				if (isset($ks_erp_ids[$r->erp_id]))
				{
					// Customer exists on both sides: load the Keyos record for comparison.
					$r->customer_id =$ks_erp_ids[$r->erp_id];
					$r->customer = new Customer ($r->customer_id);
					// Check if any information was modified
					if (
						$r->erp_name != $r->customer->name or
						$r->contract_type != $r->customer->contract_type or
						$r->contract_sub_type != $r->customer->contract_sub_type or
						$r->price_type != $r->customer->price_type
						)
					{
						$r->sync_stat = ERP_SYNC_STAT_MODIFIED;
					}
					// Check if complete info is available in ERP
					if (!$r->contract_type) $r->sync_stat = ERP_SYNC_STAT_ERP_INCOMPLETE;
				}
				else
				{
					// This is a customer which doesn't exist in Keyos, check if it is fully defined in ERP
					if (!$r->contract_type) $r->sync_stat = ERP_SYNC_STAT_ERP_INCOMPLETE;
					else $r->sync_stat = ERP_SYNC_STAT_ERP_NEW;
				}
				$selected_erp_ids[] = $r->erp_id;
				//debug($r);
			}
			// Append the users which are defined only in Keyos
			$q = 'SELECT id, erp_id FROM '.TBL_CUSTOMERS.' WHERE active=1 ORDER BY name';
			$ks_ids = $this->db_fetch_list ($q);
			foreach ($ks_ids as $customer_id => $erp_id)
			{
				if (!$erp_id or ($erp_id and !in_array($erp_id, $selected_erp_ids)))
				{
					$n = null;
					$n->sync_stat = ERP_SYNC_STAT_KS_NEW;
					$n->customer_id = $customer_id;
					$n->customer = new Customer ($customer_id);
					// Insert the customer in alphabetical order
					// (empty-body for loop: $j ends at the insertion index)
					for ($j = 0; ($j<count($ret) and (strtolower($n->customer->name) > strtolower($ret[$j]->erp_name))); $j++);
					array_splice ($ret, $j, 0, array($n));
				}
			}
		}
		return $ret;
	}
	/** Imports new ERP customers into Keyos and refreshes the Keyos copies
	 * of ERP customers flagged as modified by get_erp_customers().
	 * Customers with incomplete ERP data or defined only in Keyos are skipped.
	 */
	function sync_erp_customers()
	{
		if ($this->db_connected)
		{
			// Get the customers from the ERP system
			$erp_customers = $this->get_erp_customers ();

			// Get the list of ERP IDs already defined in Keyos
			$q = 'SELECT erp_id, id FROM '.TBL_CUSTOMERS.' WHERE erp_id<>""';
			$ks_erp_ids = $this->db_fetch_list ($q);

			for ($i = 0; $i<count($erp_customers); $i++)
			{
				$r = &$erp_customers[$i];
				if ($r->sync_stat == ERP_SYNC_STAT_ERP_NEW)
				{
					// This is a new customer which can be imported in Keyos
					$customer = new Customer ();
					$customer->name = $r->erp_name;
					$customer->erp_id = $r->erp_id;
					$customer->contract_type = $r->contract_type;
					$customer->contract_sub_type = $r->contract_sub_type;
					$customer->price_type = $r->price_type;
					$customer->save_data ();
				}
				elseif ($r->sync_stat == ERP_SYNC_STAT_MODIFIED and isset($ks_erp_ids[$r->erp_id]))
				{
					// This is an customer that has been modified in ERP
					$customer = new Customer ($ks_erp_ids[$r->erp_id]);
					$customer->name = $r->erp_name;
					$customer->erp_id = $r->erp_id;
					$customer->contract_type = $r->contract_type;
					$customer->contract_sub_type = $r->contract_sub_type;
					$customer->price_type = $r->price_type;
					$customer->save_data ();
				}
			}
		}
	}
	/** Get the list of action type categories from the ERP database,
	 * annotated with a 'sync_stat' field describing how each one relates
	 * to the categories already defined in Keyos.
	 * @return array Array of generic objects (erp_id, name, sync_stat, ...)
	 */
	function get_erp_actypes_categories ()
	{
		$ret = array ();
		if ($this->db_connected)
		{
			// Action type categories (the two hard-coded id_famille values select
			// the relevant ERP families - see get_erp_actypes() for their meaning).
			$q = 'SELECT id as erp_id, nom as name FROM ss_famil WHERE id_famille="Q1J50PXW11" or id_famille="S1J50PYRYC" ORDER BY nom';
			$h = mssql_query ($q);
			while ($a = mssql_fetch_object ($h)) $ret[] = $a;

			// Get the categories already defined in Keyos
			$q = 'SELECT erp_id, id FROM '.TBL_ACTION_TYPES_CATEGORIES.' WHERE erp_id<>"" ';
			$ks_erp_ids = $this->db_fetch_list ($q);

			// Keep track of the ERP IDs we already handled, in case some of the KS action types have an ERP ID that doesn't exist anymore in ERP
			$selected_erp_ids = array ();
			for ($i=0; $i<count($ret); $i++)
			{
				$r = &$ret[$i];
				$r->erp_id = trim($r->erp_id);
				if (isset($ks_erp_ids[$r->erp_id]))
				{
					// This ERP action type already exists in Keyos
					$r->id = $ks_erp_ids[$r->erp_id];
					$r->category = new ActionTypeCategory ($r->id);
					// Check if the category has been modified in ERP
					if ($r->category->name != $r->name) $r->sync_stat = ERP_SYNC_STAT_MODIFIED;
					// Check if the ERP definition is complete
					if (!$r->erp_id or !$r->name) $r->sync_stat = ERP_SYNC_STAT_ERP_INCOMPLETE;
				}
				else
				{
					// This is a category which doesn't exist in Keyos, check if it is fully defined in ERP
					if (!$r->erp_id or !$r->name) $r->sync_stat = ERP_SYNC_STAT_ERP_INCOMPLETE;
					else $r->sync_stat = ERP_SYNC_STAT_ERP_NEW;
				}
				$selected_erp_ids[] = $r->erp_id;
			}
		}
		return $ret;
	}
	/** Imports new ERP action type categories into Keyos and refreshes the
	 * names of categories flagged as modified by get_erp_actypes_categories().
	 */
	function sync_erp_actypes_categories ()
	{
		if ($this->db_connected)
		{
			// Get the action types categories from the ERP system
			$erp_actypes_categories = $this->get_erp_actypes_categories ();

			// Get the ERP IDs already defined in Keyos
			$q = 'SELECT erp_id, id FROM '.TBL_ACTION_TYPES_CATEGORIES.' WHERE erp_id<>"" ';
			$ks_erp_ids = $this->db_fetch_list ($q);

			for ($i = 0; $i<count($erp_actypes_categories); $i++)
			{
				$r = &$erp_actypes_categories[$i];
				if ($r->sync_stat == ERP_SYNC_STAT_ERP_NEW)
				{
					// This is a new category which can be imported in Keyos
					$category = new ActionTypeCategory ();
					$category->erp_id = $r->erp_id;
					$category->name = $r->name;
					$category->save_data ();
				}
				elseif ($r->sync_stat == ERP_SYNC_STAT_MODIFIED and isset($ks_erp_ids[$r->erp_id]))
				{
					// This is an existing action type that has been modified in ERP
					$category = new ActionTypeCategory ($ks_erp_ids[$r->erp_id]);
					$category->name = $r->name;
					$category->save_data ();
				}
			}
		}
	}
function get_erp_actypes ()
{
$ret = array ();
if ($this->db_connected)
{
// Articles
$q = 'SELECT stock.s_id as erp_id, stock.s_modele as erp_name, stock.s_cle3 as erp_code, familles.nom as family, ';
$q.= 'stock.s_id_ssfam, stock.s_cat2, stock.s_cat3, stock.s_id_famil ';
$q.= 'FROM stock, familles ';
$q.= 'WHERE familles.id=s_id_famil AND stock.s_id_rayon="R000000004" AND ';
$q.= '(stock.s_id_famil="Q1J50PXW11" OR stock.s_id_famil="S1J50PYRYC") ';
/*
stock.s_cat2 is the categorie2 (Basic/TC/Keypro)
stock.s_cat3 is the categorie3 (Basic/TC level1/ TC level2 / TC level3/Keypro /GlobalPro)
stock.s_cat4 is the categorie3 (HourlyBased/FixedBased)
stock.s_id_rayon='R000000004' (service ks)
stock.s_id_famil='Q1J50PXW11' (regie tc)
stock.s_id_famil='S1J50PYRYC' (regie autre)
famille ([ID_RAYON] => R000000004):
Abo: P1J50PXPEQ
Techniciens: P1KR12M74O
Régie TC: Q1J50PXW11
Autres: Q1KR12NDTT
Régie autre: S1J50PYRYC
*/
$h = mssql_query ($q);
while ($a = mssql_fetch_object ($h)) $ret[] = $a;
// Get the ERP IDs already defined in Keyos
$q = 'SELECT erp_id, id FROM '.TBL_ACTION_TYPES.' WHERE erp_id<>"" ';
$ks_erp_ids = $this->db_fetch_list ($q);
// Keep track of the ERP IDs we already handled, in case some of the KS action types have an ERP ID that doesn't exist anymore in ERP
$selected_erp_ids = array ();
$erp_actypes = ActionTypeCategory::get_erp_categories_translation ();
for ($i=0; $i<count($ret); $i++)
{
$r = &$ret[$i];
$r->erp_id = trim($r->erp_id);
$r->s_id_ssfarm = trim($r->s_id_ssfarm);
$r->s_cat2 = trim($r->s_cat2);
$r->s_cat3 = trim($r->s_cat3);
$r->category = $erp_actypes[$r->s_id_ssfam];
$r->contract_types = $GLOBALS['ERP_CONTRACT_TYPES'][$r->s_cat2];
$r->contract_sub_type = $GLOBALS['ERP_CONTRACT_SUBTYPES_ACTIONS'][$r->s_cat2];
$r->price_type = $GLOBALS['ERP_PRICE_TYPES'][$r->s_cat3];
if (isset($ks_erp_ids[$r->erp_id]))
{
// This ERP action type already exists in Keyos
$r->action_type_id = $ks_erp_ids[$r->erp_id];
$r->action_type = new ActionType ($r->action_type_id);
// Check if the action type has been modified in ERP
if (
$r->action_type->erp_code != $r->erp_code or
$r->action_type->erp_name != $r->erp_name or
$r->action_type->category != $r->category or
$r->action_type->contract_types != $r->contract_types or
$r->action_type->contract_sub_type != $r->contract_sub_type or
$r->action_type->price_type != $r->price_type or
$r->action_type->family != $r->family
) $r->sync_stat = ERP_SYNC_STAT_MODIFIED;
// Check if the ERP definition is complete
if (!$r->erp_code or !$r->contract_types or !$r->contract_sub_type or !$r->price_type or !$r->category) $r->sync_stat = ERP_SYNC_STAT_ERP_INCOMPLETE;
}
else
{
// This is an action type which doesn't exist in Keyos, check if it is fully defined in ERP
if (!$r->erp_code or !$r->contract_types or !$r->contract_sub_type or !$r->price_type or !$r->category) $r->sync_stat = ERP_SYNC_STAT_ERP_INCOMPLETE;
else $r->sync_stat = ERP_SYNC_STAT_ERP_NEW;
}
$selected_erp_ids[] = $r->erp_id;
}
}
return $ret;
}
	/** Imports new ERP action types into Keyos and refreshes the Keyos copies
	 * of action types flagged as modified by get_erp_actypes().
	 * Note: for modified entries the Keyos display name is kept; only the
	 * ERP-sourced fields (erp_code, erp_name, category, ...) are updated.
	 */
	function sync_erp_actypes ()
	{
		if ($this->db_connected)
		{
			// Get the action types from the ERP system
			$erp_actypes = $this->get_erp_actypes ();

			// Get the ERP IDs already defined in Keyos
			$q = 'SELECT erp_id, id FROM '.TBL_ACTION_TYPES.' WHERE erp_id<>"" ';
			$ks_erp_ids = $this->db_fetch_list ($q);

			for ($i = 0; $i<count($erp_actypes); $i++)
			{
				$r = &$erp_actypes[$i];
				if ($r->sync_stat == ERP_SYNC_STAT_ERP_NEW)
				{
					// This is a new action type which can be imported in Keyos
					$action_type = new ActionType ();
					$action_type->erp_code = $r->erp_code;
					$action_type->erp_id = $r->erp_id;
					$action_type->name = $r->erp_name;
					$action_type->erp_name = $r->erp_name;
					$action_type->category = $r->category;
					$action_type->contract_types = $r->contract_types;
					$action_type->contract_sub_type = $r->contract_sub_type;
					$action_type->price_type = $r->price_type;
					$action_type->family = $r->family;
					$action_type->save_data ();
				}
				elseif ($r->sync_stat == ERP_SYNC_STAT_MODIFIED and isset($ks_erp_ids[$r->erp_id]))
				{
					// This is an existing action type that has been modified in ERP
					$action_type = new ActionType ($ks_erp_ids[$r->erp_id]);
					$action_type->erp_code = $r->erp_code;
					$action_type->erp_name = $r->erp_name;
					$action_type->category = $r->category;
					$action_type->contract_types = $r->contract_types;
					$action_type->contract_sub_type = $r->contract_sub_type;
					$action_type->price_type = $r->price_type;
					$action_type->family = $r->family;
					$action_type->save_data ();
				}
			}
		}
	}
/** Fetch from the ERP system the activities to be used for Timesheets.
 *
 * Returns an array of stdClass records, one per activity. Each record carries
 * a sync_stat code (ERP_SYNC_STAT_*) describing how the ERP record relates to
 * the matching Keyos activity: new, modified, incomplete, or Keyos-only. */
function get_erp_activities ()
{
	$ret = array ();
	class_load ('Activity');
	if ($this->db_connected)
	{
		// Activities: every ERP "gamenum" row of the activity type, skipping empty labels and the special "1H" entry
		$q = 'SELECT g.id as erp_id, g.lib as erp_name FROM gamenum g ';
		$q.= 'WHERE g.id_type="L1KY0X3EG3" and g.lib<>"" and g.lib<>"1H"';
		$h = mssql_query ($q);
		while ($a = mssql_fetch_object ($h)) $ret[] = $a;
		// Fetch a list with all erp IDs for users
		$users_ids = DB::db_fetch_list ('SELECT erp_id, id FROM '.TBL_USERS.' WHERE erp_id<>""');
		// For each activity ERP ID, make a list with the user-specific codes.
		// s.s_gamenum2 is the ERP ID of a user, g.id is the ERP ID of an activity,
		// s.s_id is the "code" for that user-activity combination.
		$q = 'SELECT s.s_gamenum2 as user_id, g.id as activity_id, s.s_id as code FROM gamenum g, stock s WHERE ';
		$q.= 'g.id=s.s_gamenum1 AND g.lib<>"" AND g.lib<>"1H" ';
		$h = mssql_query ($q);
		$users_activities = array ();
		while ($a = mssql_fetch_object ($h)) $users_activities[$a->activity_id][$users_ids[$a->user_id]] = $a->code;
		// Sort each per-activity code list by Keyos user ID
		foreach ($users_activities as $erp_id => $codes) ksort ($users_activities[$erp_id]);
		// Get the ERP IDs already defined in Keyos
		$q = 'SELECT erp_id, id FROM '.TBL_ACTIVITIES.' WHERE erp_id<>"" ';
		$ks_erp_ids = $this->db_fetch_list ($q);
		// Keep track of the ERP IDs we already handled, in case some of the KS activities have an ERP ID that doesn't exist anymore in ERP
		$selected_erp_ids = array ();
		for ($i=0; $i<count($ret); $i++)
		{
			$r = &$ret[$i];
			if (isset($ks_erp_ids[$r->erp_id]))
			{
				// This ERP activity already exists in Keyos: attach the Keyos record so it can be compared
				$r->activity_id = $ks_erp_ids[$r->erp_id];
				$r->users_codes = $users_activities[$r->erp_id];
				$r->activity = new Activity ($r->activity_id);
				$r->activity->load_users_codes ();
				// Check if the ERP definition is complete
				if (!$r->erp_id or !$r->erp_name) $r->sync_stat = ERP_SYNC_STAT_ERP_INCOMPLETE;
				// Flag as modified when the id, name or any user code differs between ERP and Keyos
				elseif ($r->erp_id != $r->activity->erp_id or $r->erp_name != $r->activity->erp_name or $r->users_codes!=$r->activity->users_codes) $r->sync_stat = ERP_SYNC_STAT_MODIFIED;
			}
			else
			{
				// This activity doesn't exist in Keyos; check if it is fully defined in ERP
				if (!$r->erp_id or !$r->erp_name) $r->sync_stat = ERP_SYNC_STAT_ERP_INCOMPLETE;
				else $r->sync_stat = ERP_SYNC_STAT_ERP_NEW;
			}
			$selected_erp_ids[] = $r->erp_id;
		}
		// Append the activities which are defined only in Keyos
		$q = 'SELECT id, erp_id FROM '.TBL_ACTIVITIES.' ORDER BY name';
		$ks_ids = $this->db_fetch_list ($q);
		foreach ($ks_ids as $activity_id => $erp_id)
		{
			if (!$erp_id or !in_array($erp_id, $selected_erp_ids))
			{
				// BUG FIX: was "$n = null" followed by property writes, which is a
				// fatal error on PHP 8 (dynamic property creation on null)
				$n = new stdClass ();
				$n->sync_stat = ERP_SYNC_STAT_KS_NEW;
				$n->activity_id = $activity_id;
				$n->activity = new Activity ($activity_id);
				// Insert the activity in alphabetical order.
				// BUG FIX: was "$n->actvity->name" (typo), which always compared against
				// a null member and so inserted every Keyos-only activity at position 0.
				for ($j = 0; ($j<count($ret) and (strtolower($n->activity->name) > strtolower($ret[$j]->erp_name))); $j++);
				array_splice ($ret, $j, 0, array($n));
			}
		}
	}
	return $ret;
}
/** Synchronize the Timesheet activities in Keyos with the ones coming from ERP.
 * New ERP activities are imported; modified ones are refreshed in place. */
function sync_erp_activities ()
{
	if (!$this->db_connected) return;
	// Activities as seen by ERP, each one flagged with its synchronization status
	$erp_activities = $this->get_erp_activities ();
	// Map of ERP id -> Keyos activity id for the activities already linked to ERP
	$known_erp_ids = $this->db_fetch_list ('SELECT erp_id, id FROM '.TBL_ACTIVITIES.' WHERE erp_id<>"" ');
	foreach ($erp_activities as $erp_act)
	{
		if ($erp_act->sync_stat == ERP_SYNC_STAT_ERP_NEW)
		{
			// Brand new on the ERP side: import it into Keyos
			$activity = new Activity ();
			$activity->erp_id = $erp_act->erp_id;
			$activity->name = $erp_act->erp_name;
			$activity->erp_name = $erp_act->erp_name;
			$activity->save_data ();
			$activity->set_users_codes ($erp_act->users_codes);
		}
		elseif ($erp_act->sync_stat == ERP_SYNC_STAT_MODIFIED and isset($known_erp_ids[$erp_act->erp_id]))
		{
			// Already linked but changed on the ERP side: refresh the Keyos record
			$activity = new Activity ($known_erp_ids[$erp_act->erp_id]);
			$activity->erp_name = $erp_act->erp_name;
			$activity->save_data ();
			$activity->set_users_codes ($erp_act->users_codes);
		}
	}
}
/** Returns the engineers and associated IDs from the ERP database.
 * Each returned record carries the ERP id/name plus the service and travel
 * article IDs; records get a sync_stat flag when they differ from Keyos.
 * When $get_ks is true, active Keyos engineers that have no matching ERP
 * record are appended with sync_stat ERP_SYNC_STAT_KS_NEW. */
function get_erp_engineers ($get_ks = true)
{
	$ret = array ();
	if ($this->db_connected)
	{
		// Fetch from the ERP database the list of engineers and the travel and service ERP IDs
		$q = 'SELECT g.id as erp_id, g.lib as erp_name, s1.s_id as erp_id_service, s2.s_id erp_id_travel ';
		$q.= 'FROM gamenum g, stock s1, stock s2 ';
		$q.= 'WHERE g.id=s1.s_gamenum2 AND g.id=s2.s_gamenum2 AND ';
		$q.= 's1.s_id_rayon="R000000004" AND s2.s_id_rayon="R000000004" AND s1.s_id_famil="P1KR12M74O" AND s2.s_id_famil="P1KR12M74O" AND ';
		$q.= 's1.s_gamtyp1="L1KY0X3EG3" AND s2.s_gamtyp1="L1KY0X3EG3" AND ';
		$q.= 's1.s_gamenum1="O1KY0X40EG" AND '; // Filter s1 to services (1H) articles
		$q.= 's2.s_gamenum1="N1KY0X3UW3" '; // Filter s2 to travel costs articles
		$q.= 'ORDER BY g.lib';
		$h = mssql_query ($q);
		while ($e = mssql_fetch_object ($h)) $ret[] = $e;
		// Get the list of ERP IDs already defined in Keyos
		$q = 'SELECT erp_id, id FROM '.TBL_USERS.' WHERE erp_id<>"" ';
		$ks_erp_ids = $this->db_fetch_list ($q);
		// Keep track of the ERP IDs we already handled, in case some of the KS users have an ERP ID that doesn't exist anymore in ERP
		$selected_erp_ids = array ();
		for ($i=0; $i<count($ret); $i++)
		{
			$r = &$ret[$i];
			if (isset($ks_erp_ids[$r->erp_id]))
			{
				$r->user_id = $ks_erp_ids[$r->erp_id];
				$r->user = new User ($r->user_id);
				$r->actype_travel = ActionType::get_user_travel_cost ($r->user->id);
				// Flag as modified when the service/travel IDs or the travel action type are out of sync
				if (
					$r->erp_id_service != $r->user->erp_id_service or
					$r->erp_id_travel != $r->user->erp_id_travel or
					!$r->actype_travel or
					($r->actype_travel->id and $r->actype_travel->erp_id != $r->erp_id_travel)
				) $r->sync_stat = ERP_SYNC_STAT_MODIFIED;
			}
			else
			{
				// This engineer is only defined in ERP
				$r->sync_stat = ERP_SYNC_STAT_ERP_NEW;
			}
			$selected_erp_ids[] = $r->erp_id;
		}
		// Append the users which are defined only in Keyos, if requested
		if ($get_ks)
		{
			$q = 'SELECT id, erp_id FROM '.TBL_USERS.' WHERE type='.USER_TYPE_KEYSOURCE.' AND active=1 ORDER BY fname, lname';
			$ks_ids = $this->db_fetch_list ($q);
			foreach ($ks_ids as $user_id => $erp_id)
			{
				if (!$erp_id or !in_array($erp_id, $selected_erp_ids))
				{
					// BUG FIX: was "$n = &$ret[]" followed by property writes on the
					// appended null element (fatal on PHP 8); build the record explicitly
					$n = new stdClass ();
					$n->sync_stat = ERP_SYNC_STAT_KS_NEW;
					$n->user_id = $user_id;
					$n->user = new User ($user_id);
					$ret[] = $n;
				}
			}
		}
	}
	return $ret;
}
/** Performs a synchronization between the engineer records in ERP and Keyos.
 * Note this means only synchronizing travel and service codes; new users are
 * NOT automatically created. This function also updates the special action
 * types used for each user's travel costs, and makes sure the generic
 * "engineers travel costs" action type exists. */
function sync_erp_engineers ()
{
	if ($this->db_connected)
	{
		// Get the list of ERP engineers (Keyos-only users are not needed here)
		$erp_engineers = $this->get_erp_engineers (false);
		// Get the list of ERP IDs already defined in Keyos
		$q = 'SELECT erp_id, id FROM '.TBL_USERS.' WHERE erp_id<>"" ';
		$ks_erp_ids = $this->db_fetch_list ($q);
		for ($i=0; $i<count($erp_engineers); $i++)
		{
			$r = &$erp_engineers[$i];
			if ($r->sync_stat == ERP_SYNC_STAT_MODIFIED and $r->user->id)
			{
				// Synchronize the user itself
				$r->user->erp_id_service = $r->erp_id_service;
				$r->user->erp_id_travel = $r->erp_id_travel;
				$r->user->save_data ();
				// Synchronize the action type for travel cost
				if ($r->actype_travel->id)
				{
					// The action type exists, only update it
					$r->actype_travel->erp_id = $r->erp_id_travel;
					$r->actype_travel->name = 'Travel costs - '.$r->user->get_name ();
					$r->actype_travel->erp_name = 'Travel costs - '.$r->user->get_name ();
					$r->actype_travel->save_data ();
				}
				else
				{
					// The action type doesn't exist, create it
					$r->actype_travel = new ActionType ();
					$r->actype_travel->erp_id = $r->erp_id_travel;
					$r->actype_travel->name = 'Travel costs - '.$r->user->get_name ();
					$r->actype_travel->erp_name = 'Travel costs - '.$r->user->get_name ();
					$r->actype_travel->price_type = PRICE_TYPE_FIXED;
					$r->actype_travel->special_type = ACTYPE_SPECIAL_TRAVEL;
					$r->actype_travel->user_id = $r->user->id;
					$r->actype_travel->billable = true;
					$r->actype_travel->active = true;
					$r->actype_travel->save_data ();
				}
			}
		}
		// Check for the existence of the generic action type for engineers travel costs
		// XXXX @TODO: Make better synchronization for this
		$q = 'SELECT id FROM '.TBL_ACTION_TYPES.' WHERE erp_id="'.ERP_TRAVEL_ID.'"';
		$travel_id = $this->db_fetch_field ($q, 'id');
		if (!$travel_id)
		{
			$actype_travel = new ActionType ();
			$actype_travel->erp_id = ERP_TRAVEL_ID;
			$actype_travel->erp_code = ERP_TRAVEL_CODE;
			$actype_travel->name = ERP_TRAVEL_NAME;
			$actype_travel->erp_name = ERP_TRAVEL_NAME;
			$actype_travel->price_type = PRICE_TYPE_FIXED;
			$actype_travel->billable = true;
			$actype_travel->active = true;
			// BUG FIX: was "$r->actype_travel->save_data ()", which saved a leftover
			// record from the loop above instead of the newly created action type,
			// so the generic travel action type was never actually persisted
			$actype_travel->save_data ();
		}
	}
}
}
?> | KEYSOURCE/OpenKeyos | plugins/ERP/model/erp_sync.php | PHP | gpl-2.0 | 24,264 |
/* Redux framework: styles for the "info" and "notice" field types. */

/* Base info box */
.redux-info-field {
  min-height: 20px;
  padding: 8px 19px;
  margin: 10px 0;
  border: 1px solid;
  border-radius: 4px;
  border: 1px solid; /* NOTE(review): duplicate of the border declaration above */
  position: relative;
}
/* Headings inside an info box never get the default underline */
.redux-info-field h1,
.redux-info-field h2,
.redux-info-field h3,
.redux-info-field h4,
.redux-info-field h5,
.redux-info-field h6 {
  border-bottom: 0 !important;
}
.redux-info-field h3 {
  color: #777;
}
.redux-info-field .redux-info-icon {
  display: inline-block;
  margin-right: 15px;
}
.redux-info-field .redux-info-icon i {
  font-size: 2em;
}
.redux-info-field .redux-info-desc {
  display: inline-block;
  vertical-align: top;
}
/* Color variants of the info box (background / border / text / icon) */
.redux-info-field.redux-normal {
  background-color: #eeeeee;
  border-color: #cccccc;
  color: #666666;
}
.redux-info-field.redux-normal i {
  color: #c5c5c5;
}
.redux-info-field.redux-warning {
  background-color: #fbeba4;
  border-color: #d7c281;
  color: #958234;
}
.redux-info-field.redux-warning i {
  color: #dcca81;
}
.redux-info-field.redux-success {
  background-color: #c4ee91;
  border-color: #71af5d;
  color: #4d7615;
}
.redux-info-field.redux-success i {
  color: #a0ca6c;
}
.redux-info-field.redux-critical {
  background-color: #fba1a3;
  border-color: #b84f5b;
  color: #981225;
}
.redux-info-field.redux-critical i {
  color: #dd767d;
}
.redux-info-field.redux-info {
  background-color: #d3e4f4;
  border-color: #a9b6c2;
  color: #5c80a1;
}
.redux-info-field.redux-info i {
  color: #afc6da;
}
/* "Notice" variant styled like a WordPress admin notice (left accent bar) */
.redux-notice-field {
  margin: 15px 0 0;
  background-color: #fff;
  border: 0;
  border-left: 4px solid #f3f3f3;
  -webkit-box-shadow: 0 1px 1px 0 rgba(0, 0, 0, 0.1);
  box-shadow: 0 1px 1px 0 rgba(0, 0, 0, 0.1);
  padding: 1px 12px;
}
.redux-notice-field h1,
.redux-notice-field h2,
.redux-notice-field h3,
.redux-notice-field h4,
.redux-notice-field h5,
.redux-notice-field h6 {
  border-bottom: 0 !important;
}
.redux-notice-field p {
  margin: .5em 0;
  padding: 2px;
}
.redux-notice-field .redux-info-icon {
  display: inline-block;
  margin-right: 15px;
}
.redux-notice-field .redux-info-icon i {
  font-size: 2em;
}
.redux-notice-field .redux-info-desc {
  display: inline-block;
  vertical-align: top;
}
/* Accent-bar colors per notice severity */
.redux-notice-field.redux-info {
  border-left: 4px solid #0099d5;
}
.redux-notice-field.redux-success {
  border-left: 4px solid #7ad03a;
}
.redux-notice-field.redux-warning {
  border-left: 4px solid #fbeba4;
}
.redux-notice-field.redux-critical {
  border-left: 4px solid #dd3d36;
}
.redux-main .redux-field-container.redux-container-info {
  padding: 0;
}
/* Layout tweaks when the fields are rendered inside the WordPress customizer */
.wp-customizer .hasIcon.redux-notice-field .redux-info-desc, .wp-customizer .hasIcon.redux-info-field .redux-info-desc {
  display: block;
  margin-left: 43px;
}
.wp-customizer .hasIcon.redux-notice-field .redux-info-icon, .wp-customizer .hasIcon.redux-info-field .redux-info-icon {
  float: left;
}
.wp-customizer .redux-main .customize-control.customize-control-redux-info {
  border-bottom: 0;
}
| estrategasdigitales/Dagutorio | wp-content/themes/theshop/ReduxFramework/ReduxCore/inc/fields/info/field_info.css | CSS | gpl-2.0 | 2,921 |
<?php # -*- coding: utf-8 -*-
namespace W2M\Test\Unit\Import\Data;
use
W2M\Import\Data,
W2M\Test\Helper;
/**
 * Unit-test stub for ImmutableMultiTypeIdList.
 */
class ImmutableMultiTypeIdListTest extends Helper\MonkeyTestCase {

	public function test_local_id() {
		// Placeholder: the test body has not been written yet, so the test is
		// skipped to keep the suite green without silently passing.
		$this->markTestSkipped();
	}
}
| inpsyde/wpml2mlp | tests/phpunit/Unit/Import/Data/ImmutableMultiTypeIdListTest.php | PHP | gpl-2.0 | 247 |
# Blender add-on metadata block read by the add-on manager.
bl_info = {
    "name": "text objects to-from xml",
    "author": "chebhou",
    "version": (1, 0),
    # NOTE(review): Blender version tuples are (major, minor, patch) -- (2, 7, 3)
    # reads as Blender 2.7.3; the intended target was probably (2, 73, 0). Confirm.
    "blender": (2, 7, 3),
    "location": "file->export->text to-from xml",
    # BUG FIX: key was misspelled "discription", which Blender silently ignores,
    # so the add-on showed no description in the add-on manager.
    "description": "Copies text objects to/from an XML file",
    "wiki_url": "https://github.com/chebhou",
    "tracker_url": "https://github.com/chebhou",
    "category": "Import-Export",
}
import bpy
from bpy.types import Operator
from bpy_extras.io_utils import ExportHelper
from bpy.props import EnumProperty, BoolProperty
from xml.dom import minidom
from xml.dom.minidom import Document
def txt_sync(filepath):
    """Read the XML file at ``filepath`` and copy each <object> body back onto
    the matching Blender text object (looked up by scene name, then object name).

    Expects the structure written by txt_export():
    <data><scene name=...><object name=...>body</object>...</scene>...</data>.
    NOTE(review): txt_export() pretty-prints the XML, so childNodes[0].nodeValue
    may include the indentation whitespace added around the text node -- confirm
    whether round-tripping preserves bodies exactly.
    """
    dom = minidom.parse(filepath)
    scenes = dom.getElementsByTagName('scene')
    for scene in scenes:
        scene_name = scene.getAttribute('name')
        print("\n", scene_name)
        bl_scene = bpy.data.scenes[scene_name]
        txt_objs = scene.getElementsByTagName('object')
        for obj in txt_objs:
            obj_name = obj.getAttribute('name')
            obj_body = obj.childNodes[0].nodeValue
            # Chained assignment: writes the body onto the Blender object
            bl_obj = bl_scene.objects[obj_name].data.body = obj_body
            print(obj_name, " ", obj_body)
print(obj_name," ",obj_body)
def txt_export(filepath):
    """Write the body of every FONT (text) object in every scene to an XML file.

    Produces <data><scene name=...><object name=...>body</object>...</scene>...</data>,
    the structure txt_sync() reads back.
    """
    doc = Document()
    root = doc.createElement('data')
    doc.appendChild(root)
    for sce in bpy.data.scenes:
        # One <scene> element per Blender scene
        scene = doc.createElement('scene')
        scene.setAttribute('name', sce.name)
        root.appendChild(scene)
        for obj in sce.objects:
            if obj.type == 'FONT':
                # One <object> element per text object; body stored as a text node.
                # Renamed local: "object" shadowed the builtin of the same name.
                obj_el = doc.createElement('object')
                obj_el.setAttribute('name', obj.name)
                obj_el.appendChild(doc.createTextNode(obj.data.body))
                scene.appendChild(obj_el)
    # BUG FIX: use a context manager so the file handle is closed even if the
    # serialization or write raises (the original leaked the handle on error).
    with open(filepath, "wb") as file_handle:
        file_handle.write(bytes(doc.toprettyxml(indent='\t'), 'UTF-8'))
file_handle.close()
class text_export(Operator, ExportHelper):
    """Write text objects to an XML file, or read them back from one."""
    bl_idname = "export_scene.text_xml"
    bl_label = "text from-to xml"
    bl_options = {'REGISTER', 'UNDO'}  # NOTE(review): original comment asked whether UNDO should be removed -- confirm

    # ExportHelper mixin class uses this as the default file extension
    filename_ext = ".xml"

    # Conversion direction chosen in the file browser sidebar:
    # 'W' exports text objects to XML, 'R' reads them back from XML.
    convert = EnumProperty(
        name="Convert",
        description="Choose conversion",
        items=(('W', "write objects", "write text objects to xml"),
               ('R', "read objects", "read text objects from xml")),
        default='W',
    )

    def execute(self, context):
        """Run the export or import depending on the 'convert' setting."""
        bpy.ops.object.mode_set(mode = 'OBJECT')
        if self.convert == 'W':
            txt_export(self.filepath)
        else:
            txt_sync(self.filepath)
        bpy.context.scene.update()
        self.report({'INFO'}, "Conversion is Done")
        return {'FINISHED'}
def menu_func_export(self, context):
    # Menu entry callback appended to the File > Import/Export menus
    self.layout.operator(text_export.bl_idname, text="Text to-from xml")
def register():
    """Register the operator and add it to both the export and import menus."""
    bpy.utils.register_class(text_export)
    bpy.types.INFO_MT_file_export.append(menu_func_export)
    bpy.types.INFO_MT_file_import.append(menu_func_export)


def unregister():
    """Undo everything register() did."""
    bpy.utils.unregister_class(text_export)
    bpy.types.INFO_MT_file_export.remove(menu_func_export)
    # BUG FIX: was .append(), which added the menu entry again on unregister
    # instead of removing it from the import menu.
    bpy.types.INFO_MT_file_import.remove(menu_func_export)
# Allow running the file directly from Blender's text editor for development
if __name__ == "__main__":
    register()
| chebhou/Text-from-to-.XML | text_io_xml.py | Python | gpl-2.0 | 3,503 |
package net.opendasharchive.openarchive.publish.sites;
import android.content.Context;
import android.util.Log;
import net.opendasharchive.openarchive.publish.PublishController;
import net.opendasharchive.openarchive.publish.model.Job;
import net.opendasharchive.openarchive.publish.model.JobTable;
import net.opendasharchive.openarchive.publish.model.PublishJob;
import java.util.Locale;
import net.opendasharchive.openarchive.Globals;
import net.opendasharchive.openarchive.publish.PublisherBase;
/**
 * Publisher implementation for the Internet Archive (archive.org).
 * Queues upload jobs and builds embed markup for published media.
 */
public class ArchivePublisher extends PublisherBase {

    private final String TAG = "ArchivePublisher";

    // Base URL used to build public download links for uploaded items
    private static final String ARCHIVE_URL_DOWNLOAD = "https://archive.org/download/";
    // S3-compatible upload endpoint; stripped from URLs by cleanFileURL()
    private static final String ARCHIVE_API_ENDPOINT = "http://s3.us.archive.org/";

    public ArchivePublisher(Context context, PublishController publishController, PublishJob publishJob) {
        super(context, publishController, publishJob);
    }

    /** Enqueue a single upload job targeting the Internet Archive. */
    public void startUpload() {
        Log.d(TAG, "startUpload");
        Job newJob = new Job(mContext, mPublishJob.getId(), JobTable.TYPE_UPLOAD, Globals.SITE_ARCHIVE,null);
        mController.enqueueJob(newJob);
    }

    /**
     * Build an embed snippet for the uploaded media.
     * NOTE(review): the sizing/URL logic below is entirely commented out, so
     * width/height/cleanFileURL remain null and this method currently always
     * returns null -- confirm whether this is an intentional stub.
     */
    @Override
    public String getEmbed(Job job) {
        if(null == job) {
            return null;
        }
        String medium = job.getSpec();
        String fileURL = job.getResult();
        String width = null;
        String height = null;
        String cleanFileURL = null;
        //FIXME need to add Media.MEDIA_TYPE check instead of strings
        /*
        if (medium != null) {
            if (medium.equals(Globals.PHOTO)) {
                // keep default image size
                width = "";
                height = "";
            } else if (medium.equals(Globals.VIDEO)) {
                width = "600";
                height = "480";
            } else if (medium.equals(Globals.AUDIO)) {
                width = "500";
                height = "30";
            }
            cleanFileURL = cleanFileURL(fileURL);
        }*/
        String embed = null;
        // Only built when the commented-out block above has populated all three values
        if (null != width && null != height && null != cleanFileURL) {
            embed = String.format(Locale.US, "[archive %s %s %s]", cleanFileURL, width, height);
            /*
            if(isMediumPhoto) {
                embed = String.format(Locale.US, "<img src='%s' alt='Archive Embed'>" ,
                        ARCHIVE_URL_DOWNLOAD + cleanFileURL);
            } else {
                embed = String.format(Locale.US, "<iframe " +
                                "src='%s' " +
                                "width='%s' " +
                                "height='%s' " +
                                "frameborder='0' " +
                                "webkitallowfullscreen='true' " +
                                "mozallowfullscreen='true' allowfullscreen>" +
                                "</iframe>",
                        ARCHIVE_URL_DOWNLOAD + cleanFileURL, width, height);
            }
            */
        }
        return embed;
    }

    @Override
    public String getResultUrl(Job job) {
        return null; // FIXME implement getResultUrl
    }

    /** Strip the S3 upload endpoint prefix, leaving the item path used in links. */
    private String cleanFileURL(String fileURL) {
        fileURL = fileURL.replace(ARCHIVE_API_ENDPOINT, "");
        return fileURL;
    }
}
| scalio/openarchive | app/src/main/java/net/opendasharchive/openarchive/publish/sites/ArchivePublisher.java | Java | gpl-2.0 | 2,787 |
//----------------------------------------------------------------------------------------
//
// This file and all other Easy Bridge source files are copyright (C) 2002 by Steven Han.
// Use of this file is governed by the GNU General Public License.
// See the files COPYING and COPYRIGHT for details.
//
//----------------------------------------------------------------------------------------
//
// Play.h
//
// abstract base class
//
// Current list of plays are:
// Cash
// Discard
// Drop
// Finesse
// Force
// Ruff
// Sluff
// TrumpPull
//
#ifndef __CPLAY__
#define __CPLAY__
#include <memory>
class AppInterface;
class CPlayList;
class CPlayEngine;
class CCard;
class CCardList;
class CCombinedHoldings;
class CGuessedHandHoldings;
class CCardLocation;
class CPlayerStatusDialog;
// Human-readable names for the play types; indexed by CPlay::PlayType,
// so the order here must stay in sync with that enum.
const LPCTSTR tszPlayTypeNames[] = {
	"Trump Pull",
	"Cash",
	"Ruff",
	"Drop",
	"Force",
	"Finesse",
	"Hold Up",
	"Develop Suit",
	"Discard",
	"Exit",
};
// play status -- result/state codes returned while a play is evaluated or
// performed (negative values indicate the play did not/cannot proceed).
typedef enum {
	PLAY_ERROR = -4,
	PLAY_FAILED = -3,
	PLAY_NOT_VIABLE = -2,
	PLAY_POSTPONE = -1,
	PLAY_INACTIVE = 0,
	PLAY_IN_PROGRESS = 1,
	PLAY_COMPLETE = 2,
	PLAY_COMPLETE_AND_REPEAT = 3,
} PlayResult;
//
// The CPlay class
//
// Abstract base class for all declarer plays (cash, finesse, ruff, trump
// pull, ...). Concrete subclasses implement GetFullDescription() and
// typically override Perform(). Each play tracks its key cards,
// prerequisites/postrequisites and current status.
//
class CPlay {

// public data
public:

	// play type
	typedef enum {
		NULL_PLAY = -1,
		TRUMP_PULL,
		CASH,
		RUFF,
		DROP,
		FORCE,
		FINESSE,
		HOLDUP,
		DEVELOP,
		DISCARD,
		EXIT,
	} PlayType;

	// play location
	enum {
		IN_HAND	= 0,
		IN_DUMMY = 1,
		IN_EITHER = -1,
	};

	// which entry the play uses up, if any
	enum {
		ENTRY_NONE = 0,
		ENTRY_HAND = 1,
		ENTRY_DUMMY = 2,
	};

	// play propsect (how promising the play looks)
	typedef enum {
		PP_UNKNOWN = 0,
		PP_LOSER = 1,
		PP_SPECULATIVE	= 2,
		PP_LIKELY_WINNER = 3,
		PP_GUARANTEED_WINNER = 4,
	} PlayProspect;

// public routines
public:
	// non-overridable functions
	int	ObtainUniquePlayID();
	int GetID() { return m_nID; }

	// overridable pure virtual functions
	virtual void Clear();
	virtual BOOL IsValid();
	virtual int GetTargetHand() { return m_nTargetHand; }
	virtual int GetStartingHand() { return m_nStartingHand; }
	virtual int GetEndingHand() { return m_nEndingHand; }
	virtual int GetSuit() { return m_nSuit; }
	virtual int GetSecondSuit() { return m_nSuit2; }
	virtual CString GetFullDescription() = 0;

	// basic functions
	virtual PlayResult Perform(CPlayEngine& playEngine, CCombinedHoldings& combinedHand,
							   CCardLocation& cardLocation, CGuessedHandHoldings** ppGuessedHands,
							   CPlayerStatusDialog& status, CCard*& pPlayCard);
	virtual void Init();
	virtual BOOL IsGuranteedWinner() { return (m_nPlayProspect == PP_GUARANTEED_WINNER); }
	virtual BOOL IsWinner() { return (m_nPlayProspect >= PP_LIKELY_WINNER); }
	virtual BOOL IsPlayUsable(const CCombinedHoldings& combinedHand, const CPlayEngine& playEngine);
	virtual int GetPlayProspect() { return m_nPlayProspect; }
	virtual PlayResult GetStatus() { return m_nStatusCode; }
	virtual int GetPlayType() { return m_nPlayType; }
	virtual BOOL IsOpportunistic() { return m_bOpportunistic; }
	virtual LPCTSTR GetPlayTypeName();
	// NOTE(review): the "Postrerequisite" spellings below are kept as-is for
	// API compatibility with existing callers.
	virtual int GetNumPrerequisites() { return m_numPrerequisites; }
	virtual int GetNumPostrerequisites() { return m_numPostrequisites; }
	virtual CPlay* GetPrerequisite() { return m_pPrerequisite; }
	virtual CPlay* GetPostrerequisite() { return m_pPostrequisite; }
	virtual CPlayList* GetPrerequisiteList() { return m_pPrerequisiteList; }
	virtual CPlayList* GetPostrerequisiteList() { return m_pPostrequisiteList; }
	virtual BOOL RequiresCard(CCard* pCard);
	virtual int LookupORCard(CCard* pCard);
	virtual int LookupORCard2(CCard* pCard);
	//
	virtual int GetNumKeyCards();
	virtual int GetNumOrKeyCards();
	virtual int GetNumOrKeyCards2();
	virtual CCardList* GetKeyCardsList() { return m_pKeyCardsList; }
	virtual CCardList* GetOrKeyCardsList() { return m_pOrKeyCardsList; }
	virtual CCardList* GetOrKeyCardsList2() { return m_pOrKeyCardsList2; }
	virtual CCardList* GetEnemyKeyCardsList() { return m_pEnemyKeyCardsList; }
	virtual CCardList* GetEnemyOrKeyCardsList() { return m_pEnemyOrKeyCardsList; }
	virtual CCard* GetConsumedCard() { return m_pConsumedCard; }
	virtual CCard* GetTargetCard() { return m_pTargetCard; }
	virtual CCardList* GetTargetCardsList() { return m_pTargetCardsList; }
	virtual CCardList* GetRequiredPlayedCardsList() { return m_pRequiredPlayedCardsList; }
	virtual LPCTSTR GetName() { return m_strName; }
	virtual LPCTSTR GetDescription() { return m_strDescription; }
	virtual int UsesUpEntry() { return ENTRY_NONE; }
	//
	virtual void Reset();

	// static functions
	static void ClassInitialize();
	static void ClassTerminate();

// protected routines
protected:
	virtual void SetStatus(PlayResult nStatus) { m_nStatusCode = nStatus; }

// protected data
protected:
	CString m_strName; // play short name
	CString m_strDescription; // play description
	PlayType m_nPlayType; // play type
	BOOL m_bOpportunistic; // opportunistic (i.e., must play now)
	int m_nID; // unique play ID
	int m_nPlayProspect; // play prospect
	int m_nSuit; // play suit
	int m_nSuit2; // second suit (if appropriate)
	PlayResult m_nStatusCode; // current status
	int m_nTargetHand; // key hand for the play
	int m_nStartingHand; // starting hand for the play
	int m_nEndingHand; // ending hand for the play
	int m_numPrerequisites; // # antecendents
	int m_numPostrequisites; // # postcedents
	CPlay* m_pPrerequisite; // required antecendent
	CPlay* m_pPostrequisite; // required postcedent
	CPlayList* m_pPrerequisiteList; // list of required antecendents
	CPlayList* m_pPostrequisiteList; // list of required postcedents
	//
	CCard* m_pConsumedCard; // card that will be used up, if any
	CCard* m_pTargetCard; // enemy card that is the target of the play
	CCardList* m_pTargetCardsList; // enemy target cards
	CCardList* m_pKeyCardsList; // key cards required for the play
	CCardList* m_pOrKeyCardsList; // one or more of these key cards are rq'd
	CCardList* m_pOrKeyCardsList2; // one or more of these key cards are also rq'd
	CCardList* m_pEnemyKeyCardsList; // key cards required to be held by opponents
	CCardList* m_pEnemyOrKeyCardsList; // one or more cards rq'd held by opponents
	CCardList* m_pRequiredPlayedCardsList; // cards that must already have ben played

	// play ID counter shared by all plays (guarded by the critical section)
	static CRITICAL_SECTION m_csPlayIDLock;
	static int m_nPlayIDCounter;

	std::shared_ptr<AppInterface> app_;

// construction/destruction
public:
	CPlay(std::shared_ptr<AppInterface> app, PlayType nPlayType, int nTargetHand = IN_EITHER, int nSuit = NONE, PlayProspect nPlayProspect = PP_UNKNOWN, BOOL bOpportunistic = FALSE);
	virtual ~CPlay();
};
#endif
| Jauhen/EasyBridge | src/engine/play/Play.h | C | gpl-2.0 | 6,911 |
#!/usr/bin/ruby
#
# Copyright Red Hat, Inc. 2011
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2, or (at your option) any
# later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; see the file COPYING. If not, write to the
# Free Software Foundation, Inc., 675 Mass Ave, Cambridge,
# MA 02139, USA.
#
# Ruby Wrapper for RHN Satellite
# Andrew Nelson anelson@redhat.com 2-9-12
# Example usage:
# sat=Satellite.new
# sat.login("user")
# p sat.call("channel.listAllChannels")
require 'xmlrpc/client'
require 'digest'
# Thin wrapper around XMLRPC::Client for the RHN Satellite API.
# Adds session handling (with optional on-disk caching), password prompting
# and package downloading with progress dots.
class Satellite < XMLRPC::Client

  # Raised when a non-existent API handler name is invoked.
  class InvalidFunctionCall < XMLRPC::FaultException
  end

  # When true, the session token is cached in #session_file between runs.
  attr_accessor :cache_session

  def initialize(host="localhost", path="/rpc/api")
    # XMLRPC::Client.new(host, path, port, proxy_host, proxy_port, user, password, use_ssl, timeout)
    super(host, path, nil, nil, nil, nil, nil, true)
    @host = host
    @user = nil
    @session = nil
    @cache_session = false
    @session_file_override = nil
    # Satellite boxes commonly use self-signed certs, so disable SSL
    # certificate verification on the underlying HTTP object.
    # BUG FIX: the original called bare `super` again here, re-running the
    # parent initializer just to reach @http; poke the ivar on self instead.
    instance_variable_get(:@http).
      instance_variable_set(:@verify_mode, OpenSSL::SSL::VERIFY_NONE)
  end

  # Path of the session cache file; derived from host and user unless
  # explicitly overridden via #session_file=.
  def session_file
    if @session_file_override.nil?
      "~/rhnsat-#{@host}-#{@user}-session"
    else
      @session_file_override
    end
  end

  def session_file=(path)
    @session_file_override = path
  end

  alias :call_super :call
  # Like XMLRPC::Client#call, but transparently prepends the session token
  # (once logged in) and maps "handler not found" faults to InvalidFunctionCall.
  def call(method, *params)
    args = @session.nil? ? params : [@session, *params]
    call_super(method, *args)
  rescue XMLRPC::FaultException => e
    if e.message =~ /The specified handler cannot be found/
      raise InvalidFunctionCall.new(e.faultCode, "Invalid function call: #{method}")
    else
      raise e
    end
  end

  alias :call_async_super :call_async
  # Asynchronous variant of #call with the same session/fault handling.
  def call_async(method, *params)
    args = @session.nil? ? params : [@session, *params]
    call_async_super(method, *args)
  rescue XMLRPC::FaultException => e
    if e.message =~ /The specified handler cannot be found/
      raise InvalidFunctionCall.new(e.faultCode, "Invalid function call: #{method}")
    else
      raise e
    end
  end

  # Prompt for a password with terminal echo disabled.
  # (Highline would be nicer, but it is not shipped with RHEL.)
  def get_password(prompt="Password: ")
    begin
      system "stty -echo"
      printf prompt
      password = STDIN.gets.chomp
    ensure
      # Always restore echo and emit a newline, even on error/interrupt
      puts
      system "stty echo"
    end
    password
  end

  # Log in as +user+, reusing a cached session when cache_session is set.
  # Prompts for the password if none is given and no valid cache exists.
  def login(user, pass=nil)
    @user = user
    path = File.expand_path(session_file)
    if @cache_session
      begin
        # BUG FIX: use the block form so the cache file handle is closed
        session = File.open(path, "r") { |f| f.gets }
        # Cheap probe call: raises if the cached session has expired
        call_super("org.listOrgs", session)
      rescue => e
        puts "Valid Session Cache not found."
        session = nil
      end
      @session = session
    end
    if @session.nil?
      pass = get_password("Password for #{user}: ") if pass.nil?
      @session = call_super("auth.login", user, pass)
    end
    if @cache_session
      File.open(path, 'w') { |f| f.puts(@session) }
    end
  end

  # Download package +pkg_id+ to +path+.
  # Options: :count_divisor (progress granularity), :show_dot (progress dots),
  # :overwrite (default true; when false an existing file of the right size is
  # kept), :verify (fetch package details to check the size).
  def get_package(pkg_id, path, options={})
    options[:count_divisor] ||= 100
    options[:show_dot] = options[:show_dot].nil? ? true : options[:show_dot]
    options[:overwrite] = options[:overwrite].nil? ? true : options[:overwrite]

    pkg_info = nil
    if !options[:overwrite] || options[:verify]
      pkg_info = call("packages.getDetails", pkg_id)
    end

    # BUG FIX: File.exists? was removed in Ruby 3.2; use File.exist?
    if !options[:overwrite] && File.exist?(path)
      if File.size(path) != pkg_info["size"].to_i
        printf "Incorrect size, redownloading "
        File.delete(path)
      else
        puts "Skipping"
        return true
      end
    end

    uri = URI.parse(call("packages.getPackageUrl", pkg_id))
    f = File.open(path, "wb")
    finished = false
    Net::HTTP.start(uri.host, uri.port) do |http|
      begin
        http.request_get(uri.path) do |resp|
          count = 0
          resp.read_body do |segment|
            count += 1
            putc "." if (count % options[:count_divisor] == 0) && options[:show_dot]
            f.write(segment)
          end
          puts
        end
        finished = true
      ensure
        # Close the output and remove partial downloads on failure
        f.close
        File.delete(path) unless finished
      end
    end
  end
end
#Usage Example:
#require 'pp'
#sat=Satellite.new
#sat.cache_session=true
#sat.login(USER)
#pp sat.call("channel.listAllChannels")
| red-tux/rhn-satellite | ruby/satellite.rb | Ruby | gpl-2.0 | 5,571 |
/* Common code for ARM software single stepping support.
Copyright (C) 1988-2015 Free Software Foundation, Inc.
This file is part of GDB.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. */
#ifndef ARM_GET_NEXT_PCS_H
#define ARM_GET_NEXT_PCS_H 1

/* Forward declaration.  */
struct arm_get_next_pcs;

/* get_next_pcs operations.  Target-specific callbacks supplied by the
   embedder (native GDB or gdbserver) so the common code stays
   host-agnostic.  */
struct arm_get_next_pcs_ops
{
  /* Read LEN bytes at MEMADDR as an unsigned integer in BYTE_ORDER.  */
  ULONGEST (*read_mem_uint) (CORE_ADDR memaddr, int len, int byte_order);
  /* Return the next PC following a syscall instruction at PC.  */
  CORE_ADDR (*syscall_next_pc) (struct arm_get_next_pcs *self, CORE_ADDR pc);
  /* Strip non-address bits from VAL.  */
  CORE_ADDR (*addr_bits_remove) (struct arm_get_next_pcs *self, CORE_ADDR val);
  /* Non-zero when the current execution mode is Thumb.  */
  int (*is_thumb) (struct arm_get_next_pcs *self);
};

/* Context for a get_next_pcs call on ARM.  */
struct arm_get_next_pcs
{
  /* Operations implementations.  */
  struct arm_get_next_pcs_ops *ops;

  /* Byte order for data.  */
  int byte_order;

  /* Byte order for code.  */
  int byte_order_for_code;

  /* Thumb2 breakpoint instruction.  */
  const gdb_byte *arm_thumb2_breakpoint;

  /* Register cache.  */
  struct regcache *regcache;
};

/* Initialize arm_get_next_pcs.  */
void arm_get_next_pcs_ctor (struct arm_get_next_pcs *self,
			    struct arm_get_next_pcs_ops *ops,
			    int byte_order,
			    int byte_order_for_code,
			    const gdb_byte *arm_thumb2_breakpoint,
			    struct regcache *regcache);

/* Find the next possible PCs after the current instruction executes.  */
VEC (CORE_ADDR) *arm_get_next_pcs (struct arm_get_next_pcs *self,
				   CORE_ADDR pc);

/* Find the next possible PCs for thumb mode.  */
VEC (CORE_ADDR) *thumb_get_next_pcs_raw (struct arm_get_next_pcs *self,
					 CORE_ADDR pc);

/* Find the next possible PCs for arm mode.  */
VEC (CORE_ADDR) *arm_get_next_pcs_raw (struct arm_get_next_pcs *self,
				       CORE_ADDR pc);
#endif /* ARM_GET_NEXT_PCS_H */
| DSMan195276/protura-binutils | gdb/arch/arm-get-next-pcs.h | C | gpl-2.0 | 2,401 |
ckage net.sf.jabref.bibtex;
import java.io.IOException;
import java.io.StringWriter;
import net.sf.jabref.Globals;
import net.sf.jabref.JabRefPreferences;
import net.sf.jabref.logic.bibtex.BibEntryWriter;
import net.sf.jabref.logic.bibtex.LatexFieldFormatter;
import net.sf.jabref.model.database.BibDatabaseMode;
import net.sf.jabref.model.entry.BibEntry;
import org.junit.AfterClass;
import org.junit.Before;
import org.junit.BeforeClass;
import org.junit.Test;
import static org.junit.Assert.assertEquals;
public class ValidacaoAnoPrimeiroTeste {
    private BibEntryWriter writer;
    // Snapshot of the global preferences, restored after the test class runs.
    // NOTE(review): this stores the same object reference that setUp assigns
    // to Globals.prefs, not a copy -- confirm restore actually works.
    private static JabRefPreferences backup;

    @BeforeClass
    public static void setUp() {
        Globals.prefs = JabRefPreferences.getInstance();
        backup = Globals.prefs;
    }

    @AfterClass
    public static void tearDown() {
        Globals.prefs.overwritePreferences(backup);
    }

    @Before
    public void setUpWriter() {
        writer = new BibEntryWriter(new LatexFieldFormatter(), true);
    }

    // article entry with a year later than the current one;
    // each test serializes the entry and compares the whitespace-stripped output
    @Test
    public void testeValidacaoAnoMaiorArticle() throws IOException {
        StringWriter stringWriter = new StringWriter();

        BibEntry entry = new BibEntry("1234", "article");
        entry.setField("year", "2027");
        writer.write(entry, stringWriter, BibDatabaseMode.BIBTEX);
        String actual = stringWriter.toString();

        String expected = Globals.NEWLINE + "@Article{," + Globals.NEWLINE + "  year = {2027}," + Globals.NEWLINE + "}"
                + Globals.NEWLINE;

        assertEquals(expected.replaceAll("\\s+", ""), actual.replaceAll("\\s+", ""));
    }

    // article entry with a year inside the expected range
    @Test
    public void testeValidacaoAnoEsperadoArticle() throws IOException {
        StringWriter stringWriter = new StringWriter();

        BibEntry entry = new BibEntry("1234", "article");
        entry.setField("year", "2010");
        writer.write(entry, stringWriter, BibDatabaseMode.BIBTEX);
        String actual = stringWriter.toString();

        String expected = Globals.NEWLINE + "@Article{," + Globals.NEWLINE + "  year = {2010}," + Globals.NEWLINE + "}"
                + Globals.NEWLINE;

        assertEquals(expected.replaceAll("\\s+", ""), actual.replaceAll("\\s+", ""));
    }

    // article entry with a negative year (less than 0)
    @Test
    public void testeValidacaoAnoMenorArticle() throws IOException {
        StringWriter stringWriter = new StringWriter();

        BibEntry entry = new BibEntry("1234", "article");
        entry.setField("year", "-10");
        writer.write(entry, stringWriter, BibDatabaseMode.BIBTEX);
        String actual = stringWriter.toString();

        String expected = Globals.NEWLINE + "@Article{," + Globals.NEWLINE + "  year = {-10}," + Globals.NEWLINE + "}"
                + Globals.NEWLINE;

        assertEquals(expected.replaceAll("\\s+", ""), actual.replaceAll("\\s+", ""));
    }

    // book entry with a year later than the current one
    @Test
    public void testeValidacaoAnoMaiorBook() throws IOException {
        StringWriter stringWriter = new StringWriter();

        BibEntry entry = new BibEntry("1234", "book");
        entry.setField("year", "2027");
        writer.write(entry, stringWriter, BibDatabaseMode.BIBTEX);
        String actual = stringWriter.toString();

        String expected = Globals.NEWLINE + "@Book{," + Globals.NEWLINE + "  year = {2027}," + Globals.NEWLINE + "}"
                + Globals.NEWLINE;

        assertEquals(expected.replaceAll("\\s+", ""), actual.replaceAll("\\s+", ""));
    }

    // book entry with a year inside the expected range
    @Test
    public void testeValidacaoAnoEsperadoBook() throws IOException {
        StringWriter stringWriter = new StringWriter();

        BibEntry entry = new BibEntry("1234", "book");
        entry.setField("year", "2010");
        writer.write(entry, stringWriter, BibDatabaseMode.BIBTEX);
        String actual = stringWriter.toString();

        String expected = Globals.NEWLINE + "@Book{," + Globals.NEWLINE + "  year = {2010}," + Globals.NEWLINE + "}"
                + Globals.NEWLINE;

        assertEquals(expected.replaceAll("\\s+", ""), actual.replaceAll("\\s+", ""));
    }
//teste para o tipo book com data menor que 0
@Test
public void testeValidacaoAnoMenorBook() throws IOException {
StringWriter stringWriter = new StringWriter();
BibEntry entry = new BibEntry("1234", "book");
entry.setField("year", "-10");
writer.write(entry, stringWriter, BibDatabaseMode.BIBTEX);
String actual = stringWriter.toString();
String expected = Globals.NEWLINE + "@Book{," + Globals.NEWLINE + " year = {-10}," + Globals.NEWLINE + "}"
+ Globals.NEWLINE;
assertEquals(expected.replaceAll("\\s+", ""), actual.replaceAll("\\s+", ""));
}
}
| iksmada/DC-UFSCar-ES2-201601-GrupoDilema | src/test/java/net/sf/jabref/bibtex/ValidacaoAnoPrimeiroTeste.java | Java | gpl-2.0 | 4,937 |
/* Offset the page content below the fixed 50px top navbar. */
body{
    padding-top: 50px;
    position: relative;
}
pre {
    tab-size: 8;
}
/* Slide-out side panel behaviour, mobile widths only. */
@media screen and (max-width: 768px) {
    /* Page wrapper that slides right to reveal the panel. */
    .side-collapse-container{
        width:100%;
        position:relative;
        left:0;
        transition:left .4s;
    }
    .side-collapse-container.out{
        left:200px;
    }
    /* The panel itself: fixed under the navbar, animates its width. */
    .side-collapse {
        top:50px;
        bottom:0;
        left:0;
        width:200px;
        position:fixed;
        overflow:hidden;
        transition:width .4s;
    }
    /* Collapsed state: zero width hides the panel. */
    .side-collapse.in {
        width:0;
    }
} | cleberpv/SistemaCalc | assets/header.css | CSS | gpl-2.0 | 727 |
package br.edu.utfpr.eventos.repository;
import java.util.List;
import org.springframework.data.jpa.repository.JpaRepository;
import br.edu.utfpr.eventos.model.Cidade;
import br.edu.utfpr.eventos.model.Estado;
/**
 * Spring Data JPA repository for {@link Cidade} (city) entities.
 */
public interface CidadeRepository extends JpaRepository<Cidade, Long> {
    /** Returns all cities belonging to the given {@link Estado} (state); derived query. */
    List<Cidade> findByEstado(Estado estado);
}
| BoraTomaUma/Eventos | src/main/java/br/edu/utfpr/eventos/repository/CidadeRepository.java | Java | gpl-2.0 | 331 |
using UnityEngine;
using System.Collections;
/// <summary>
/// Keeps the given tk2dCamera's position inside <see cref="bounds"/>, taking the
/// visible screen extents into account. A bounds with non-positive size disables
/// the clamping entirely.
/// </summary>
public class CameraBound : MonoBehaviour {
    public tk2dCamera mainCamera;

    [System.NonSerialized]
    public Bounds bounds;

    // Use this for initialization
    void Start() {
    }

    // Update is called once per frame
    void Update() {
        // Clamping is only active when a valid (positive-sized) bounds is set.
        if(bounds.size.x <= 0.0f || bounds.size.y <= 0.0f)
            return;

        Rect view = mainCamera.ScreenExtents;
        float halfWidth = view.width * 0.5f;
        float halfHeight = view.height * 0.5f;

        Vector3 p = transform.position;

        // Push the camera back inside along X; the lower edge wins if the
        // bounds are narrower than the screen.
        if(p.x - halfWidth < bounds.min.x)
            p.x = bounds.min.x + halfWidth;
        else if(p.x + halfWidth > bounds.max.x)
            p.x = bounds.max.x - halfWidth;

        // Same treatment along Y.
        if(p.y - halfHeight < bounds.min.y)
            p.y = bounds.min.y + halfHeight;
        else if(p.y + halfHeight > bounds.max.y)
            p.y = bounds.max.y - halfHeight;

        transform.position = p;
    }
}
| ddionisio/WreckingWhore | Assets/Scripts/CameraBound.cs | C# | gpl-2.0 | 1,046 |
from __future__ import print_function, division, absolute_import
import difflib
import locale
import os
import pprint
import six
import sys
import tempfile
# Prefer unittest2 (needed on very old Pythons); fall back to stdlib unittest.
try:
    import unittest2 as unittest
except ImportError:
    import unittest
# just log py.warnings (and pygtk warnings in particular)
import logging
try:
    # 2.7+
    logging.captureWarnings(True)
except AttributeError:
    pass
from mock import Mock, MagicMock, NonCallableMock, patch, mock_open
from contextlib import contextmanager
from . import stubs
import subscription_manager.injection as inj
import subscription_manager.managercli
from rhsmlib.services import config
# use instead of the normal pid file based ActionLock
from threading import RLock
# Name of the builtin ``open`` as a patch() target; it moved between py2 and py3.
if six.PY2:
    OPEN_FUNCTION = '__builtin__.open'
else:
    OPEN_FUNCTION = 'builtins.open'
@contextmanager
def open_mock(content=None, **kwargs):
    """Patch the builtin ``open`` so it yields a mock file-like handle.

    The handle reads ``content`` back, supports iteration over its lines,
    and records anything written to it; the recorded text is available via
    the handle's ``content_out()`` helper.
    """
    captured = six.StringIO()
    opener = mock_open(read_data=content)
    with patch(OPEN_FUNCTION, opener, create=True, **kwargs) as patched:
        source = six.StringIO(content)
        handle = patched.return_value
        handle.write = lambda x: captured.write(x)
        handle.content_out = lambda: captured.getvalue()
        handle.__iter__ = lambda x: iter(source.readlines())
        yield handle
@contextmanager
def open_mock_many(file_content_map=None, **kwargs):
    """
    Mock out access to one or many files opened using the builtin "open".

    Unknown paths raise the same "No such file or directory" error the real
    ``open`` would. Text written to a mocked handle can be read back via the
    handle's ``content_out()`` helper.

    NOTE(review): the caller's dict is modified in place below — each value is
    replaced by a (mock, contents, out-buffer) triple; confirm callers do not
    reuse the mapping afterwards.

    :param file_content_map: A dictionary of path : file_contents
    :type file_content_map: dict[str,str]
    :param kwargs: passed through to mock.patch()
    :return: yields the patched ``open`` mock
    """
    file_content_map = file_content_map or {}
    # Replace each value with (mock_open mock, original contents, write sink).
    for key, value in file_content_map.items():
        file_content_map[key] = (mock_open(read_data=value), value, six.StringIO())

    def get_file(path, *args, **kwargs):
        """
        The side effect that will allow us to "open" the right "file".
        Not for use outside open_mock_many.
        :param path: The path which is passed in to the builtin ``open``
        :param args: ignored (mode, buffering, ...)
        :param kwargs: ignored
        :return: the configured mock handle for ``path``
        """
        try:
            rv, file_contents, content_out = file_content_map[path]
        except KeyError:
            # Mirror the real errno 2 failure of open() on a missing file.
            if six.PY2:
                raise IOError(2, 'No such file or directory')
            else:
                raise OSError(2, 'No such file or directory')
        rv = rv.return_value
        rv.write = lambda x: content_out.write(x)
        rv.content_out = lambda: content_out.getvalue()
        return rv

    with patch(OPEN_FUNCTION, **kwargs) as mo:
        mo.side_effect = get_file
        yield mo
@contextmanager
def temp_file(content, *args, **kwargs):
    """Create a temporary file containing ``content`` and yield its path.

    The file is written, closed, and removed again when the context exits,
    including on error. Extra args/kwargs are passed to
    ``tempfile.NamedTemporaryFile`` (``delete`` is forced off so the path
    stays valid while the context is open).
    """
    kwargs['delete'] = False
    kwargs.setdefault('prefix', 'sub-man-test')
    path = None
    try:
        fh = tempfile.NamedTemporaryFile(mode='w+', *args, **kwargs)
        path = fh.name
        fh.write(content)
        fh.close()
        yield path
    finally:
        # Only unlink if the file was actually created. The original code
        # referenced ``fh.name`` unconditionally, so a failure inside
        # NamedTemporaryFile() was masked by a NameError here.
        if path is not None:
            os.unlink(path)
@contextmanager
def locale_context(new_locale, category=None):
    """Temporarily switch the process locale, restoring it on exit.

    The previous setting is read from ``category`` (default ``LC_CTYPE``)
    and the switch/restore is applied to ``category`` (default ``LC_ALL``).
    """
    saved = locale.getlocale(category if category else locale.LC_CTYPE)
    effective_category = category if category else locale.LC_ALL
    locale.setlocale(effective_category, new_locale)
    try:
        yield
    finally:
        locale.setlocale(effective_category, saved)
class FakeLogger(object):
    """Test double for a logger that remembers only the most recent message.

    ``msg`` holds the last string passed to any level method,
    ``logged_exception`` the last exception handed to ``exception()``, and
    ``expected_msg`` is scratch space for tests to record what they expect.
    """

    def __init__(self):
        self.expected_msg = ""
        self.msg = None
        self.logged_exception = None

    def _remember(self, buf):
        # Every level method behaves identically: keep the last message only.
        self.msg = buf

    def debug(self, buf, *args, **kwargs):
        self._remember(buf)

    def error(self, buf, *args, **kwargs):
        self._remember(buf)

    def info(self, buf, *args, **kwargs):
        self._remember(buf)

    def warning(self, buf, *args, **kwargs):
        self._remember(buf)

    def exception(self, e, *args, **kwargs):
        self.logged_exception = e

    def set_expected_msg(self, msg):
        self.expected_msg = msg
class FakeException(Exception):
    # Test-double exception carrying an optional message payload in ``msg``.
    def __init__(self, msg=None):
        self.msg = msg

    def __str__(self):
        # repr() so None renders as 'None' and strings keep their quotes.
        return repr(self.msg)
class Matcher(object):
    """Equality helper for mock-call assertions.

    Wraps an object together with a two-argument comparison function and
    delegates ``==`` to that function, so a Matcher can stand in for an
    expected argument in ``assert_called_with``-style checks.
    """

    def __init__(self, compare, some_obj):
        self.compare = compare
        self.some_obj = some_obj

    def __eq__(self, other):
        # Defer entirely to the injected comparator.
        return self.compare(self.some_obj, other)

    @staticmethod
    def set_eq(first, second):
        """Order-insensitive comparison for sets that were cast to (or built as) lists."""
        return set(first) == set(second)
class SubManFixture(unittest.TestCase):
    """
    Can be extended by any subscription manager test case to make
    sure nothing on the actual system is read/touched, and appropriate
    mocks/stubs are in place.
    """
    # NOTE(review): the docstring above previously sat *after* set_facts() as
    # a bare string expression, where it was a no-op; it is now the real
    # class docstring (visible via help()/__doc__).

    def set_facts(self):
        """Override if you need to set facts for a test."""
        return {"mock.facts": "true"}

    def setUp(self):
        """Install stubs/mocks so no test touches the real system."""
        # No matter what, stop all patching (even if we have a failure in setUp itself)
        self.addCleanup(patch.stopall)

        # Never attempt to use the actual managercli.cfg which points to a
        # real file in etc.
        self.mock_cfg_parser = stubs.StubConfig()

        original_conf = subscription_manager.managercli.conf

        def unstub_conf():
            subscription_manager.managercli.conf = original_conf

        # Mock makes it damn near impossible to mock a module attribute (which we shouldn't be using
        # in the first place because it's terrible) so we monkey-patch it ourselves.
        # TODO Fix this idiocy by not reading the damn config on module import
        subscription_manager.managercli.conf = config.Config(self.mock_cfg_parser)
        self.addCleanup(unstub_conf)

        facts_host_patcher = patch('rhsmlib.dbus.facts.FactsClient', auto_spec=True)
        self.mock_facts_host = facts_host_patcher.start()
        self.mock_facts_host.return_value.GetFacts.return_value = self.set_facts()

        # By default mock that we are registered. Individual test cases
        # can override if they are testing disconnected scenario.
        id_mock = NonCallableMock(name='FixtureIdentityMock')
        id_mock.exists_and_valid = Mock(return_value=True)
        id_mock.uuid = 'fixture_identity_mock_uuid'
        id_mock.name = 'fixture_identity_mock_name'
        id_mock.cert_dir_path = "/not/a/real/path/to/pki/consumer/"
        id_mock.keypath.return_value = "/not/a/real/key/path"
        id_mock.certpath.return_value = "/not/a/real/cert/path"

        # Don't really care about date ranges here:
        self.mock_calc = NonCallableMock()
        self.mock_calc.calculate.return_value = None

        # Avoid trying to read real /etc/yum.repos.d/redhat.repo
        self.mock_repofile_path_exists_patcher = patch('subscription_manager.repolib.YumRepoFile.path_exists')
        mock_repofile_path_exists = self.mock_repofile_path_exists_patcher.start()
        mock_repofile_path_exists.return_value = True

        inj.provide(inj.IDENTITY, id_mock)
        inj.provide(inj.PRODUCT_DATE_RANGE_CALCULATOR, self.mock_calc)

        # Stub out every cache the injection framework knows about.
        inj.provide(inj.ENTITLEMENT_STATUS_CACHE, stubs.StubEntitlementStatusCache())
        inj.provide(inj.POOL_STATUS_CACHE, stubs.StubPoolStatusCache())
        inj.provide(inj.PROD_STATUS_CACHE, stubs.StubProductStatusCache())
        inj.provide(inj.CONTENT_ACCESS_MODE_CACHE, stubs.StubContentAccessModeCache())
        inj.provide(inj.SUPPORTED_RESOURCES_CACHE, stubs.StubSupportedResourcesCache())
        inj.provide(inj.SYSPURPOSE_VALID_FIELDS_CACHE, stubs.StubSyspurposeValidFieldsCache())
        inj.provide(inj.CURRENT_OWNER_CACHE, stubs.StubCurrentOwnerCache)
        inj.provide(inj.OVERRIDE_STATUS_CACHE, stubs.StubOverrideStatusCache())
        inj.provide(inj.RELEASE_STATUS_CACHE, stubs.StubReleaseStatusCache())
        inj.provide(inj.AVAILABLE_ENTITLEMENT_CACHE, stubs.StubAvailableEntitlementsCache())
        inj.provide(inj.PROFILE_MANAGER, stubs.StubProfileManager())

        # By default set up an empty stub entitlement and product dir.
        # Tests need to modify or create their own but nothing should hit
        # the system.
        self.ent_dir = stubs.StubEntitlementDirectory()
        inj.provide(inj.ENT_DIR, self.ent_dir)
        self.prod_dir = stubs.StubProductDirectory()
        inj.provide(inj.PROD_DIR, self.prod_dir)

        # Installed products manager needs PROD_DIR injected first
        inj.provide(inj.INSTALLED_PRODUCTS_MANAGER, stubs.StubInstalledProductsManager())

        self.stub_cp_provider = stubs.StubCPProvider()
        self._release_versions = []
        self.stub_cp_provider.content_connection.get_versions = self._get_release_versions

        inj.provide(inj.CP_PROVIDER, self.stub_cp_provider)
        inj.provide(inj.CERT_SORTER, stubs.StubCertSorter())

        # setup and mock the plugin_manager
        plugin_manager_mock = MagicMock(name='FixturePluginManagerMock')
        plugin_manager_mock.runiter.return_value = iter([])
        inj.provide(inj.PLUGIN_MANAGER, plugin_manager_mock)
        inj.provide(inj.DBUS_IFACE, Mock(name='FixtureDbusIfaceMock'))

        pooltype_cache = Mock()
        inj.provide(inj.POOLTYPE_CACHE, pooltype_cache)
        # don't use file based locks for tests
        inj.provide(inj.ACTION_LOCK, RLock)

        self.stub_facts = stubs.StubFacts()
        inj.provide(inj.FACTS, self.stub_facts)

        content_access_cache_mock = MagicMock(name='ContentAccessCacheMock')
        inj.provide(inj.CONTENT_ACCESS_CACHE, content_access_cache_mock)

        self.dbus_patcher = patch('subscription_manager.managercli.CliCommand._request_validity_check')
        self.dbus_patcher.start()

        # No tests should be trying to connect to any configure or test server
        # so really, everything needs this mock. May need to be in __init__, or
        # better, all test classes need to use SubManFixture
        self.is_valid_server_patcher = patch("subscription_manager.managercli.is_valid_server_info")
        is_valid_server_mock = self.is_valid_server_patcher.start()
        is_valid_server_mock.return_value = True

        # No tests should be trying to test the proxy connection
        # so really, everything needs this mock. May need to be in __init__, or
        # better, all test classes need to use SubManFixture
        self.test_proxy_connection_patcher = patch("subscription_manager.managercli.CliCommand.test_proxy_connection")
        test_proxy_connection_mock = self.test_proxy_connection_patcher.start()
        test_proxy_connection_mock.return_value = True

        self.syncedstore_patcher = patch('subscription_manager.syspurposelib.SyncedStore')
        syncedstore_mock = self.syncedstore_patcher.start()

        set_up_mock_sp_store(syncedstore_mock)

        self.files_to_cleanup = []

    def tearDown(self):
        """Close any tempfiles registered via write_tempfile()."""
        if not hasattr(self, 'files_to_cleanup'):
            return
        for f in self.files_to_cleanup:
            # Assuming these are tempfile.NamedTemporaryFile, created with
            # the write_tempfile() method in this class.
            f.close()

    def write_tempfile(self, contents):
        """
        Write out a tempfile and append it to the list of those to be
        cleaned up in tearDown.
        """
        fid = tempfile.NamedTemporaryFile(mode='w+', suffix='.tmp')
        fid.write(contents)
        fid.seek(0)
        self.files_to_cleanup.append(fid)

        return fid

    def set_consumer_auth_cp(self, consumer_auth_cp):
        """Replace the injected provider's consumer-auth connection."""
        cp_provider = inj.require(inj.CP_PROVIDER)
        cp_provider.consumer_auth_cp = consumer_auth_cp

    def get_consumer_cp(self):
        """Return the consumer-auth connection from the injected provider."""
        cp_provider = inj.require(inj.CP_PROVIDER)
        consumer_cp = cp_provider.get_consumer_auth_cp()
        return consumer_cp

    # The ContentConnection used for reading release versions from
    # the cdn. The injected one uses this.
    def _get_release_versions(self, listing_path):
        return self._release_versions

    # For changing injection consumer id to one that fails "is_valid"
    def _inject_mock_valid_consumer(self, uuid=None):
        """For changing injected consumer identity to one that passes is_valid()

        Returns the injected identity if it need to be examined.
        """
        identity = NonCallableMock(name='ValidIdentityMock')
        identity.uuid = uuid or "VALIDCONSUMERUUID"
        identity.is_valid = Mock(return_value=True)
        identity.cert_dir_path = "/not/a/real/path/to/pki/consumer/"
        inj.provide(inj.IDENTITY, identity)
        return identity

    def _inject_mock_invalid_consumer(self, uuid=None):
        """For changing injected consumer identity to one that fails is_valid()

        Returns the injected identity if it need to be examined.
        """
        invalid_identity = NonCallableMock(name='InvalidIdentityMock')
        invalid_identity.is_valid = Mock(return_value=False)
        invalid_identity.uuid = uuid or "INVALIDCONSUMERUUID"
        invalid_identity.cert_dir_path = "/not/a/real/path/to/pki/consumer/"
        inj.provide(inj.IDENTITY, invalid_identity)
        return invalid_identity

    # use our naming convention here to make it clear
    # this is our extension. Note that python 2.7 adds a
    # assertMultilineEquals that assertEqual of strings does
    # automatically
    def assert_string_equals(self, expected_str, actual_str, msg=None):
        """Fail with a unified diff when two multi-line strings differ."""
        if expected_str != actual_str:
            expected_lines = expected_str.splitlines(True)
            actual_lines = actual_str.splitlines(True)
            delta = difflib.unified_diff(expected_lines, actual_lines, "expected", "actual")
            message = ''.join(delta)

            if msg:
                message += " : " + msg

            self.fail("Multi-line strings are unequal:\n" + message)

    def assert_equal_dict(self, expected_dict, actual_dict):
        """Fail with a detailed report of missing/extra keys and unequal values."""
        mismatches = []
        missing_keys = []
        extra = []

        for key in expected_dict:
            if key not in actual_dict:
                missing_keys.append(key)
                continue
            if expected_dict[key] != actual_dict[key]:
                mismatches.append((key, expected_dict[key], actual_dict[key]))

        for key in actual_dict:
            if key not in expected_dict:
                extra.append(key)

        message = ""
        if missing_keys or extra:
            message += "Keys in only one dict: \n"
            if missing_keys:
                for key in missing_keys:
                    message += "actual_dict: %s\n" % key
            if extra:
                for key in extra:
                    message += "expected_dict: %s\n" % key
        if mismatches:
            message += "Unequal values: \n"
            for info in mismatches:
                message += "%s: %s != %s\n" % info

        # pprint the dicts
        message += "\n"
        message += "expected_dict:\n"
        message += pprint.pformat(expected_dict)
        message += "\n"
        message += "actual_dict:\n"
        message += pprint.pformat(actual_dict)

        if mismatches or missing_keys or extra:
            self.fail(message)

    def assert_items_equals(self, a, b):
        """Assert that two lists contain the same items regardless of order."""
        if sorted(a, key=lambda item: str(item)) != sorted(b, key=lambda item: str(item)):
            self.fail("%s != %s" % (a, b))
        return True
class Capture(object):
    """Context manager that tees sys.stdout/sys.stderr into string buffers.

    While the context is active, everything written to stdout/stderr is
    recorded and exposed through the ``out`` and ``err`` properties. Unless
    ``silent`` is True, output is still forwarded to the real streams.
    """

    class Tee(object):
        """File-like wrapper buffering writes and optionally forwarding them."""

        def __init__(self, stream, silent):
            self.buf = six.StringIO()
            self.stream = stream
            self.silent = silent

        def write(self, data):
            self.buf.write(data)
            if self.silent:
                return
            self.stream.write(data)

        def flush(self):
            # Nothing is buffered at this layer; present for the file protocol.
            pass

        def getvalue(self):
            return self.buf.getvalue()

        def isatty(self):
            return False

    def __init__(self, silent=False):
        self.silent = silent

    def __enter__(self):
        out_tee = self.Tee(sys.stdout, self.silent)
        err_tee = self.Tee(sys.stderr, self.silent)
        self.buffs = (out_tee, err_tee)
        # Remember the real streams so __exit__ can restore them.
        self.stdout = sys.stdout
        self.stderr = sys.stderr
        sys.stdout, sys.stderr = self.buffs
        return self

    @property
    def out(self):
        """Everything captured from stdout so far."""
        return self.buffs[0].getvalue()

    @property
    def err(self):
        """Everything captured from stderr so far."""
        return self.buffs[1].getvalue()

    def __exit__(self, exc_type, exc_value, traceback):
        sys.stdout = self.stdout
        sys.stderr = self.stderr
def set_up_mock_sp_store(mock_sp_store):
    """
    Sets up the mock syspurpose store with methods that are mock versions of the real deal.
    Allows us to test in the absence of the syspurpose module.
    This documents the essential expected behaviour of the methods subman relies upon
    from the syspurpose codebase.

    :param mock_sp_store: a Mock whose return_value is configured as the store
    :return: (mock_sp_store, contents-dict backing the store)
    """
    contents = {}
    mock_sp_store_contents = contents

    def set(item, value):
        contents[item] = value

    def read(path, raise_on_error=False):
        return mock_sp_store

    def unset(item):
        contents[item] = None

    def add(item, value):
        # Values accumulate in a list, without duplicates.
        current = contents.get(item, [])
        if value not in current:
            current.append(value)
        contents[item] = current

    def remove(item, value):
        current = contents.get(item)
        if current is not None and isinstance(current, list) and value in current:
            current.remove(value)

    def get_local_contents():
        return contents

    def get_cached_contents():
        return contents

    def update_local(data):
        # BUGFIX: replace the stored contents *in place* so every closure (and
        # the mock_sp_store_contents alias handed back to the caller) observes
        # the update. The previous ``global contents`` statement rebound an
        # unrelated module-level name and silently left this state untouched.
        contents.clear()
        contents.update(data)

    mock_sp_store.return_value.set = Mock(side_effect=set)
    mock_sp_store.return_value.read = Mock(side_effect=read)
    mock_sp_store.return_value.unset = Mock(side_effect=unset)
    mock_sp_store.return_value.add = Mock(side_effect=add)
    mock_sp_store.return_value.remove = Mock(side_effect=remove)
    mock_sp_store.return_value.local_contents = mock_sp_store_contents
    mock_sp_store.return_value.get_local_contents = Mock(side_effect=get_local_contents)
    mock_sp_store.return_value.update_local = Mock(side_effect=update_local)
    mock_sp_store.return_value.get_cached_contents = Mock(side_effect=get_cached_contents)

    return mock_sp_store, mock_sp_store_contents
| Lorquas/subscription-manager | test/fixture.py | Python | gpl-2.0 | 18,129 |
/* ---- Base resets and typography ---- */
html, body {
    padding: 0;
    margin: 0;
}
html {
    height: 100%;
}
body {
    background-color: #f3f3f3;
    font-family: Arial, Helvetica, Verdana;
    font-size: 14px;
    line-height: 22px;
    color: #666;
    min-height: 100%;
    position: relative;
    -webkit-text-size-adjust: none;
}
body * {
    text-shadow: none;
}
h1, h2, h3, h4, h5, h6 {
    line-height: 1;
    font-weight: bold;
    margin: 20px 0 10px 0;
}
h1, h2, h3 {
    font-size: 18px;
    color: rgba(55,179,73,1);
}
h4, h5, h6 {
    font-size: 16px;
}
p {
    margin: 0 0 10px 0;
}
a, a:link, a:active, a:visited, a:hover {
    text-decoration: none;
}
select,
input[type="text"] {
    background: #fff;
    border: 1px solid #ccc;
}
.mm-label.button {
    text-indent: 0 !important;
    padding: 20px 20px 40px 20px !important;
}
pre {
    font-size: 12px;
    width: 100%;
    overflow: auto;
    -webkit-overflow-scrolling: touch;
}
/* Hide plain nav elements; only the mmenu-enhanced menu is shown. */
nav:not(.mm-menu) {
    display: none;
}
/* ---- Fixed header / footer bars ---- */
.header,
.content,
.footer {
    text-align: center;
}
.header,
.footer {
    background: #777;
    font-size: 16px;
    font-weight: bold;
    color: #fff;
    line-height: 40px;
    -webkit-box-sizing: border-box;
    -moz-box-sizing: border-box;
    box-sizing: border-box;
    width: 100%;
    height: 40px;
    padding: 0 50px;
}
.header.mm-fixed-top {
    position: fixed;
    top: 0;
    left: 0;
}
/* Header icon links; icons are embedded as base64 PNG data URIs. */
.header a {
    background: center center no-repeat transparent;
    background-image: url( data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABkAAAAZCAYAAADE6YVjAAAAGXRFWHRTb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAADhJREFUeNpi/P//PwOtARMDHQBdLGFBYtMq3BiHT3DRPU4YR4NrNAmPJuHRJDyahEeT8Ii3BCDAAF0WBj5Er5idAAAAAElFTkSuQmCC );
    display: block;
    width: 40px;
    height: 40px;
    position: absolute;
    top: 0;
    left: 10px;
}
.header a.contacts {
    background-image: url(data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAABkAAAAZCAYAAADE6YVjAAAAGXRFWHRSb2Z0d2FyZQBBZG9iZSBJbWFnZVJlYWR5ccllPAAAAVNJREFUeNq01jFLAzEYxvH2UBCxiIiDUhzcdXCyIk5ufgUXLR37JdxcXBxcuwmOOtml0EKpo04iWIpIQREVEVFLjf9AhoC9e3O5uxd+lMLdPU1yedO8UiqXdQWO122ghi4+8IwGqiiId+uRCHbxqsLrFHNRz5ACSnhTch1EPScQpnIH0w7TWcGiz5rouS45rtkMVnxCJrAQ4yUq+oT8YhAjZOAT8oWHGCE9nxC9H64cA15w4xOiW0HbfErVNhvUa8c3cecQcoZv3xA9z+fCNbe4SNq7jvEUMaUnuE/au7SjkHbSR1G637ULh73Kn+gn7cIBttELGckQh9Jo8iMOrSksYx2b2MKk8FuvUUcHl/9GbiWuooYu3pVf/eARLZQxbp8na2YR0659e+HLmM/geN+z98lSRv8hZu2QQkYh+kzKjZkvHdPahymH6DNp5Cucev0JMAAkKj0NEoYBJwAAAABJRU5ErkJggg==);
}
.header a.right {
    left: auto;
    right: 10px;
}
/* NOTE(review): relative asset path — resolves against this stylesheet's
   directory; confirm menu-arrow.png ships next to it. */
.content {
    background: url( menu-arrow.png ) 25px 15px no-repeat transparent;
    padding: 150px 50px 50px 50px;
}
.header.mm-fixed-top + .content {
    background-position: 25px 55px;
}
/* ---- mmenu zoom-menu background variants ---- */
html.mm-opened.mm-zoom-menu body {
    background: #333;
}
html.mm-opened.mm-zoom-menu.mm-light body {
    background: #f3f3f3;
}
html.mm-opened.mm-zoom-menu.mm-black body {
    background: #000;
}
html.mm-opened.mm-zoom-menu.mm-white body {
    background: #fff;
}
html.mm-opened.mm-zoom-menu .mm-page {
    background-color: #fff;
}
.mm-menu li .fa {
    margin: 0 20px 0 5px;
    font-size: 16px;
    width: 12px;
}
/* Body-level feature toggles for the menu. */
body.no-iconbar .mm-menu li .fa {
    display: none !important;
}
body.no-buttonbars .mm-menu li.buttonbar-item {
    display: none !important;
}
body.no-labels .mm-menu .mm-label,
body.no-labels .mm-menu .mm-collapsed,
.Collapsed {
    display: none !important;
}
body.no-toggles .mm-menu li .mm-toggle {
    display: none !important;
}
.mm-menu {
    width: 120px !important;
}
/* ---- Form layout (flexbox) ---- */
div.container_previous {
    padding-left: 12px;
    padding-top: 84px;
}
div.container {
}
div.container fieldset {
    display: flex;
    flex-direction: column;
    align-items: stretch;
    border: none;
    justify-content: space-around;
}
div.container fieldset div {
    display: flex;
    flex-direction: row;
    align-content: stretch;
    align-items: stretch;
    padding-bottom: 5px;
    padding-top: 5px;
    margin-bottom: 5px;
    border-top: solid 1px rgba(211, 211, 211, 0.5);
    /* background-color: rgba(255, 255, 255, 0.56); */
    /* height: 33px; */
}
div.container fieldset div label {
    width: 15%;
    margin-left: 3%;
}
div.container fieldset div label.largo {
    width: 20%;
}
div.container fieldset div label.corto {
    width: 11%;
}
/* Text inputs inside form rows (duplicate font-weight declaration and
   commented-out display rule removed). */
div.container fieldset div input {
    box-shadow: 5px 5px 23px -12px rgba(84, 84, 84, 0.5) inset;
    color: #666;
    font-size: 1em;
    font-weight: normal;
    border-radius: 5px;
    box-sizing: border-box;
    padding: 5px;
    text-indent: 0;
    text-transform: none;
    vertical-align: middle;
}
/* Select boxes inside form rows. */
div.container fieldset div select {
    box-shadow: 5px 5px 23px -12px rgba(84, 84, 84, 0.5) inset;
    color: #666;
    font-size: 1em;
    font-weight: normal;
    border-radius: 5px;
    box-sizing: border-box;
    display: block;
    padding: 5px;
    text-indent: 0;
    text-transform: none;
    vertical-align: middle;
}
/* ---- Data tables with zebra striping ---- */
div.container table.table {
    padding: 0;
    margin: 0;
    border-collapse: collapse;
    width: 100%;
    font-size: 13px;
    border: solid 1px #ccc;
    margin-top: 30px;
}
div.container table.table tr:nth-child(odd) {
    background-color: #eee;
}
div.container table.table tr:nth-child(even) {
    background-color: #fff;
}
div.container table.table tr td {
    padding: 5px;
    border-left: solid 1px #D9D9D9;
}
div.container table.table th {
    background-color: rgba(55,179,73,1);
    padding: 7px;
    border: solid 1px #75D684;
    color: #fff;
}
div.container table.table th a {
    color: #fff;
    font-weight: normal;
    text-transform: uppercase;
}
/* ---- Buttons and misc widgets ---- */
span.disabled {
    background-color: #ccc;
    padding: 5px;
    cursor: pointer;
}
input.btn {
    background-color: #ddd;
    padding: 5px;
    border: 0;
    cursor: pointer;
}
/* jQuery UI datepicker trigger icon. */
.ui-datepicker-trigger {
    margin-left: 12px;
    width: 22px;
    height: 22px;
    margin-top: 2px;
    cursor: pointer;
}
label.largo {
    width: 220px;
}
/*ESTADOS VERSIONES*/
/* Version-state badges: draft / published / expired / rejected / in review. */
a.borrador, a.publicado, a.caducado {
    color: white;
    padding: 5px;
    border-radius: 4px;
    margin: 4px;
}
a.borrador:hover, a.publicado:hover, a.caducado:hover {
    opacity: 0.8;
}
a.borrador {
    background-color: #777;
}
a.publicado {
    background-color: rgba(55,179,73,1);
}
a.caducado {
    background-color: #ff0000;
}
a.rechazo {
    background-color:#ff9400;
}
a.revision {
    background-color:#fff200;
}
| alfre/Prestlan | Content/estilos.css | CSS | gpl-2.0 | 6,469 |
#
# CMake custom modules
# Copyright (C) 2011-2015 Cedric OCHS
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Configure compiler/linker flags for Android cross-builds.
# Expects PLATFORM_ROOT, STL_INCLUDE_DIR(_CPU), GCC_TOOLCHAIN_ROOT and the
# TARGET_* architecture switches to be set by the toolchain setup.
# (Fixes the "Unspported" typo in the fatal-error message; flag logic unchanged.)
MACRO(INIT_BUILD_FLAGS_ANDROID)
  IF(ANDROID)
    ADD_PLATFORM_FLAGS("--sysroot=${PLATFORM_ROOT}")
    ADD_PLATFORM_FLAGS("-ffunction-sections -funwind-tables -no-canonical-prefixes")
    ADD_PLATFORM_FLAGS("-DANDROID")
    ADD_PLATFORM_FLAGS("-I${STL_INCLUDE_DIR} -I${STL_INCLUDE_CPU_DIR}")

    IF(CLANG)
      # Map the selected architecture to an LLVM target triple.
      IF(TARGET_ARM64)
        SET(LLVM_TRIPLE "aarch64-none-linux-android")
      ELSEIF(TARGET_ARMV7)
        SET(LLVM_TRIPLE "armv7-none-linux-androideabi")
      ELSEIF(TARGET_ARMV5)
        SET(LLVM_TRIPLE "armv5te-none-linux-androideabi")
      ELSEIF(TARGET_X64)
        SET(LLVM_TRIPLE "x86_64-none-linux-android")
      ELSEIF(TARGET_X86)
        SET(LLVM_TRIPLE "i686-none-linux-android")
      ELSEIF(TARGET_MIPS64)
        SET(LLVM_TRIPLE "mips64el-none-linux-android")
      ELSEIF(TARGET_MIPS)
        SET(LLVM_TRIPLE "mipsel-none-linux-android")
      ELSE()
        MESSAGE(FATAL_ERROR "Unsupported architecture ${TARGET_CPU}")
      ENDIF()
      # Clang drives the GCC toolchain's assembler/linker.
      ADD_PLATFORM_FLAGS("-gcc-toolchain ${GCC_TOOLCHAIN_ROOT}")
      SET(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -gcc-toolchain ${GCC_TOOLCHAIN_ROOT}")
      ADD_PLATFORM_FLAGS("-target ${LLVM_TRIPLE}") # -emit-llvm -fPIC ?
      SET(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -target ${LLVM_TRIPLE}")
    ELSE()
      ADD_PLATFORM_FLAGS("-Wa,--noexecstack")
    ENDIF()

    IF(TARGET_ARM)
      ADD_PLATFORM_FLAGS("-fpic -fstack-protector")
      ADD_PLATFORM_FLAGS("-D__ARM_ARCH_5__ -D__ARM_ARCH_5T__ -D__ARM_ARCH_5E__ -D__ARM_ARCH_5TE__")

      IF(CLANG)
        ADD_PLATFORM_FLAGS("-fno-integrated-as")
      ENDIF()

      IF(TARGET_ARMV7)
        ADD_PLATFORM_FLAGS("-march=armv7-a -mfpu=vfpv3-d16")
        SET(ARMV7_HARD_FLOAT OFF)
        IF(ARMV7_HARD_FLOAT)
          ADD_PLATFORM_FLAGS("-mhard-float -D_NDK_MATH_NO_SOFTFP=1")
          SET(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -Wl,--no-warn-mismatch -lm_hard")
        ELSE()
          ADD_PLATFORM_FLAGS("-mfloat-abi=softfp")
        ENDIF()
        IF(NOT CLANG)
          SET(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -march=armv7-a")
        ENDIF()
        # Work around the Cortex-A8 erratum in the linker.
        SET(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -Wl,--fix-cortex-a8")
      ELSEIF(TARGET_ARMV5)
        ADD_PLATFORM_FLAGS("-march=armv5te -mtune=xscale -msoft-float")
      ENDIF()

      SET(TARGET_THUMB ON)
      IF(TARGET_THUMB)
        IF(NOT CLANG)
          ADD_PLATFORM_FLAGS("-finline-limit=64")
        ENDIF()
        # Debug builds stay in ARM mode for easier debugging; release uses Thumb.
        SET(DEBUG_CFLAGS "${DEBUG_CFLAGS} -marm")
        SET(RELEASE_CFLAGS "${RELEASE_CFLAGS} -mthumb")
      ELSE()
        IF(NOT CLANG)
          ADD_PLATFORM_FLAGS("-funswitch-loops -finline-limit=300")
        ENDIF()
      ENDIF()
    ELSEIF(TARGET_X86)
      # Same options for x86 and x86_64
      IF(CLANG)
        ADD_PLATFORM_FLAGS("-fPIC")
      ELSE()
        ADD_PLATFORM_FLAGS("-funswitch-loops -finline-limit=300")
        # Optimizations for Intel Atom
        # ADD_PLATFORM_FLAGS("-march=i686 -mtune=atom -mstackrealign -msse3 -mfpmath=sse -m32 -flto -ffast-math -funroll-loops")
      ENDIF()
      ADD_PLATFORM_FLAGS("-fstack-protector")
    ELSEIF(TARGET_MIPS)
      # Same options for mips and mips64
      IF(NOT CLANG)
        ADD_PLATFORM_FLAGS("-frename-registers -fno-inline-functions-called-once -fgcse-after-reload -frerun-cse-after-loop")
        SET(RELEASE_CFLAGS "${RELEASE_CFLAGS} -funswitch-loops -finline-limit=300")
      ENDIF()
      ADD_PLATFORM_FLAGS("-fpic -finline-functions -fmessage-length=0")
    ENDIF()

    SET(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -Wl,-z,noexecstack -Wl,-z,relro -Wl,-z,now -no-canonical-prefixes")
    SET(PLATFORM_LINKFLAGS "${PLATFORM_LINKFLAGS} -L${PLATFORM_ROOT}/usr/lib")
  ENDIF()
ENDMACRO()
| ktereyp/ktereyp-gentoo | dev-libs/squish/files/cmake/modules/AndroidSupport.cmake | CMake | gpl-2.0 | 4,540 |
/*
* Copyright 2012 AndroidPlot.com
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.androidplot.util;
import android.graphics.PointF;
import android.graphics.RectF;
/**
* Utility methods for converting pixel coordinates into real values and vice versa.
*/
public class ValPixConverter {
    private static final int ZERO = 0;

    /**
     * Converts a real value into a pixel offset along an axis of the given
     * length.
     *
     * @param val       real value to convert
     * @param min       minimum visible real value on the axis
     * @param max       maximum visible real value on the axis
     * @param lengthPix axis length in pixels; must be positive
     * @param flip      true to reverse the axis (as screen y-coordinates are)
     * @return the pixel offset corresponding to {@code val}
     * @throws IllegalArgumentException if {@code lengthPix} is not positive
     */
    public static float valToPix(double val, double min, double max, double lengthPix, boolean flip) {
        if(lengthPix <= ZERO) {
            throw new IllegalArgumentException("Length in pixels must be greater than 0.");
        }
        double scale = lengthPix / range(min, max);
        double pix = (val - min) * scale;
        if(flip) {
            pix = (lengthPix - pix);
        }
        return (float) pix;
    }

    /** Returns the span {@code max - min}. */
    public static double range(double min, double max) {
        return max - min;
    }

    /** Returns how many real-value units each pixel covers on the axis. */
    public static double valPerPix(double min, double max, float lengthPix) {
        return range(min, max) / lengthPix;
    }

    /**
     * Converts a pixel offset back into a real value.
     *
     * @param pix       pixel offset; must not be negative
     * @param min       minimum visible real value on the axis
     * @param max       maximum visible real value on the axis
     * @param lengthPix axis length in pixels; must be positive
     * @param flip      true if the axis should be reversed before calculating;
     *                  this is the case with the y axis for screen coords
     * @return the real value corresponding to {@code pix}
     * @throws IllegalArgumentException on negative {@code pix} or non-positive
     *         {@code lengthPix}
     */
    public static double pixToVal(float pix, double min, double max, float lengthPix, boolean flip) {
        if(pix < ZERO) {
            throw new IllegalArgumentException("pixel values cannot be negative.");
        }
        if(lengthPix <= ZERO) {
            throw new IllegalArgumentException("Length in pixels must be greater than 0.");
        }
        float offset = flip ? lengthPix - pix : pix;
        return ((range(min, max) / lengthPix) * offset) + min;
    }

    /**
     * Converts a real (x, y) point into screen coordinates within the plot area.
     *
     * @param x    real x (domain) component of the point to convert
     * @param y    real y (range) component of the point to convert
     * @param plotArea screen rectangle occupied by the plot
     * @param minX minimum visible real value on the x (domain) axis
     * @param maxX maximum visible real value on the x (domain) axis
     * @param minY minimum visible real value on the y (range) axis
     * @param maxY maximum visible real value on the y (range) axis
     * @return the on-screen point for (x, y)
     */
    public static PointF valToPix(Number x, Number y, RectF plotArea, Number minX, Number maxX, Number minY, Number maxY) {
        float pixX = ValPixConverter.valToPix(x.doubleValue(), minX.doubleValue(), maxX.doubleValue(), plotArea.width(), false) + (plotArea.left);
        float pixY = ValPixConverter.valToPix(y.doubleValue(), minY.doubleValue(), maxY.doubleValue(), plotArea.height(), true) + plotArea.top;
        return new PointF(pixX, pixY);
    }

    /**
     * Primitive-typed variant of {@link #valToPix(Number, Number, RectF, Number, Number, Number, Number)}
     * with explicit width/height, avoiding Number boxing in hot paths.
     */
    public static PointF valToPixOpt(double x, double y, double width, double height, RectF plotArea, double minX, double maxX, double minY, double maxY) {
        float pixX = ValPixConverter.valToPix(x, minX, maxX, width, false) + (plotArea.left);
        float pixY = ValPixConverter.valToPix(y, minY, maxY, height, true) + plotArea.top;
        return new PointF(pixX, pixY);
    }
}
| miiicmueller/RedPitayaScope | AndroidPlot-Core/src/main/java/com/androidplot/util/ValPixConverter.java | Java | gpl-2.0 | 3,954 |
<?php
/**
* The template for displaying posts in the Aside Post Format on index and archive pages
*
* Learn more: http://codex.wordpress.org/Post_Formats
*
* @package WordPress
* @subpackage Twenty_Eleven
* @since fabfive 1.0
*/
?>
<article id="post-<?php the_ID(); ?>" <?php post_class(); ?>>
	<header class="entry-header">
		<hgroup>
			<h2 class="entry-title"><a href="<?php the_permalink(); ?>" title="<?php printf( esc_attr__( 'Permalink to %s', 'fabfive' ), the_title_attribute( 'echo=0' ) ); ?>" rel="bookmark"><?php the_title(); ?></a></h2>
			<h3 class="entry-format"><?php _e( 'Aside', 'fabfive' ); ?></h3>
		</hgroup>
		<?php if ( comments_open() && ! post_password_required() ) : // reply link only when comments are open and the post is not password-protected ?>
		<div class="comments-link">
			<?php comments_popup_link( '<span class="leave-reply">' . __( 'Reply', 'fabfive' ) . '</span>', _x( '1', 'comments number', 'fabfive' ), _x( '%', 'comments number', 'fabfive' ) ); ?>
		</div>
		<?php endif; ?>
	</header><!-- .entry-header -->
	<?php if ( is_search() ) : // Only display Excerpts for Search ?>
	<div class="entry-summary">
		<?php the_excerpt(); ?>
	</div><!-- .entry-summary -->
	<?php else : // everywhere else show the full post content, with paged-post links ?>
	<div class="entry-content">
		<?php the_content( __( 'Continue reading <span class="meta-nav">&rarr;</span>', 'fabfive' ) ); ?>
		<?php wp_link_pages( array( 'before' => '<div class="page-link"><span>' . __( 'Pages:', 'fabfive' ) . '</span>', 'after' => '</div>' ) ); ?>
	</div><!-- .entry-content -->
	<?php endif; ?>
	<footer class="entry-meta">
		<?php fabfive_posted_on(); // theme helper: date/author byline ?>
		<?php if ( comments_open() ) : // comment count / reply link in the footer meta ?>
		<span class="sep"> | </span>
		<span class="comments-link"><?php comments_popup_link( '<span class="leave-reply">' . __( 'Leave a reply', 'fabfive' ) . '</span>', __( '<b>1</b> Reply', 'fabfive' ), __( '<b>%</b> Replies', 'fabfive' ) ); ?></span>
		<?php endif; ?>
		<?php edit_post_link( __( 'Edit', 'fabfive' ), '<span class="edit-link">', '</span>' ); ?>
	</footer><!-- #entry-meta -->
</article><!-- #post-<?php the_ID(); ?> -->
| johansst/fabfive | content-aside.php | PHP | gpl-2.0 | 2,036 |
<?php
namespace Drupal\country\Plugin\Field\FieldFormatter;
use Drupal\Core\Field\FieldItemListInterface;
use Drupal\Core\Field\FormatterBase;
/**
* Plugin implementation of the 'country' formatter showing the iso code.
*
* @FieldFormatter(
* id = "country_iso_code",
* module = "country",
* label = @Translation("ISO code"),
* field_types = {
* "country"
* }
* )
*/
class CountryCodeFormatter extends FormatterBase {

  /**
   * {@inheritdoc}
   *
   * Renders every country item as its raw ISO code value.
   */
  public function viewElements(FieldItemListInterface $items, $langcode) {
    $build = [];
    foreach ($items as $delta => $item) {
      $build[$delta] = ['#markup' => $item->value];
    }
    return $build;
  }

}
| mamont77/easydrupal | modules/contrib/country/src/Plugin/Field/FieldFormatter/CountryCodeFormatter.php | PHP | gpl-2.0 | 704 |
# Contributing
Awesome that you are reading this!
* For questions, you can create an Issue
* Code changes go via Pull Requests
## Branching policy
* The `master` branch should always build successfully
* The `develop` branch is for development work
## Submitting code
Submitted code should follow these quality guidelines:
* All tests pass cleanly/silently
* Code coverage above 95%
* Coding style should follow the default style by `lintr`
These are all checked by Travis CI when submitting
a Pull Request.
## git usage
To get started working on `nLTT` do:
```
git clone https://github.com/thijsjanzen/nLTT.git
```
Development is done on the `develop` branch.
To download and checkout the `develop` branch,
first go into the `nLTT` folder (`cd nLTT`), then do:
```
git checkout develop
```
Then the workflow is the common `git` workflow:
```
git pull
git add --all :/
git commit -m "Did something awesome"
git push
```
| richelbilderbeek/nLTT | CONTRIBUTING.md | Markdown | gpl-2.0 | 943 |
<?php
/**
* @package WordPress
* @subpackage HTML5-Reset-WordPress-Theme
* @since HTML5 Reset 2.0
*/
?>
</div> <!-- end content-background -->
</div> <!-- end page-wrap -->
<div class="clearfix"></div>
<footer id="footer">
<div class="footer-wrap">
<p class="disclaimer">
Disclaimer: Although every precaution is taken to ensure accuracy, errors or misunderstandings in price and policy may occur.<br/> We reserve the right to correct such errors and misunderstandings.
</p>
<p class="footer-contact">
Ski Snowstar Winter Sports Park • 9500 126th Stree West • Andalusia, IL • 61232
</p>
<p class="footer-contact">
Toll Free: 800.383.4002 • Local: 309.798.2666 • email: info@skisnowstar.com
</p>
<div class="source-org vcard copyright">
<small>©<?php echo date("Y"); echo " "; bloginfo('name'); ?></small>
</div>
</div> <!-- END FOOTER WRAP -->
</footer>
<?php wp_footer(); ?>
<!-- here comes the javascript -->
<!-- jQuery is called via the WordPress-friendly way via functions.php -->
<!-- this is where we put our custom functions -->
<script src="<?php bloginfo('template_directory'); ?>/_/js/functions.js"></script>
<script src="<?php bloginfo('template_directory'); ?>/js/jquery.easing.1.3.js"></script>
<script src="<?php bloginfo('template_directory'); ?>/js/jquery.touchSwipe.min.js"></script>
<script src="<?php bloginfo('template_directory'); ?>/js/jquery.liquid-slider.min.js"></script>
<script>/* If installing in the footer, you won't need $(function() {} */
$('#slider-id').liquidSlider({
dynamicTabs:false,
});
</script>
<script src="http://code.jquery.com/ui/1.10.3/jquery-ui.js"></script>
<link rel="stylesheet" href="<?php bloginfo('template_directory'); ?>/css/jquery-ui-1.10.3.custom.css">
<script>
$(function() {
$( "#accordion, #accordion-2").accordion({
collapsible:true,
heightStyle: "content"
});
});
</script>
<?php if(is_page_template('windy-city.php')) :?>
<script type="text/javascript" src="<?php echo get_template_directory_uri(); ?>/_/js/windy-city-form.js"></script>
<?php endif;?>
<!-- Asynchronous google analytics; this is the official snippet.
Replace UA-XXXXXX-XX with your site's ID and uncomment to enable.
<script>
var _gaq = _gaq || [];
_gaq.push(['_setAccount', 'UA-XXXXXX-XX']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
var s = document.getElementsByTagName('script')[0]; s.parentNode.insertBefore(ga, s);
})();
</script>
-->
</body>
</html>
| markfaust/snowstar | wp-content/themes/snowstar/footer.php | PHP | gpl-2.0 | 2,786 |
/*
==============================================================================
This file is part of the JUCE library.
Copyright (c) 2017 - ROLI Ltd.
JUCE is an open source library subject to commercial or open-source
licensing.
The code included in this file is provided under the terms of the ISC license
http://www.isc.org/downloads/software-support-policy/isc-license. Permission
To use, copy, modify, and/or distribute this software for any purpose with or
without fee is hereby granted provided that the above copyright notice and
this permission notice appear in all copies.
JUCE IS PROVIDED "AS IS" WITHOUT ANY WARRANTY, AND ALL WARRANTIES, WHETHER
EXPRESSED OR IMPLIED, INCLUDING MERCHANTABILITY AND FITNESS FOR PURPOSE, ARE
DISCLAIMED.
==============================================================================
*/
namespace juce
{
//==============================================================================
/** Wraps another input stream, and reads from it using an intermediate buffer
If you're using an input stream such as a file input stream, and making lots of
small read accesses to it, it's probably sensible to wrap it in one of these,
so that the source stream gets accessed in larger chunk sizes, meaning less
work for the underlying stream.
*/
class JUCE_API BufferedInputStream : public InputStream
{
public:
    //==============================================================================
    /** Creates a BufferedInputStream from an input source.
        @param sourceStream                 the source stream to read from
        @param bufferSize                   the size of reservoir to use to buffer the source
        @param deleteSourceWhenDestroyed    whether the sourceStream that is passed in should be
                                            deleted by this object when it is itself deleted.
    */
    BufferedInputStream (InputStream* sourceStream,
                         int bufferSize,
                         bool deleteSourceWhenDestroyed);
    /** Creates a BufferedInputStream from an input source.
        @param sourceStream     the source stream to read from - the source stream  must not
                                be deleted until this object has been destroyed.
        @param bufferSize       the size of reservoir to use to buffer the source
    */
    BufferedInputStream (InputStream& sourceStream, int bufferSize);
    /** Destructor.
        This may also delete the source stream, if that option was chosen when the
        buffered stream was created.
    */
    ~BufferedInputStream();
    //==============================================================================
    /** Returns the next byte that would be read by a call to readByte() */
    char peekByte();

    /** Total length of the underlying source stream. */
    int64 getTotalLength() override;
    /** Current read position within the (virtual) source stream. */
    int64 getPosition() override;
    /** Moves the read position; the reservoir is reused when the new position
        still falls inside the buffered region. */
    bool setPosition (int64 newPosition) override;
    /** Reads up to maxBytesToRead bytes, served from the reservoir where possible. */
    int read (void* destBuffer, int maxBytesToRead) override;
    /** Reads a null-terminated string from the stream. */
    String readString() override;
    /** True once the read position has reached the end of the source. */
    bool isExhausted() override;

private:
    //==============================================================================
    // Source stream; ownership depends on the constructor used.
    OptionalScopedPointer<InputStream> source;
    // Size of the reservoir allocated in 'buffer'.
    int bufferSize;
    // position: logical read position exposed to callers.
    // bufferStart: source offset of the first byte held in 'buffer'.
    // bufferOverlap: presumably the number of trailing bytes kept when the
    // reservoir is refilled, to cheapen small backwards seeks -- TODO confirm.
    int64 position, lastReadPos = 0, bufferStart, bufferOverlap = 128;
    HeapBlock<char> buffer;
    // Refills the reservoir around the current position; returns false on failure.
    bool ensureBuffered();
    JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (BufferedInputStream)
};
| pingdynasty/FirmwareSender | JuceLibraryCode/modules/juce_core/streams/juce_BufferedInputStream.h | C | gpl-2.0 | 3,552 |
/*
* Copyright 1998-2003 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 only, as
* published by the Free Software Foundation. Sun designates this
* particular file as subject to the "Classpath" exception as provided
* by Sun in the LICENSE file that accompanied this code.
*
* This code is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* version 2 for more details (a copy is included in the LICENSE file that
* accompanied this code).
*
* You should have received a copy of the GNU General Public License version
* 2 along with this work; if not, write to the Free Software Foundation,
* Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
*/
package sun.awt;
import java.awt.Image;
import java.awt.Toolkit;
import java.awt.im.spi.InputMethod;
import java.awt.im.spi.InputMethodDescriptor;
import java.security.AccessController;
import java.util.Locale;
import sun.awt.SunToolkit;
import sun.security.action.GetPropertyAction;
/**
* Provides sufficient information about an input method
* to enable selection and loading of that input method.
* The input method itself is only loaded when it is actually used.
*
* @since JDK1.3
*/
public abstract class X11InputMethodDescriptor implements InputMethodDescriptor {

    /* NOTE(review): this field is static but is (re)assigned in the instance
       constructor below, so every instance shares one value.  Harmless as long
       as getSupportedLocale() always returns the same startup locale, but an
       odd pattern -- confirm before refactoring. */
    private static Locale locale;

    public X11InputMethodDescriptor() {
        locale = getSupportedLocale();
    }

    /**
     * @see java.awt.im.spi.InputMethodDescriptor#getAvailableLocales
     */
    public Locale[] getAvailableLocales() {
        // Only the single startup locale is ever reported.
        Locale[] locales = {locale};
        return locales;
    }

    /**
     * @see java.awt.im.spi.InputMethodDescriptor#hasDynamicLocaleList
     */
    public boolean hasDynamicLocaleList() {
        return false;
    }

    /**
     * @see java.awt.im.spi.InputMethodDescriptor#getInputMethodDisplayName
     */
    public synchronized String getInputMethodDisplayName(Locale inputLocale, Locale displayLanguage) {
        // We ignore the input locale.
        // When displaying for the default locale, rely on the localized AWT properties;
        // for any other locale, fall back to English.
        String name = "System Input Methods";
        if (Locale.getDefault().equals(displayLanguage)) {
            name = Toolkit.getProperty("AWT.HostInputMethodDisplayName", name);
        }
        return name;
    }

    /**
     * @see java.awt.im.spi.InputMethodDescriptor#getInputMethodIcon
     */
    public Image getInputMethodIcon(Locale inputLocale) {
        // No icon is provided for the host input method.
        return null;
    }

    /**
     * @see java.awt.im.spi.InputMethodDescriptor#createInputMethod
     */
    public abstract InputMethod createInputMethod() throws Exception;

    /**
     * returns supported locale. Currently this method returns the locale in which
     * the VM is started since Solaris doesn't provide a way to determine the login locale.
     */
    static Locale getSupportedLocale() {
        return SunToolkit.getStartupLocale();
    }
}
| TheTypoMaster/Scaper | openjdk/jdk/src/solaris/classes/sun/awt/X11InputMethodDescriptor.java | Java | gpl-2.0 | 3,480 |
.size 8000
.text@48
jp lstatint
.text@100
jp lbegin
.data@143
80
.text@150
lbegin:
ld a, 02
ldff(43), a
ld c, 41
ld b, 03
lbegin_waitm3:
ldff a, (c)
and a, b
cmp a, b
jrnz lbegin_waitm3
ld a, 20
ldff(c), a
xor a, a
ldff(0f), a
ld a, 02
ldff(ff), a
ei
.text@1000
lstatint:
ld a, 08
ldff(c), a
.text@1035
halt
dec a
jp lprint_a
.text@7000
lprint_a:
push af
ld b, 91
call lwaitly_b
xor a, a
ldff(40), a
pop af
ld(9800), a
ld bc, 7a00
ld hl, 8000
ld d, a0
lprint_copytiles:
ld a, (bc)
inc bc
ld(hl++), a
dec d
jrnz lprint_copytiles
ld a, c0
ldff(47), a
ld a, 80
ldff(68), a
ld a, ff
ldff(69), a
ldff(69), a
ldff(69), a
ldff(69), a
ldff(69), a
ldff(69), a
xor a, a
ldff(69), a
ldff(69), a
ldff(43), a
ld a, 91
ldff(40), a
lprint_limbo:
jr lprint_limbo
.text@7400
lwaitly_b:
ld c, 44
lwaitly_b_loop:
ldff a, (c)
cmp a, b
jrnz lwaitly_b_loop
ret
.data@7a00
00 00 7f 7f 41 41 41 41
41 41 41 41 41 41 7f 7f
00 00 08 08 08 08 08 08
08 08 08 08 08 08 08 08
00 00 7f 7f 01 01 01 01
7f 7f 40 40 40 40 7f 7f
00 00 7f 7f 01 01 01 01
3f 3f 01 01 01 01 7f 7f
00 00 41 41 41 41 41 41
7f 7f 01 01 01 01 01 01
00 00 7f 7f 40 40 40 40
7e 7e 01 01 01 01 7e 7e
00 00 7f 7f 40 40 40 40
7f 7f 41 41 41 41 7f 7f
00 00 7f 7f 01 01 02 02
04 04 08 08 10 10 10 10
00 00 3e 3e 41 41 41 41
3e 3e 41 41 41 41 3e 3e
00 00 7f 7f 41 41 41 41
7f 7f 01 01 01 01 7f 7f
| Ben10do/gambatte | test/hwtests/halt/late_m0irq_halt_dec_scx2_2_dmg08_cgb04c_out6.asm | Assembly | gpl-2.0 | 1,421 |
/*
* Arm PrimeCell PL011 UART
*
* Copyright (c) 2006 CodeSourcery.
* Written by Paul Brook
*
* This code is licenced under the GPL.
*/
#include "hw.h"
#include "qemu-char.h"
#include "primecell.h"
/* Device state for a single PL011 UART instance. */
typedef struct {
    uint32_t base;          /* base address of the 4KB MMIO region */
    uint32_t readbuff;
    uint32_t flags;         /* UARTFR flag register */
    uint32_t lcr;           /* UARTLCR_H line control */
    uint32_t cr;            /* UARTCR control register */
    uint32_t dmacr;         /* UARTDMACR (DMA unimplemented) */
    uint32_t int_enabled;   /* UARTIMSC interrupt mask */
    uint32_t int_level;     /* raw (unmasked) interrupt status */
    uint32_t read_fifo[16]; /* receive FIFO, circular buffer */
    uint32_t ilpr;
    uint32_t ibrd;          /* integer baud-rate divisor (not used for timing) */
    uint32_t fbrd;          /* fractional baud-rate divisor */
    uint32_t ifl;           /* UARTIFLS FIFO trigger levels */
    int read_pos;           /* index of oldest byte in read_fifo */
    int read_count;         /* number of bytes currently in read_fifo */
    int read_trigger;       /* fill level at which the RX interrupt asserts */
    CharDriverState *chr;   /* backing character device; may be NULL */
    qemu_irq irq;
    enum pl011_type type;   /* selects the ID register set below */
} pl011_state;

/* Raw interrupt bits in int_level/int_enabled. */
#define PL011_INT_TX 0x20
#define PL011_INT_RX 0x10

/* UARTFR flag bits. */
#define PL011_FLAG_TXFE 0x80
#define PL011_FLAG_RXFF 0x40
#define PL011_FLAG_TXFF 0x20
#define PL011_FLAG_RXFE 0x10

/* PrimeCell/peripheral ID bytes returned at offsets 0xfe0..0xffc. */
static const unsigned char pl011_id[2][8] =
  { { 0x11, 0x10, 0x14, 0x00, 0x0d, 0xf0, 0x05, 0xb1 }, /* PL011_ARM */
    { 0x11, 0x00, 0x18, 0x01, 0x0d, 0xf0, 0x05, 0xb1 }, /* PL011_LUMINARY */
};
/* Recompute the IRQ line: asserted iff any enabled interrupt is pending. */
static void pl011_update(pl011_state *s)
{
    uint32_t pending = s->int_level & s->int_enabled;

    qemu_set_irq(s->irq, pending != 0);
}
/* MMIO read handler.  Decodes a register offset inside the PL011 region
   (offset = guest address - base) per the ARM PL011 TRM register map. */
static uint32_t pl011_read(void *opaque, target_phys_addr_t offset)
{
    pl011_state *s = (pl011_state *)opaque;
    uint32_t c;

    offset -= s->base;
    if (offset >= 0xfe0 && offset < 0x1000) {
        /* 0xfe0..0xffc: PrimeCell/peripheral ID registers, one byte per word. */
        return pl011_id[s->type][(offset - 0xfe0) >> 2];
    }
    switch (offset >> 2) {
    case 0: /* UARTDR */
        /* Pop one byte from the receive FIFO, then refresh the FIFO flags
           and the RX interrupt to match the new fill level. */
        s->flags &= ~PL011_FLAG_RXFF;
        c = s->read_fifo[s->read_pos];
        if (s->read_count > 0) {
            s->read_count--;
            if (++s->read_pos == 16)
                s->read_pos = 0;
        }
        if (s->read_count == 0) {
            s->flags |= PL011_FLAG_RXFE;
        }
        if (s->read_count == s->read_trigger - 1)
            s->int_level &= ~ PL011_INT_RX;
        pl011_update(s);
        /* Space was just freed, so let the char backend push more input. */
        qemu_chr_accept_input(s->chr);
        return c;
    case 1: /* UARTRSR/UARTECR -- receive status is not modelled, reads as 0 */
        return 0;
    case 6: /* UARTFR */
        return s->flags;
    case 8: /* UARTILPR */
        return s->ilpr;
    case 9: /* UARTIBRD */
        return s->ibrd;
    case 10: /* UARTFBRD */
        return s->fbrd;
    case 11: /* UARTLCR_H */
        return s->lcr;
    case 12: /* UARTCR */
        return s->cr;
    case 13: /* UARTIFLS */
        return s->ifl;
    case 14: /* UARTIMSC */
        return s->int_enabled;
    case 15: /* UARTRIS: raw interrupt status */
        return s->int_level;
    case 16: /* UARTMIS: masked interrupt status */
        return s->int_level & s->int_enabled;
    case 18: /* UARTDMACR */
        return s->dmacr;
    default:
        cpu_abort (cpu_single_env, "pl011_read: Bad offset %x\n", (int)offset);
        return 0;
    }
}
/* Choose the FIFO fill level at which the RX interrupt asserts.
   Deliberately forced to 1 regardless of UARTIFLS -- see the comment below. */
static void pl011_set_read_trigger(pl011_state *s)
{
#if 0
    /* The docs say the RX interrupt is triggered when the FIFO exceeds
       the threshold.  However linux only reads the FIFO in response to an
       interrupt.  Triggering the interrupt when the FIFO is non-empty seems
       to make things work.  */
    if (s->lcr & 0x10)
        s->read_trigger = (s->ifl >> 1) & 0x1c;
    else
#endif
        s->read_trigger = 1;
}
/* MMIO write handler; offsets follow the PL011 TRM register map. */
static void pl011_write(void *opaque, target_phys_addr_t offset,
                          uint32_t value)
{
    pl011_state *s = (pl011_state *)opaque;
    unsigned char ch;

    offset -= s->base;
    switch (offset >> 2) {
    case 0: /* UARTDR */
        /* ??? Check if transmitter is enabled.  */
        ch = value;
        if (s->chr)
            qemu_chr_write(s->chr, &ch, 1);
        /* Transmission is instantaneous, so TX interrupt fires immediately. */
        s->int_level |= PL011_INT_TX;
        pl011_update(s);
        break;
    case 1:
        /* NOTE(review): offset 0x04 is UARTRSR/UARTECR per the TRM, yet this
           writes s->cr (duplicating case 12) -- looks like a bug; confirm. */
        s->cr = value;
        break;
    case 6: /* UARTFR */
        /* Writes to Flag register are ignored.  */
        break;
    case 8: /* UARTILPR */
        s->ilpr = value;
        break;
    case 9: /* UARTIBRD */
        s->ibrd = value;
        break;
    case 10: /* UARTFBRD */
        s->fbrd = value;
        break;
    case 11: /* UARTLCR_H */
        /* FIFO enable (bit 4) affects the read trigger level. */
        s->lcr = value;
        pl011_set_read_trigger(s);
        break;
    case 12: /* UARTCR */
        /* ??? Need to implement the enable and loopback bits.  */
        s->cr = value;
        break;
    case 13: /* UARTIFLS */
        s->ifl = value;
        pl011_set_read_trigger(s);
        break;
    case 14: /* UARTIMSC */
        s->int_enabled = value;
        pl011_update(s);
        break;
    case 17: /* UARTICR: write-1-to-clear interrupt bits */
        s->int_level &= ~value;
        pl011_update(s);
        break;
    case 18: /* UARTDMACR */
        s->dmacr = value;
        if (value & 3)
            cpu_abort(cpu_single_env, "PL011: DMA not implemented\n");
        break;
    default:
        cpu_abort (cpu_single_env, "pl011_write: Bad offset %x\n", (int)offset);
    }
}
/* Char-backend callback: non-zero when we can accept another byte.
   Capacity is the 16-entry FIFO when FEN (LCR_H bit 4) is set, otherwise a
   single holding register. */
static int pl011_can_receive(void *opaque)
{
    pl011_state *s = (pl011_state *)opaque;
    int capacity = (s->lcr & 0x10) ? 16 : 1;

    return s->read_count < capacity;
}
/* Append one value to the circular receive FIFO and raise the RX interrupt
   once the fill level reaches the trigger point.  Callers are expected to
   respect pl011_can_receive(), so overflow is not checked here. */
static void pl011_put_fifo(void *opaque, uint32_t value)
{
    pl011_state *s = (pl011_state *)opaque;
    int slot;

    slot = s->read_pos + s->read_count;
    if (slot >= 16)
        slot -= 16;
    s->read_fifo[slot] = value;
    s->read_count++;
    s->flags &= ~PL011_FLAG_RXFE;
    if (s->cr & 0x10 || s->read_count == 16) {
        /* NOTE(review): the full-flag test gates on CR bit 4, while
           pl011_can_receive tests LCR_H bit 4 (FEN) -- inconsistent; confirm
           against the PL011 TRM which register holds FIFO enable. */
        s->flags |= PL011_FLAG_RXFF;
    }
    if (s->read_count == s->read_trigger) {
        s->int_level |= PL011_INT_RX;
        pl011_update(s);
    }
}
/* Char-backend receive callback.  Only the first byte of buf is consumed;
   pl011_can_receive() returns at most 16 (FIFO mode), so presumably the
   backend delivers bytes accordingly -- confirm size handling if reused. */
static void pl011_receive(void *opaque, const uint8_t *buf, int size)
{
    pl011_put_fifo(opaque, *buf);
}
/* Char-backend event callback: a serial BREAK is queued as a FIFO entry with
   the break-error bit (0x400) set in the data word. */
static void pl011_event(void *opaque, int event)
{
    if (event == CHR_EVENT_BREAK)
        pl011_put_fifo(opaque, 0x400);
}
/* One handler per access width (8/16/32-bit); all widths share the same
   implementation since registers are effectively 32-bit. */
static CPUReadMemoryFunc *pl011_readfn[] = {
   pl011_read,
   pl011_read,
   pl011_read
};

static CPUWriteMemoryFunc *pl011_writefn[] = {
   pl011_write,
   pl011_write,
   pl011_write
};
/* Savevm handler (version 1).  Field order must stay in lock-step with
   pl011_load below. */
static void pl011_save(QEMUFile *f, void *opaque)
{
    pl011_state *s = (pl011_state *)opaque;
    int i;

    qemu_put_be32(f, s->readbuff);
    qemu_put_be32(f, s->flags);
    qemu_put_be32(f, s->lcr);
    qemu_put_be32(f, s->cr);
    qemu_put_be32(f, s->dmacr);
    qemu_put_be32(f, s->int_enabled);
    qemu_put_be32(f, s->int_level);
    for (i = 0; i < 16; i++)
        qemu_put_be32(f, s->read_fifo[i]);
    qemu_put_be32(f, s->ilpr);
    qemu_put_be32(f, s->ibrd);
    qemu_put_be32(f, s->fbrd);
    qemu_put_be32(f, s->ifl);
    qemu_put_be32(f, s->read_pos);
    qemu_put_be32(f, s->read_count);
    qemu_put_be32(f, s->read_trigger);
}
/* Loadvm handler: mirror image of pl011_save.  Only version 1 is accepted. */
static int pl011_load(QEMUFile *f, void *opaque, int version_id)
{
    pl011_state *s = (pl011_state *)opaque;
    int i;

    if (version_id != 1)
        return -EINVAL;

    s->readbuff = qemu_get_be32(f);
    s->flags = qemu_get_be32(f);
    s->lcr = qemu_get_be32(f);
    s->cr = qemu_get_be32(f);
    s->dmacr = qemu_get_be32(f);
    s->int_enabled = qemu_get_be32(f);
    s->int_level = qemu_get_be32(f);
    for (i = 0; i < 16; i++)
        s->read_fifo[i] = qemu_get_be32(f);
    s->ilpr = qemu_get_be32(f);
    s->ibrd = qemu_get_be32(f);
    s->fbrd = qemu_get_be32(f);
    s->ifl = qemu_get_be32(f);
    s->read_pos = qemu_get_be32(f);
    s->read_count = qemu_get_be32(f);
    s->read_trigger = qemu_get_be32(f);

    return 0;
}
/* Create and wire up one PL011 instance: maps a 4KB MMIO region at 'base',
   attaches the IRQ line and (optional) character backend, and registers the
   savevm handlers.  'chr' may be NULL for an unconnected UART. */
void pl011_init(uint32_t base, qemu_irq irq,
                CharDriverState *chr, enum pl011_type type)
{
    int iomemtype;
    pl011_state *s;

    s = (pl011_state *)qemu_mallocz(sizeof(pl011_state));
    iomemtype = cpu_register_io_memory(0, pl011_readfn,
                                       pl011_writefn, s);
    cpu_register_physical_memory(base, 0x00001000, iomemtype);
    s->base = base;
    s->irq = irq;
    s->type = type;
    s->chr = chr;
    s->read_trigger = 1;
    /* Reset values per the PL011 TRM: IFLS=0x12, CR has TXE/RXE set,
       FR reports TX FIFO empty / RX FIFO empty. */
    s->ifl = 0x12;
    s->cr = 0x300;
    s->flags = 0x90;
    if (chr){
        qemu_chr_add_handlers(chr, pl011_can_receive, pl011_receive,
                              pl011_event, s);
    }
    register_savevm("pl011_uart", -1, 1, pl011_save, pl011_load, s);
}
| yajin/qemu-omap3 | hw/pl011.c | C | gpl-2.0 | 8,023 |
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 26 11:09:05 2013
@author: jotterbach
"""
from numpy import *
from ED_HalfFilling import EigSys_HalfFilling
from DotProduct import scalar_prod
from multiprocessing import *
from multiprocessing import Pool
import matplotlib.pyplot as plt
from ParallelizationTools import info
from os.path import *
from scipy.special import *
from scipy.linalg import qr
from DotProduct import scalar_prod
from Correlation_Generator import *
from datetime import datetime
''' define the datestamp for the filenames '''
# A single clock read keeps `date` and `now` mutually consistent; strftime
# replaces the previous error-prone string slicing.  Resulting format:
# "YYYY-MM-DD_HHhMMm", e.g. "2014-06-26_11h09m".
_stamp = datetime.now()
date = str(_stamp)  # full "YYYY-MM-DD HH:MM:SS.ffffff" form, kept for backward compatibility
now = _stamp.strftime('%Y-%m-%d_%Hh%Mm')
def AngleSpectrum(number_particles, noEV, gamma, hopping, angle):
    """
    AngleSpectrum(number_particles, noEV, gamma, hopping, angle):
    computes the energy eigenspectrum as a function of the angle of the dipoles
    with the chain axis given an unit interaction V and a hopping J

    Results are written to .npy files under FigureData/ (eigenvalues and the
    angle grid), tagged with the module-level timestamp `now`.

    parameters of the function:
        number_particles: number of particles in the problem
        noEV: number of eigenvalues being calculated
        gamma: opening angle of the zig-zag chain
        hopping: hopping parameter in units of interaction V
        angle: array containing the angles as a multiple of **PI**
    """
    ''' default values for other methods that are being called by the current
    function '''
    spectrum = 1 #ensures that the spectrum is calculated in EigSys_HalfFilling
    independet_v1_v2 = 1 #makes v1 and v2 independent of each other (name typo kept for compatibility)
    number_sites = 2*number_particles #condition for half-filling
    interaction_strength = 1 #unit of energy
#    number_particles = 6
#    noEV = 5*number_sites #degeneracy of GS requires noEV>number_sites
#    hopping = .1
#    gamma = 2*pi/3
#    angle = linspace(-.8,-.7,41)
    ''' intialization of variables that will be stored for later use '''
    eigval = zeros((angle.shape[0],noEV), dtype = float)
    degeneracies = zeros((angle.shape[0],1))
    v1 = zeros((angle.shape[0],1))
    v2 = zeros((angle.shape[0],1))
    v3 = zeros((angle.shape[0],1))
    ''' actual method call '''
    # Guard required because worker processes re-import this module; only the
    # parent (imported as 'DiagonalizationMethods') spawns the pool.
    if __name__ == 'DiagonalizationMethods':
        info('main line')
        pool = Pool()
        ''' invocation of the eigenvalue procedure '''
        it = [pool.apply_async(EigSys_HalfFilling, (number_particles, number_sites, hopping, interaction_strength, angle[angle_idx], noEV, spectrum, gamma, independet_v1_v2)) for angle_idx in range(0,angle.shape[0])]
        for ridx in it:
            # Pool results arrive out of order: match each result back to its
            # angle via the angle value echoed in the first return slot.
            angle_idx = nonzero(angle == ridx.get()[0])
            eigval[angle_idx,:]= ridx.get()[1]#floor(10*around(real(ridx.get()[1]),decimals = 2))/10
            # degeneracy = multiplicity of the lowest eigenvalue in this row
            degeneracies[angle_idx] = sum((eigval[angle_idx,:] == eigval[angle_idx,0]).astype(int))
            v1[angle_idx]=ridx.get()[2]
            v2[angle_idx]=ridx.get()[3]
            v3[angle_idx]=ridx.get()[4]
            print 'angle:', angle[angle_idx], '\nground-state degeneracy:', degeneracies[angle_idx]
        filename = 'FigureData/'+now+'_AngleSpectrum_N'+str(number_particles)+'_J'+str(hopping).replace('.','-')+'_vdd'+str(interaction_strength).replace('.','-')
        save(filename+'_EigVals', eigval)
        save(filename+'_angle', angle)
        print 'saved: '+filename
def InteractionSpectrum(number_particles, noEV, gamma, angle, interaction_strength):
    ''' computes the eigenvalue spectrum for a given angle
    as a function of the interaction strength in units of J

    Results are written to .npy files under FigureData/, tagged with the
    module-level timestamp `now`.

    parameters of the function:
        number_particles: number of particles in the problem
        noEV: number of eigenvalues being calculated
        gamma: opening angle of the zig-zag chain
        angle: array containing the angles as a multiple of **PI**
        interaction_strength: interaction in units of hopping J
    '''
    ''' default values for other methods that are being called by the current
    function '''
    spectrum = 1 #ensures that the spectrum is calculated in EigSys_HalfFilling
    independent_v1_v2 = 1 #makes v1 and v2 independent of each other
    number_sites = 2*number_particles #condition for half-filling
    hopping = 1 #unit of energy
    ''' intialization of variables that will be stored for later use '''
    eigval = zeros((len(interaction_strength),noEV), dtype = float)
    # NOTE(review): v1/v2/v3 are initialised as arrays but overwritten with
    # scalars inside the loop below -- presumably only used for printing.
    v1 = zeros((interaction_strength.shape[0],1))
    v2 = zeros((interaction_strength.shape[0],1))
    v3 = zeros((interaction_strength.shape[0],1))
    ''' actual method call '''
    # Guard needed so that pool worker processes do not recurse on import.
    if __name__ == 'DiagonalizationMethods':
        info('main line')
        pool = Pool()
        ''' invocation of eigenvalue procedure '''
        it = [pool.apply_async(EigSys_HalfFilling, (number_particles, number_sites, hopping, interaction_strength[idx], angle, noEV, spectrum, gamma, independent_v1_v2)) for idx in range(len(interaction_strength))]
        for ridx in it:
            # match out-of-order pool results by the echoed interaction value
            idx = nonzero(interaction_strength == ridx.get()[6])
            v1=ridx.get()[2]
            v2=ridx.get()[3]
            v3=ridx.get()[4]
            eigval[idx,:]= ridx.get()[1]#floor(10*around(real(ridx.get()[1]),decimals = 2))/10
            print 'interaction:', interaction_strength[idx], 'interaction constants: ', v1,v2,v3
        filename = 'FigureData/'+now+'_InteractionSpectrum_N'+str(number_particles)+'_J'+str(hopping).replace('.','-')+'_Theta'+str(angle).replace('.','-')
        save(filename+'_EigVals', eigval)
        save(filename+'_interaction',interaction_strength)
        print 'saved: '+filename
def HoppingSpectrum(number_particles, noEV, gamma, angle, hopping):
    ''' computes the eigenvalue spectrum for given interactions as a function
    of the hopping in units of interaction V

    Results are written to .npy files under FigureData/, tagged with the
    module-level timestamp `now`.

    parameters of the function:
        number_particles: number of particles in the problem
        noEV: number of eigenvalues being calculated
        gamma: opening angle of the zig-zag chain
        angle: array containing the angles as a multiple of **PI**
        hopping: hopping in units of interaction V
    '''
    ''' default values for other methods that are being called by the current
    function '''
    spectrum = 1 #ensures that the spectrum is calculated in EigSys_HalfFilling
    independent_v1_v2 = 1 #makes v1 and v2 independent of each other
    number_sites = 2*number_particles #condition for half-filling
    interaction_strength = 1 #unit of energy
    ''' intialization of variables that will be stored for later use '''
    eigval = zeros((len(hopping),noEV), dtype = float)
    # NOTE(review): v1/v2/v3 arrays are overwritten with scalars in the loop,
    # same as in InteractionSpectrum -- presumably print-only.
    v1 = zeros((hopping.shape[0],1))
    v2 = zeros((hopping.shape[0],1))
    v3 = zeros((hopping.shape[0],1))
    ''' actual method call '''
    # Guard needed so that pool worker processes do not recurse on import.
    if __name__ == 'DiagonalizationMethods':
        info('main line')
        pool = Pool()
        ''' invocation of eigenvalue procedure '''
        it = [pool.apply_async(EigSys_HalfFilling, (number_particles, number_sites, hopping[idx], interaction_strength, angle, noEV, spectrum, gamma, independent_v1_v2)) for idx in range(len(hopping))]
        for ridx in it:
            # match out-of-order pool results by the echoed hopping value
            idx = nonzero(hopping == ridx.get()[5])
            v1=ridx.get()[2]
            v2=ridx.get()[3]
            v3=ridx.get()[4]
            eigval[idx,:]= ridx.get()[1]
            print 'hopping:', hopping[idx], 'interactions: ', v1,v2,v3
        filename = 'FigureData/'+now+'_HoppingSpectrum-nnhopping_N'+str(number_particles)+'_vdd'+str(interaction_strength).replace('.','-')+'_Theta'+str(angle).replace('.','-')
        save(filename+'_EigVals', eigval)
        save(filename+'_hopping', hopping)
        print 'saved: '+filename
def DensityCorrelations(number_particles, noEV, gamma, angle, hopping, degeneracy):
    ''' computes the density correlation function for a given set of angle,
    interaction and hopping

    The local density and the density-density correlator g2 are averaged over
    the (QR-orthogonalised) ground-state manifold of size `degeneracy`, then
    saved to .npy files under FigureData/.
    '''
    ''' default values for other methods that are being called by the current
    function '''
    spectrum = 0 #ensures that the spectrum AND the eigenvectors are calculated in EigSys_HalfFilling
    independent_v1_v2 = 1 #makes v1 and v2 independent of each other
    number_sites = 2*number_particles #condition for half-filling
    interaction_strength = 1 #unit of energy
    ''' function specific parameter initilaization '''
    eigval, eigvec, basisstates = EigSys_HalfFilling(number_particles, number_sites, hopping, interaction_strength, angle, noEV, spectrum, gamma, independent_v1_v2)
    eigval = around(real(eigval),decimals = 2)
    print '\nlow-energy spectrum: \n', eigval
    print 'GS degeneracy:', degeneracy
    eigvec = eigvec.astype(complex)
    if degeneracy > 1:
        # Re-orthogonalise the degenerate ground-state vectors via economy QR
        # so that manifold averages below are well defined.
        print '\nOrthogonalizing GS manifold'
        eigvec_GS = zeros((eigvec.shape[0],degeneracy), dtype = complex)
        for m in range(degeneracy):
            eigvec_GS[:,m] = eigvec[:,m]
        Q, R = qr(eigvec_GS, mode = 'economic')
        for m in range(degeneracy):
            eigvec[:,m] = Q[:,m]
        del Q, R, eigvec_GS
    number_states = basisstates.shape[0]
    # Guard needed because pool workers re-import this module.
    if __name__ == 'DiagonalizationMethods':
        ''' local density '''
        print '\nCalculating local density'
        local_density = zeros((2*number_particles,1), dtype = float)
        pool = Pool()
        for deg_idx in range(0,degeneracy):
            print 'state index: ', deg_idx
            it = [pool.apply_async(loc_den, (basisstates, number_particles, number_states, eigvec[:,deg_idx], site_idx)) for site_idx in range(0,2*number_particles)]
            for ridx in it:
                # results are matched by the echoed site index; averaged over the manifold
                site_idx = ridx.get()[0]
                local_density[site_idx] += real(ridx.get()[1])/degeneracy
        ''' density-density correlation '''
        print '\nCalculating density-density correlations'
        g2 = zeros((number_sites,1), dtype = float)
        for deg_idx in range(0,degeneracy):
            print 'state index: ', deg_idx
            it = [pool.apply_async(pair_corr, (basisstates, number_particles, number_sites, number_states, eigvec[:,deg_idx], site_idx)) for site_idx in range(0,number_sites)]
            for ridx in it:
                site_idx = ridx.get()[0]
                g2[site_idx] += real(ridx.get()[1])/degeneracy
        filename='FigureData/'+now+'_Correlations_N'+str(number_particles)+'_J'+str(hopping).replace('.','-')+'_vdd'+str(interaction_strength).replace('.','-')+'_Theta'+str(angle).replace('.','-')
        save(filename+'_local_density', local_density)
        save(filename+'_g2', g2)
        print 'saved: '+filename
| jotterbach/ExactDiagonalization_PolarizedFermionicDipolesOnZigZagChain | DiagonalizationMethods.py | Python | gpl-2.0 | 10,568 |
<?php
/**
* @version 1.0.0
* @package com_llp_service
* @copyright Copyright (C) 2014. All rights reserved.
* @license GNU General Public License version 2 or later; see LICENSE.txt
* @author Ankit <ankit.kr.balyan@gmail.com> - http://igotstudy.com
*/
// No direct access
defined('_JEXEC') or die;
jimport('joomla.application.component.view');
/**
* View class for a list of Llp_service.
*/
class Llp_serviceViewServiceflows extends JViewLegacy {

	// List of serviceflow records for the current page.
	protected $items;
	// JPagination object for the list.
	protected $pagination;
	// Model state (filters, ordering).
	protected $state;

	/**
	 * Display the view
	 */
	public function display($tpl = null) {
		$this->state = $this->get('State');

		// Check for errors.
		if (count($errors = $this->get('Errors'))) {
			throw new Exception(implode("\n", $errors));
		}

		// Register this view in the component submenu and build the sidebar.
		Llp_serviceHelper::addSubmenu('serviceflows');

		$this->addToolbar();

		$this->sidebar = JHtmlSidebar::render();
		parent::display($tpl);
	}

	/**
	 * Add the page title and toolbar.
	 *
	 * Button order below determines the on-screen toolbar order, so the
	 * sequence of the JToolBarHelper calls is deliberate.
	 *
	 * @since 1.6
	 */
	protected function addToolbar() {
		require_once JPATH_COMPONENT . '/helpers/llp_service.php';

		$state = $this->get('State');
		// ACL checks for the current category context.
		$canDo = Llp_serviceHelper::getActions($state->get('filter.category_id'));

		JToolBarHelper::title(JText::_('COM_LLP_SERVICE_TITLE_SERVICEFLOWS'), 'serviceflows.png');

		//Check if the form exists before showing the add/edit buttons
		$formPath = JPATH_COMPONENT_ADMINISTRATOR . '/views/serviceflow';
		if (file_exists($formPath)) {
			if ($canDo->get('core.create')) {
				JToolBarHelper::addNew('serviceflow.add', 'JTOOLBAR_NEW');
			}

			if ($canDo->get('core.edit') && isset($this->items[0])) {
				JToolBarHelper::editList('serviceflow.edit', 'JTOOLBAR_EDIT');
			}
		}

		if ($canDo->get('core.edit.state')) {
			// Components with a state column get publish/unpublish/archive;
			// stateless components fall back to a direct delete button.
			if (isset($this->items[0]->state)) {
				JToolBarHelper::divider();
				JToolBarHelper::custom('serviceflows.publish', 'publish.png', 'publish_f2.png', 'JTOOLBAR_PUBLISH', true);
				JToolBarHelper::custom('serviceflows.unpublish', 'unpublish.png', 'unpublish_f2.png', 'JTOOLBAR_UNPUBLISH', true);
			} else if (isset($this->items[0])) {
				//If this component does not use state then show a direct delete button as we can not trash
				JToolBarHelper::deleteList('', 'serviceflows.delete', 'JTOOLBAR_DELETE');
			}

			if (isset($this->items[0]->state)) {
				JToolBarHelper::divider();
				JToolBarHelper::archiveList('serviceflows.archive', 'JTOOLBAR_ARCHIVE');
			}
			if (isset($this->items[0]->checked_out)) {
				JToolBarHelper::custom('serviceflows.checkin', 'checkin.png', 'checkin_f2.png', 'JTOOLBAR_CHECKIN', true);
			}
		}

		//Show trash and delete for components that uses the state field
		if (isset($this->items[0]->state)) {
			// In the trash view (-2) offer "empty trash"; otherwise offer "trash".
			if ($state->get('filter.state') == -2 && $canDo->get('core.delete')) {
				JToolBarHelper::deleteList('', 'serviceflows.delete', 'JTOOLBAR_EMPTY_TRASH');
				JToolBarHelper::divider();
			} else if ($canDo->get('core.edit.state')) {
				JToolBarHelper::trash('serviceflows.trash', 'JTOOLBAR_TRASH');
				JToolBarHelper::divider();
			}
		}

		if ($canDo->get('core.admin')) {
			JToolBarHelper::preferences('com_llp_service');
		}

		//Set sidebar action - New in 3.0
		JHtmlSidebar::setAction('index.php?option=com_llp_service&view=serviceflows');

		$this->extra_sidebar = '';
		//
	}

	// No sortable columns are declared for this list view.
	protected function getSortFields()
	{
		return array(
		);
	}

}
| ankibalyan/businesssetup | administrator/components/com_llp_service/views/serviceflows/view.html.php | PHP | gpl-2.0 | 3,878 |
#!/usr/bin/python
"""Do not call std::string::find_first_of or std::string::find with a string of
characters to locate that has the size 1.
Use the version of std::string::find that takes a single character to
locate instead. Same for find_last_of/rfind.
"""
error_msg = "Do not use find(\"a\"), use find('a')."
regexp = r"""(?x)
r?find(_(first|last)_of)?\s*
\(
"([^\\]|(\\[nt\\"]))"[,)]"""
forbidden = [
r'find_first_of("a")',
r'find_last_of("a")',
r'find("a")',
r'rfind("a")',
r'find_first_of("\n")',
r'find_last_of("\n")',
r'find("\n")',
r'rfind("\n")',
r'find_first_of("\t")',
r'find_last_of("\t")',
r'find("\t")',
r'rfind("\t")',
r'find_first_of("\\")',
r'find_last_of("\\")',
r'find("\\")',
r'rfind("\\")',
r'find_first_of("\"")',
r'find_last_of("\"")',
r'find("\"")',
r'rfind("\"")',
r'find_first_of("a", 1)',
r'find_last_of("a", 1)',
r'find("a", 1)',
r'rfind("a", 1)',
]
allowed = [
r'find("ab")',
r"find('a')",
r"rfind('a')",
r'rfind("ab")',
r"find('\n')",
r'find("\nx")',
r"rfind('\n')",
r'rfind("\nx")',
r"find('\t')",
r'find("\tx")',
r"rfind('\t')",
r'rfind("\tx")',
r"find('\\')",
r'find("\\x")',
r"rfind('\\')",
r'rfind("\\x")',
r"find('\"')",
r'find("\"x")',
r"rfind('\"')",
r'rfind("\"x")',
r"find('a', 1)",
r'find("ab", 1)',
r"rfind('a', 1)",
r'rfind("ab", 1)',
]
| widelands/widelands | cmake/codecheck/rules/contrived_std_string_find.py | Python | gpl-2.0 | 1,480 |
<?php
/**
* The template for displaying 404 pages (not found).
*
* @package gobrenix
*/
get_header(); ?>
<main role="main">
<div class="container">
<section class="error-404 not-found">
<header class="page-header">
<h1 class="page-title"><?php _e('Oops! That page can’t be found.', '_gxtheme'); ?></h1>
</header>
<div class="page-content">
<p><?php _e('It looks like nothing was found at this location. Maybe try one of the links below or a search?', '_gxtheme'); ?></p>
<?php get_search_form(); ?>
<?php the_widget('WP_Widget_Recent_Posts'); ?>
<?php if (_gxtheme_categorized_blog()) : ?>
<div class="widget widget_categories">
<h2 class="widget-title"><?php _e('Most Used Categories', '_gxtheme'); ?></h2>
<ul>
<?php
wp_list_categories(array(
'orderby' => 'count',
'order' => 'DESC',
'show_count' => 1,
'title_li' => '',
'number' => 10,
));
?>
</ul>
</div>
<?php endif; ?>
<?php
$archive_content = '<p>' . sprintf(__('Try looking in the monthly archives. %1$s', '_gxtheme'), convert_smilies(':)')) . '</p>';
the_widget('WP_Widget_Archives', 'dropdown=1', "after_title=</h2>$archive_content");
?>
<?php the_widget('WP_Widget_Tag_Cloud'); ?>
</div>
</section>
</div>
</main>
<?php get_footer(); ?>
| gobrenix/gobrenix.com | wp-content/themes/gobrenix/404.php | PHP | gpl-2.0 | 1,377 |
/**
* (c) 2014-2016 Alexandro Sanchez Bach. All rights reserved.
* Released under GPL v2 license. Read LICENSE for more details.
*/
#include "opengl_fence.h"
namespace gfx {
namespace opengl {
void OpenGLFence::clear() {
signaled = false;
}
void OpenGLFence::signal() {
signaled = true;
cv.notify_all();
}
void OpenGLFence::wait() {
std::unique_lock<std::mutex> lock(mutex);
cv.wait(lock, [&]{ return signaled; });
}
void OpenGLFence::wait(Clock::duration timeout) {
std::unique_lock<std::mutex> lock(mutex);
cv.wait_for(lock, timeout, [&]{ return signaled; });
}
} // namespace opengl
} // namespace gfx
| AlexAltea/nucleus | nucleus/graphics/backend/opengl/opengl_fence.cpp | C++ | gpl-2.0 | 645 |
/* Copyright (C) 2011-2015 Philipp Benner
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __TFBAYES_DPM_DPM_TFBS_OPTIONS_HH__
#define __TFBAYES_DPM_DPM_TFBS_OPTIONS_HH__
#ifdef HAVE_CONFIG_H
#include <tfbayes/config.h>
#endif /* HAVE_CONFIG_H */
#include <list>
#include <string>
#include <vector>
#include <ostream>
#include <tfbayes/dpm/dpm-partition.hh>
typedef std::matrix<double> baseline_lengths_t;
typedef std::list<std::string> baseline_names_t;
typedef std::list<std::matrix<double> > baseline_priors_t;
typedef std::vector<double> baseline_weights_t;
typedef struct {
std::string phylogenetic_file;
std::string alignment_file;
double alpha;
double discount;
double lambda;
double initial_temperature;
bool block_samples;
size_t block_samples_period;
size_t metropolis_proposals;
bool optimize;
size_t optimize_period;
std::string process_prior;
std::string background_model;
std::matrix<double> background_alpha;
size_t background_context;
std::vector<double> background_beta;
std::vector<double> background_gamma;
std::string background_cache;
std::vector<double> background_weights;
baseline_lengths_t baseline_lengths;
baseline_names_t baseline_names;
baseline_priors_t baseline_priors;
baseline_weights_t baseline_weights;
size_t population_size;
size_t threads;
std::string socket_file;
size_t verbose;
} tfbs_options_t;
std::ostream& operator<<(std::ostream& o, const tfbs_options_t& options);
#endif /* __TFBAYES_DPM_DPM_TFBS_OPTIONS_HH__ */
| pbenner/tfbayes | tfbayes/dpm/dpm-tfbs-options.hh | C++ | gpl-2.0 | 2,386 |
class RemoveUserIdIndexFromClients < ActiveRecord::Migration
def change
remove_index :clients, :user_id
end
end
| davidsantoso/mino | db/migrate/20150428012222_remove_user_id_index_from_clients.rb | Ruby | gpl-2.0 | 120 |
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN">
<html><head><meta http-equiv="Content-Type" content="text/html;charset=utf-8">
<title>src/wxGUI/GUIBoxData.cpp-Dateireferenz</title>
<link href="../../doxygen.css" rel="stylesheet" type="text/css">
</head><body>
<!-- Erzeugt von Doxygen 1.8.13 -->
<script type="text/javascript" src="../../menudata.js"></script>
<script type="text/javascript" src="../../menu.js"></script>
<script type="text/javascript">
$(function() {
initMenu('../../',false,false,'search.php','Suchen');
});
</script>
<div id="main-nav"></div>
<div id="nav-path" class="navpath">
<ul>
<li class="navelem"><a class="el" href="../../dir_68267d1309a1af8e8297ef4c3efbcdba.html">src</a></li><li class="navelem"><a class="el" href="../../dir_eaae7e9da1b31c6b329e928d72e0e61a.html">wxGUI</a></li> </ul>
</div>
</div><!-- top -->
<div class="header">
<div class="summary">
<a href="#namespaces">Namensbereiche</a> </div>
<div class="headertitle">
<div class="title">GUIBoxData.cpp-Dateireferenz</div> </div>
</div><!--header-->
<div class="contents">
<div class="textblock"><code>#include "<a class="el" href="../../d5/df7/GUIBoxData_8h_source.html">GUIBoxData.h</a>"</code><br />
<code>#include "<a class="el" href="../../d1/d65/Route_8h_source.html">src/kernel/routing/Route.h</a>"</code><br />
<code>#include "<a class="el" href="../../db/dc1/MutApp_8h_source.html">src/wxGUI/MutApp.h</a>"</code><br />
<code>#include "<a class="el" href="../../dd/db5/MutFrame_8h_source.html">src/wxGUI/MutFrame.h</a>"</code><br />
<code>#include "<a class="el" href="../../d6/d81/MutLogicWnd_8h_source.html">src/wxGUI/MutLogicWnd.h</a>"</code><br />
<code>#include "<a class="el" href="../../d4/dcc/NewBoxShape_8h_source.html">src/wxGUI/Routing/NewBoxShape.h</a>"</code><br />
<code>#include "wx/msgdlg.h"</code><br />
<code>#include "<a class="el" href="../../d5/d12/GUIBoxData-inlines_8h_source.html">GUIBoxData-inlines.h</a>"</code><br />
<code>#include "<a class="el" href="../../d2/d2c/Route-inlines_8h_source.html">src/kernel/routing/Route-inlines.h</a>"</code><br />
</div><div class="textblock"><div class="dynheader">
Include-Abhängigkeitsdiagramm für GUIBoxData.cpp:</div>
<div class="dyncontent">
<div class="center"><img src="../../d8/dd9/GUIBoxData_8cpp__incl.png" border="0" usemap="#src_2wxGUI_2GUIBoxData_8cpp" alt=""/></div>
<!-- MAP 0 -->
</div>
</div>
<p><a href="../../d2/dd3/GUIBoxData_8cpp_source.html">gehe zum Quellcode dieser Datei</a></p>
<table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="namespaces"></a>
Namensbereiche</h2></td></tr>
<tr class="memitem:db/d94/namespacemutaborGUI"><td class="memItemLeft" align="right" valign="top">  </td><td class="memItemRight" valign="bottom"><a class="el" href="../../db/d94/namespacemutaborGUI.html">mutaborGUI</a></td></tr>
<tr class="memdesc:db/d94/namespacemutaborGUI"><td class="mdescLeft"> </td><td class="mdescRight">not for headers <br /></td></tr>
<tr class="separator:"><td class="memSeparator" colspan="2"> </td></tr>
</table>
</div><!-- contents -->
<hr size="1"><address style="align: right;"><small>
Erzeugt am Sam Sep 15 2018 14:57:54 für Mutabor von <a href="http://www.doxygen.org/index.html"><img src="doxygen.png" alt="doxygen" align="middle" border="0"></a> 1.8.13</small></address>
</body>
</html>
| keinstein/mutabor | doc/Doxygen/html/d2/dd3/GUIBoxData_8cpp.html | HTML | gpl-2.0 | 3,471 |
#!/bin/bash
set -e
python manage.py collectstatic --noinput
coverage run --branch --source='.' manage.py test
coverage report -m --omit=*migrations*,*__init__.py,*tests*
| aaronkurtz/gourmand | gourmand/coverage.sh | Shell | gpl-2.0 | 170 |
/*
* @BEGIN LICENSE
*
* Psi4: an open-source quantum chemistry software package
*
* Copyright (c) 2007-2016 The Psi4 Developers.
*
* The copyrights for code used from other parties are included in
* the corresponding files.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
* @END LICENSE
*/
/*! \file
\ingroup DETCI
\brief Enter brief description of file here
*/
/*
** Simultaneous Expansion Method for the Iterative Solution of
** Several of the Lowest Eigenvalues and Corresponding Eivenvectors of
** Large Real-Symmetric Matrices
**
** Algorithm due to Bowen Liu
** IBM Research Laboratory
**
** Implemented for Schaefer Group by David Sherrill
** Center for Computational Quantum Chemistry, UGA
**
** In-core version for now!
** February 1994
**
** Updated 12/7/95 for testing within rasci code
** Updated 11/21/97 for least squares extrapolation and debugging of sem
** code
*/
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include "psi4/libciomr/libciomr.h"
#include "psi4/libqt/qt.h"
#include "psi4/detci/structs.h"
#include "psi4/detci/ciwave.h"
namespace psi { namespace detci {
#define MAX_B_ROWS 200
#define MIN_F_DENOM 1.0E-3
/*** do a little test routine
main()
{
double **A ;
double *evals, **evecs ;
int i, j, used;
void sem() ;
std::string OutFileRMR ;
ffile(&outfile, "output.dat", 0) ;
tstart(outfile) ;
A = init_matrix(50,50) ;
evals = init_array(4) ;
evecs = init_matrix(4, 50) ;
for (i=0; i<50; i++) {
for (j=0; j<=i; j++) {
if (i!=j) {A[i][j] = 1.0; A[j][i] = 1.0; }
else if (i<5) A[i][j] = 1.0 + 0.1 * (double) i ;
else A[i][j] = 2.0 * (double) i + 1.0;
}
}
sem(A, 50, 4, evecs, evals, 1.0E-10, 6, &used);
outfile->Printf( "Ok, the eigenvectors are sideways!\n");
eivout(evecs, evals, 4, 50, outfile);
outfile->Printf( "\nused %d expansion vectors\n", used);
tstop(outfile);
fclose(outfile);
}
***/
/*
** sem(): Use Liu's Simultaneous Expansion Method to find the lowest
** few eigenvalues of a real matrix.
**
** Arguments:
** A = matrix to find eigenvalues of
** N = size of matrix
** M = number of eigenvalues to solve for
** L = number of initial vectors in subspace
** evecs = matrix for eigenvectors
** evals = array for eigenvalues
** b = set of subspace vectors (dimensions maxiter x N)
** space for rows i < L should not yet be allocated!!
** conv_e = convergence tolerance. The lowest energy eigenvalue must
** be converged to within this range. It is interesting
** that higher roots _may_ converge faster.
** conv_rms = the required tolerance for convergence of the CI correction
** vector
** maxiter = max number of iterations allowed
** offst = offset to add to eigenvalues in printing (e.g. enuc)
** vu = pointer to int to hold how many expansion vectors used
** outfile = output file
**
** Returns: none
*/
void CIWavefunction::sem_test(double **A, int N, int M, int L, double **evecs, double *evals,
double **b, double conv_e, double conv_rms, int maxiter, double offst,
int *vu, int maxnvect)
{
double *tmp_vec, **tmp_mat ;
double **jnk;
int sm_tridim;
double *sm_mat;
double *sm_evals;
double **sm_evecs;
int i, j, ij, k, I;
double **G, **d;
double *lambda, **alpha, **f ;
double *converged_root;
double **m_lambda, ***m_alpha;
double tval, *dvecnorm;
int converged=0, iter=1;
int iter2=0; /* iterations since last collapse */
double *lastroot;
int lse_do=0, last_lse_collapse_num=-Parameters_->lse_collapse, collapse_num=0;
double lse_tolerance=Parameters_->lse_tolerance;
double **sigma_overlap, ***Mmatrix;
int *Lvec;
/* check parameters */
if (evecs == NULL || evals == NULL) {
printf("(sem): passed uncallocated pointers for evecs or evals\n") ;
return ;
}
for (I=0; I<N; I++)
A[I][I] -= CalcInfo_->edrc;
/* make space for temp vector */
tmp_vec = init_array(N);
lastroot = init_array(N);
converged_root = init_array(M);
dvecnorm = init_array(M);
/* allocate other arrays with ~fixed dimensions during iteration */
d = init_matrix(M, N); /* M and N are both fixed */
f = init_matrix(M, N);
G = init_matrix(maxnvect, maxnvect);
tmp_mat = init_matrix(maxnvect, N);
jnk = init_matrix(maxnvect, N);
lambda = init_array(maxnvect);
alpha = init_matrix(maxnvect, maxnvect);
sigma_overlap = init_matrix(maxnvect, maxnvect);
Mmatrix = (double ***) malloc (sizeof(double **) * M);
for (i=0; i<M; i++)
Mmatrix[i] = init_matrix(maxnvect, maxnvect);
m_lambda = init_matrix(M, maxnvect);
m_alpha = (double ***) malloc (sizeof(double **) * maxnvect);
for (i=0; i<maxnvect; i++) {
m_alpha[i] = init_matrix(maxnvect, maxnvect);
}
Lvec = init_int_array(maxnvect);
/* ITERATE */
while (!converged && iter <= maxiter) {
Lvec[iter2] = L;
/* form G matrix */
mmult(b, 0, A, 0, tmp_mat, 0, L, N, N, 0); /* tmp = B * A */
mmult(tmp_mat, 0, b, 1, G, 0, L, N, L, 0); /* G = tmp * B(T) */
/* solve the L x L eigenvalue problem G a = lambda a for M roots */
sq_rsp(L, L, G, lambda, 1, alpha, 1E-14);
if (N<100 && print_ >=3) {
outfile->Printf("\n b matrix\n");
print_mat(b,L,N,"outfile");
outfile->Printf("\n sigma matrix\n");
print_mat(tmp_mat,L,N,"outfile");
outfile->Printf("\n G matrix (%d)\n", iter-1);
print_mat(G,L,L,"outfile");
outfile->Printf("\n Eigenvectors and eigenvalues of G matrix (%d)\n", iter-1);
eivout(alpha, lambda, L, L, "outfile");
}
lse_do = 0;
if (Parameters_->lse && (maxnvect-L <= M*Parameters_->collapse_size) && L>2 &&
(lse_tolerance > fabs(lambda[0]-lastroot[0])) && iter>=3 &&
((collapse_num-last_lse_collapse_num)>= Parameters_->lse_collapse))
lse_do = 1;
if (lse_do) {
/* Form sigma_overlap matrix */
zero_mat(sigma_overlap,maxnvect,maxnvect);
mmult(b, 0, A, 0, tmp_mat, 0, L, N, N, 0);
mmult(tmp_mat, 0, tmp_mat, 1, sigma_overlap, 0, L, N, L, 0);
/* Form Mij matrix */
for (k=0; k<M; k++) {
zero_mat(Mmatrix[k],maxnvect,maxnvect);
for (i=0; i<L; i++) {
for (j=i; j<L; j++) {
Mmatrix[k][i][j] = Mmatrix[k][j][i] = sigma_overlap[i][j]
-2.0 * lambda[k] * G[i][j];
if (i==j) Mmatrix[k][i][i] += pow(lambda[k],2.0);
}
}
} /* end loop over k (nroots) */
if (print_ > 2) {
outfile->Printf( "\nsigma_overlap matrix (%2d) = \n", iter-1);
print_mat(sigma_overlap, L, L, "outfile");
for (k=0; k<M; k++) {
outfile->Printf( "\nM matrix (%2d) for root %d = \n", iter, k);
print_mat(Mmatrix[k], L, L, "outfile");
outfile->Printf( "\n");
}
}
/* solve the L x L eigenvalue problem M a = lambda a for M roots */
for (k=0; k<M; k++) {
sq_rsp(L, L, Mmatrix[k], m_lambda[k], 1, m_alpha[k], 1.0E-14);
if (print_ > 2) {
outfile->Printf( "\n M eigenvectors and eigenvalues root %d:\n",k);
eivout(m_alpha[k], m_lambda[k], L, L, "outfile");
}
}
} /* end if lse_do */
if ((Parameters_->collapse_size>0) && (iter2-Parameters_->collapse_size+1 > 0)
&& (Lvec[iter2-Parameters_->collapse_size+1]+M*Parameters_->collapse_size
> maxnvect) && iter!=maxiter) {
collapse_num++;
if (lse_do) last_lse_collapse_num = collapse_num;
/* copy ci vector into d matrix */
zero_mat(d, M, N);
if (lse_do)
for (k=0; k<M; k++)
for (i=0; i<L; i++)
for (I=0; I<N; I++)
d[k][I] += m_alpha[k][i][0] * b[i][I];
else
for (k=0; k<M; k++)
for (i=0; i<L; i++)
for (I=0; I<N; I++)
d[k][I] += alpha[i][k] * b[i][I];
/* copy d matrix to end of b matrix */
for (i=0; i<M; i++)
for (I=0; I<N; I++)
b[maxnvect-1-i][I] = d[i][I];
/* reorder b matrix pointers */
for (i=0; i<L; i++) jnk[i] = b[i];
for (i=0; i<L; i++) b[i] = jnk[maxnvect-1-i];
L = M;
iter2 = 0;
/* zero out old parts of b matrix */
for (i=L; i<maxnvect; i++) zero_arr(b[i], N);
/* reform G matrix */
mmult(b, 0, A, 0, tmp_mat, 0, L, N, N, 0); /* tmp = B * A */
mmult(tmp_mat, 0, b, 1, G, 0, L, N, L, 0); /* G = tmp * B(T) */
/* solve the L x L eigenvalue problem G a = lambda a for M roots */
sq_rsp(L, L, G, lambda, 1, alpha, 1E-14);
if (N<100 && print_ >= 3) {
outfile->Printf(" Reformed G matrix (%d)\n",iter-1);
print_mat(G,L,L,"outfile");
outfile->Printf("\n");
}
if (lse_do) outfile->Printf(" Least Squares Extrapolation\n");
outfile->Printf(" Collapse Davidson subspace to %d vectors\n", L);
} /* end collapse */
/* form the d part of the correction vector */
zero_mat(d, M, N);
for (k=0; k<M; k++) {
for (i=0; i<L; i++) {
mmult(A,0,&(b[i]),1,&(tmp_vec),1,N,N,1,0); /* tmp=A*b[i] */
for (I=0; I<N; I++) {
d[k][I] += alpha[i][k] * (tmp_vec[I] - lambda[k] * b[i][I]);
}
}
}
if (N<100 && print_ >= 3) {
outfile->Printf(" D vectors for iter (%d)\n",iter-1);
print_mat(d,M,N,"outfile");
}
/* check for convergence */
converged = 1;
for (i=0; i<M; i++) {
dot_arr(d[i], d[i], N, &tval);
tval = sqrt(tval);
dvecnorm[i] = tval;
if (dvecnorm[i] <= conv_rms && fabs(lambda[i] - lastroot[i]) <= conv_e) converged_root[i] = 1;
else {
converged_root[i] = 0;
converged = 0;
}
outfile->Printf( "Iter %3d Root %d = %13.9lf",
iter-1, i+1, (lambda[i] + offst));
outfile->Printf( " Delta_E %10.3E Delta_C %10.3E %c\n",
lambda[i] - lastroot[i], tval, converged_root[i] ? 'c' : ' ');
}
if (M>1) {
outfile->Printf( "\n");
}
if (converged || iter == maxiter) {
for (i=0; i<M; i++) {
evals[i] = lambda[i];
for (j=0; j<L; j++) {
tval = alpha[j][i];
for (I=0; I<N; I++)
evecs[i][I] += tval * b[j][I];
}
}
break;
}
else {
for (i=0; i<M; i++) lastroot[i] = lambda[i];
}
/* form the correction vector and normalize */
for (k=0; k<M; k++) {
for (I=0; I<N; I++) {
tval = lambda[k] - A[I][I];
/* make sure denom isn't 0. If so, make some arbitrary val */
/* It might be interesting to figure the best way to do this */
/* previous way to do it
if (fabs(tval) < MIN_F_DENOM) tval = 0.1;
f[k][I] = d[k][I] / tval;
*/
/* the way GUGA does it */
if (fabs(tval) < 1.0E-8) f[k][I] = 0.0;
else f[k][I] = d[k][I] / tval;
}
}
normalize(f, M, N);
/* Schmidt orthog and append f's to b */
for (i=0; i<M; i++)
if (converged_root[i] == 0)
if (schmidt_add(b, L, N, f[i])) L++;
outfile->Printf(" Number of b vectors = %d\n", L);
if (L > maxnvect) {
std::string str = "(test_sem): L(";
str += std::to_string( L) ;
str += ") > maxnvect(";
str += std::to_string( maxnvect) ;
str += ")! Aborting!";
throw PsiException(str,__FILE__,__LINE__);
}
/* Again Schmidt orthog b's (minimize numerical error) */
/* Doesn't this mess up the sigma vectors slightly */
schmidt(b, L, N, "outfile");
iter++ ;
iter2++;
}
*vu = L;
free(lambda);
free(tmp_vec);
free_matrix(d, M);
free_matrix(f, M);
free_matrix(b, maxnvect);
free_matrix(G, maxnvect);
free_matrix(tmp_mat, maxnvect);
free_matrix(alpha, maxnvect);
free_matrix(sigma_overlap, maxnvect);
for (i=0; i<M; i++) free_matrix(Mmatrix[i], maxnvect);
}
}} // namespace psi::detci
| kannon92/psi4 | psi4/src/psi4/detci/sem_test.cc | C++ | gpl-2.0 | 13,282 |
/* ----------------------------------------------------------------------
LAMMPS - Large-scale Atomic/Molecular Massively Parallel Simulator
http://lammps.sandia.gov, Sandia National Laboratories
Steve Plimpton, sjplimp@sandia.gov
Copyright (2003) Sandia Corporation. Under the terms of Contract
DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains
certain rights in this software. This software is distributed under
the GNU General Public License.
See the README file in the top-level LAMMPS directory.
------------------------------------------------------------------------- */
#include "gmock/gmock.h"
#include "gtest/gtest.h"
#include <deque>
#include <mpi.h>
using ::testing::TestEventListener;
using ::testing::TestCase;
using ::testing::TestSuite;
using ::testing::UnitTest;
using ::testing::TestPartResult;
using ::testing::TestInfo;
class MPIPrinter : public TestEventListener {
MPI_Comm comm;
TestEventListener * default_listener;
int me;
int nprocs;
char * buffer;
size_t buffer_size;
std::deque<TestPartResult> results;
bool finalize_test;
public:
MPIPrinter(TestEventListener * default_listener) : default_listener(default_listener) {
comm = MPI_COMM_WORLD;
MPI_Comm_rank(comm, &me);
MPI_Comm_size(comm, &nprocs);
buffer_size = 1024;
buffer = new char[buffer_size];
finalize_test = false;
}
~MPIPrinter() override {
delete default_listener;
default_listener = nullptr;
delete [] buffer;
buffer = nullptr;
buffer_size = 0;
}
virtual void OnTestProgramStart(const UnitTest& unit_test) override {
if(me == 0) default_listener->OnTestProgramStart(unit_test);
}
virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration) override {
if(me == 0) default_listener->OnTestIterationStart(unit_test, iteration);
}
virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test) override {
if(me == 0) default_listener->OnEnvironmentsSetUpStart(unit_test);
}
virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test) override {
if(me == 0) default_listener->OnEnvironmentsSetUpEnd(unit_test);
}
virtual void OnTestSuiteStart(const TestSuite& test_suite) override {
if(me == 0) default_listener->OnTestSuiteStart(test_suite);
}
// Legacy API is deprecated but still available
#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
virtual void OnTestCaseStart(const TestCase& test_case) override {
if(me == 0) default_listener->OnTestSuiteStart(test_case);
}
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
virtual void OnTestStart(const TestInfo& test_info) override {
// Called before a test starts.
if(me == 0) default_listener->OnTestStart(test_info);
results.clear();
finalize_test = false;
}
virtual void OnTestPartResult(const TestPartResult& test_part_result) override {
// Called after a failed assertion or a SUCCESS().
// test_part_result()
if (me == 0 && finalize_test) {
default_listener->OnTestPartResult(test_part_result);
} else {
std::stringstream proc_message;
std::istringstream msg(test_part_result.message());
std::string line;
while(std::getline(msg, line)) {
proc_message << "[Rank " << me << "] " << line << std::endl;
}
results.push_back(TestPartResult(test_part_result.type(), test_part_result.file_name(), test_part_result.line_number(), proc_message.str().c_str()));
}
}
virtual void OnTestEnd(const TestInfo& test_info) override {
// Called after a test ends.
MPI_Barrier(comm);
// other procs send their test part results
if(me != 0) {
int nresults = results.size();
MPI_Send(&nresults, 1, MPI_INT, 0, 0, comm);
for(auto& test_part_result : results) {
int type = test_part_result.type();
MPI_Send(&type, 1, MPI_INT, 0, 0, comm);
const char * str = test_part_result.file_name();
int length = 0;
if(str) length = strlen(str)+1;
MPI_Send(&length, 1, MPI_INT, 0, 0, comm);
if(str) MPI_Send(str, length, MPI_CHAR, 0, 0, comm);
int lineno = test_part_result.line_number();
MPI_Send(&lineno, 1, MPI_INT, 0, 0, comm);
str = test_part_result.message();
length = 0;
if(str) length = strlen(str)+1;
MPI_Send(&length, 1, MPI_INT, 0, 0, comm);
if(str) MPI_Send(str, length, MPI_CHAR, 0, 0, comm);
}
}
if(me == 0) {
// collect results from other procs
for(int p = 1; p < nprocs; p++) {
int nresults = 0;
MPI_Recv(&nresults, 1, MPI_INT, p, 0, comm, MPI_STATUS_IGNORE);
for(int r = 0; r < nresults; r++) {
int type;
MPI_Recv(&type, 1, MPI_INT, p, 0, comm, MPI_STATUS_IGNORE);
int length = 0;
MPI_Recv(&length, 1, MPI_INT, p, 0, comm, MPI_STATUS_IGNORE);
std::string file_name;
if (length > 0) {
if (length > buffer_size) {
delete [] buffer;
buffer = new char[length];
buffer_size = length;
}
MPI_Recv(buffer, length, MPI_CHAR, p, 0, comm, MPI_STATUS_IGNORE);
file_name = buffer;
}
int lineno;
MPI_Recv(&lineno, 1, MPI_INT, p, 0, comm, MPI_STATUS_IGNORE);
MPI_Recv(&length, 1, MPI_INT, p, 0, comm, MPI_STATUS_IGNORE);
std::string message;
if (length > 0) {
if (length > buffer_size) {
delete [] buffer;
buffer = new char[length];
buffer_size = length;
}
MPI_Recv(buffer, length, MPI_CHAR, p, 0, comm, MPI_STATUS_IGNORE);
message = std::string(buffer);
}
results.push_back(TestPartResult((TestPartResult::Type)type, file_name.c_str(), lineno, message.c_str()));
}
}
// ensure failures are reported
finalize_test = true;
// add all failures
while(!results.empty()) {
auto result = results.front();
if(result.failed()) {
ADD_FAILURE_AT(result.file_name(), result.line_number()) << result.message();
} else {
default_listener->OnTestPartResult(result);
}
results.pop_front();
}
default_listener->OnTestEnd(test_info);
}
}
virtual void OnTestSuiteEnd(const TestSuite& test_suite) override {
if(me == 0) default_listener->OnTestSuiteEnd(test_suite);
}
#ifndef GTEST_REMOVE_LEGACY_TEST_CASEAPI_
virtual void OnTestCaseEnd(const TestCase& test_case) override {
if(me == 0) default_listener->OnTestCaseEnd(test_case);
}
#endif // GTEST_REMOVE_LEGACY_TEST_CASEAPI_
virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test) override {
if(me == 0) default_listener->OnEnvironmentsTearDownStart(unit_test);
}
virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test) override {
if(me == 0) default_listener->OnEnvironmentsTearDownEnd(unit_test);
}
virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration) override {
if(me == 0) default_listener->OnTestIterationEnd(unit_test, iteration);
}
virtual void OnTestProgramEnd(const UnitTest& unit_test) override {
if(me == 0) default_listener->OnTestProgramEnd(unit_test);
}
};
| rbberger/lammps | unittest/testing/mpitesting.h | C | gpl-2.0 | 8,246 |
/*
This file is part of p4est.
p4est is a C library to manage a collection (a forest) of multiple
connected adaptive quadtrees or octrees in parallel.
Copyright (C) 2010 The University of Texas System
Written by Carsten Burstedde, Lucas C. Wilcox, and Tobin Isaac
p4est is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
p4est is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with p4est; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifdef P4_TO_P8
#include <p8est_algorithms.h>
#include <p8est_bits.h>
#include <p8est_communication.h>
#include <p8est_extended.h>
#include <p8est_ghost.h>
#else
#include <p4est_algorithms.h>
#include <p4est_bits.h>
#include <p4est_communication.h>
#include <p4est_extended.h>
#include <p4est_ghost.h>
#endif /* !P4_TO_P8 */
#include <sc_io.h>
#include <sc_notify.h>
#include <sc_ranges.h>
#include <sc_search.h>
#include <sc_zlib.h>
#ifdef SC_ALLGATHER
#include <sc_allgather.h>
#define MPI_Allgather sc_allgather
#endif
#ifdef P4EST_MPIIO
#define P4EST_MPIIO_WRITE
#endif
#ifdef P4EST_HAVE_UNISTD_H
#include <unistd.h>
#endif
/** Per-peer communication state for the 2:1 balance exchange.
 *
 * NOTE(review): judging by the field names this tracks the two message
 * rounds ("first" and "second") of the balance peer communication --
 * confirm against the balance implementation, which lies outside this
 * chunk of the file.
 */
typedef struct
{
  /* 0/1 flags -- presumably set once the corresponding count or payload
     message from this peer has arrived; verify in the balance code */
  int8_t have_first_count, have_first_load;
  int8_t have_second_count, have_second_load;
  /* message sizes received from / destined for this peer in each round */
  int recv_first_count, recv_second_count;
  int send_first_count, send_second_count;
  /* buffers holding the outgoing and incoming data of both rounds */
  sc_array_t send_first, send_second, recv_first, recv_second;
}
p4est_balance_peer_t;
#ifndef P4_TO_P8
/* Sentinel: P4EST_DATA_UNINITIALIZED points at this private int, so
   uninitialized user data can be recognized by pointer identity. */
static int p4est_uninitialized_key;
void *P4EST_DATA_UNINITIALIZED = &p4est_uninitialized_key;
/* number of ranges for the sc_ranges-based peer search --
   NOTE(review): its consumer is outside this chunk; confirm there */
const int p4est_num_ranges = 25;
#endif /* P4_TO_P8 */
/* quadrants read per chunk when loading a forest from disk -- presumably
   used by the load/deserialization code further down; confirm */
static const size_t number_toread_quadrants = 32;
/* per-tree flag bits; their meaning is defined by code outside this chunk */
static const int8_t fully_owned_flag = 0x01;
static const int8_t any_face_flag = 0x02;
#ifdef P4EST_MPI
/** Correct partition to allow one level of coarsening.
*
* \param [in] p4est forest whose partition is corrected
* \param [in,out] num_quadrants_in_proc partition that will be corrected
* \return absolute number of moved quadrants
*/
static p4est_locidx_t p4est_partition_for_coarsening (p4est_t * p4est,
p4est_locidx_t *
num_quadrants_in_proc);
#endif
/** Map integer quadrant coordinates within a tree to physical space.
 *
 * The input coordinates (x, y[, z]) live on the reference tree in units
 * of [0, P4EST_ROOT_LEN].  The result is obtained by d-linear
 * interpolation (bilinear in 2D, trilinear in 3D) between the geometric
 * positions of the tree's P4EST_CHILDREN corner vertices, which are
 * looked up through connectivity->tree_to_vertex.
 *
 * \param [in]  connectivity  Connectivity with non-NULL \a vertices and
 *                            \a tree_to_vertex arrays (asserted below).
 * \param [in]  treeid        Valid tree index within the connectivity.
 * \param [in]  x             Coordinate in [0, P4EST_ROOT_LEN].
 * \param [in]  y             Coordinate in [0, P4EST_ROOT_LEN].
 * \param [out] vxyz          Receives the interpolated 3D position.
 */
void
p4est_qcoord_to_vertex (p4est_connectivity_t * connectivity,
p4est_topidx_t treeid,
p4est_qcoord_t x, p4est_qcoord_t y,
#ifdef P4_TO_P8
p4est_qcoord_t z,
#endif
double vxyz[3])
{
const double *vertices = connectivity->vertices;
#ifdef P4EST_DEBUG
/* only needed for the range assertions below */
const p4est_topidx_t num_vertices = connectivity->num_vertices;
#endif
const p4est_topidx_t *vindices;
int xi, yi;
double wx[2], wy[2];
#ifdef P4_TO_P8
int zi;
double wz[2];
#endif
double xfactor, yfactor;
p4est_topidx_t vindex;
P4EST_ASSERT (num_vertices > 0);
P4EST_ASSERT (vertices != NULL);
P4EST_ASSERT (treeid >= 0 && treeid < connectivity->num_trees);
P4EST_ASSERT (connectivity->tree_to_vertex != NULL);
/* vindices walks the P4EST_CHILDREN corner vertex ids of this tree */
vindices = connectivity->tree_to_vertex + P4EST_CHILDREN * treeid;
vxyz[0] = vxyz[1] = vxyz[2] = 0.;
/* per-axis 1D weights: w[1] is the fractional position along the axis,
   w[0] its complement, so they sum to 1 */
P4EST_ASSERT (x >= 0 && x <= P4EST_ROOT_LEN);
wx[1] = (double) x / (double) P4EST_ROOT_LEN;
wx[0] = 1. - wx[1];
P4EST_ASSERT (y >= 0 && y <= P4EST_ROOT_LEN);
wy[1] = (double) y / (double) P4EST_ROOT_LEN;
wy[0] = 1. - wy[1];
#ifdef P4_TO_P8
P4EST_ASSERT (z >= 0 && z <= P4EST_ROOT_LEN);
wz[1] = (double) z / (double) P4EST_ROOT_LEN;
wz[0] = 1. - wz[1];
/* accumulate each corner's weighted contribution; the loop nest order
   (z outermost, x innermost) matches the corner ordering in
   tree_to_vertex so vindices can simply be advanced */
for (zi = 0; zi < 2; ++zi) {
#endif
for (yi = 0; yi < 2; ++yi) {
#ifdef P4_TO_P8
yfactor = wz[zi] * wy[yi];
#else
yfactor = wy[yi];
#endif
for (xi = 0; xi < 2; ++xi) {
xfactor = yfactor * wx[xi];
vindex = *vindices++;
P4EST_ASSERT (vindex >= 0 && vindex < num_vertices);
vxyz[0] += xfactor * vertices[3 * vindex + 0];
vxyz[1] += xfactor * vertices[3 * vindex + 1];
vxyz[2] += xfactor * vertices[3 * vindex + 2];
}
}
#ifdef P4_TO_P8
}
#endif
}
/** Compute the number of bytes owned by this forest.
 * Sums the fixed-size bookkeeping, the tree array, every tree's quadrant
 * array, and the memory pools (user data pool only if data_size > 0).
 * The connectivity is not included in the count.
 * \param [in] p4est  Valid forest structure.
 * \return            Total memory used in bytes.
 */
size_t
p4est_memory_used (p4est_t * p4est)
{
  const int           num_procs = p4est->mpisize;
  p4est_topidx_t      which_tree;
  p4est_tree_t       *tree;
  size_t              total;

  /* fixed-size struct plus the per-process partition arrays */
  total = sizeof (p4est_t) +
    (num_procs + 1) * (sizeof (p4est_gloidx_t) + sizeof (p4est_quadrant_t));

  /* the tree array itself, then each tree's quadrant storage */
  total += sc_array_memory_used (p4est->trees, 1);
  for (which_tree = 0; which_tree < p4est->connectivity->num_trees;
       ++which_tree) {
    tree = p4est_tree_array_index (p4est->trees, which_tree);
    total += sc_array_memory_used (&tree->quadrants, 0);
  }

  /* memory pools: the data pool exists only for positive data_size */
  if (p4est->data_size > 0) {
    total += sc_mempool_memory_used (p4est->user_data_pool);
  }
  total += sc_mempool_memory_used (p4est->quadrant_pool);

  return total;
}
/** Create a new forest with the coarsest possible uniform partition.
 * Convenience wrapper forwarding to p4est_new_ext with
 * min_quadrants = 0, min_level = 0 and fill_uniform = 1.
 */
p4est_t            *
p4est_new (MPI_Comm mpicomm, p4est_connectivity_t * connectivity,
           size_t data_size, p4est_init_t init_fn, void *user_pointer)
{
  return p4est_new_ext (mpicomm, connectivity, 0, 0, 1,
                        data_size, init_fn, user_pointer);
}
/** Create a new forest with extended options.
 * Determines a uniform starting level from \a min_quadrants and
 * \a min_level, computes an equal partition of the implied quadrants
 * across all processes, and fills each locally owned tree either with
 * the coarsest possible complete cover (fill_uniform == 0) or with
 * uniform quadrants of the computed level (fill_uniform != 0).
 * \param [in] mpicomm        MPI communicator shared by all processes.
 * \param [in] connectivity   Valid connectivity; not owned by the forest.
 * \param [in] min_quadrants  Minimum initial quadrants per processor.
 * \param [in] min_level      Minimum level of refinement (clamped >= 0).
 * \param [in] fill_uniform   Nonzero fills trees with a uniform level.
 * \param [in] data_size      Per-quadrant user data bytes (0 for none).
 * \param [in] init_fn        Callback to initialize quadrant data, or NULL.
 * \param [in] user_pointer   Stored in p4est->user_pointer, not touched.
 * \return                    Valid forest; assert-checked before return.
 */
p4est_t            *
p4est_new_ext (MPI_Comm mpicomm, p4est_connectivity_t * connectivity,
               p4est_locidx_t min_quadrants, int min_level, int fill_uniform,
               size_t data_size, p4est_init_t init_fn, void *user_pointer)
{
  int                 mpiret;
  int                 num_procs, rank;
  int                 i, must_remove_last_quadrant;
  int                 level;
  uint64_t            first_morton, last_morton, miu, count;
  p4est_topidx_t      jt, num_trees;
  p4est_gloidx_t      tree_num_quadrants, global_num_quadrants;
  p4est_gloidx_t      first_tree, first_quadrant, first_tree_quadrant;
  p4est_gloidx_t      last_tree, last_quadrant, last_tree_quadrant;
  p4est_gloidx_t      quadrant_index;
  p4est_t            *p4est;
  p4est_tree_t       *tree;
  p4est_quadrant_t   *quad;
  p4est_quadrant_t    a, b, c;
  p4est_quadrant_t   *global_first_position;
  sc_array_t         *tquadrants;

  P4EST_GLOBAL_PRODUCTIONF
    ("Into " P4EST_STRING
     "_new with min quadrants %lld level %d uniform %d\n",
     (long long) min_quadrants, SC_MAX (min_level, 0), fill_uniform);

  P4EST_ASSERT (p4est_connectivity_is_valid (connectivity));
  P4EST_ASSERT (min_level <= P4EST_QMAXLEVEL);

  /* retrieve MPI information */
  mpiret = MPI_Comm_size (mpicomm, &num_procs);
  SC_CHECK_MPI (mpiret);
  mpiret = MPI_Comm_rank (mpicomm, &rank);
  SC_CHECK_MPI (mpiret);

  /* assign some data members */
  p4est = P4EST_ALLOC_ZERO (p4est_t, 1);
  p4est->mpicomm = mpicomm;
  p4est->mpisize = num_procs;
  p4est->mpirank = rank;
  p4est->data_size = data_size;
  p4est->user_pointer = user_pointer;
  p4est->connectivity = connectivity;
  num_trees = connectivity->num_trees;

  /* allocate memory pools */
  if (p4est->data_size > 0) {
    p4est->user_data_pool = sc_mempool_new (p4est->data_size);
  }
  else {
    p4est->user_data_pool = NULL;
  }
  p4est->quadrant_pool = sc_mempool_new (sizeof (p4est_quadrant_t));

  /* determine uniform level of initial tree:
     grow the per-tree count until min_quadrants per process is reachable,
     then raise further to honor min_level */
  tree_num_quadrants = 1;
  for (level = 0; level < P4EST_QMAXLEVEL; ++level) {
    if (tree_num_quadrants >=
        (num_procs * (p4est_gloidx_t) min_quadrants + (num_trees - 1))
        / num_trees) {
      break;
    }
    tree_num_quadrants *= P4EST_CHILDREN;
    P4EST_ASSERT (tree_num_quadrants > 0);
  }
  for (; level < min_level; ++level) {
    tree_num_quadrants *= P4EST_CHILDREN;
    P4EST_ASSERT (tree_num_quadrants > 0);
  }
  P4EST_ASSERT (level <= P4EST_QMAXLEVEL
                && tree_num_quadrants <= (p4est_gloidx_t) P4EST_LOCIDX_MAX);

  /* compute global number of quadrants */
  global_num_quadrants = tree_num_quadrants * num_trees;
  P4EST_GLOBAL_PRODUCTIONF ("New " P4EST_STRING
                            " with %lld trees on %d processors\n",
                            (long long) num_trees, num_procs);
  P4EST_GLOBAL_INFOF ("Initial level %d potential global quadrants"
                      " %lld per tree %lld\n",
                      level, (long long) global_num_quadrants,
                      (long long) tree_num_quadrants);

  /* compute index of first tree for this processor */
  first_quadrant = (global_num_quadrants * rank) / num_procs;
  first_tree = first_quadrant / tree_num_quadrants;
  first_tree_quadrant = first_quadrant - first_tree * tree_num_quadrants;
  last_quadrant = (global_num_quadrants * (rank + 1)) / num_procs - 1;
  P4EST_VERBOSEF
    ("first tree %lld first quadrant %lld global quadrant %lld\n",
     (long long) first_tree, (long long) first_tree_quadrant,
     (long long) first_quadrant);
  P4EST_ASSERT (first_tree_quadrant < tree_num_quadrants);

  /* compute index of last tree for this processor */
  if (first_quadrant <= last_quadrant) {
    last_tree = last_quadrant / tree_num_quadrants;
    last_tree_quadrant = last_quadrant - last_tree * tree_num_quadrants;
    P4EST_VERBOSEF
      ("last tree %lld last quadrant %lld global quadrant %lld\n",
       (long long) last_tree, (long long) last_tree_quadrant,
       (long long) last_quadrant);

    /* check ranges of various integers to be 32bit compatible */
    P4EST_ASSERT (first_tree <= last_tree && last_tree < num_trees);
    P4EST_ASSERT (0 <= first_tree_quadrant && 0 <= last_tree_quadrant);
    P4EST_ASSERT (last_tree_quadrant < tree_num_quadrants);
    if (first_tree == last_tree) {
      P4EST_ASSERT (first_tree_quadrant <= last_tree_quadrant);
    }
  }
  else {
    /* an empty processor is encoded by first_tree > last_tree */
    P4EST_VERBOSE ("Empty processor");
    P4EST_ASSERT (0 <= first_tree && 0 <= first_tree_quadrant);
    first_tree = -1;
    last_tree = -2;
    last_tree_quadrant = -1;
  }

  /* allocate trees and quadrants */
  p4est->trees = sc_array_new (sizeof (p4est_tree_t));
  sc_array_resize (p4est->trees, num_trees);
  for (jt = 0; jt < num_trees; ++jt) {
    tree = p4est_tree_array_index (p4est->trees, jt);
    sc_array_init (&tree->quadrants, sizeof (p4est_quadrant_t));
    P4EST_QUADRANT_INIT (&tree->first_desc);
    P4EST_QUADRANT_INIT (&tree->last_desc);
    tree->quadrants_offset = 0;
    for (i = 0; i <= P4EST_QMAXLEVEL; ++i) {
      tree->quadrants_per_level[i] = 0;
    }
    /* levels above QMAXLEVEL are marked invalid with -1 */
    for (; i <= P4EST_MAXLEVEL; ++i) {
      tree->quadrants_per_level[i] = -1;
    }
    tree->maxlevel = 0;
  }
  p4est->local_num_quadrants = 0;
  p4est->global_num_quadrants = 0;

  /* for every locally non-empty tree fill first and last quadrant */
  P4EST_QUADRANT_INIT (&a);
  P4EST_QUADRANT_INIT (&b);
  P4EST_QUADRANT_INIT (&c);
  for (jt = first_tree; jt <= last_tree; ++jt) {
    tree = p4est_tree_array_index (p4est->trees, jt);
    tquadrants = &tree->quadrants;
    quad = NULL;
    if (!fill_uniform) {        /* fill with coarsest possible quadrants */
      must_remove_last_quadrant = 0;

      /* set morton id of first quadrant and initialize user data */
      if (jt == first_tree) {
        p4est_quadrant_set_morton (&a, level, first_tree_quadrant);
      }
      else {
        p4est_quadrant_set_morton (&a, level, 0);
      }
#ifdef P4_TO_P8
      P4EST_LDEBUGF ("tree %lld first morton 0x%llx 0x%llx 0x%llx\n",
                     (long long) jt, (long long) a.x,
                     (long long) a.y, (long long) a.z);
#else
      P4EST_LDEBUGF ("tree %lld first morton 0x%llx 0x%llx\n",
                     (long long) jt, (long long) a.x, (long long) a.y);
#endif
      p4est_quadrant_first_descendant (&a, &tree->first_desc,
                                       P4EST_QMAXLEVEL);

      /* set morton id of last quadrant */
      if (tree_num_quadrants == 1 ||
          (jt == first_tree
           && first_tree_quadrant == tree_num_quadrants - 1)) {
        /* There is only a in the tree */
        quad = p4est_quadrant_array_push (tquadrants);
        *quad = a;
        p4est_quadrant_init_data (p4est, jt, quad, init_fn);
        tree->maxlevel = a.level;
        tree->quadrants_per_level[a.level] = 1;
      }
      else {
        if (jt == last_tree) {
          if (last_tree_quadrant == tree_num_quadrants - 1) {
            quadrant_index = last_tree_quadrant;
          }
          else {
            /* b is one past the owned range and removed again below */
            quadrant_index = last_tree_quadrant + 1;
            must_remove_last_quadrant = 1;
          }
          p4est_quadrant_set_morton (&b, level, quadrant_index);
        }
        else {
          p4est_quadrant_set_morton (&b, level, tree_num_quadrants - 1);
        }
#ifdef P4_TO_P8
        P4EST_LDEBUGF ("tree %lld last morton 0x%llx 0x%llx 0x%llx\n",
                       (long long) jt, (long long) b.x,
                       (long long) b.y, (long long) b.z);
#else
        P4EST_LDEBUGF ("tree %lld last morton 0x%llx 0x%llx\n",
                       (long long) jt, (long long) b.x, (long long) b.y);
#endif
        /* fill up tree between a and b with coarse quadrants */
        p4est_complete_region (p4est, &a, 1, &b, !must_remove_last_quadrant,
                               tree, jt, init_fn);
        quad = p4est_quadrant_array_index (tquadrants,
                                           tquadrants->elem_count - 1);
      }
    }
    else {                      /* fill tree with quadrants of given level */
      /* determine range of quadrants in this tree */
      first_morton = (uint64_t)
        (jt == first_tree ? first_tree_quadrant : 0);
      last_morton = (uint64_t)
        (jt == last_tree ? last_tree_quadrant : tree_num_quadrants - 1);
      count = last_morton - first_morton + 1;
      P4EST_ASSERT (count > 0);

      /* populate quadrant array in Morton order */
      sc_array_resize (tquadrants, (size_t) count);
      for (miu = 0; miu < count; ++miu) {
        quad = p4est_quadrant_array_index (tquadrants, (size_t) miu);
        p4est_quadrant_set_morton (quad, level, first_morton + miu);
        p4est_quadrant_init_data (p4est, jt, quad, init_fn);
      }

      /* remember first tree position */
      p4est_quadrant_first_descendant (p4est_quadrant_array_index
                                       (tquadrants, 0), &tree->first_desc,
                                       P4EST_QMAXLEVEL);

      /* set tree counters */
      tree->maxlevel = (int8_t) level;
      tree->quadrants_per_level[level] = (p4est_locidx_t) count;
    }

#if 0
    P4EST_VERBOSEF ("tree %lld quadrants %llu\n", (long long) jt,
                    (unsigned long long) tquadrants->elem_count);
#endif

    tree->quadrants_offset = p4est->local_num_quadrants;
    p4est->local_num_quadrants += tquadrants->elem_count;
    p4est_quadrant_last_descendant (quad, &tree->last_desc, P4EST_QMAXLEVEL);
  }
  /* assign the cumulative offset to all trees after the local range */
  if (last_tree >= 0) {
    for (; jt < num_trees; ++jt) {
      tree = p4est_tree_array_index (p4est->trees, jt);
      tree->quadrants_offset = p4est->local_num_quadrants;
    }
  }

  /* compute some member variables */
  p4est->first_local_tree = first_tree;
  p4est->last_local_tree = last_tree;
  p4est->global_first_quadrant = P4EST_ALLOC (p4est_gloidx_t, num_procs + 1);
  if (!fill_uniform && level > 0) {
    /* this performs an allgather to count all quadrants */
    p4est_comm_count_quadrants (p4est);
  }
  else {
    /* for a uniform forest we know all global information a priori */
    for (i = 0; i <= num_procs; ++i) {
      p4est->global_first_quadrant[i] =
        (global_num_quadrants * i) / num_procs;
    }
    p4est->global_num_quadrants = global_num_quadrants;
  }

  /* fill in global partition information */
  global_first_position = P4EST_ALLOC_ZERO (p4est_quadrant_t, num_procs + 1);
  for (i = 0; i <= num_procs; ++i) {
    first_quadrant = (global_num_quadrants * i) / num_procs;
    first_tree = first_quadrant / tree_num_quadrants;
    first_tree_quadrant = first_quadrant - first_tree * tree_num_quadrants;
    p4est_quadrant_set_morton (&c, level, first_tree_quadrant);
    global_first_position[i].x = c.x;
    global_first_position[i].y = c.y;
#ifdef P4_TO_P8
    global_first_position[i].z = c.z;
#endif
    global_first_position[i].level = P4EST_QMAXLEVEL;
    global_first_position[i].p.which_tree = first_tree;
  }
  p4est->global_first_position = global_first_position;

  /* print more statistics */
  P4EST_VERBOSEF ("total local quadrants %lld\n",
                  (long long) p4est->local_num_quadrants);

  P4EST_ASSERT (p4est_is_valid (p4est));
  P4EST_GLOBAL_PRODUCTIONF ("Done " P4EST_STRING
                            "_new with %lld total quadrants\n",
                            (long long) p4est->global_num_quadrants);
  return p4est;
}
/** Destroy a forest and release all memory it owns.
 * The connectivity is not destroyed here; it remains the caller's.
 * \param [in] p4est  Forest to be freed; invalid after this call.
 */
void
p4est_destroy (p4est_t * p4est)
{
#ifdef P4EST_DEBUG
  size_t              qz;
#endif
  p4est_topidx_t      jt;
  p4est_tree_t       *tree;

  for (jt = 0; jt < p4est->connectivity->num_trees; ++jt) {
    tree = p4est_tree_array_index (p4est->trees, jt);

#ifdef P4EST_DEBUG
    /* in debug mode return each quadrant's user data to the pool
       individually; in release builds destroying the pool below
       releases all of it at once */
    for (qz = 0; qz < tree->quadrants.elem_count; ++qz) {
      p4est_quadrant_t   *quad =
        p4est_quadrant_array_index (&tree->quadrants, qz);
      p4est_quadrant_free_data (p4est, quad);
    }
#endif

    sc_array_reset (&tree->quadrants);
  }
  sc_array_destroy (p4est->trees);

  if (p4est->user_data_pool != NULL) {
    sc_mempool_destroy (p4est->user_data_pool);
  }
  sc_mempool_destroy (p4est->quadrant_pool);

  P4EST_FREE (p4est->global_first_quadrant);
  P4EST_FREE (p4est->global_first_position);
  P4EST_FREE (p4est);
}
/** Make a deep copy of a forest.
 * Quadrant storage and partition metadata are always copied; quadrant
 * user data is copied only if \a copy_data is true, otherwise the copy's
 * data_size is set to 0 and no user data pool is created.
 * \param [in] input      Valid forest to be copied; not modified.
 * \param [in] copy_data  Nonzero copies per-quadrant user data as well.
 * \return                Newly allocated forest, valid per p4est_is_valid.
 */
p4est_t            *
p4est_copy (p4est_t * input, int copy_data)
{
  const p4est_topidx_t num_trees = input->connectivity->num_trees;
  const p4est_topidx_t first_tree = input->first_local_tree;
  const p4est_topidx_t last_tree = input->last_local_tree;
  size_t              icount;
  size_t              zz;
  p4est_topidx_t      jt;
  p4est_t            *p4est;
  p4est_tree_t       *itree, *ptree;
  p4est_quadrant_t   *iq, *pq;
  sc_array_t         *iquadrants, *pquadrants;

  /* create a shallow copy and zero out dependent fields */
  p4est = P4EST_ALLOC (p4est_t, 1);
  memcpy (p4est, input, sizeof (p4est_t));
  p4est->global_first_quadrant = NULL;
  p4est->global_first_position = NULL;
  p4est->trees = NULL;
  p4est->user_data_pool = NULL;
  p4est->quadrant_pool = NULL;

  /* allocate a user data pool if necessary and a quadrant pool */
  if (copy_data && p4est->data_size > 0) {
    p4est->user_data_pool = sc_mempool_new (p4est->data_size);
  }
  else {
    /* without data copying the result carries no user data at all */
    p4est->data_size = 0;
  }
  p4est->quadrant_pool = sc_mempool_new (sizeof (p4est_quadrant_t));

  /* copy quadrants for each tree */
  p4est->trees = sc_array_new (sizeof (p4est_tree_t));
  sc_array_resize (p4est->trees, num_trees);
  for (jt = 0; jt < num_trees; ++jt) {
    itree = p4est_tree_array_index (input->trees, jt);
    ptree = p4est_tree_array_index (p4est->trees, jt);
    memcpy (ptree, itree, sizeof (p4est_tree_t));
    sc_array_init (&ptree->quadrants, sizeof (p4est_quadrant_t));
  }
  for (jt = first_tree; jt <= last_tree; ++jt) {
    itree = p4est_tree_array_index (input->trees, jt);
    iquadrants = &itree->quadrants;
    icount = iquadrants->elem_count;
    ptree = p4est_tree_array_index (p4est->trees, jt);
    pquadrants = &ptree->quadrants;
    sc_array_resize (pquadrants, icount);
    /* bulk-copy the quadrant structs, then fix up user data pointers */
    memcpy (pquadrants->array, iquadrants->array,
            icount * sizeof (p4est_quadrant_t));
    if (p4est->data_size > 0) {
      P4EST_ASSERT (copy_data);
      for (zz = 0; zz < icount; ++zz) {
        iq = p4est_quadrant_array_index (iquadrants, zz);
        pq = p4est_quadrant_array_index (pquadrants, zz);
        pq->p.user_data = sc_mempool_alloc (p4est->user_data_pool);
        memcpy (pq->p.user_data, iq->p.user_data, p4est->data_size);
      }
    }
  }

  /* allocate and copy global quadrant count */
  p4est->global_first_quadrant =
    P4EST_ALLOC (p4est_gloidx_t, p4est->mpisize + 1);
  memcpy (p4est->global_first_quadrant, input->global_first_quadrant,
          (p4est->mpisize + 1) * sizeof (p4est_gloidx_t));

  /* allocate and copy global partition information */
  p4est->global_first_position = P4EST_ALLOC (p4est_quadrant_t,
                                              p4est->mpisize + 1);
  memcpy (p4est->global_first_position, input->global_first_position,
          (p4est->mpisize + 1) * sizeof (p4est_quadrant_t));

  /* check for valid p4est and return */
  P4EST_ASSERT (p4est_is_valid (p4est));
  return p4est;
}
/** Reset the per-quadrant user data size and reinitialize the data.
 * If \a data_size differs from the current size, the old user data pool
 * is destroyed and replaced (or removed when data_size == 0) and every
 * local quadrant receives a freshly allocated data pointer (or NULL).
 * \param [in] p4est         Valid forest; modified in place.
 * \param [in] data_size     New per-quadrant user data size in bytes.
 * \param [in] init_fn       Called on every local quadrant if non-NULL.
 * \param [in] user_pointer  Replaces p4est->user_pointer unconditionally.
 */
void
p4est_reset_data (p4est_t * p4est, size_t data_size,
                  p4est_init_t init_fn, void *user_pointer)
{
  int                 doresize;
  size_t              zz;
  p4est_topidx_t      jt;
  p4est_quadrant_t   *q;
  p4est_tree_t       *tree;
  sc_array_t         *tquadrants;

  doresize = (p4est->data_size != data_size);

  p4est->data_size = data_size;
  p4est->user_pointer = user_pointer;

  if (doresize) {
    /* old data pointers become invalid when the pool is destroyed */
    if (p4est->user_data_pool != NULL) {
      sc_mempool_destroy (p4est->user_data_pool);
    }
    if (p4est->data_size > 0) {
      p4est->user_data_pool = sc_mempool_new (p4est->data_size);
    }
    else {
      p4est->user_data_pool = NULL;
    }
  }

  for (jt = p4est->first_local_tree; jt <= p4est->last_local_tree; ++jt) {
    tree = p4est_tree_array_index (p4est->trees, jt);
    tquadrants = &tree->quadrants;
    for (zz = 0; zz < tquadrants->elem_count; ++zz) {
      q = p4est_quadrant_array_index (tquadrants, zz);
      if (doresize) {
        if (p4est->data_size > 0) {
          q->p.user_data = sc_mempool_alloc (p4est->user_data_pool);
        }
        else {
          q->p.user_data = NULL;
        }
      }
      if (init_fn != NULL) {
        init_fn (p4est, jt, q);
      }
    }
  }
}
/** Refine a forest according to a callback.
 * Convenience wrapper forwarding to p4est_refine_ext with no level
 * restriction (allowed_level = -1) and no extended refine callback.
 */
void
p4est_refine (p4est_t * p4est, int refine_recursive,
              p4est_refine_t refine_fn, p4est_init_t init_fn)
{
  p4est_refine_ext (p4est, refine_recursive, -1, refine_fn, NULL, init_fn);
}
/** Refine a forest in place, optionally recursively, up to a level cap.
 * Walks every local tree, asks \a refine_fn which quadrants to split,
 * and replaces each accepted quadrant by its P4EST_CHILDREN children.
 * A work list is used so that newly created children can themselves be
 * tested when \a refine_recursive is set.  Recomputes local and global
 * quadrant counts before returning.
 * \param [in] p4est             Valid forest; modified in place.
 * \param [in] refine_recursive  Nonzero tests new children for refinement.
 * \param [in] allowed_level     Maximum level; < 0 means P4EST_QMAXLEVEL,
 *                               0 makes this call a no-op.
 * \param [in] refine_fn         Per-quadrant refinement decision callback.
 * \param [in] refine_ext_fn     Not implemented; must be NULL or the call
 *                               aborts collectively.
 * \param [in] init_fn           Initializes user data of new quadrants.
 */
void
p4est_refine_ext (p4est_t * p4est, int refine_recursive, int allowed_level,
                  p4est_refine_t refine_fn, p4est_refine_ext_t refine_ext_fn,
                  p4est_init_t init_fn)
{
#ifdef P4EST_DEBUG
  size_t              quadrant_pool_size, data_pool_size;
#endif
  int                 dorefine;
  int                 i, maxlevel;
  p4est_topidx_t      nt;
  size_t              incount, current, restpos, movecount;
  sc_list_t          *list;
  p4est_tree_t       *tree;
  p4est_quadrant_t   *q, *qalloc, *qpop;
  p4est_quadrant_t   *c0, *c1, *c2, *c3;
#ifdef P4_TO_P8
  p4est_quadrant_t   *c4, *c5, *c6, *c7;
#endif
  sc_array_t         *tquadrants;

  if (allowed_level == 0 || (refine_fn == NULL && refine_ext_fn == NULL)) {
    /* nothing can be refined: report and return unchanged */
    P4EST_GLOBAL_PRODUCTIONF ("Noop " P4EST_STRING
                              "_refine with %lld total quadrants\n",
                              (long long) p4est->global_num_quadrants);
    return;
  }
  if (allowed_level < 0) {
    allowed_level = P4EST_QMAXLEVEL;
  }
  P4EST_GLOBAL_PRODUCTIONF ("Into " P4EST_STRING
                            "_refine with %lld total quadrants maxlevel %d\n",
                            (long long) p4est->global_num_quadrants,
                            allowed_level);
  P4EST_ASSERT (p4est_is_valid (p4est));
  P4EST_ASSERT (0 <= allowed_level && allowed_level <= P4EST_QMAXLEVEL);
  if (refine_ext_fn != NULL) {
    sc_abort_collective ("Extended refinement callback not implemented");
  }

  /*
     q points to a quadrant that is an array member
     qalloc is a quadrant that has been allocated through quadrant_pool
     qpop is a quadrant that has been allocated through quadrant_pool
     never mix these two types of quadrant pointers

     The quadrant->pad8 field of list quadrants is interpreted as boolean
     and set to true for quadrants that have already been refined.
   */
  list = sc_list_new (NULL);
  p4est->local_num_quadrants = 0;

  /* loop over all local trees */
  for (nt = p4est->first_local_tree; nt <= p4est->last_local_tree; ++nt) {
    tree = p4est_tree_array_index (p4est->trees, nt);
    tree->quadrants_offset = p4est->local_num_quadrants;
    tquadrants = &tree->quadrants;
#ifdef P4EST_DEBUG
    quadrant_pool_size = p4est->quadrant_pool->elem_count;
    data_pool_size = 0;
    if (p4est->user_data_pool != NULL) {
      data_pool_size = p4est->user_data_pool->elem_count;
    }
#endif

    /* initial log message for this tree */
    P4EST_VERBOSEF ("Into refine tree %lld with %llu\n", (long long) nt,
                    (unsigned long long) tquadrants->elem_count);

    /* reset the quadrant counters */
    maxlevel = 0;
    for (i = 0; i <= P4EST_QMAXLEVEL; ++i) {
      tree->quadrants_per_level[i] = 0;
    }

    /* run through the array to find first quadrant to be refined */
    q = NULL;
    dorefine = 0;
    incount = tquadrants->elem_count;
    for (current = 0; current < incount; ++current) {
      q = p4est_quadrant_array_index (tquadrants, current);
      dorefine = (((int) q->level < allowed_level) &&
                  refine_fn (p4est, nt, q));
      if (dorefine) {
        break;
      }
      maxlevel = SC_MAX (maxlevel, (int) q->level);
      ++tree->quadrants_per_level[q->level];
    }
    if (!dorefine) {
      /* no quadrant in this tree is refined: counters are already set */
      p4est->local_num_quadrants += incount;
      continue;
    }

    /* now we have a quadrant to refine, prepend it to the list */
    qalloc = p4est_quadrant_mempool_alloc (p4est->quadrant_pool);
    *qalloc = *q;               /* never prepend array members directly */
    qalloc->pad8 = 0;           /* this quadrant has not been refined yet */
    sc_list_prepend (list, qalloc);     /* only newly allocated quadrants */

    /*
       current points to the next array member to write
       restpos points to the next array member to read
     */
    restpos = current + 1;

    /* run through the list and refine recursively */
    while (list->elem_count > 0) {
      qpop = p4est_quadrant_list_pop (list);
      /* dorefine carries over the decision for the very first quadrant;
         afterwards the callback is consulted for each popped quadrant */
      if (dorefine ||
          ((refine_recursive || !qpop->pad8) &&
           (int) qpop->level < allowed_level &&
           refine_fn (p4est, nt, qpop))) {
        dorefine = 0;
        sc_array_resize (tquadrants,
                         tquadrants->elem_count + P4EST_CHILDREN - 1);

        /* compute children and prepend them to the list */
        p4est_quadrant_free_data (p4est, qpop);
        c0 = qpop;
        c1 = p4est_quadrant_mempool_alloc (p4est->quadrant_pool);
        c2 = p4est_quadrant_mempool_alloc (p4est->quadrant_pool);
        c3 = p4est_quadrant_mempool_alloc (p4est->quadrant_pool);
#ifdef P4_TO_P8
        c4 = p4est_quadrant_mempool_alloc (p4est->quadrant_pool);
        c5 = p4est_quadrant_mempool_alloc (p4est->quadrant_pool);
        c6 = p4est_quadrant_mempool_alloc (p4est->quadrant_pool);
        c7 = p4est_quadrant_mempool_alloc (p4est->quadrant_pool);

        p8est_quadrant_children (qpop, c0, c1, c2, c3, c4, c5, c6, c7);
#else
        p4est_quadrant_children (qpop, c0, c1, c2, c3);
#endif
        p4est_quadrant_init_data (p4est, nt, c0, init_fn);
        p4est_quadrant_init_data (p4est, nt, c1, init_fn);
        p4est_quadrant_init_data (p4est, nt, c2, init_fn);
        p4est_quadrant_init_data (p4est, nt, c3, init_fn);
        c0->pad8 = c1->pad8 = c2->pad8 = c3->pad8 = 1;
#ifdef P4_TO_P8
        p4est_quadrant_init_data (p4est, nt, c4, init_fn);
        p4est_quadrant_init_data (p4est, nt, c5, init_fn);
        p4est_quadrant_init_data (p4est, nt, c6, init_fn);
        p4est_quadrant_init_data (p4est, nt, c7, init_fn);
        c4->pad8 = c5->pad8 = c6->pad8 = c7->pad8 = 1;

        sc_list_prepend (list, c7);
        sc_list_prepend (list, c6);
        sc_list_prepend (list, c5);
        sc_list_prepend (list, c4);
#endif
        sc_list_prepend (list, c3);
        sc_list_prepend (list, c2);
        sc_list_prepend (list, c1);
        sc_list_prepend (list, c0);
      }
      else {
        /* need to make room in the array to store this new quadrant */
        if (restpos < incount && current == restpos) {
          movecount = SC_MIN (incount - restpos, number_toread_quadrants);
          while (movecount > 0) {
            q = p4est_quadrant_array_index (tquadrants, restpos);
            qalloc = p4est_quadrant_mempool_alloc (p4est->quadrant_pool);
            *qalloc = *q;       /* never append array members directly */
            qalloc->pad8 = 0;   /* has not been refined yet */
            sc_list_append (list, qalloc);      /* only newly allocated quadrants */
            --movecount;
            ++restpos;
          }
        }

        /* store new quadrant and update counters */
        q = p4est_quadrant_array_index (tquadrants, current);
        *q = *qpop;
        maxlevel = SC_MAX (maxlevel, (int) qpop->level);
        ++tree->quadrants_per_level[qpop->level];
        ++current;
        sc_mempool_free (p4est->quadrant_pool, qpop);
      }
    }
    tree->maxlevel = (int8_t) maxlevel;
    p4est->local_num_quadrants += tquadrants->elem_count;

    P4EST_ASSERT (restpos == incount);
    P4EST_ASSERT (current == tquadrants->elem_count);
    P4EST_ASSERT (list->first == NULL && list->last == NULL);
    P4EST_ASSERT (quadrant_pool_size == p4est->quadrant_pool->elem_count);
    if (p4est->user_data_pool != NULL) {
      P4EST_ASSERT (data_pool_size + tquadrants->elem_count ==
                    p4est->user_data_pool->elem_count + incount);
    }
    P4EST_ASSERT (p4est_tree_is_sorted (tree));
    P4EST_ASSERT (p4est_tree_is_complete (tree));

    /* final log message for this tree */
    P4EST_VERBOSEF ("Done refine tree %lld now %llu\n", (long long) nt,
                    (unsigned long long) tquadrants->elem_count);
  }
  /* assign the cumulative offset to all trees after the local range */
  if (p4est->last_local_tree >= 0) {
    for (; nt < p4est->connectivity->num_trees; ++nt) {
      tree = p4est_tree_array_index (p4est->trees, nt);
      tree->quadrants_offset = p4est->local_num_quadrants;
    }
  }

  sc_list_destroy (list);

  /* compute global number of quadrants */
  p4est_comm_count_quadrants (p4est);

  P4EST_ASSERT (p4est_is_valid (p4est));
  P4EST_GLOBAL_PRODUCTIONF ("Done " P4EST_STRING
                            "_refine with %lld total quadrants\n",
                            (long long) p4est->global_num_quadrants);
}
/** Coarsen a forest in place, optionally recursively.
 * Slides a window of P4EST_CHILDREN quadrants over every local tree's
 * array; whenever the window holds a complete sibling family accepted by
 * \a coarsen_fn, the family is replaced by its parent.  The hole left by
 * the removed children is tracked via (start, length) and compacted at
 * the end.  Recomputes local and global quadrant counts before returning.
 * \param [in] p4est              Valid forest; modified in place.
 * \param [in] coarsen_recursive  Nonzero re-tests new parents by moving
 *                                the window back over them.
 * \param [in] coarsen_fn         Decision callback for a candidate family.
 * \param [in] init_fn            Initializes user data of new parents.
 */
void
p4est_coarsen (p4est_t * p4est, int coarsen_recursive,
               p4est_coarsen_t coarsen_fn, p4est_init_t init_fn)
{
#ifdef P4EST_DEBUG
  size_t              data_pool_size;
#endif
  int                 i, maxlevel;
  int                 couldbegood;
  size_t              zz;
  size_t              incount, removed;
  size_t              window, start, length, cidz;
  p4est_locidx_t      num_quadrants, prev_offset;
  p4est_topidx_t      jt;
  p4est_tree_t       *tree;
  p4est_quadrant_t   *c[P4EST_CHILDREN];
  p4est_quadrant_t   *cfirst, *clast;
  sc_array_t         *tquadrants;

  P4EST_GLOBAL_PRODUCTIONF ("Into " P4EST_STRING
                            "_coarsen with %lld total quadrants\n",
                            (long long) p4est->global_num_quadrants);
  P4EST_ASSERT (p4est_is_valid (p4est));

  /* loop over all local trees */
  prev_offset = 0;
  for (jt = p4est->first_local_tree; jt <= p4est->last_local_tree; ++jt) {
    tree = p4est_tree_array_index (p4est->trees, jt);
    tquadrants = &tree->quadrants;
#ifdef P4EST_DEBUG
    data_pool_size = 0;
    if (p4est->user_data_pool != NULL) {
      data_pool_size = p4est->user_data_pool->elem_count;
    }
#endif
    removed = 0;

    /* initial log message for this tree */
    P4EST_VERBOSEF ("Into coarsen tree %lld with %llu\n", (long long) jt,
                    (unsigned long long) tquadrants->elem_count);

    /* state information */
    window = 0;                 /* start position of sliding window in array */
    start = 1;                  /* start position of hole in window/array */
    length = 0;                 /* length of hole in window/array */

    /* run through the array and coarsen recursively */
    incount = tquadrants->elem_count;
    while (window + P4EST_CHILDREN + length <= incount) {
      P4EST_ASSERT (window < start);
      /* cidz == incount means no coarsening happened in this iteration */
      cidz = incount;
      couldbegood = 1;
      /* gather a candidate family, skipping over the hole if present */
      for (zz = 0; zz < P4EST_CHILDREN; ++zz) {
        c[zz] = (window + zz < start) ?
          p4est_quadrant_array_index (tquadrants, window + zz) :
          p4est_quadrant_array_index (tquadrants, window + length + zz);
        if (zz != (size_t) p4est_quadrant_child_id (c[zz])) {
          couldbegood = 0;
          break;
        }
      }
      if (couldbegood && p4est_quadrant_is_familypv (c) &&
          coarsen_fn (p4est, jt, c)) {
        /* coarsen this family of quadrants */
        for (zz = 0; zz < P4EST_CHILDREN; ++zz) {
          p4est_quadrant_free_data (p4est, c[zz]);
        }
        tree->quadrants_per_level[c[0]->level] -= P4EST_CHILDREN;
        cfirst = c[0];
        /* overwrite the first child in place with the parent */
        p4est_quadrant_parent (c[0], cfirst);
        p4est_quadrant_init_data (p4est, jt, cfirst, init_fn);
        tree->quadrants_per_level[cfirst->level] += 1;
        p4est->local_num_quadrants -= P4EST_CHILDREN - 1;
        removed += P4EST_CHILDREN - 1;

        cidz = (size_t) p4est_quadrant_child_id (cfirst);
        start = window + 1;
        length += P4EST_CHILDREN - 1;
      }

      if (cidz <= window && coarsen_recursive) {
        /* move the window back so the new parent's family is re-tested */
        window -= cidz;
      }
      else {
        ++window;
        /* the window caught up with the hole: move one quadrant over it */
        if (window == start && start + length < incount) {
          if (length > 0) {
            cfirst = p4est_quadrant_array_index (tquadrants, start);
            clast = p4est_quadrant_array_index (tquadrants, start + length);
            *cfirst = *clast;
          }
          start = window + 1;
        }
      }
    }

    /* adjust final array size */
    if (length > 0) {
      /* compact the tail over the hole, then shrink the array */
      for (zz = start + length; zz < incount; ++zz) {
        cfirst = p4est_quadrant_array_index (tquadrants, zz - length);
        clast = p4est_quadrant_array_index (tquadrants, zz);
        *cfirst = *clast;
      }
      sc_array_resize (tquadrants, incount - length);
    }

    /* compute maximum level */
    maxlevel = 0;
    num_quadrants = 0;
    for (i = 0; i <= P4EST_QMAXLEVEL; ++i) {
      P4EST_ASSERT (tree->quadrants_per_level[i] >= 0);
      num_quadrants += tree->quadrants_per_level[i];    /* same type */
      if (tree->quadrants_per_level[i] > 0) {
        maxlevel = i;
      }
    }
    tree->maxlevel = (int8_t) maxlevel;
    tree->quadrants_offset = prev_offset;
    prev_offset += num_quadrants;

    /* do some sanity checks */
    P4EST_ASSERT (num_quadrants == (p4est_locidx_t) tquadrants->elem_count);
    P4EST_ASSERT (tquadrants->elem_count == incount - removed);
    if (p4est->user_data_pool != NULL) {
      P4EST_ASSERT (data_pool_size - removed ==
                    p4est->user_data_pool->elem_count);
    }
    P4EST_ASSERT (p4est_tree_is_sorted (tree));
    P4EST_ASSERT (p4est_tree_is_complete (tree));

    /* final log message for this tree */
    P4EST_VERBOSEF ("Done coarsen tree %lld now %llu\n", (long long) jt,
                    (unsigned long long) tquadrants->elem_count);
  }
  /* assign the cumulative offset to all trees after the local range */
  if (p4est->last_local_tree >= 0) {
    for (; jt < p4est->connectivity->num_trees; ++jt) {
      tree = p4est_tree_array_index (p4est->trees, jt);
      tree->quadrants_offset = p4est->local_num_quadrants;
    }
  }

  /* compute global number of quadrants */
  p4est_comm_count_quadrants (p4est);

  P4EST_ASSERT (p4est_is_valid (p4est));
  P4EST_GLOBAL_PRODUCTIONF ("Done " P4EST_STRING
                            "_coarsen with %lld total quadrants\n",
                            (long long) p4est->global_num_quadrants);
}
/** Check if the insulation layer of a quadrant overlaps anybody.
 * If yes, the quadrant itself is scheduled for sending.
 * Both quadrants are in the receiving tree's coordinates.
 * \param [in] p4est The forest being balanced.
 * \param [in,out] peers Array of per-peer state, indexed by owner rank.
 * \param [in] qtree Tree id of the receiving tree.
 * \param [in] inter_tree Boolean flag to specify inter-tree communication.
 * \param [in] q The quadrant to be sent if there is overlap.
 * \param [in] insul An insulation quadrant of \a q.
 * \param [in,out] first_peer Lowest peer, will be updated.
 * \param [in,out] last_peer Highest peer, will be updated.
 */
static void
p4est_balance_schedule (p4est_t * p4est, p4est_balance_peer_t * peers,
                        p4est_topidx_t qtree, int inter_tree,
                        const p4est_quadrant_t * q,
                        const p4est_quadrant_t * insul,
                        int *first_peer, int *last_peer)
{
  const int           rank = p4est->mpirank;
  int                 found;
  int                 back, pos;
  int                 owner, first_owner, last_owner;
  p4est_gloidx_t     *global_first_quadrant = p4est->global_first_quadrant;
  p4est_quadrant_t    ld, *s;
  p4est_balance_peer_t *peer;

  P4EST_QUADRANT_INIT (&ld);

  /* querying insul is equivalent to querying first descendant */
  first_owner = p4est_comm_find_owner (p4est, qtree, insul, rank);

  /* querying last descendant */
  p4est_quadrant_last_descendant (insul, &ld, P4EST_QMAXLEVEL);
  last_owner = p4est_comm_find_owner (p4est, qtree, &ld, rank);

  /* send to all processors possibly intersecting insulation */
  for (owner = first_owner; owner <= last_owner; ++owner) {
    if (owner == rank && !inter_tree) {
      /* do not send to self for the same tree */
      continue;
    }
    if (global_first_quadrant[owner] == global_first_quadrant[owner + 1]) {
      /* do not send to empty processors */
      continue;
    }
    peer = peers + owner;

    /* avoid duplicates in the send array by checking the most recently
       scheduled quadrants (at most P4EST_INSUL - 1 of them) */
    found = 0;
    for (back = 0; back < P4EST_INSUL - 1; ++back) {
      pos = (int) peer->send_first.elem_count - back - 1;
      if (pos < 0) {
        break;
      }
      s = (p4est_quadrant_t *) sc_array_index_int (&peer->send_first, pos);
      if (p4est_quadrant_is_equal (s, q) && s->p.piggy2.which_tree == qtree
          && s->p.piggy2.from_tree == q->p.piggy2.from_tree) {
        found = 1;
        break;
      }
    }
    if (found) {
      continue;
    }

    /* copy quadrant into shipping list */
    s = p4est_quadrant_array_push (&peer->send_first);
    *s = *q;
    s->p.piggy2.which_tree = qtree;     /* piggy back tree id */

    /* update lowest and highest peer */
    if (owner != rank) {
      *first_peer = SC_MIN (owner, *first_peer);
      *last_peer = SC_MAX (owner, *last_peer);
    }
  }
}
/** Compute this peer's second-round reply from its first-round receives.
 * Fills peer->send_second with the overlap of the received quadrants and
 * removes duplicates.  With the inspect->use_overlap_new option the seed
 * quadrants replace peer->recv_first; statistics are updated if an
 * inspection structure is attached.
 */
static void
p4est_balance_response (p4est_t * p4est, p4est_balance_peer_t * peer,
                        p4est_connect_type_t balance, sc_array_t * borders)
{
  /* compute and uniqify overlap quadrants */
  if (p4est->inspect != NULL && p4est->inspect->use_overlap_new) {
    sc_array_t         *first_seeds =
      sc_array_new (sizeof (p4est_quadrant_t));
    p4est_tree_compute_overlap_new (p4est, &peer->recv_first,
                                    &peer->send_second, balance, borders,
                                    first_seeds);
    /* remove duplicates from both overlap arrays */
    p4est_tree_uniqify_overlap_new (&peer->send_second);
    p4est_tree_uniqify_overlap_new (first_seeds);
    /* replace peer->recv_first with first_seeds */
    sc_array_resize (&peer->recv_first, first_seeds->elem_count);
    memcpy (peer->recv_first.array, first_seeds->array,
            first_seeds->elem_size * first_seeds->elem_count);
    sc_array_destroy (first_seeds);
  }
  else {
    p4est_tree_compute_overlap (p4est, &peer->recv_first, &peer->send_second);
    p4est_tree_uniqify_overlap (&peer->send_first, &peer->send_second);
  }
  /* update balance statistics if an inspection structure is attached */
  if (p4est->inspect) {
    p4est->inspect->balance_comm_sent += peer->send_second.elem_count;
    if (peer->send_second.elem_count) {
      p4est->inspect->balance_comm_nzpeers++;
    }
  }
}
void
p4est_balance (p4est_t * p4est, p4est_connect_type_t btype,
p4est_init_t init_fn)
{
const int rank = p4est->mpirank;
const int num_procs = p4est->mpisize;
int j, k, l, m, which;
int face;
int first_peer, last_peer;
int quad_contact[P4EST_FACES];
int any_face, tree_contact[P4EST_FACES];
int tree_fully_owned, full_tree[2];
int8_t *tree_flags;
size_t zz, treecount, ctree;
size_t localcount;
size_t qcount, qbytes;
size_t all_incount, all_outcount;
p4est_qcoord_t qh;
const p4est_qcoord_t rh = P4EST_ROOT_LEN;
p4est_topidx_t qtree, nt;
p4est_topidx_t first_tree, last_tree;
p4est_locidx_t skipped;
p4est_balance_peer_t *peers, *peer;
p4est_tree_t *tree;
p4est_quadrant_t mylow, nextlow;
p4est_quadrant_t tosend, insulq, tempq;
p4est_quadrant_t *q, *s;
p4est_connectivity_t *conn = p4est->connectivity;
sc_array_t *qarray, *tquadrants;
sc_array_t *borders;
#ifdef P4EST_DEBUG
size_t data_pool_size;
#endif
int ftransform[P4EST_FTRANSFORM];
int face_axis[3]; /* 3 not P4EST_DIM */
int contact_face_only, contact_edge_only;
#ifdef P4_TO_P8
int edge;
size_t etree;
p8est_edge_info_t ei;
p8est_edge_transform_t *et;
sc_array_t *eta;
#endif
int corner;
p4est_corner_info_t ci;
p4est_corner_transform_t *ct;
sc_array_t *cta;
#ifdef P4EST_MPI
#ifdef P4EST_DEBUG
unsigned checksum;
sc_array_t checkarray;
p4est_gloidx_t ltotal[2], gtotal[2];
#endif /* P4EST_DEBUG */
int i;
int mpiret, rcount;
int first_bound;
int request_first_count, request_second_count, outcount;
int request_send_count, total_send_count, total_recv_count;
int nwin, maxpeers, maxwin, twomaxwin;
int send_zero[2], send_load[2];
int recv_zero[2], recv_load[2];
int my_ranges[2 * p4est_num_ranges];
int *wait_indices;
int *procs, *all_ranges;
int *receiver_ranks, *sender_ranks;
int num_receivers, num_senders;
int *receiver_ranks_ranges, *sender_ranks_ranges;
int num_receivers_ranges, num_senders_ranges;
int *receiver_ranks_notify, *sender_ranks_notify;
int num_receivers_notify, num_senders_notify;
int is_ranges_primary, is_balance_verify;
int is_ranges_active, is_notify_active;
int max_ranges;
MPI_Request *requests_first, *requests_second;
MPI_Request *send_requests_first_count, *send_requests_first_load;
MPI_Request *send_requests_second_count, *send_requests_second_load;
MPI_Status *recv_statuses, *jstatus;
#endif /* P4EST_MPI */
P4EST_GLOBAL_PRODUCTIONF ("Into " P4EST_STRING
"_balance %s with %lld total quadrants\n",
p4est_connect_type_string (btype),
(long long) p4est->global_num_quadrants);
P4EST_ASSERT (p4est_is_valid (p4est));
#ifndef P4_TO_P8
P4EST_ASSERT (btype == P4EST_CONNECT_FACE || btype == P4EST_CONNECT_CORNER);
#else
P4EST_ASSERT (btype == P8EST_CONNECT_FACE || btype == P8EST_CONNECT_EDGE ||
btype == P8EST_CONNECT_CORNER);
#endif
#ifdef P4EST_DEBUG
data_pool_size = 0;
if (p4est->user_data_pool != NULL) {
data_pool_size = p4est->user_data_pool->elem_count;
}
#endif
P4EST_QUADRANT_INIT (&mylow);
P4EST_QUADRANT_INIT (&nextlow);
P4EST_QUADRANT_INIT (&tosend);
P4EST_QUADRANT_INIT (&insulq);
P4EST_QUADRANT_INIT (&tempq);
/* tree status flags (max 8 per tree) */
tree_flags = P4EST_ALLOC (int8_t, conn->num_trees);
for (nt = 0; nt < conn->num_trees; ++nt) {
tree_flags[nt] = 0x00;
}
localcount = (size_t) (p4est->last_local_tree + 1 -
p4est->first_local_tree);
if (p4est->inspect != NULL && p4est->inspect->use_borders) {
borders = sc_array_new_size (sizeof (sc_array_t), localcount);
for (zz = 0; zz < localcount; zz++) {
qarray = (sc_array_t *) sc_array_index (borders, zz);
sc_array_init (qarray, sizeof (p4est_quadrant_t));
}
}
else {
borders = NULL;
}
#ifdef P4EST_MPI
requests_first = P4EST_ALLOC (MPI_Request, 6 * num_procs);
requests_second = requests_first + 1 * num_procs;
send_requests_first_count = requests_first + 2 * num_procs;
send_requests_first_load = requests_first + 3 * num_procs;
send_requests_second_count = requests_first + 4 * num_procs;
send_requests_second_load = requests_first + 5 * num_procs;
recv_statuses = P4EST_ALLOC (MPI_Status, num_procs);
for (j = 0; j < num_procs; ++j) {
requests_first[j] = requests_second[j] = MPI_REQUEST_NULL;
send_requests_first_count[j] = MPI_REQUEST_NULL;
send_requests_first_load[j] = MPI_REQUEST_NULL;
send_requests_second_count[j] = MPI_REQUEST_NULL;
send_requests_second_load[j] = MPI_REQUEST_NULL;
}
wait_indices = P4EST_ALLOC (int, num_procs);
#ifdef P4EST_DEBUG
sc_array_init (&checkarray, 4);
#endif /* P4EST_DEBUG */
#endif /* P4EST_MPI */
/* allocate per peer storage and initialize requests */
peers = P4EST_ALLOC (p4est_balance_peer_t, num_procs);
for (j = 0; j < num_procs; ++j) {
peer = peers + j;
sc_array_init (&peer->send_first, sizeof (p4est_quadrant_t));
sc_array_init (&peer->send_second, sizeof (p4est_quadrant_t));
sc_array_init (&peer->recv_first, sizeof (p4est_quadrant_t));
sc_array_init (&peer->recv_second, sizeof (p4est_quadrant_t));
peer->send_first_count = peer->send_second_count = 0;
peer->recv_first_count = peer->recv_second_count = 0;
peer->have_first_count = peer->have_first_load = 0;
peer->have_second_count = peer->have_second_load = 0;
}
#ifdef P4_TO_P8
eta = &ei.edge_transforms;
sc_array_init (eta, sizeof (p8est_edge_transform_t));
#endif
cta = &ci.corner_transforms;
sc_array_init (cta, sizeof (p4est_corner_transform_t));
/* compute first quadrant on finest level */
mylow.x = p4est->global_first_position[rank].x;
mylow.y = p4est->global_first_position[rank].y;
#ifdef P4_TO_P8
mylow.z = p4est->global_first_position[rank].z;
#endif
mylow.level = P4EST_QMAXLEVEL;
/* and the first finest quadrant of the next processor */
nextlow.x = p4est->global_first_position[rank + 1].x;
nextlow.y = p4est->global_first_position[rank + 1].y;
#ifdef P4_TO_P8
nextlow.z = p4est->global_first_position[rank + 1].z;
#endif
nextlow.level = P4EST_QMAXLEVEL;
/* start balance_A timing */
if (p4est->inspect != NULL) {
p4est->inspect->balance_A = -MPI_Wtime ();
p4est->inspect->balance_A_count_in = 0;
p4est->inspect->balance_A_count_out = 0;
p4est->inspect->use_B = 0;
}
/* loop over all local trees to assemble first send list */
first_tree = p4est->first_local_tree;
last_tree = p4est->last_local_tree;
first_peer = num_procs;
last_peer = -1;
all_incount = 0;
skipped = 0;
for (nt = first_tree; nt <= last_tree; ++nt) {
p4est_comm_tree_info (p4est, nt, full_tree, tree_contact, NULL, NULL);
tree_fully_owned = full_tree[0] && full_tree[1];
any_face = 0;
for (face = 0; face < P4EST_FACES; ++face) {
any_face = any_face || tree_contact[face];
}
if (any_face) {
tree_flags[nt] |= any_face_flag;
}
tree = p4est_tree_array_index (p4est->trees, nt);
tquadrants = &tree->quadrants;
all_incount += tquadrants->elem_count;
/* initial log message for this tree */
P4EST_VERBOSEF ("Into balance tree %lld with %llu\n", (long long) nt,
(unsigned long long) tquadrants->elem_count);
/* local balance first pass */
p4est_balance_subtree (p4est, btype, nt, init_fn);
treecount = tquadrants->elem_count;
P4EST_VERBOSEF ("Balance tree %lld A %llu\n",
(long long) nt, (unsigned long long) treecount);
/* check if this tree is not shared with other processors */
if (tree_fully_owned) {
/* all quadrants in this tree are owned by me */
tree_flags[nt] |= fully_owned_flag;
if (!any_face) {
/* this tree is isolated, no balance between trees */
continue;
}
}
if (borders != NULL) {
qarray = (sc_array_t *) sc_array_index (borders,
(size_t) (nt - first_tree));
}
else {
qarray = NULL;
}
/* identify boundary quadrants and prepare them to be sent */
for (zz = 0; zz < treecount; ++zz) {
/* this quadrant may be on the boundary with a range of processors */
q = p4est_quadrant_array_index (tquadrants, zz);
qh = P4EST_QUADRANT_LEN (q->level);
if (p4est_comm_neighborhood_owned (p4est, nt,
full_tree, tree_contact, q)) {
        /* this quadrant's 3x3 neighborhood is owned by this processor */
++skipped;
continue;
}
if (qarray != NULL) {
s = (p4est_quadrant_t *) sc_array_push (qarray);
*s = *q;
}
#ifdef P4_TO_P8
for (m = 0; m < 3; ++m) {
#if 0
}
#endif
#else
m = 0;
#endif
for (k = 0; k < 3; ++k) {
for (l = 0; l < 3; ++l) {
which = m * 9 + k * 3 + l; /* 2D: 0..8, 3D: 0..26 */
/* exclude myself from the queries */
if (which == P4EST_INSUL / 2) {
continue;
}
/* may modify insulq below, never modify q itself! */
insulq = *q;
insulq.x += (l - 1) * qh;
insulq.y += (k - 1) * qh;
#ifdef P4_TO_P8
insulq.z += (m - 1) * qh;
#endif
/* check boundary status of insulation quadrant */
quad_contact[0] = (insulq.x < 0);
quad_contact[1] = (insulq.x >= rh);
face_axis[0] = quad_contact[0] || quad_contact[1];
quad_contact[2] = (insulq.y < 0);
quad_contact[3] = (insulq.y >= rh);
face_axis[1] = quad_contact[2] || quad_contact[3];
#ifndef P4_TO_P8
face_axis[2] = 0;
#else
quad_contact[4] = (insulq.z < 0);
quad_contact[5] = (insulq.z >= rh);
face_axis[2] = quad_contact[4] || quad_contact[5];
edge = -1;
#endif
contact_edge_only = contact_face_only = 0;
face = -1;
if (face_axis[0] || face_axis[1] || face_axis[2]) {
/* this quadrant is relevant for inter-tree balancing */
if (!face_axis[1] && !face_axis[2]) {
contact_face_only = 1;
face = 0 + quad_contact[1];
}
else if (!face_axis[0] && !face_axis[2]) {
contact_face_only = 1;
face = 2 + quad_contact[3];
}
#ifdef P4_TO_P8
else if (!face_axis[0] && !face_axis[1]) {
contact_face_only = 1;
face = 4 + quad_contact[5];
}
else if (!face_axis[0]) {
contact_edge_only = 1;
edge = 0 + 2 * quad_contact[5] + quad_contact[3];
}
else if (!face_axis[1]) {
contact_edge_only = 1;
edge = 4 + 2 * quad_contact[5] + quad_contact[1];
}
else if (!face_axis[2]) {
contact_edge_only = 1;
edge = 8 + 2 * quad_contact[3] + quad_contact[1];
}
#endif
if (contact_face_only) {
/* square contact across a face */
P4EST_ASSERT (!contact_edge_only);
P4EST_ASSERT (face >= 0 && face < P4EST_FACES);
P4EST_ASSERT (quad_contact[face]);
qtree = p4est_find_face_transform (conn, nt, face, ftransform);
if (qtree >= 0) {
P4EST_ASSERT (tree_contact[face]);
p4est_quadrant_transform_face (q, &tosend, ftransform);
tosend.p.piggy2.from_tree = nt;
p4est_quadrant_transform_face (&insulq, &tempq, ftransform);
p4est_balance_schedule (p4est, peers, qtree, 1,
&tosend, &tempq,
&first_peer, &last_peer);
}
else {
/* goes across a face with no neighbor */
P4EST_ASSERT (!tree_contact[face]);
}
}
#ifdef P4_TO_P8
else if (contact_edge_only) {
/* this quadrant crosses an edge */
P4EST_ASSERT (!contact_face_only);
P4EST_ASSERT (edge >= 0 && edge < P8EST_EDGES);
p8est_find_edge_transform (conn, nt, edge, &ei);
for (etree = 0; etree < eta->elem_count; ++etree) {
et = p8est_edge_array_index (eta, etree);
p8est_quadrant_transform_edge (q, &tosend, &ei, et, 0);
tosend.p.piggy2.from_tree = nt;
p8est_quadrant_transform_edge (&insulq, &tempq, &ei, et, 1);
p4est_balance_schedule (p4est, peers, et->ntree, 1,
&tosend, &tempq,
&first_peer, &last_peer);
}
}
#endif
else {
/* this quadrant crosses a corner */
P4EST_ASSERT (face_axis[0] && face_axis[1]);
corner = quad_contact[1] + 2 * quad_contact[3];
#ifdef P4_TO_P8
P4EST_ASSERT (face_axis[2]);
corner += 4 * quad_contact[5];
#endif
P4EST_ASSERT (p4est_quadrant_touches_corner (q, corner, 1));
P4EST_ASSERT (p4est_quadrant_touches_corner
(&insulq, corner, 0));
p4est_find_corner_transform (conn, nt, corner, &ci);
for (ctree = 0; ctree < cta->elem_count; ++ctree) {
ct = p4est_corner_array_index (cta, ctree);
tosend = *q;
p4est_quadrant_transform_corner (&tosend, (int) ct->ncorner,
0);
tosend.p.piggy2.from_tree = nt;
tempq = insulq;
p4est_quadrant_transform_corner (&tempq, (int) ct->ncorner,
1);
p4est_balance_schedule (p4est, peers, ct->ntree, 1,
&tosend, &tempq, &first_peer,
&last_peer);
}
}
}
else {
/* no inter-tree contact */
tosend = *q;
tosend.p.piggy2.from_tree = nt;
p4est_balance_schedule (p4est, peers, nt, 0,
&tosend, &insulq, &first_peer,
&last_peer);
}
}
}
#ifdef P4_TO_P8
#if 0
{
#endif
}
#endif
}
tquadrants = NULL; /* safeguard */
}
/* end balance_A, start balance_comm */
#ifdef P4EST_MPI
is_ranges_primary = 0;
is_ranges_active = 0;
is_notify_active = 1;
is_balance_verify = 0;
#endif
if (p4est->inspect != NULL) {
p4est->inspect->balance_A += MPI_Wtime ();
p4est->inspect->balance_comm = -MPI_Wtime ();
p4est->inspect->balance_comm_sent = 0;
p4est->inspect->balance_comm_nzpeers = 0;
for (k = 0; k < 2; ++k) {
p4est->inspect->balance_zero_sends[k] = 0;
p4est->inspect->balance_zero_receives[k] = 0;
}
p4est->inspect->balance_ranges = 0.;
p4est->inspect->balance_notify = 0.;
p4est->inspect->balance_notify_allgather = 0.;
#ifdef P4EST_MPI
is_ranges_primary = p4est->inspect->use_balance_ranges;
is_ranges_active = is_ranges_primary;
is_notify_active = !is_ranges_primary;
if (p4est->inspect->use_balance_ranges_notify) {
is_ranges_active = is_notify_active = 1;
}
is_balance_verify = p4est->inspect->use_balance_verify;
#endif
}
#ifdef P4EST_MPI
/* encode and distribute the asymmetric communication pattern */
procs = NULL;
receiver_ranks = sender_ranks = NULL;
num_receivers = num_senders = 0;
receiver_ranks_ranges = sender_ranks_ranges = NULL;
num_receivers_ranges = num_senders_ranges = 0;
receiver_ranks_notify = sender_ranks_notify = NULL;
num_receivers_notify = num_senders_notify = 0;
/* determine asymmetric communication pattern by sc_ranges function */
if (is_ranges_active) {
procs = P4EST_ALLOC (int, num_procs);
receiver_ranks_ranges = P4EST_ALLOC (int, num_procs);
sender_ranks_ranges = P4EST_ALLOC (int, num_procs);
for (j = 0; j < num_procs; ++j) {
procs[j] = (int) peers[j].send_first.elem_count;
}
maxpeers = first_peer;
maxwin = last_peer;
max_ranges = p4est_num_ranges;
if (p4est->inspect != NULL) {
if (p4est->inspect->balance_max_ranges > 0 &&
p4est->inspect->balance_max_ranges < p4est_num_ranges) {
max_ranges = p4est->inspect->balance_max_ranges;
}
p4est->inspect->balance_ranges = -MPI_Wtime ();
}
nwin = sc_ranges_adaptive (p4est_package_id,
p4est->mpicomm, procs, &maxpeers, &maxwin,
max_ranges, my_ranges, &all_ranges);
twomaxwin = 2 * maxwin;
if (p4est->inspect != NULL) {
p4est->inspect->balance_ranges += MPI_Wtime ();
}
sc_ranges_decode (num_procs, rank, maxwin, all_ranges,
&num_receivers_ranges, receiver_ranks_ranges,
&num_senders_ranges, sender_ranks_ranges);
if (is_balance_verify) {
/* verification written after using sc_ranges_decode */
k = 0;
for (j = 0; j < num_procs; ++j) {
if (j == rank) {
continue;
}
if (procs[j] > 0) {
P4EST_ASSERT (k < num_receivers_ranges &&
receiver_ranks_ranges[k] == j);
++k;
}
else {
if (k < num_receivers_ranges && receiver_ranks_ranges[k] == j) {
++k;
}
}
}
P4EST_ASSERT (k == num_receivers_ranges);
/* original verification loop modified and partially redundant */
k = 0;
for (j = first_peer; j <= last_peer; ++j) {
if (j == rank) {
P4EST_ASSERT (k == num_receivers_ranges ||
receiver_ranks_ranges[k] != j);
continue;
}
peer = peers + j;
qcount = peer->send_first.elem_count;
for (i = 0; i < nwin - 1; ++i) {
if (j > my_ranges[2 * i + 1] && j < my_ranges[2 * (i + 1)]) {
break;
}
}
if (i < nwin - 1) {
P4EST_ASSERT (qcount == 0);
P4EST_ASSERT (k == num_receivers_ranges ||
receiver_ranks_ranges[k] != j);
continue;
}
P4EST_ASSERT (k < num_receivers_ranges &&
receiver_ranks_ranges[k] == j);
++k;
}
P4EST_ASSERT (k == num_receivers_ranges);
/* original verification loop of who is sending to me */
k = 0;
for (j = 0; j < num_procs; ++j) {
if (j == rank) {
P4EST_ASSERT (k == num_senders_ranges ||
sender_ranks_ranges[k] != j);
continue;
}
for (i = 0; i < maxwin; ++i) {
first_bound = all_ranges[twomaxwin * j + 2 * i];
if (first_bound == -1 || first_bound > rank) {
P4EST_ASSERT (k == num_senders_ranges ||
sender_ranks_ranges[k] != j);
break;
}
if (rank <= all_ranges[twomaxwin * j + 2 * i + 1]) {
/* processor j is sending to me */
P4EST_ASSERT (k < num_senders_ranges &&
sender_ranks_ranges[k] == j);
++k;
break;
}
}
}
P4EST_ASSERT (k == num_senders_ranges);
}
#ifdef P4EST_DEBUG
P4EST_GLOBAL_STATISTICSF ("Max peers %d ranges %d/%d\n",
maxpeers, maxwin, max_ranges);
sc_ranges_statistics (p4est_package_id, SC_LP_STATISTICS,
p4est->mpicomm, num_procs, procs,
rank, max_ranges, my_ranges);
#endif
SC_FREE (all_ranges);
P4EST_FREE (procs);
P4EST_VERBOSEF ("Peer ranges %d/%d/%d first %d last %d\n",
nwin, maxwin, max_ranges, first_peer, last_peer);
}
/* determine asymmetric communication pattern by sc_notify function */
if (is_notify_active) {
receiver_ranks_notify = P4EST_ALLOC (int, num_procs);
sender_ranks_notify = P4EST_ALLOC (int, num_procs);
num_receivers_notify = num_senders_notify = 0;
for (j = 0; j < num_procs; ++j) {
if (j != rank && peers[j].send_first.elem_count > 0) {
receiver_ranks_notify[num_receivers_notify++] = j;
}
}
if (p4est->inspect != NULL) {
p4est->inspect->balance_notify = -MPI_Wtime ();
}
mpiret = sc_notify (receiver_ranks_notify, num_receivers_notify,
sender_ranks_notify, &num_senders_notify,
p4est->mpicomm);
SC_CHECK_MPI (mpiret);
if (p4est->inspect != NULL) {
p4est->inspect->balance_notify += MPI_Wtime ();
}
/* double-check sc_notify results by sc_notify_allgather */
if (is_balance_verify) {
int *sender_ranks2, num_senders2;
sender_ranks2 = P4EST_ALLOC (int, num_procs);
if (p4est->inspect != NULL) {
p4est->inspect->balance_notify_allgather = -MPI_Wtime ();
}
mpiret = sc_notify_allgather (receiver_ranks_notify,
num_receivers_notify,
sender_ranks2, &num_senders2,
p4est->mpicomm);
SC_CHECK_MPI (mpiret);
if (p4est->inspect != NULL) {
p4est->inspect->balance_notify_allgather += MPI_Wtime ();
}
/* run verification against sc_notify_allgather */
SC_CHECK_ABORT (num_senders2 == num_senders_notify,
"Failed notify_allgather sender count");
for (j = 0; j < num_senders_notify; ++j) {
SC_CHECK_ABORT (sender_ranks2[j] == sender_ranks_notify[j],
"Failed notify_allgather sender rank");
}
P4EST_FREE (sender_ranks2);
}
}
/* verify sc_ranges and sc_notify against each other */
if (is_ranges_active && is_notify_active && is_balance_verify) {
int found_in_ranges, found_in_notify;
/* verify receiver side */
P4EST_ASSERT (num_receivers_notify <= num_receivers_ranges);
k = l = 0;
for (j = 0; j < num_procs; ++j) {
found_in_ranges = found_in_notify = 0;
if (k < num_receivers_ranges && receiver_ranks_ranges[k] == j) {
P4EST_ASSERT (j != rank);
found_in_ranges = 1;
++k;
}
if (l < num_receivers_notify && receiver_ranks_notify[l] == j) {
P4EST_ASSERT (j != rank && found_in_ranges);
found_in_notify = 1;
++l;
}
if (j != rank && peers[j].send_first.elem_count > 0) {
P4EST_ASSERT (found_in_ranges && found_in_notify);
}
if (peers[j].send_first.elem_count == 0) {
P4EST_ASSERT (!found_in_notify);
}
}
P4EST_ASSERT (k == num_receivers_ranges);
P4EST_ASSERT (l == num_receivers_notify);
/* verify sender side */
P4EST_ASSERT (num_senders_notify <= num_senders_ranges);
k = l = 0;
for (j = 0; j < num_procs; ++j) {
found_in_ranges = found_in_notify = 0;
if (k < num_senders_ranges && sender_ranks_ranges[k] == j) {
P4EST_ASSERT (j != rank);
found_in_ranges = 1;
++k;
}
if (l < num_senders_notify && sender_ranks_notify[l] == j) {
P4EST_ASSERT (j != rank && found_in_ranges);
found_in_notify = 1; /* kept for symmetry */
++l;
}
}
P4EST_ASSERT (k == num_senders_ranges);
P4EST_ASSERT (l == num_senders_notify);
}
/*
* loop over all peers and send first round of quadrants
* for intra-tree balancing, each load is contained in one tree
*/
total_send_count = total_recv_count = 0;
request_first_count = request_second_count = request_send_count = 0;
send_zero[0] = send_load[0] = recv_zero[0] = recv_load[0] = 0;
send_zero[1] = send_load[1] = recv_zero[1] = recv_load[1] = 0;
if (is_ranges_primary) {
P4EST_ASSERT (is_ranges_active);
receiver_ranks = receiver_ranks_ranges;
sender_ranks = sender_ranks_ranges;
num_receivers = num_receivers_ranges;
num_senders = num_senders_ranges;
}
else {
P4EST_ASSERT (is_notify_active);
receiver_ranks = receiver_ranks_notify;
sender_ranks = sender_ranks_notify;
num_receivers = num_receivers_notify;
num_senders = num_senders_notify;
}
P4EST_ASSERT (receiver_ranks != NULL && sender_ranks != NULL);
num_receivers_ranges = num_senders_ranges = 0;
num_receivers_notify = num_senders_notify = 0;
/* Use receiver_ranks array to send to them */
for (k = 0; k < num_receivers; ++k) {
j = receiver_ranks[k];
P4EST_ASSERT (j >= first_peer && j <= last_peer && j != rank);
peer = peers + j;
qcount = peer->send_first.elem_count;
/* first send number of quadrants to be expected */
if (qcount > 0) {
P4EST_LDEBUGF ("Balance A send %llu quadrants to %d\n",
(unsigned long long) qcount, j);
++send_load[0];
}
else {
P4EST_ASSERT (is_ranges_primary);
++send_zero[0];
}
peer->send_first_count = (int) qcount;
mpiret = MPI_Isend (&peer->send_first_count, 1, MPI_INT,
j, P4EST_COMM_BALANCE_FIRST_COUNT,
p4est->mpicomm, &send_requests_first_count[j]);
SC_CHECK_MPI (mpiret);
++request_send_count;
/* sort and send the actual quadrants and post receive for reply */
if (qcount > 0) {
sc_array_sort (&peer->send_first, p4est_quadrant_compare_piggy);
#ifdef P4EST_DEBUG
checksum = p4est_quadrant_checksum (&peer->send_first, &checkarray, 0);
P4EST_LDEBUGF ("Balance A send checksum 0x%08x to %d\n", checksum, j);
#endif /* P4EST_DEBUG */
total_send_count += qcount;
qbytes = qcount * sizeof (p4est_quadrant_t);
mpiret = MPI_Isend (peer->send_first.array, (int) qbytes, MPI_BYTE,
j, P4EST_COMM_BALANCE_FIRST_LOAD,
p4est->mpicomm, &send_requests_first_load[j]);
SC_CHECK_MPI (mpiret);
++request_send_count;
mpiret = MPI_Irecv (&peer->recv_second_count, 1, MPI_INT,
j, P4EST_COMM_BALANCE_SECOND_COUNT,
p4est->mpicomm, &requests_second[j]);
SC_CHECK_MPI (mpiret);
++request_second_count;
}
}
peer = NULL;
P4EST_FREE (receiver_ranks_ranges);
P4EST_FREE (receiver_ranks_notify);
receiver_ranks = receiver_ranks_ranges = receiver_ranks_notify = NULL;
/* find out who is sending to me and receive quadrant counts */
for (k = 0; k < num_senders; ++k) {
j = sender_ranks[k];
++request_first_count;
mpiret = MPI_Irecv (&peers[j].recv_first_count, 1, MPI_INT,
j, P4EST_COMM_BALANCE_FIRST_COUNT,
p4est->mpicomm, &requests_first[j]);
SC_CHECK_MPI (mpiret);
}
P4EST_FREE (sender_ranks_ranges);
P4EST_FREE (sender_ranks_notify);
sender_ranks = sender_ranks_ranges = sender_ranks_notify = NULL;
/* wait for quadrant counts and post receive and send for quadrants */
while (request_first_count > 0) {
mpiret = MPI_Waitsome (num_procs, requests_first,
&outcount, wait_indices, recv_statuses);
SC_CHECK_MPI (mpiret);
P4EST_ASSERT (outcount != MPI_UNDEFINED);
P4EST_ASSERT (outcount > 0);
for (i = 0; i < outcount; ++i) {
/* retrieve sender's rank */
j = wait_indices[i];
jstatus = &recv_statuses[i];
wait_indices[i] = -1;
P4EST_ASSERT (j != rank && 0 <= j && j < num_procs);
P4EST_ASSERT (requests_first[j] == MPI_REQUEST_NULL);
P4EST_ASSERT (jstatus->MPI_SOURCE == j);
/* check if we are in receiving count or load */
peer = peers + j;
P4EST_ASSERT (!peer->have_first_load);
if (!peer->have_first_count) {
/* verify message size */
P4EST_ASSERT (jstatus->MPI_TAG == P4EST_COMM_BALANCE_FIRST_COUNT);
mpiret = MPI_Get_count (jstatus, MPI_INT, &rcount);
SC_CHECK_MPI (mpiret);
SC_CHECK_ABORTF (rcount == 1, "Receive count mismatch A %d", rcount);
/* process the count information received */
peer->have_first_count = 1;
qcount = (size_t) peer->recv_first_count;
if (qcount > 0) {
/* received nonzero count, post receive for load */
P4EST_LDEBUGF ("Balance A recv %llu quadrants from %d\n",
(unsigned long long) qcount, j);
P4EST_ASSERT (peer->recv_first.elem_count == 0);
sc_array_resize (&peer->recv_first, qcount);
total_recv_count += qcount;
qbytes = qcount * sizeof (p4est_quadrant_t);
P4EST_ASSERT (requests_first[j] == MPI_REQUEST_NULL);
mpiret = MPI_Irecv (peer->recv_first.array, (int) qbytes, MPI_BYTE,
j, P4EST_COMM_BALANCE_FIRST_LOAD,
p4est->mpicomm, &requests_first[j]);
SC_CHECK_MPI (mpiret);
++recv_load[0];
}
else {
/* will not receive load, close this request */
P4EST_ASSERT (qcount == 0);
P4EST_ASSERT (requests_first[j] == MPI_REQUEST_NULL);
--request_first_count;
++recv_zero[0];
}
}
else {
/* verify received size */
P4EST_ASSERT (jstatus->MPI_TAG == P4EST_COMM_BALANCE_FIRST_LOAD);
P4EST_ASSERT (peer->recv_first_count > 0);
mpiret = MPI_Get_count (jstatus, MPI_BYTE, &rcount);
SC_CHECK_MPI (mpiret);
SC_CHECK_ABORTF (rcount ==
peer->recv_first_count *
(int) sizeof (p4est_quadrant_t),
"Receive load mismatch A %d %dx%llu", rcount,
peer->recv_first_count,
(unsigned long long) sizeof (p4est_quadrant_t));
/* received load, close this request */
peer->have_first_load = 1;
P4EST_ASSERT (requests_first[j] == MPI_REQUEST_NULL);
--request_first_count;
#ifdef P4EST_DEBUG
checksum =
p4est_quadrant_checksum (&peer->recv_first, &checkarray, 0);
P4EST_LDEBUGF ("Balance A recv checksum 0x%08x from %d\n", checksum,
j);
#endif /* P4EST_DEBUG */
/* process incoming quadrants to interleave with communication */
p4est_balance_response (p4est, peer, btype, borders);
qcount = peer->send_second.elem_count;
if (qcount > 0) {
P4EST_LDEBUGF ("Balance B send %llu quadrants to %d\n",
(unsigned long long) qcount, j);
++send_load[1];
}
else {
++send_zero[1];
}
peer->send_second_count = (int) qcount;
mpiret = MPI_Isend (&peer->send_second_count, 1, MPI_INT,
j, P4EST_COMM_BALANCE_SECOND_COUNT,
p4est->mpicomm, &send_requests_second_count[j]);
SC_CHECK_MPI (mpiret);
++request_send_count;
if (qcount > 0) {
#ifdef P4EST_DEBUG
checksum =
p4est_quadrant_checksum (&peer->send_second, &checkarray, 0);
P4EST_LDEBUGF ("Balance B send checksum 0x%08x to %d\n", checksum,
j);
#endif /* P4EST_DEBUG */
total_send_count += qcount;
qbytes = qcount * sizeof (p4est_quadrant_t);
mpiret = MPI_Isend (peer->send_second.array, (int) qbytes, MPI_BYTE,
j, P4EST_COMM_BALANCE_SECOND_LOAD,
p4est->mpicomm, &send_requests_second_load[j]);
SC_CHECK_MPI (mpiret);
++request_send_count;
}
}
}
}
for (j = 0; j < num_procs; ++j) {
P4EST_ASSERT (requests_first[j] == MPI_REQUEST_NULL);
}
#endif /* P4EST_MPI */
/* simulate send and receive with myself across tree boundaries */
peer = peers + rank;
sc_array_sort (&peer->send_first, p4est_quadrant_compare_piggy);
qcount = peer->send_first.elem_count;
peer->recv_first_count = peer->send_first_count = (int) qcount;
qbytes = qcount * sizeof (p4est_quadrant_t);
qarray = &peer->recv_first;
sc_array_resize (qarray, qcount);
memcpy (qarray->array, peer->send_first.array, qbytes);
p4est_balance_response (p4est, peer, btype, borders);
qcount = peer->send_second.elem_count;
peer->recv_second_count = peer->send_second_count = (int) qcount;
qbytes = qcount * sizeof (p4est_quadrant_t);
qarray = &peer->recv_second;
sc_array_resize (qarray, qcount);
memcpy (qarray->array, peer->send_second.array, qbytes);
#ifdef P4EST_MPI
/* receive second round appending to the same receive buffer */
while (request_second_count > 0) {
mpiret = MPI_Waitsome (num_procs, requests_second,
&outcount, wait_indices, recv_statuses);
SC_CHECK_MPI (mpiret);
P4EST_ASSERT (outcount != MPI_UNDEFINED);
P4EST_ASSERT (outcount > 0);
for (i = 0; i < outcount; ++i) {
/* retrieve sender's rank */
j = wait_indices[i];
jstatus = &recv_statuses[i];
wait_indices[i] = -1;
P4EST_ASSERT (j != rank && 0 <= j && j < num_procs);
P4EST_ASSERT (requests_second[j] == MPI_REQUEST_NULL);
P4EST_ASSERT (jstatus->MPI_SOURCE == j);
/* check if we are in receiving count or load */
peer = peers + j;
P4EST_ASSERT (!peer->have_second_load);
if (!peer->have_second_count) {
/* verify message size */
P4EST_ASSERT (jstatus->MPI_TAG == P4EST_COMM_BALANCE_SECOND_COUNT);
mpiret = MPI_Get_count (jstatus, MPI_INT, &rcount);
SC_CHECK_MPI (mpiret);
SC_CHECK_ABORTF (rcount == 1, "Receive count mismatch B %d", rcount);
/* process the count information received */
peer->have_second_count = 1;
qcount = (size_t) peer->recv_second_count;
if (qcount > 0) {
/* received nonzero count, post receive for load */
P4EST_LDEBUGF ("Balance B recv %llu quadrants from %d\n",
(unsigned long long) qcount, j);
P4EST_ASSERT (peer->recv_second.elem_count == 0);
sc_array_resize (&peer->recv_second, qcount);
total_recv_count += qcount;
qbytes = qcount * sizeof (p4est_quadrant_t);
P4EST_ASSERT (requests_second[j] == MPI_REQUEST_NULL);
mpiret = MPI_Irecv (peer->recv_second.array, (int) qbytes,
MPI_BYTE, j, P4EST_COMM_BALANCE_SECOND_LOAD,
p4est->mpicomm, &requests_second[j]);
SC_CHECK_MPI (mpiret);
++recv_load[1];
}
else {
/* will not receive load, close this request */
P4EST_ASSERT (qcount == 0);
P4EST_ASSERT (requests_second[j] == MPI_REQUEST_NULL);
--request_second_count;
++recv_zero[1];
}
}
else {
/* verify received size */
P4EST_ASSERT (jstatus->MPI_TAG == P4EST_COMM_BALANCE_SECOND_LOAD);
P4EST_ASSERT (peer->recv_second_count > 0);
mpiret = MPI_Get_count (jstatus, MPI_BYTE, &rcount);
SC_CHECK_MPI (mpiret);
SC_CHECK_ABORTF (rcount ==
peer->recv_second_count *
(int) sizeof (p4est_quadrant_t),
"Receive load mismatch B %d %dx%llu", rcount,
peer->recv_second_count,
(unsigned long long) sizeof (p4est_quadrant_t));
/* received load, close this request */
peer->have_second_load = 1;
P4EST_ASSERT (requests_second[j] == MPI_REQUEST_NULL);
--request_second_count;
#ifdef P4EST_DEBUG
checksum =
p4est_quadrant_checksum (&peer->recv_second, &checkarray, 0);
P4EST_LDEBUGF ("Balance B recv checksum 0x%08x from %d\n", checksum,
j);
#endif /* P4EST_DEBUG */
}
}
}
for (j = 0; j < num_procs; ++j) {
P4EST_ASSERT (requests_second[j] == MPI_REQUEST_NULL);
}
/* print buffer statistics */
P4EST_VERBOSEF ("first send Z %d L %d recv Z %d L %d\n",
send_zero[0], send_load[0], recv_zero[0], recv_load[0]);
P4EST_VERBOSEF ("second send Z %d L %d recv Z %d L %d\n",
send_zero[1], send_load[1], recv_zero[1], recv_load[1]);
P4EST_VERBOSEF ("total send %d recv %d\n", total_send_count,
total_recv_count);
for (j = 0; j < num_procs; ++j) {
peer = peers + j;
if (peer->send_first.elem_count > 0 || peer->recv_first_count > 0 ||
peer->send_second.elem_count > 0 || peer->recv_second_count > 0) {
P4EST_VERBOSEF ("peer %d first S %llu R %d second S %llu R %d\n",
j, (unsigned long long) peer->send_first.elem_count,
peer->recv_first_count,
(unsigned long long) peer->send_second.elem_count,
peer->recv_second_count);
}
}
#endif /* P4EST_MPI */
/* end balance_comm, start balance_B */
if (p4est->inspect != NULL) {
p4est->inspect->balance_comm += MPI_Wtime ();
p4est->inspect->balance_B = -MPI_Wtime ();
p4est->inspect->balance_B_count_in = 0;
p4est->inspect->balance_B_count_out = 0;
p4est->inspect->use_B = 1;
#ifdef P4EST_MPI
for (k = 0; k < 2; ++k) {
p4est->inspect->balance_zero_sends[k] = send_zero[k];
p4est->inspect->balance_zero_receives[k] = recv_zero[k];
}
#endif
}
/* merge received quadrants */
for (j = 0; j < num_procs; ++j) {
size_t fcount;
/* access peer information */
peer = peers + j;
fcount = peer->recv_first.elem_count;
qcount = fcount + peer->recv_second.elem_count;
P4EST_ASSERT (peer->send_first_count ==
(int) peer->send_first.elem_count);
P4EST_ASSERT (peer->send_second_count ==
(int) peer->send_second.elem_count);
if (p4est->inspect == NULL || !p4est->inspect->use_overlap_new) {
P4EST_ASSERT (peer->recv_first_count ==
(int) peer->recv_first.elem_count);
}
P4EST_ASSERT (peer->recv_second_count ==
(int) peer->recv_second.elem_count);
if (qcount == 0) {
continue;
}
/* merge received quadrants into correct tree */
for (zz = 0; zz < qcount; ++zz) {
s = zz < fcount ? p4est_quadrant_array_index (&peer->recv_first, zz) :
p4est_quadrant_array_index (&peer->recv_second, zz - fcount);
P4EST_ASSERT (p4est_quadrant_is_extended (s));
qtree = s->p.piggy2.which_tree;
if (qtree < first_tree || qtree > last_tree) {
/* this is a corner/edge quadrant from the second pass of balance */
#ifdef P4EST_DEBUG
if (p4est->inspect == NULL || !p4est->inspect->use_overlap_new) {
P4EST_ASSERT (zz >= (size_t) peer->recv_first_count);
P4EST_ASSERT (0 <= qtree && qtree < conn->num_trees);
face_axis[0] = (s->x < 0 || s->x >= rh);
face_axis[1] = (s->y < 0 || s->y >= rh);
#ifndef P4_TO_P8
face_axis[2] = 0;
#else
face_axis[2] = (s->z < 0 || s->z >= rh);
#endif
P4EST_ASSERT ((face_axis[0] && face_axis[1]) ||
(face_axis[0] && face_axis[2]) ||
(face_axis[1] && face_axis[2]));
}
#endif
continue;
}
if (borders == NULL) {
tree = p4est_tree_array_index (p4est->trees, qtree);
q = p4est_quadrant_array_push (&tree->quadrants);
*q = *s;
++tree->quadrants_per_level[q->level];
tree->maxlevel = (int8_t) SC_MAX (tree->maxlevel, q->level);
++p4est->local_num_quadrants;
p4est_quadrant_init_data (p4est, qtree, q, init_fn);
}
else {
qarray = (sc_array_t *) sc_array_index (borders,
(int) (qtree - first_tree));
q = p4est_quadrant_array_push (qarray);
*q = *s;
}
}
}
/* rebalance and clamp result back to original tree boundaries */
p4est->local_num_quadrants = 0;
for (nt = first_tree; nt <= last_tree; ++nt) {
/* check if we are the only processor in an isolated tree */
tree = p4est_tree_array_index (p4est->trees, nt);
tree->quadrants_offset = p4est->local_num_quadrants;
tquadrants = &tree->quadrants;
treecount = tquadrants->elem_count;
if (!(tree_flags[nt] & fully_owned_flag) ||
(tree_flags[nt] & any_face_flag)) {
/* we have most probably received quadrants, run sort and balance */
if (borders == NULL) {
sc_array_sort (tquadrants, p4est_quadrant_compare);
if (p4est->inspect != NULL && p4est->inspect->use_overlap_new) {
p4est_linearize_tree (p4est, tree);
}
p4est_balance_subtree (p4est, btype, nt, init_fn);
}
else {
/* balance the border, add it back into the tree, and linearize */
p4est_balance_border (p4est, btype, nt, init_fn, borders);
}
P4EST_VERBOSEF ("Balance tree %lld B %llu to %llu\n",
(long long) nt,
(unsigned long long) treecount,
(unsigned long long) tquadrants->elem_count);
}
p4est->local_num_quadrants += tquadrants->elem_count;
tquadrants = NULL; /* safeguard */
}
if (last_tree >= 0) {
for (; nt < conn->num_trees; ++nt) {
tree = p4est_tree_array_index (p4est->trees, nt);
tree->quadrants_offset = p4est->local_num_quadrants;
}
}
/* end balance_B */
if (p4est->inspect != NULL) {
p4est->inspect->balance_B += MPI_Wtime ();
}
#ifdef P4EST_MPI
/* wait for all send operations */
if (request_send_count > 0) {
mpiret = MPI_Waitall (4 * num_procs,
send_requests_first_count, MPI_STATUSES_IGNORE);
SC_CHECK_MPI (mpiret);
}
/* compute global sum of send and receive counts */
#ifdef P4EST_DEBUG
gtotal[0] = gtotal[1] = 0;
ltotal[0] = (p4est_gloidx_t) total_send_count;
ltotal[1] = (p4est_gloidx_t) total_recv_count;
mpiret = MPI_Reduce (ltotal, gtotal, 2, P4EST_MPI_GLOIDX,
MPI_SUM, 0, p4est->mpicomm);
SC_CHECK_MPI (mpiret);
P4EST_GLOBAL_STATISTICSF ("Global number of shipped quadrants %lld\n",
(long long) gtotal[0]);
P4EST_ASSERT (rank != 0 || gtotal[0] == gtotal[1]);
#endif /* P4EST_DEBUG */
#endif /* P4EST_MPI */
/* loop over all local trees to finalize balance */
all_outcount = 0;
for (nt = first_tree; nt <= last_tree; ++nt) {
tree = p4est_tree_array_index (p4est->trees, nt);
all_outcount += tree->quadrants.elem_count;
/* final log message for this tree */
P4EST_VERBOSEF ("Done balance tree %lld now %llu\n", (long long) nt,
(unsigned long long) tree->quadrants.elem_count);
}
/* cleanup temporary storage */
P4EST_FREE (tree_flags);
for (j = 0; j < num_procs; ++j) {
peer = peers + j;
sc_array_reset (&peer->send_first);
sc_array_reset (&peer->send_second);
sc_array_reset (&peer->recv_first);
sc_array_reset (&peer->recv_second);
}
P4EST_FREE (peers);
if (borders != NULL) {
for (zz = 0; zz < localcount; zz++) {
qarray = (sc_array_t *) sc_array_index (borders, zz);
sc_array_reset (qarray);
}
sc_array_destroy (borders);
}
#ifdef P4_TO_P8
sc_array_reset (eta);
#endif
sc_array_reset (cta);
#ifdef P4EST_MPI
P4EST_FREE (requests_first); /* includes allocation for requests_second */
P4EST_FREE (recv_statuses);
P4EST_FREE (wait_indices);
#ifdef P4EST_DEBUG
sc_array_reset (&checkarray);
#endif /* P4EST_DEBUG */
#endif /* P4EST_MPI */
/* compute global number of quadrants */
p4est_comm_count_quadrants (p4est);
/* some sanity checks */
P4EST_ASSERT ((p4est_locidx_t) all_outcount == p4est->local_num_quadrants);
P4EST_ASSERT (all_outcount >= all_incount);
if (p4est->user_data_pool != NULL) {
P4EST_ASSERT (data_pool_size + all_outcount - all_incount ==
p4est->user_data_pool->elem_count);
}
P4EST_ASSERT (p4est_is_valid (p4est));
P4EST_ASSERT (p4est_is_balanced (p4est, btype));
P4EST_VERBOSEF ("Balance skipped %lld\n", (long long) skipped);
P4EST_GLOBAL_PRODUCTIONF ("Done " P4EST_STRING
"_balance with %lld total quadrants\n",
(long long) p4est->global_num_quadrants);
}
/** Equipartition the forest across all MPI processes.
 *
 * Convenience wrapper around p4est_partition_ext() with the
 * partition-for-coarsening correction disabled, i.e. the new process
 * boundaries are not adjusted to keep quadrant families together.
 *
 * \param [in,out] p4est      The forest to be repartitioned.
 * \param [in]     weight_fn  Per-quadrant weight callback, or NULL for
 *                            a uniform partition by quadrant count.
 */
void
p4est_partition (p4est_t * p4est, p4est_weight_t weight_fn)
{
  p4est_partition_ext (p4est, 0, weight_fn);
}
/** Repartition the quadrants of a forest across all MPI processes.
 *
 * With \a weight_fn == NULL the global quadrants are divided evenly by
 * count; otherwise each process receives a contiguous segment of the
 * space-filling curve of approximately equal total weight.  Weights
 * returned by \a weight_fn must be non-negative; if all weights are
 * zero the partition is left unchanged.
 *
 * \param [in,out] p4est   The forest; quadrants are shipped between
 *                         processes by p4est_partition_given().
 * \param [in] partition_for_coarsening  If true, the computed counts
 *                         are corrected so that families of potential
 *                         siblings stay on one process.
 * \param [in] weight_fn   Optional per-quadrant weight callback.
 */
void
p4est_partition_ext (p4est_t * p4est, int partition_for_coarsening,
                     p4est_weight_t weight_fn)
{
  p4est_gloidx_t global_shipped = 0;
  const p4est_gloidx_t global_num_quadrants = p4est->global_num_quadrants;
#ifdef P4EST_MPI
  int mpiret;
  int low_source, high_source;
  const int num_procs = p4est->mpisize;
  const int rank = p4est->mpirank;
  const p4est_topidx_t first_tree = p4est->first_local_tree;
  const p4est_topidx_t last_tree = p4est->last_local_tree;
  const p4est_locidx_t local_num_quadrants = p4est->local_num_quadrants;
  int i, p;
  int send_lowest, send_highest;
  int num_sends, rcount, base_index;
  size_t lz;
  ssize_t lowers;
  p4est_topidx_t nt;
  p4est_locidx_t kl, qlocal;
  p4est_locidx_t *num_quadrants_in_proc;
  p4est_gloidx_t prev_quadrant, next_quadrant;
  p4est_gloidx_t send_index, recv_low, recv_high, qcount;
  p4est_gloidx_t *send_array;
  int64_t weight, weight_sum;
  int64_t cut, my_lowcut, my_highcut;
  int64_t *local_weights; /* cumulative weights by quadrant */
  int64_t *global_weight_sums;
  p4est_quadrant_t *q;
  p4est_tree_t *tree;
  MPI_Request *send_requests, recv_requests[2];
  MPI_Status recv_statuses[2];
  p4est_locidx_t num_corrected;
#endif /* P4EST_MPI */
  P4EST_ASSERT (p4est_is_valid (p4est));
  P4EST_GLOBAL_PRODUCTIONF
    ("Into " P4EST_STRING
     "_partition with %lld total quadrants\n",
     (long long) p4est->global_num_quadrants);
  /* this function does nothing in a serial setup */
  if (p4est->mpisize == 1) {
    P4EST_GLOBAL_PRODUCTION ("Done " P4EST_STRING "_partition no shipping\n");
    return;
  }
#ifdef P4EST_MPI
  /* allocate new quadrant distribution counts */
  num_quadrants_in_proc = P4EST_ALLOC (p4est_locidx_t, num_procs);
  if (weight_fn == NULL) {
    /* divide up the quadrants equally by count */
    for (p = 0, next_quadrant = 0; p < num_procs; ++p) {
      prev_quadrant = next_quadrant;
      next_quadrant = (global_num_quadrants * (p + 1)) / num_procs;
      qcount = next_quadrant - prev_quadrant;
      P4EST_ASSERT (0 <= qcount
                    && qcount <= (p4est_gloidx_t) P4EST_LOCIDX_MAX);
      num_quadrants_in_proc[p] = (p4est_locidx_t) (qcount);
    }
  }
  else {
    /* do a weighted partition */
    local_weights = P4EST_ALLOC (int64_t, local_num_quadrants + 1);
    global_weight_sums = P4EST_ALLOC (int64_t, num_procs + 1);
    P4EST_VERBOSEF ("local quadrant count %lld\n",
                    (long long) local_num_quadrants);
    /* linearly sum weights across all trees: local_weights[k] holds the
     * cumulative weight of the first k local quadrants */
    kl = 0;
    local_weights[0] = 0;
    for (nt = first_tree; nt <= last_tree; ++nt) {
      tree = p4est_tree_array_index (p4est->trees, nt);
      for (lz = 0; lz < tree->quadrants.elem_count; ++lz, ++kl) {
        q = p4est_quadrant_array_index (&tree->quadrants, lz);
        weight = (int64_t) weight_fn (p4est, nt, q);
        P4EST_ASSERT (weight >= 0);
        local_weights[kl + 1] = local_weights[kl] + weight;
      }
    }
    P4EST_ASSERT (kl == local_num_quadrants);
    weight_sum = local_weights[local_num_quadrants];
    P4EST_VERBOSEF ("local weight sum %lld\n", (long long) weight_sum);
    /* distribute local weight sums */
    global_weight_sums[0] = 0;
    mpiret = MPI_Allgather (&weight_sum, 1, MPI_LONG_LONG_INT,
                            &global_weight_sums[1], 1, MPI_LONG_LONG_INT,
                            p4est->mpicomm);
    SC_CHECK_MPI (mpiret);
    /* adjust all arrays to reflect the global weight: afterwards
     * global_weight_sums[i] is the total weight owned by ranks < i */
    for (i = 0; i < num_procs; ++i) {
      global_weight_sums[i + 1] += global_weight_sums[i];
    }
    if (rank > 0) {
      /* shift local cumulative weights into the global numbering */
      weight_sum = global_weight_sums[rank];
      for (kl = 0; kl <= local_num_quadrants; ++kl) {
        local_weights[kl] += weight_sum;
      }
    }
    P4EST_ASSERT (local_weights[0] == global_weight_sums[rank]);
    P4EST_ASSERT (local_weights[local_num_quadrants] ==
                  global_weight_sums[rank + 1]);
    weight_sum = global_weight_sums[num_procs];
    if (rank == 0) {
      for (i = 0; i <= num_procs; ++i) {
        P4EST_GLOBAL_VERBOSEF ("Global weight sum [%d] %lld\n",
                               i, (long long) global_weight_sums[i]);
      }
    }
    /* if all quadrants have zero weight we do nothing */
    if (weight_sum == 0) {
      P4EST_FREE (local_weights);
      P4EST_FREE (global_weight_sums);
      P4EST_FREE (num_quadrants_in_proc);
      P4EST_GLOBAL_PRODUCTION ("Done " P4EST_STRING
                               "_partition no shipping\n");
      return;
    }
    /* determine processor ids to send to: this rank is responsible for
     * every weight cut that falls inside its owned weight range
     * (global_weight_sums[rank], global_weight_sums[rank + 1]] */
    send_lowest = num_procs;
    send_highest = 0;
    for (i = 1; i <= num_procs; ++i) {
      cut = (weight_sum * i) / num_procs;
      if (global_weight_sums[rank] < cut &&
          cut <= global_weight_sums[rank + 1]) {
        send_lowest = SC_MIN (send_lowest, i);
        send_highest = SC_MAX (send_highest, i);
      }
    }
    /*
     * send low cut to send_lowest..send_highest
     * and high cut to send_lowest-1..send_highest-1
     */
    P4EST_LDEBUGF ("my send peers %d %d\n", send_lowest, send_highest);
    num_sends = 2 * (send_highest - send_lowest + 1);
    if (num_sends <= 0) {
      num_sends = 0;
      send_requests = NULL;
      send_array = NULL;
    }
    else {
      send_requests = P4EST_ALLOC (MPI_Request, num_sends);
      send_array = P4EST_ALLOC (p4est_gloidx_t, num_sends);
      lowers = 0;
      for (i = send_lowest; i <= send_highest; ++i) {
        base_index = 2 * (i - send_lowest);
        if (i < num_procs) {
          /* do binary search in the weight array; restart the search at
           * the previous result since the cuts are nondecreasing */
          lowers = sc_search_lower_bound64 ((weight_sum * i) / num_procs,
                                            local_weights,
                                            (size_t) local_num_quadrants + 1,
                                            (size_t) lowers);
          P4EST_ASSERT (lowers > 0
                        && (p4est_locidx_t) lowers <= local_num_quadrants);
          /* send low bound */
          send_index = send_array[base_index + 1] =
            (p4est_gloidx_t) lowers + p4est->global_first_quadrant[rank];
          P4EST_LDEBUGF ("send A %d %d %d index %lld base %d to %d\n",
                         send_lowest, i, send_highest,
                         (long long) send_index, base_index + 1, i);
          mpiret =
            MPI_Isend (&send_array[base_index + 1], 1, P4EST_MPI_GLOIDX, i,
                       P4EST_COMM_PARTITION_WEIGHTED_LOW, p4est->mpicomm,
                       &send_requests[base_index + 1]);
          SC_CHECK_MPI (mpiret);
        }
        else {
          /* i == num_procs: there is no rank i to send a low bound to */
          lowers = 0;
          send_index = global_num_quadrants;
          send_requests[base_index + 1] = MPI_REQUEST_NULL;
          send_array[base_index + 1] = -1;
        }
        /* send high bound */
        send_array[base_index] = send_index;
        P4EST_LDEBUGF ("send B %d %d %d index %lld base %d to %d\n",
                       send_lowest, i, send_highest,
                       (long long) send_index, base_index, i - 1);
        mpiret = MPI_Isend (&send_array[base_index], 1, P4EST_MPI_GLOIDX,
                            i - 1, P4EST_COMM_PARTITION_WEIGHTED_HIGH,
                            p4est->mpicomm, &send_requests[base_index]);
        SC_CHECK_MPI (mpiret);
      }
    }
    /* determine processor ids to receive from and post irecv */
    i = 0;
    my_lowcut = (weight_sum * rank) / num_procs;
    if (my_lowcut == 0) {
      /* rank 0 (and any rank whose low cut is zero) starts at index 0 */
      recv_low = 0;
      recv_requests[0] = MPI_REQUEST_NULL;
      low_source = -1;
    }
    else {
      for (; i < num_procs; ++i) {
        if (global_weight_sums[i] < my_lowcut &&
            my_lowcut <= global_weight_sums[i + 1]) {
          P4EST_LDEBUGF ("recv A from %d\n", i);
          mpiret = MPI_Irecv (&recv_low, 1, P4EST_MPI_GLOIDX, i,
                              P4EST_COMM_PARTITION_WEIGHTED_LOW,
                              p4est->mpicomm, &recv_requests[0]);
          SC_CHECK_MPI (mpiret);
          break;
        }
      }
      P4EST_ASSERT (i < num_procs);
      low_source = i;
    }
    my_highcut = (weight_sum * (rank + 1)) / num_procs;
    if (my_highcut == 0) {
      recv_high = 0;
      recv_requests[1] = MPI_REQUEST_NULL;
      high_source = -1;
    }
    else {
      /* continue scanning at i: the high cut is >= the low cut */
      for (; i < num_procs; ++i) {
        if (global_weight_sums[i] < my_highcut &&
            my_highcut <= global_weight_sums[i + 1]) {
          P4EST_LDEBUGF ("recv B from %d\n", i);
          mpiret = MPI_Irecv (&recv_high, 1, P4EST_MPI_GLOIDX, i,
                              P4EST_COMM_PARTITION_WEIGHTED_HIGH,
                              p4est->mpicomm, &recv_requests[1]);
          SC_CHECK_MPI (mpiret);
          break;
        }
      }
      P4EST_ASSERT (i < num_procs);
      high_source = i;
    }
    P4EST_LDEBUGF ("my recv peers %d %d cuts %lld %lld\n",
                   low_source, high_source,
                   (long long) my_lowcut, (long long) my_highcut);
    /* free temporary memory */
    P4EST_FREE (local_weights);
    P4EST_FREE (global_weight_sums);
    /* wait for sends and receives to complete */
    if (num_sends > 0) {
      mpiret = MPI_Waitall (num_sends, send_requests, MPI_STATUSES_IGNORE);
      SC_CHECK_MPI (mpiret);
      P4EST_FREE (send_requests);
      P4EST_FREE (send_array);
    }
    mpiret = MPI_Waitall (2, recv_requests, recv_statuses);
    SC_CHECK_MPI (mpiret);
    /* sanity-check source, tag and length of the received messages */
    if (my_lowcut != 0) {
      SC_CHECK_ABORT (recv_statuses[0].MPI_SOURCE == low_source,
                      "Wait low source");
      SC_CHECK_ABORT (recv_statuses[0].MPI_TAG ==
                      P4EST_COMM_PARTITION_WEIGHTED_LOW, "Wait low tag");
      mpiret = MPI_Get_count (&recv_statuses[0], P4EST_MPI_GLOIDX, &rcount);
      SC_CHECK_MPI (mpiret);
      SC_CHECK_ABORTF (rcount == 1, "Wait low count %d", rcount);
    }
    if (my_highcut != 0) {
      SC_CHECK_ABORT (recv_statuses[1].MPI_SOURCE == high_source,
                      "Wait high source");
      SC_CHECK_ABORT (recv_statuses[1].MPI_TAG ==
                      P4EST_COMM_PARTITION_WEIGHTED_HIGH, "Wait high tag");
      mpiret = MPI_Get_count (&recv_statuses[1], P4EST_MPI_GLOIDX, &rcount);
      SC_CHECK_MPI (mpiret);
      SC_CHECK_ABORTF (rcount == 1, "Wait high count %d", rcount);
    }
    /* communicate the quadrant ranges */
    qcount = recv_high - recv_low;
    P4EST_LDEBUGF ("weighted partition count %lld\n", (long long) qcount);
    P4EST_ASSERT (qcount >= 0 && qcount <= (p4est_gloidx_t) P4EST_LOCIDX_MAX);
    qlocal = (p4est_locidx_t) qcount;
    mpiret = MPI_Allgather (&qlocal, 1, P4EST_MPI_LOCIDX,
                            num_quadrants_in_proc, 1, P4EST_MPI_LOCIDX,
                            p4est->mpicomm);
    SC_CHECK_MPI (mpiret);
    /* NOTE(review): the repair of empty per-process ranges below is
     * intentionally disabled, so a weighted partition may leave some
     * processes without quadrants. */
#if(0)
    /* run through the count array and repair zero ranges */
    for (i = 0; i < num_procs; ++i) {
      if (num_quadrants_in_proc[i] == 0) {
        for (p = i - 1; p >= 0; --p) {
          P4EST_ASSERT (num_quadrants_in_proc[p] > 0);
          if (num_quadrants_in_proc[p] > 1) {
            --num_quadrants_in_proc[p];
            ++num_quadrants_in_proc[i];
            break;
          }
        }
        if (p < 0) {
          for (p = i + 1; p < num_procs; ++p) {
            P4EST_ASSERT (num_quadrants_in_proc[p] >= 0);
            if (num_quadrants_in_proc[p] > 1) {
              --num_quadrants_in_proc[p];
              ++num_quadrants_in_proc[i];
              break;
            }
          }
          P4EST_ASSERT (p < num_procs);
        }
      }
    }
#endif
  }
  /* correct partition */
  if (partition_for_coarsening) {
    num_corrected =
      p4est_partition_for_coarsening (p4est, num_quadrants_in_proc);
    P4EST_GLOBAL_INFOF
      ("Designated partition for coarsening %lld quadrants moved\n",
       (long long) num_corrected);
  }
  /* run the partition algorithm with proper quadrant counts */
  global_shipped = p4est_partition_given (p4est, num_quadrants_in_proc);
  P4EST_FREE (num_quadrants_in_proc);
  /* check validity of the p4est */
  P4EST_ASSERT (p4est_is_valid (p4est));
#endif /* P4EST_MPI */
  P4EST_GLOBAL_PRODUCTIONF
    ("Done " P4EST_STRING "_partition shipped %lld quadrants %.3g%%\n",
     (long long) global_shipped,
     global_shipped * 100. / global_num_quadrants);
}
#ifdef P4EST_MPI
/** Correct proposed partition counts so that no family of potential
 * siblings (up to P4EST_CHILDREN quadrants sharing one parent) is split
 * across a process boundary, which would forbid coarsening there.
 *
 * Every process owning quadrants near a proposed cut sends the parent
 * of its quadrant nearest to the cut, together with the family size,
 * to the process that will own the cut.  Each receiving process then
 * shifts its cut to the closest family boundary via
 * p4est_partition_correction() and the corrections are allgathered and
 * applied to \a num_quadrants_in_proc.
 *
 * \param [in] p4est                       Valid forest (pre-partition).
 * \param [in,out] num_quadrants_in_proc   Proposed quadrant counts per
 *                                         process, corrected in place.
 * \return  Absolute number of quadrants moved by the correction.
 */
static p4est_locidx_t
p4est_partition_for_coarsening (p4est_t * p4est,
                                p4est_locidx_t * num_quadrants_in_proc)
{
  int num_procs = p4est->mpisize;
  int rank = p4est->mpirank;
  int mpiret;
  p4est_gloidx_t global_num_quadrants = p4est->global_num_quadrants;
  int i, send_lowest, send_highest, num_sends;
  int parent_index;
  p4est_quadrant_t *q;
  p4est_tree_t *tree;
  p4est_locidx_t num_quadrants_in_tree;
  p4est_topidx_t it, tree_index;
  p4est_gloidx_t iq, quad_id_near_cut;
  p4est_gloidx_t min_quad_id, max_quad_id;
  int8_t quad_near_cut_level;
  p4est_gloidx_t *partition_now = p4est->global_first_quadrant;
  p4est_gloidx_t *partition_new;
  p4est_quadrant_t *parent_send;
  MPI_Request *send_requests;
  MPI_Request *receive_requests;
  int receive_lowest, receive_highest, num_receives;
  /* defensively initialized; both are assigned whenever num_receives > 0
   * since some process always owns this rank's cut */
  int process_with_cut = -1, process_with_cut_recv_id = -1;
  p4est_quadrant_t *parent_receive;
  int *receive_process;
  int *correction, correction_local = 0;
  int current_proc, next_proc;
  p4est_locidx_t num_moved_quadrants;
  /* create array with first quadrants of new partition */
  partition_new = P4EST_ALLOC (p4est_gloidx_t, num_procs + 1);
  partition_new[0] = 0;
  for (i = 1; i < num_procs; i++) { /* loop over all processes */
    partition_new[i] = partition_new[i - 1] + num_quadrants_in_proc[i - 1];
  }
  partition_new[num_procs] = global_num_quadrants;
  /* BEGIN: send */
  if (partition_now[rank] < partition_now[rank + 1]) {
    /* if this process has quadrants */
    /* determine number and min/max process ids to send to */
    num_sends = 0; /* number of sends */
    send_lowest = num_procs; /* lowest process id */
    send_highest = 0; /* highest process id */
    for (i = 1; i < num_procs; i++) {
      /* loop over all processes (without first) */
      if (partition_new[i] < partition_new[i + 1] &&
          partition_now[rank] <= partition_new[i] + P4EST_CHILDREN - 2 &&
          partition_new[i] - P4EST_CHILDREN + 1 < partition_now[rank + 1]) {
        /* if this process has relevant quadrants for process `i` */
        num_sends++;
        send_lowest = SC_MIN (send_lowest, i);
        send_highest = SC_MAX (send_highest, i);
      }
    }
  }
  else {
    /* set number of messages to send */
    num_sends = 0;
  }
  if (num_sends > 0) { /* if this process sends messages */
    /* allocate send messages */
    send_requests = P4EST_ALLOC (MPI_Request, num_sends);
    parent_send = P4EST_ALLOC (p4est_quadrant_t, num_sends);
    /* array index of send messages */
    parent_index = 0;
    for (i = send_lowest; i <= send_highest; i++) {
      /* loop over all process candidates to send to */
      if (!(partition_new[i] < partition_new[i + 1] &&
            partition_now[rank] <= partition_new[i] + P4EST_CHILDREN - 2 &&
            partition_new[i] - P4EST_CHILDREN + 1 <
            partition_now[rank + 1])) {
        /* if this process has no relevant quadrants for process `i` */
        continue;
      }
      /* get nearest quadrant `quad_id_near_cut` to cut `partition_new[i]` */
      if (partition_now[rank] <= partition_new[i] &&
          partition_new[i] < partition_now[rank + 1]) {
        /* if cut is owned by this process */
        quad_id_near_cut = partition_new[i];
      }
      else {
        /* The cut lies outside this process's quadrant range, so the
         * nearest local quadrant is the end of the range the cut falls
         * on.  Do not use abs() here: its int argument silently
         * truncates 64-bit p4est_gloidx_t differences on large forests.
         * This comparison is equivalent to the distance comparison for
         * all in-range configurations. */
        if (partition_new[i] < partition_now[rank]) {
          quad_id_near_cut = partition_now[rank];
        }
        else {
          quad_id_near_cut = partition_now[rank + 1] - 1;
        }
      }
      /* get tree `tree` of quadrant `quad_id_near_cut` */
      num_quadrants_in_tree = partition_now[rank + 1] - partition_now[rank];
      for (it = p4est->first_local_tree; it <= p4est->last_local_tree; it++) {
        /* loop over all local trees */
        tree = p4est_tree_array_index (p4est->trees, it);
        if (tree->quadrants_offset <= quad_id_near_cut - partition_now[rank]) {
          tree_index = it;
        }
        else {
          num_quadrants_in_tree = tree->quadrants_offset;
          break;
        }
      }
      tree = p4est_tree_array_index (p4est->trees, tree_index);
      num_quadrants_in_tree -= tree->quadrants_offset;
      /* get quadrant with index `quad_id_near_cut` */
      q = p4est_quadrant_array_index (&tree->quadrants,
                                      quad_id_near_cut - partition_now[rank] -
                                      tree->quadrants_offset);
      /* get level of quadrant near cut */
      quad_near_cut_level = q->level;
      if (quad_near_cut_level > 0) {
        /* if quadrant near cut is not root of tree, i.e. level is not zero */
        /* get parent of quadrant near cut */
        p4est_quadrant_parent (q, &parent_send[parent_index]);
        /* get min quadrant with same parent */
        min_quad_id = quad_id_near_cut;
        for (iq = quad_id_near_cut;
             iq >= SC_MAX (partition_now[rank] + tree->quadrants_offset,
                           partition_new[i] - P4EST_CHILDREN + 1); iq--) {
          /* loop over eligible quadrants */
          /* get quadrant with index `iq` */
          q = p4est_quadrant_array_index (&tree->quadrants,
                                          iq - partition_now[rank] -
                                          tree->quadrants_offset);
          /* check quadrant `iq` */
          if (q->level == quad_near_cut_level) { /* if same level */
            if (p4est_quadrant_is_parent (&parent_send[parent_index], q)) {
              /* if same parent */
              min_quad_id = iq;
            }
            else {
              break;
            }
          }
          else {
            break;
          }
        }
        /* get max quadrant with same parent */
        max_quad_id = quad_id_near_cut;
        for (iq = quad_id_near_cut;
             iq <=
             SC_MIN (partition_now[rank] + tree->quadrants_offset +
                     num_quadrants_in_tree - 1,
                     partition_new[i] + P4EST_CHILDREN - 2); iq++) {
          /* loop over eligible quadrants */
          /* get quadrant `iq` */
          q = p4est_quadrant_array_index (&tree->quadrants,
                                          iq - partition_now[rank] -
                                          tree->quadrants_offset);
          /* check quadrant `iq` */
          if (q->level == quad_near_cut_level) { /* if same level */
            if (p4est_quadrant_is_parent (&parent_send[parent_index], q)) {
              /* if same parent */
              max_quad_id = iq;
            }
            else {
              break;
            }
          }
          else {
            break;
          }
        }
        /* write tree */
        parent_send[parent_index].p.piggy3.which_tree = tree_index;
        if (quad_id_near_cut == partition_new[i]) {
          /* if this process has cut */
          /* encode number of quadrants with same parent before and after
           * `partition_new[i]` into one integer */
          parent_send[parent_index].p.piggy3.local_num =
            (partition_new[i] - min_quad_id) * P4EST_CHILDREN +
            (max_quad_id - partition_new[i]);
        }
        else {
          /* write number of quadrants with same parent */
          parent_send[parent_index].p.piggy3.local_num =
            max_quad_id - min_quad_id + 1;
        }
        /* MPI send: parent */
        mpiret = MPI_Isend (&parent_send[parent_index],
                            sizeof (p4est_quadrant_t), MPI_BYTE,
                            i,
                            P4EST_COMM_PARTITION_CORRECTION, p4est->mpicomm,
                            &send_requests[parent_index]);
        SC_CHECK_MPI (mpiret);
      }
      else { /* if quadrant near cut is root of tree, i.e. level is zero */
        /* set parent as tree root `q` */
        parent_send[parent_index].level = q->level;
        parent_send[parent_index].x = q->x;
        parent_send[parent_index].y = q->y;
#ifdef P4_TO_P8
        parent_send[parent_index].z = q->z;
#endif
        /* write tree */
        parent_send[parent_index].p.piggy3.which_tree = tree_index;
        /* write number of quadrants with same "parent" */
        parent_send[parent_index].p.piggy3.local_num = 1;
        /* MPI send: root of tree */
        mpiret = MPI_Isend (&parent_send[parent_index],
                            sizeof (p4est_quadrant_t), MPI_BYTE,
                            i,
                            P4EST_COMM_PARTITION_CORRECTION, p4est->mpicomm,
                            &send_requests[parent_index]);
        SC_CHECK_MPI (mpiret);
      }
      /* increment parent index */
      parent_index++;
    }
  }
  /* END: send */
  /* BEGIN: receive */
  if (rank != 0 && partition_new[rank] < partition_new[rank + 1]) {
    /* if this process should get quadrants */
    /* determine process ids to receive from */
    num_receives = 0; /* number of receives */
    receive_lowest = num_procs; /* lowest process id */
    receive_highest = 0; /* highest process id */
    for (i = 0; i < num_procs; i++) { /* loop over all processes */
      if (partition_now[i] < partition_now[i + 1] &&
          partition_now[i] <= partition_new[rank] + P4EST_CHILDREN - 2 &&
          partition_new[rank] - P4EST_CHILDREN + 1 < partition_now[i + 1]) {
        /* if process `i` has relevant quadrants for this process */
        num_receives++;
        receive_lowest = SC_MIN (receive_lowest, i);
        receive_highest = SC_MAX (receive_highest, i);
        if (partition_now[i] <= partition_new[rank] &&
            partition_new[rank] < partition_now[i + 1]) {
          /* if cut is owned by process `i` */
          /* process id that sends parent of cut quadrant */
          process_with_cut = i;
          /* array index of receive messages of process with cut */
          process_with_cut_recv_id = num_receives - 1;
        }
      }
    }
  }
  else {
    /* set number of messages to receive */
    num_receives = 0;
    /* set correction */
    correction_local = 0;
  }
  if (num_receives > 0) { /* if this process receives messages */
    /* allocate receive messages */
    receive_requests = P4EST_ALLOC (MPI_Request, num_receives);
    parent_receive = P4EST_ALLOC (p4est_quadrant_t, num_receives);
    receive_process = P4EST_ALLOC (int, num_receives);
    /* array index of receive messages */
    parent_index = 0;
    for (i = receive_lowest; i <= receive_highest; i++) {
      /* loop over all process candidates to receive from */
      if (!(partition_now[i] < partition_now[i + 1] &&
            partition_now[i] <= partition_new[rank] + P4EST_CHILDREN - 2 &&
            partition_new[rank] - P4EST_CHILDREN + 1 <
            partition_now[i + 1])) {
        /* if process `i` has no relevant quadrants for this process */
        continue;
      }
      /* store process index */
      receive_process[parent_index] = i;
      /* MPI receive */
      mpiret = MPI_Irecv (&parent_receive[parent_index],
                          sizeof (p4est_quadrant_t), MPI_BYTE,
                          i,
                          P4EST_COMM_PARTITION_CORRECTION, p4est->mpicomm,
                          &receive_requests[parent_index]);
      SC_CHECK_MPI (mpiret);
      /* increment parent index */
      parent_index++;
    }
  }
  /* END: receive */
  /* BEGIN: wait for MPI communication to complete */
  if (num_sends > 0) {
    /* wait for sends to complete */
    mpiret = MPI_Waitall (num_sends, send_requests, MPI_STATUSES_IGNORE);
    SC_CHECK_MPI (mpiret);
    /* free send memory */
    P4EST_FREE (parent_send);
    P4EST_FREE (send_requests);
  }
  if (num_receives > 0) {
    /* wait for receives to complete */
    mpiret =
      MPI_Waitall (num_receives, receive_requests, MPI_STATUSES_IGNORE);
    SC_CHECK_MPI (mpiret);
    /* free receive memory */
    P4EST_FREE (receive_requests);
  }
  /* END: wait for MPI communication to complete */
  /* BEGIN: compute correction with received quadrants */
  if (num_receives > 0) {
    /* if this process received quadrants */
    min_quad_id = partition_new[rank]; /* min quadrant id with same parent */
    max_quad_id = partition_new[rank]; /* max quadrant id with same parent */
    for (i = 0; i < num_receives; i++) {
      /* loop over all received (parent or tree root) quadrants */
      if (parent_receive[i].p.piggy3.which_tree ==
          parent_receive[process_with_cut_recv_id].p.piggy3.which_tree
          &&
          p4est_quadrant_is_equal (&parent_receive[i],
                                   &parent_receive[process_with_cut_recv_id]
          )) {
        /* if trees and parents are equal */
        /* decrease/increase min/max quadrant with same parent */
        if (receive_process[i] < process_with_cut) {
          /* if before process with cut */
          /* decrease min quadrant */
          min_quad_id -= parent_receive[i].p.piggy3.local_num;
        }
        else if (receive_process[i] > process_with_cut) {
          /* if after process with cut */
          /* increase max quadrant */
          max_quad_id += parent_receive[i].p.piggy3.local_num;
        }
        else {
          /* the cut owner encoded counts before/after the cut in one
           * integer: quotient and remainder modulo P4EST_CHILDREN */
          /* decrease min quadrant */
          min_quad_id -=
            parent_receive[i].p.piggy3.local_num / P4EST_CHILDREN;
          /* increase max quadrant */
          max_quad_id +=
            parent_receive[i].p.piggy3.local_num % P4EST_CHILDREN;
        }
      }
    }
    /* compute correction */
    correction_local =
      (int) p4est_partition_correction (partition_new, num_procs, rank,
                                        min_quad_id, max_quad_id);
    /* free receive memory */
    P4EST_FREE (parent_receive);
    P4EST_FREE (receive_process);
  }
  /* END: compute correction with received parent quadrants */
  /* free memory */
  P4EST_FREE (partition_new);
  /* communicate corrections */
  correction = P4EST_ALLOC (int, num_procs);
  mpiret = MPI_Allgather (&correction_local, 1, MPI_INT,
                          correction, 1, MPI_INT, p4est->mpicomm);
  SC_CHECK_MPI (mpiret);
  /* correct partition */
  current_proc =
    p4est_next_nonempty_process (0, num_procs, num_quadrants_in_proc);
  next_proc =
    p4est_next_nonempty_process (current_proc + 1, num_procs,
                                 num_quadrants_in_proc);
  num_moved_quadrants = 0;
  while (current_proc < num_procs) {
    /* loop over all non empty processes */
    /* compute correct partition for process `current_proc` */
    if (0 < current_proc && current_proc < num_procs) {
      /* if any process but first */
      num_quadrants_in_proc[current_proc] += correction[current_proc];
      num_moved_quadrants += (p4est_locidx_t) abs (correction[current_proc]);
    }
    if (next_proc < num_procs) {
      /* Subtract the correction applied at the next nonempty boundary.
       * The guard also prevents an out-of-bounds read of
       * correction[num_procs] when current_proc == 0 is the only
       * nonempty process (p4est_next_nonempty_process then presumably
       * returns num_procs -- no further cut exists to correct). */
      num_quadrants_in_proc[current_proc] -= correction[next_proc];
    }
    /* increase process ids */
    current_proc = next_proc;
    next_proc =
      p4est_next_nonempty_process (next_proc + 1, num_procs,
                                   num_quadrants_in_proc);
  }
  /* free memory */
  P4EST_FREE (correction);
  /* return absolute number of moved quadrants */
  return num_moved_quadrants;
}
#endif
/** Compute a partition-independent checksum of the local forest and
 * combine it into a parallel checksum over all processes.
 *
 * Each local tree contributes an adler32 checksum over its serialized
 * quadrants; the per-tree checksums are chained with adler32_combine
 * and finally reduced in parallel by p4est_comm_checksum().
 *
 * \param [in] p4est  A valid forest.
 * \return            The parallel forest checksum.
 */
unsigned
p4est_checksum (p4est_t * p4est)
{
  uLong crc_accum;
  uLong crc_tree;
  size_t bytes_tree, bytes_total;
  p4est_topidx_t tt;
  p4est_tree_t *tr;
  sc_array_t scratch;
  P4EST_ASSERT (p4est_is_valid (p4est));
  /* scratch array of 4-byte words filled by the per-tree checksum */
  sc_array_init (&scratch, 4);
  bytes_total = 0;
  crc_accum = adler32 (0, Z_NULL, 0);
  for (tt = p4est->first_local_tree; tt <= p4est->last_local_tree; ++tt) {
    tr = p4est_tree_array_index (p4est->trees, tt);
    crc_tree =
      (uLong) p4est_quadrant_checksum (&tr->quadrants, &scratch, 0);
    /* the tree serialized into scratch.elem_count 4-byte words */
    bytes_tree = 4 * scratch.elem_count;
    crc_accum = adler32_combine (crc_accum, crc_tree, (z_off_t) bytes_tree);
    bytes_total += bytes_tree;
  }
  sc_array_reset (&scratch);
  /* every quadrant serializes to (P4EST_DIM + 1) 4-byte words */
  P4EST_ASSERT ((p4est_locidx_t) bytes_total ==
                p4est->local_num_quadrants * 4 * (P4EST_DIM + 1));
  return p4est_comm_checksum (p4est, (unsigned) crc_accum, bytes_total);
}
/** Save the connectivity and forest to a file on disk.
 *
 * Rank 0 first writes the connectivity, pads the file to a 16-byte
 * boundary and appends a header (format magic, structure sizes, data
 * size, save_data flag, process count, quadrant and tree partitions).
 * Then every rank writes its own quadrants at a precomputed offset:
 * either sequentially, passing the base offset rank-to-rank via
 * MPI_Send/MPI_Recv, or concurrently through MPI I/O when
 * P4EST_MPIIO_WRITE is defined.
 *
 * \param [in] filename   Name of the file to (re)write.
 * \param [in] p4est      Valid forest with valid connectivity.
 * \param [in] save_data  If true, quadrant user data is saved as well;
 *                        ignored when p4est->data_size == 0.
 */
void
p4est_save (const char *filename, p4est_t * p4est, int save_data)
{
  const int headc = 6;  /* number of uint64_t header entries */
  const int align = 16; /* byte alignment of the forest section */
#ifdef P4EST_MPI
  int mpiret;
#ifndef P4EST_MPIIO_WRITE
  MPI_Status mpistatus;
#endif
#endif
  int retval;
  int num_procs, rank;
  int i;
  long fpos = -1, foffset;
  size_t data_size, qbuf_size;
  size_t zz, zcount;
  uint64_t *u64a;
  FILE *file;
#ifdef P4EST_MPIIO_WRITE
  MPI_File mpifile;
  MPI_Offset mpipos;
  MPI_Offset mpithis;
#else
  long fthis;
#endif
  p4est_topidx_t jt;
  p4est_tree_t *tree;
  p4est_quadrant_t lq, *gfpos, *q;
  p4est_qcoord_t qbuffer[P4EST_DIM + 1];
  p4est_qcoord_t *qall, *qpos;
  sc_array_t *tquadrants;
  P4EST_GLOBAL_PRODUCTIONF ("Into " P4EST_STRING "_save %s\n", filename);
  P4EST_ASSERT (p4est_connectivity_is_valid (p4est->connectivity));
  P4EST_ASSERT (p4est_is_valid (p4est));
  /* when data is not saved the size is set to zero */
  data_size = save_data ? p4est->data_size : 0;
  /* zero data size is effectively not saved */
  if (data_size == 0)
    save_data = 0;
  num_procs = p4est->mpisize;
  rank = p4est->mpirank;
  gfpos = p4est->global_first_position;
  /* on-disk size of one quadrant record: x, y (, z), level */
  qbuf_size = (P4EST_DIM + 1) * sizeof (p4est_qcoord_t);
  if (rank == 0) {
    p4est_connectivity_save (filename, p4est->connectivity);
    /* open file after writing connectivity to it */
    file = fopen (filename, "ab");
    SC_CHECK_ABORT (file != NULL, "file open");
    /* align the start of the forest */
    fpos = ftell (file);
    SC_CHECK_ABORT (fpos > 0, "file tell");
    while (fpos % align != 0) {
      /* fputc returns the character written: 0 here on success */
      retval = fputc ('\0', file);
      SC_CHECK_ABORT (retval == 0, "file align");
      ++fpos;
    }
    /* write format and partition information */
    u64a = P4EST_ALLOC (uint64_t, num_procs + headc);
    u64a[0] = P4EST_ONDISK_FORMAT;
    u64a[1] = (uint64_t) sizeof (p4est_qcoord_t);
    u64a[2] = (uint64_t) sizeof (p4est_quadrant_t);
    u64a[3] = (uint64_t) data_size;
    u64a[4] = (uint64_t) save_data;
    u64a[5] = (uint64_t) num_procs;
    for (i = 0; i < num_procs; ++i) {
      u64a[i + headc] = (uint64_t) p4est->global_first_quadrant[i + 1];
    }
    sc_fwrite (u64a, sizeof (uint64_t), (size_t) (num_procs + headc),
               file, "write quadrant partition");
    P4EST_FREE (u64a);
    fpos += (headc + num_procs) * sizeof (uint64_t);
    sc_fwrite (gfpos, sizeof (p4est_quadrant_t),
               (size_t) (num_procs + 1), file, "write tree partition");
    fpos += (num_procs + 1) * sizeof (p4est_quadrant_t);
#ifdef P4EST_MPIIO_WRITE
    /* We will close the sequential access to the file */
    /* best attempt to flush file to disk */
    retval = fflush (file);
    SC_CHECK_ABORT (retval == 0, "file flush");
#ifdef P4EST_HAVE_FSYNC
    retval = fsync (fileno (file));
    SC_CHECK_ABORT (retval == 0, "file fsync");
#endif
    retval = fclose (file);
    SC_CHECK_ABORT (retval == 0, "file close");
    file = NULL;
#endif
  }
  else
    file = NULL;
#ifndef P4EST_MPIIO_WRITE
  if (rank > 0) {
    /* wait for sequential synchronization */
#ifdef P4EST_MPI
    /* token passing: rank r blocks until rank r - 1 is done writing
     * and forwards the aligned base offset fpos of the forest section */
    mpiret = MPI_Recv (&fpos, 1, MPI_LONG, rank - 1, P4EST_COMM_SAVE,
                       p4est->mpicomm, &mpistatus);
    SC_CHECK_MPI (mpiret);
#endif
    /* open file after all previous processors have written to it */
    file = fopen (filename, "rb+");
    SC_CHECK_ABORT (file != NULL, "file open");
  }
#else
  /* Every core opens the file in append mode */
  mpiret = MPI_File_open (p4est->mpicomm, (char *) filename,
                          MPI_MODE_WRONLY | MPI_MODE_APPEND |
                          MPI_MODE_UNIQUE_OPEN, MPI_INFO_NULL, &mpifile);
  SC_CHECK_MPI (mpiret);
  mpiret = MPI_File_get_position (mpifile, &mpipos);
  SC_CHECK_MPI (mpiret);
#endif
  if (rank > 0) {
    /* seek to the beginning of this processor's storage: lower ranks
     * write their quadrants (+ data) plus marker quadrants -- one
     * last-tree marker per rank and one tree-count marker per local
     * tree, which telescopes to 2 * rank + gfpos[rank].p.which_tree */
    foffset = (long)
      (p4est->global_first_quadrant[rank] * qbuf_size +
       (2 * rank + gfpos[rank].p.which_tree) * sizeof (p4est_quadrant_t));
    if (save_data) {
      foffset += p4est->global_first_quadrant[rank] * data_size;
    }
#ifndef P4EST_MPIIO_WRITE
    fthis = fpos + foffset;
    retval = fseek (file, fthis, SEEK_SET);
    SC_CHECK_ABORT (retval == 0, "seek data");
#else
    mpithis = mpipos + (MPI_Offset) foffset;
    mpiret = MPI_File_seek (mpifile, mpithis, MPI_SEEK_SET);
    SC_CHECK_MPI (mpiret);
#endif
  }
  /*
   * Write local last tree and quadrant information.
   * Each processor writes so many quadrants:
   * 1 (number of last populated tree)
   * + gfpos[rank + 1].p.which_tree - gfpos[rank].p.which_tree + 1
   * (number of tree count quadrants)
   * + local_num_quadrants (the quadrants of all trees)
   * and all quadrant data if save_data is true.
   */
  /* marker quadrant tagged with the bytes 'p' 'r' 'o' 'c' */
  P4EST_QUADRANT_INIT (&lq);
  lq.level = (int8_t) 'p';
  lq.pad8 = (int8_t) 'r';
  lq.pad16 = (int16_t) ('o' + ('c' << 8));
  lq.p.which_tree = p4est->last_local_tree;
#ifndef P4EST_MPIIO_WRITE
  sc_fwrite (&lq, sizeof (p4est_quadrant_t), 1, file, "write last tree");
#else
  sc_mpi_write (mpifile, &lq, sizeof (p4est_quadrant_t), MPI_BYTE,
                "write last tree");
#endif
  for (jt = p4est->first_local_tree; jt <= p4est->last_local_tree; ++jt) {
    tree = p4est_tree_array_index (p4est->trees, jt)
    tquadrants = &tree->quadrants;
    zcount = tquadrants->elem_count;
    /* per-tree marker quadrant tagged 't' 'r' 'e' 'e' */
    P4EST_QUADRANT_INIT (&lq);
    lq.level = (int8_t) 't';
    lq.pad8 = (int8_t) 'r';
    lq.pad16 = (int16_t) ('e' + ('e' << 8));
    lq.p.piggy3.local_num = (p4est_locidx_t) zcount;
#ifndef P4EST_MPIIO_WRITE
    sc_fwrite (&lq, sizeof (p4est_quadrant_t), 1, file, "write tree count");
#else
    sc_mpi_write (mpifile, &lq, sizeof (p4est_quadrant_t), MPI_BYTE,
                  "write tree count");
#endif
    if (!save_data) {
      /* no user data: pack all quadrant records and write them at once */
      qpos = qall = P4EST_ALLOC (p4est_qcoord_t, (P4EST_DIM + 1) * zcount);
      for (zz = 0; zz < zcount; ++zz) {
        q = p4est_quadrant_array_index (tquadrants, zz);
        *qpos++ = q->x;
        *qpos++ = q->y;
#ifdef P4_TO_P8
        *qpos++ = q->z;
#endif
        *qpos++ = (p4est_qcoord_t) q->level;
      }
#ifndef P4EST_MPIIO_WRITE
      sc_fwrite (qall, qbuf_size, zcount, file, "write quadrants");
#else
      sc_mpi_write (mpifile, qall, qbuf_size * zcount, MPI_BYTE,
                    "write quadrants");
#endif
      P4EST_FREE (qall);
    }
    else {
      /* with user data: interleave one record and its data per quadrant */
      for (zz = 0; zz < zcount; ++zz) {
        q = p4est_quadrant_array_index (tquadrants, zz);
        qbuffer[0] = q->x;
        qbuffer[1] = q->y;
#ifdef P4_TO_P8
        qbuffer[2] = q->z;
#endif
        qbuffer[P4EST_DIM] = (p4est_qcoord_t) q->level;
#ifndef P4EST_MPIIO_WRITE
        sc_fwrite (qbuffer, qbuf_size, 1, file, "write quadrant");
        sc_fwrite (q->p.user_data, data_size, 1, file, "write quadrant data");
#else
        sc_mpi_write (mpifile, qbuffer, qbuf_size, MPI_BYTE,
                      "write quadrant");
        sc_mpi_write (mpifile, q->p.user_data, data_size, MPI_BYTE,
                      "write quadrant data");
#endif
      }
    }
  }
  if (p4est->last_local_tree < gfpos[rank + 1].p.which_tree) {
    /* this rank's range ends before the next rank's first tree begins:
     * write an extra marker quadrant tagged 'x' 't' 'r' 'a' */
    P4EST_QUADRANT_INIT (&lq);
    lq.level = (int8_t) 'x';
    lq.pad8 = (int8_t) 't';
    lq.pad16 = (int16_t) ('r' + ('a' << 8));
#ifndef P4EST_MPIIO_WRITE
    sc_fwrite (&lq, sizeof (p4est_quadrant_t), 1, file, "write extra tree");
#else
    sc_mpi_write (mpifile, &lq, sizeof (p4est_quadrant_t), MPI_BYTE,
                  "write extra tree");
#endif
  }
#ifndef P4EST_MPIIO_WRITE
  /* best attempt to flush file to disk */
  retval = fflush (file);
  SC_CHECK_ABORT (retval == 0, "file flush");
#ifdef P4EST_HAVE_FSYNC
  retval = fsync (fileno (file));
  SC_CHECK_ABORT (retval == 0, "file fsync");
#endif
  retval = fclose (file);
  SC_CHECK_ABORT (retval == 0, "file close");
  file = NULL;
  /* initiate sequential synchronization */
#ifdef P4EST_MPI
  if (rank < num_procs - 1) {
    /* pass the write token (base offset) on to the next rank */
    mpiret =
      MPI_Send (&fpos, 1, MPI_LONG, rank + 1, P4EST_COMM_SAVE,
                p4est->mpicomm);
    SC_CHECK_MPI (mpiret);
  }
#endif
#else
  mpiret = MPI_File_close (&mpifile);
  SC_CHECK_MPI (mpiret);
#endif
  P4EST_GLOBAL_PRODUCTION ("Done " P4EST_STRING "_save\n");
}
/* Load a forest previously written by p4est_save from a single file.
 * The file begins with the connectivity, followed by a 16-byte-aligned
 * header of 'headc' uint64_t entries (format magic, qcoord size,
 * quadrant size, per-quadrant data size, data-saved flag, saved MPI
 * size), the quadrant partition, the tree partition, and then each
 * rank's quadrant storage.  The file may have been written with fewer
 * MPI ranks than are loading it; the surplus ranks get empty
 * partitions and the forest is repartitioned at the end.
 * All fatal conditions abort via SC_CHECK_ABORT. */
p4est_t *
p4est_load (const char *filename, MPI_Comm mpicomm, size_t data_size,
int load_data, void *user_pointer,
p4est_connectivity_t ** connectivity)
{
const int headc = 6;   /* number of uint64_t header entries, must match p4est_save */
const int align = 16;  /* header starts at the next multiple of this after the connectivity */
int retval;
int mpiret;
int num_procs, rank;
int fnum_procs;        /* MPI size recorded in the file; may be < num_procs */
int i;
long fpos;
int save_data;         /* nonzero if per-quadrant user data is stored in the file */
uint64_t *u64a;
size_t save_data_size; /* per-quadrant data size recorded in the file */
size_t qbuf_size;      /* on-disk size of one quadrant: DIM coordinates + level */
size_t zz, zcount;
FILE *file;
p4est_topidx_t jt;
p4est_connectivity_t *conn;
p4est_t *p4est;
p4est_tree_t *tree;
p4est_quadrant_t lq, *gfpos, *q;
p4est_qcoord_t qbuffer[P4EST_DIM + 1];
p4est_qcoord_t *qall, *qpos;
sc_array_t *tquadrants;
P4EST_GLOBAL_PRODUCTIONF ("Into " P4EST_STRING "_load %s\n", filename);
/* p4est_connectivity_load leaves the byte offset past the connectivity in fpos */
conn = *connectivity = p4est_connectivity_load (filename, &fpos);
p4est = P4EST_ALLOC_ZERO (p4est_t, 1);
/* round the offset up to the alignment used by p4est_save */
fpos = ((fpos + align - 1) / align) * align;
/* retrieve MPI information */
mpiret = MPI_Comm_size (mpicomm, &num_procs);
SC_CHECK_MPI (mpiret);
mpiret = MPI_Comm_rank (mpicomm, &rank);
SC_CHECK_MPI (mpiret);
/* assign some data members */
p4est->mpicomm = mpicomm;
p4est->mpisize = num_procs;
p4est->mpirank = rank;
p4est->data_size = data_size;
p4est->user_pointer = user_pointer;
p4est->connectivity = conn;
qbuf_size = (P4EST_DIM + 1) * sizeof (p4est_qcoord_t);
/* allocate memory pools */
if (data_size > 0) {
p4est->user_data_pool = sc_mempool_new (data_size);
}
else {
p4est->user_data_pool = NULL;
/* without a data size there is nothing to load per quadrant */
load_data = 0;
}
p4est->quadrant_pool = sc_mempool_new (sizeof (p4est_quadrant_t));
/* create tree array, one (initially empty) tree per connectivity tree */
p4est->trees = sc_array_new (sizeof (p4est_tree_t));
sc_array_resize (p4est->trees, conn->num_trees);
for (jt = 0; jt < conn->num_trees; ++jt) {
tree = p4est_tree_array_index (p4est->trees, jt);
sc_array_init (&tree->quadrants, sizeof (p4est_quadrant_t));
P4EST_QUADRANT_INIT (&tree->first_desc);
P4EST_QUADRANT_INIT (&tree->last_desc);
tree->quadrants_offset = 0;
for (i = 0; i <= P4EST_QMAXLEVEL; ++i) {
tree->quadrants_per_level[i] = 0;
}
/* levels beyond QMAXLEVEL are unused and marked -1 by convention */
for (; i <= P4EST_MAXLEVEL; ++i) {
tree->quadrants_per_level[i] = -1;
}
tree->maxlevel = 0;
}
/* allocate partition data */
p4est->global_first_quadrant = P4EST_ALLOC (p4est_gloidx_t, num_procs + 1);
gfpos = p4est->global_first_position =
P4EST_ALLOC (p4est_quadrant_t, num_procs + 1);
/* open file and skip connectivity */
file = fopen (filename, "rb");
SC_CHECK_ABORT (file != NULL, "file open");
retval = fseek (file, fpos, SEEK_SET);
SC_CHECK_ABORT (retval == 0, "seek header");
/* read format and partition information */
u64a = P4EST_ALLOC (uint64_t, headc);
sc_fread (u64a, sizeof (uint64_t), (size_t) headc, file, "read format");
SC_CHECK_ABORT (u64a[0] == P4EST_ONDISK_FORMAT, "invalid format");
SC_CHECK_ABORT (u64a[1] == (uint64_t) sizeof (p4est_qcoord_t),
"invalid coordinate size");
SC_CHECK_ABORT (u64a[2] == (uint64_t) sizeof (p4est_quadrant_t),
"invalid quadrant size");
save_data_size = (size_t) u64a[3];
save_data = (int) u64a[4];
if (load_data) {
/* loading user data requires it to be present with a matching size */
SC_CHECK_ABORT (save_data_size == data_size, "invalid data size");
SC_CHECK_ABORT (save_data, "quadrant data not saved");
}
fnum_procs = u64a[5];
/* loading onto fewer ranks than the file was saved with is not supported */
SC_CHECK_ABORT (fnum_procs <= num_procs, "invalid MPI size");
u64a = P4EST_REALLOC (u64a, uint64_t, fnum_procs);
//SC_CHECK_ABORT (u64a[5] == (uint64_t) num_procs, "invalid MPI size");
sc_fread (u64a, sizeof (uint64_t), (size_t) fnum_procs, file,
"read quadrant partition");
p4est->global_first_quadrant[0] = 0;
/* the strategy is for the last (num_procs - fnum_procs) to be empty
* partitions, then to repartition at the end */
for (i = 0; i < fnum_procs; ++i) {
p4est->global_first_quadrant[i + 1] = (p4est_gloidx_t) u64a[i];
}
for (; i < num_procs; i++) {
p4est->global_first_quadrant[i + 1] = (p4est_gloidx_t)
u64a[fnum_procs - 1];
}
P4EST_FREE (u64a);
sc_fread (gfpos, sizeof (p4est_quadrant_t),
(size_t) (fnum_procs + 1), file, "read tree partition");
/* replicate the last recorded position for the extra (empty) ranks */
for (i = fnum_procs + 1; i <= num_procs; i++) {
gfpos[i] = gfpos[fnum_procs];
}
p4est->global_num_quadrants = p4est->global_first_quadrant[num_procs];
p4est->local_num_quadrants = 0;
/* seek to the beginning of this processor's storage */
if (rank > 0 && rank < fnum_procs) {
/* each preceding rank wrote two marker quadrants per rank plus one
* tree-count quadrant per tree, followed by its quadrant records */
fpos = (long)
(p4est->global_first_quadrant[rank] * qbuf_size +
(2 * rank + gfpos[rank].p.which_tree) * sizeof (p4est_quadrant_t));
if (save_data) {
fpos += p4est->global_first_quadrant[rank] * save_data_size;
}
retval = fseek (file, fpos, SEEK_CUR);
SC_CHECK_ABORT (retval == 0, "seek data");
}
/*
* Read local last tree and quadrant information.
* See comments and code in p4est_save for the data layout.
*/
if (rank < fnum_procs) {
sc_fread (&lq, sizeof (p4est_quadrant_t), 1, file, "read last tree");
p4est->last_local_tree = lq.p.which_tree;
if (p4est->last_local_tree < 0) {
/* -2 is the canonical marker for an empty local partition */
SC_CHECK_ABORT (p4est->last_local_tree == -2, "invalid empty tree");
p4est->first_local_tree = -1;
}
else {
p4est->first_local_tree = gfpos[rank].p.which_tree;
}
for (jt = p4est->first_local_tree; jt <= p4est->last_local_tree; ++jt) {
/* read tree quadrants */
tree = p4est_tree_array_index (p4est->trees, jt);
tquadrants = &tree->quadrants;
sc_fread (&lq, sizeof (p4est_quadrant_t), 1, file, "read tree count");
SC_CHECK_ABORT (lq.p.piggy3.local_num > 0, "invalid tree count");
zcount = (size_t) lq.p.piggy3.local_num;
sc_array_resize (tquadrants, zcount);
memset (tquadrants->array, 0, zcount * sizeof (p4est_quadrant_t));
if (!save_data) {
/* without user data all quadrants were written as one block */
qpos = qall = P4EST_ALLOC (p4est_qcoord_t, (P4EST_DIM + 1) * zcount);
sc_fread (qall, qbuf_size, zcount, file, "read quadrants");
}
else {
qpos = qall = NULL;
}
for (zz = 0; zz < zcount; ++zz) {
q = p4est_quadrant_array_index (tquadrants, zz);
if (save_data) {
/* interleaved layout: one quadrant record, then its data */
sc_fread (qbuffer, qbuf_size, 1, file, "read quadrant");
q->x = qbuffer[0];
q->y = qbuffer[1];
#ifdef P4_TO_P8
q->z = qbuffer[2];
#endif
q->level = (int8_t) qbuffer[P4EST_DIM];
}
else {
q->x = *qpos++;
q->y = *qpos++;
#ifdef P4_TO_P8
q->z = *qpos++;
#endif
/* *INDENT-OFF* HORRIBLE indent bug */
q->level = (int8_t) *qpos++;
/* *INDENT-ON* */
}
SC_CHECK_ABORT (p4est_quadrant_is_valid (q), "invalid quadrant");
if (data_size > 0)
q->p.user_data = sc_mempool_alloc (p4est->user_data_pool);
else
q->p.user_data = NULL;
if (load_data) {
P4EST_ASSERT (data_size > 0);
sc_fread (q->p.user_data, data_size, 1, file, "read quadrant data");
}
else if (save_data) {
/* data is present in the file but not wanted: skip over it */
retval = fseek (file, (long) save_data_size, SEEK_CUR);
SC_CHECK_ABORT (retval == 0, "seek quadrant data");
}
++tree->quadrants_per_level[q->level];
}
P4EST_FREE (qall);
/* compute tree properties */
q = p4est_quadrant_array_index (tquadrants, 0);
p4est_quadrant_first_descendant (q, &tree->first_desc, P4EST_QMAXLEVEL);
q = p4est_quadrant_array_index (tquadrants, tquadrants->elem_count - 1);
p4est_quadrant_last_descendant (q, &tree->last_desc, P4EST_QMAXLEVEL);
for (i = 0; i <= P4EST_QMAXLEVEL; ++i) {
if (tree->quadrants_per_level[i] > 0) {
tree->maxlevel = (int8_t) i;
}
}
for (; i <= P4EST_MAXLEVEL; ++i) {
P4EST_ASSERT (tree->quadrants_per_level[i] == -1);
}
tree->quadrants_offset = p4est->local_num_quadrants;
p4est->local_num_quadrants += (p4est_locidx_t) tquadrants->elem_count;
}
/* consume the trailing marker written when the rank's last tree is
* continued by the next rank; see p4est_save */
if (p4est->last_local_tree < gfpos[rank + 1].p.which_tree) {
sc_fread (&lq, sizeof (p4est_quadrant_t), 1, file, "read extra tree");
}
/* fix quadrant offset */
if (p4est->last_local_tree >= 0) {
for (jt = p4est->last_local_tree + 1; jt < conn->num_trees; ++jt) {
tree = p4est_tree_array_index (p4est->trees, jt);
tree->quadrants_offset = p4est->local_num_quadrants;
}
}
}
else {
/* create an empty partition */
p4est->first_local_tree = -1;
p4est->last_local_tree = -2;
}
/* close file and return */
retval = fclose (file);
SC_CHECK_ABORT (retval == 0, "file close");
file = NULL;
/* assert that we loaded a valid forest */
SC_CHECK_ABORT (p4est_is_valid (p4est), "invalid forest");
/* spread the load over the ranks that got empty partitions above */
if (fnum_procs < num_procs) {
p4est_partition (p4est, NULL);
}
P4EST_GLOBAL_PRODUCTIONF
("Done " P4EST_STRING "_load with %lld total quadrants\n",
(long long) p4est->global_num_quadrants);
return p4est;
}
| xyuan/p4est | src/p4est.c | C | gpl-2.0 | 132,088 |
/*
2020 © Copyright (c) BiDaE Technology Inc.
Provided under BiDaE SHAREWARE LICENSE-1.0 in the LICENSE.
Project Name:
BeDIS
File Name:
thpool.c
File Description:
This file implements a thread pool to which work items can be
added for asynchronous execution.
Note: This code is forked from https://github.com/Pithikos/C-Thread-Pool
Author: Johan Hanssen Seferidis
License: MIT
Version:
2.0, 20190617
Abstract:
BeDIS uses LBeacons to deliver 3D coordinates and textual descriptions of
their locations to users' devices. Basically, a LBeacon is an inexpensive,
Bluetooth Smart Ready device. The 3D coordinates and location description
of every LBeacon are retrieved from BeDIS (Building/environment Data and
Information System) and stored locally during deployment and maintenance
times. Once initialized, each LBeacon broadcasts its coordinates and
location description to Bluetooth enabled user devices within its coverage
area.
Authors:
Holly Wang , hollywang@iis.sinica.edu.tw
Gary Xiao , garyh0205@hotmail.com
*/
#include "thpool.h"
/* ========================== THREADPOOL ============================ */
/* Initialise a thread pool with num_threads worker threads.
 * Negative thread counts are clamped to zero.  Jobs, worker descriptors
 * and the queue semaphore are all carved out of one memory pool sized
 * for the pool's lifetime.  Blocks until every worker has registered
 * itself as alive.  Returns the pool, or NULL on allocation failure. */
struct thpool_ *thpool_init(int num_threads){
    int n;
    thpool_ *thpool_p;

    if (num_threads < 0){
        num_threads = 0;
    }

    /* Make new thread pool */
    thpool_p = (thpool_ *)malloc(sizeof(thpool_));
    if (thpool_p == NULL){
        err("thpool_init(): Could not allocate memory for thread pool\n");
        return NULL;
    }
    thpool_p->num_threads_alive = 0;
    thpool_p->num_threads_working = 0;
    thpool_p->threads_keepalive = 1;
    thpool_p->mempool_size = SIZE_OF_SLOT;

    /* Initialize the memory pool backing jobs, threads and semaphores */
    if(mp_init(&thpool_p->mempool, SIZE_OF_SLOT,
               num_threads * SLOTS_FOR_MEM_POOL_PER_THREAD) !=
       MEMORY_POOL_SUCCESS){
        /* BUG FIX: the original leaked thpool_p on this error path */
        free(thpool_p);
        return NULL;
    }

    /* Initialise the job queue */
    if (jobqueue_init(thpool_p, &thpool_p->jobqueue) == -1){
        err("thpool_init(): Could not allocate memory for job queue\n");
        mp_destroy(&thpool_p->mempool);
        free(thpool_p);
        return NULL;
    }

    /* Make threads in pool */
    thpool_p->threads = (thread **)malloc(sizeof(struct thread *) *
                                          num_threads);
    if (thpool_p->threads == NULL){
        err("thpool_init(): Could not allocate memory for threads\n");
        jobqueue_destroy(thpool_p, &thpool_p->jobqueue);
        mp_destroy(&thpool_p->mempool);
        free(thpool_p);
        return NULL;
    }

    pthread_mutex_init(&(thpool_p->thcount_lock), 0);
    /* BUG FIX: the original additionally called
     * pthread_cond_init(&(thpool_p->thcount_lock), 0), passing a
     * pthread_mutex_t where a pthread_cond_t is required.  That is
     * undefined behavior and corrupts the just-initialized mutex.
     * No condition variable tied to thcount_lock is ever waited on in
     * this file, so the call is simply removed. */

    /* Thread init */
    for (n = 0; n < num_threads; n ++){
        thread_init(thpool_p, &thpool_p->threads[n], n);
    }

    /* Wait for threads to initialize */
    while (thpool_p->num_threads_alive != num_threads) {
        sleep_t(WAITING_TIME);
    }

    return thpool_p;
}
/* Enqueue one work item for asynchronous execution.
 * function_p is invoked with arg_p by some worker thread; the priority
 * value is stored on the job record.  The job slot comes from the
 * pool's memory pool.  Returns 0 on success, -1 if no slot is free. */
int thpool_add_work(thpool_ *thpool_p, void (*function_p)(void *),
                    void *arg_p, int priority){
    job *enqueued = (job *)mp_alloc(&thpool_p->mempool);

    if (enqueued == NULL){
        err("thpool_add_work(): Could not allocate memory for new job\n");
        return -1;
    }

    /* Fill in the work description */
    enqueued->function = function_p;
    enqueued->arg      = arg_p;
    enqueued->priority = priority;

    /* Hand it to the queue; this also wakes a waiting worker */
    jobqueue_push(&thpool_p->jobqueue, enqueued);
    return 0;
}
/* Destroy the thread pool.
 * Clears threads_keepalive so workers exit their loops, wakes idle
 * workers via the job-queue semaphore (giving them about one second,
 * then polling), and finally releases the queue, the worker
 * descriptors, the thread array and the memory pool.
 * Destroying a NULL pool is a no-op. */
void thpool_destroy(thpool_ *thpool_p){
    volatile int threads_total;
    int n;
    double TIMEOUT, tpassed;
    time_t start, end;

    /* No need to destroy if it's NULL */
    if (thpool_p == NULL) return;

    threads_total = thpool_p->num_threads_alive;

    /* End each thread's infinite loop */
    thpool_p->threads_keepalive = 0;

    /* Give one second to kill idle threads */
    TIMEOUT = 1.0;
    tpassed = 0.0;
    time (&start);
    while (tpassed < TIMEOUT && thpool_p->num_threads_alive){
        bsem_post_all(thpool_p->jobqueue.has_jobs);
        time (&end);
        tpassed = difftime(end, start);
    }

    /* Poll remaining threads until all have deregistered */
    while (thpool_p->num_threads_alive){
        bsem_post_all(thpool_p->jobqueue.has_jobs);
        sleep_t(WAITING_TIME);
    }

    /* Job queue cleanup */
    jobqueue_destroy(thpool_p, &thpool_p->jobqueue);

    /* Return each worker descriptor to the memory pool */
    for (n = 0; n < threads_total; n ++){
        thread_destroy(thpool_p->threads[n]);
    }
    /* BUG FIX: the original called free(thpool_p->threads) twice
     * (before and after mp_destroy), which is a double free and
     * undefined behavior.  Free the array exactly once. */
    free(thpool_p->threads);
    mp_destroy(&thpool_p->mempool);
    free(thpool_p);
}
/* Report how many workers are currently executing a job.
 * The value is read without taking thcount_lock, so it is only a
 * snapshot that may be stale by the time the caller inspects it. */
int thpool_num_threads_working(thpool_ *thpool_p){
    const int busy_workers = thpool_p->num_threads_working;
    return busy_workers;
}
/* ============================ THREAD ============================== */
/* Allocate and start one worker thread.
 * The descriptor is taken from the pool's memory pool, stored through
 * the out-parameter thread_p, and its pthread is started detached on
 * thread_do().  Returns 0 on success, -1 on allocation failure. */
static int thread_init (thpool_ *thpool_p, thread **thread_p, int id){
    *thread_p = (thread *)mp_alloc(&thpool_p->mempool);
    /* BUG FIX: the original tested 'thread_p' (the caller's out
     * parameter, which is never NULL here) instead of '*thread_p'
     * (the allocation result), so an exhausted memory pool was never
     * detected and the code dereferenced NULL below. */
    if (*thread_p == NULL){
        err("thread_init(): Could not allocate memory for thread\n");
        return -1;
    }
    (*thread_p)->thpool_p = thpool_p;
    (*thread_p)->id = id;
    /* Detached: the worker cleans up on exit, nobody joins it */
    pthread_create(&(*thread_p)->pthread, NULL, (void *)thread_do,
                   (*thread_p));
    pthread_detach((*thread_p)->pthread);
    return 0;
}
/* Worker thread main loop.
 * Registers itself in num_threads_alive, then repeatedly blocks on the
 * job queue's has_jobs semaphore, pulls one job and runs it, until
 * threads_keepalive is cleared by thpool_destroy().  Deregisters
 * itself before returning NULL. */
static void *thread_do(thread *thread_p){
thpool_ *thpool_p;
/* Assure all threads have been created before starting serving */
thpool_p = thread_p -> thpool_p;
/* Mark thread as alive (initialized) */
pthread_mutex_lock(&thpool_p -> thcount_lock);
thpool_p->num_threads_alive += 1;
pthread_mutex_unlock(&thpool_p -> thcount_lock);
while(thpool_p->threads_keepalive){
void ( *func_buff)(void *);
void *arg_buff;
job *job_p;
/* Sleep until a job is pushed or destroy() posts the semaphore */
bsem_wait(thpool_p -> jobqueue.has_jobs);
/* Re-check the flag: destroy() wakes us with no job to run */
if (thpool_p->threads_keepalive){
pthread_mutex_lock(&thpool_p -> thcount_lock);
thpool_p->num_threads_working ++;
pthread_mutex_unlock(&thpool_p -> thcount_lock);
/* Read job from queue and execute it */
job_p = jobqueue_pull(&thpool_p -> jobqueue);
if (job_p) {
func_buff = job_p -> function;
arg_buff = job_p -> arg;
func_buff(arg_buff);
/* Job slots come from the memory pool (thpool_add_work); return it */
mp_free(&thread_p->thpool_p->mempool ,job_p);
}
pthread_mutex_lock(&thpool_p -> thcount_lock);
thpool_p -> num_threads_working --;
pthread_mutex_unlock(&thpool_p -> thcount_lock);
}
}
/* Loop ended: deregister this thread before exiting */
pthread_mutex_lock(&thpool_p -> thcount_lock);
thpool_p -> num_threads_alive --;
pthread_mutex_unlock(&thpool_p -> thcount_lock);
return NULL;
}
/* Frees a thread */
/* Returns the worker descriptor to the pool's memory pool.  The
 * pthread itself has already exited (see thpool_destroy's wait loops)
 * and was detached, so no join is needed. */
static void thread_destroy (thread *thread_p){
mp_free(&thread_p->thpool_p->mempool, thread_p);
/* only clears the local copy of the pointer; callers keep their own */
thread_p = NULL;
}
/* ============================ JOB QUEUE =========================== */
/* Prepare an empty job queue.
 * The has_jobs binary semaphore is allocated from the pool's memory
 * pool and starts at 0 (no jobs).  Returns 0 on success, -1 if the
 * semaphore slot cannot be allocated. */
static int jobqueue_init(thpool_ *thpool_p, jobqueue *jobqueue_p){
    jobqueue_p->len = 0;
    jobqueue_p->front = jobqueue_p->rear = NULL;

    jobqueue_p->has_jobs = (bsem *)mp_alloc(&thpool_p->mempool);
    if (jobqueue_p->has_jobs == NULL)
        return -1;

    pthread_mutex_init(&(jobqueue_p->rwmutex), 0);
    bsem_init(jobqueue_p->has_jobs, 0);

    return 0;
}
/* Drain the queue, returning every pending job's slot to the memory
 * pool, and reset the queue to its empty state (semaphore cleared). */
static void jobqueue_clear(thpool_ *thpool_p, jobqueue *jobqueue_p){
    while (jobqueue_p->len > 0){
        job *drained = jobqueue_pull(jobqueue_p);
        mp_free(&thpool_p->mempool, drained);
    }
    jobqueue_p->front = jobqueue_p->rear = NULL;
    bsem_reset(jobqueue_p->has_jobs);
    jobqueue_p->len = 0;
}
/* Add (allocated) job to queue */
/* Appends an already-allocated job at the rear of the queue and posts
 * the has_jobs semaphore to wake one waiting worker.
 * NOTE(review): the queue is strictly FIFO; newjob->priority (set by
 * thpool_add_work) is not consulted here -- confirm whether priority
 * ordering was intended. */
static void jobqueue_push(jobqueue *jobqueue_p, job *newjob){
pthread_mutex_lock(&jobqueue_p -> rwmutex);
newjob -> prev = NULL;
switch(jobqueue_p -> len){
case 0: /* if no jobs in queue */
jobqueue_p -> front = newjob;
jobqueue_p -> rear = newjob;
break;
default: /* if jobs in queue */
jobqueue_p -> rear -> prev = newjob;
jobqueue_p -> rear = newjob;
}
jobqueue_p -> len ++;
/* signal while still holding rwmutex so len and the semaphore agree */
bsem_post(jobqueue_p -> has_jobs);
pthread_mutex_unlock(&jobqueue_p -> rwmutex);
}
/* Removes and returns the job at the front of the queue, or NULL when
 * the queue is empty.  When more than one job remains, the has_jobs
 * semaphore is re-posted so another waiting worker is woken too. */
static job *jobqueue_pull(jobqueue *jobqueue_p){
job *job_p;
pthread_mutex_lock(&jobqueue_p -> rwmutex);
/* may be NULL when len == 0; returned as-is in that case */
job_p = jobqueue_p -> front;
switch(jobqueue_p -> len){
case 0: /* if no jobs in queue */
break;
case 1: /* if one job in queue */
jobqueue_p -> front = NULL;
jobqueue_p -> rear = NULL;
jobqueue_p -> len = 0;
break;
default: /* if >1 jobs in queue */
jobqueue_p -> front = job_p -> prev;
jobqueue_p -> len --;
/* more than one job in queue -> post it */
bsem_post(jobqueue_p -> has_jobs);
}
pthread_mutex_unlock(&jobqueue_p -> rwmutex);
return job_p;
}
/* Free all queue resources back to the system */
/* Drains any remaining jobs via jobqueue_clear, then returns the
 * has_jobs semaphore slot to the pool's memory pool. */
static void jobqueue_destroy(thpool_ *thpool_p, jobqueue *jobqueue_p){
jobqueue_clear(thpool_p, jobqueue_p);
mp_free(&thpool_p->mempool, jobqueue_p -> has_jobs);
}
/* ======================== SYNCHRONISATION ========================= */
/* Initialise a binary semaphore to the given value (0 or 1).
 * Any other value is a programming error and aborts the process. */
static void bsem_init(bsem *bsem_p, int value) {
    if (value != 0 && value != 1) {
        err("bsem_init(): Binary semaphore can take only values 1 or 0");
        exit(1);
    }
    pthread_mutex_init(&(bsem_p->mutex), 0);
    pthread_cond_init(&(bsem_p->cond), 0);
    bsem_p->v = value;
}
/* Reset semaphore to 0 */
/* NOTE(review): this re-runs bsem_init, which re-initializes the
 * mutex and condition variable as well as clearing v; callers must
 * ensure no thread is blocked on the semaphore when resetting. */
static void bsem_reset(bsem *bsem_p) {
bsem_init(bsem_p, 0);
}
/* Post to at least one thread */
/* Sets the semaphore to 1 and signals one waiter (if any) under the
 * semaphore's mutex. */
static void bsem_post(bsem *bsem_p) {
pthread_mutex_lock(&bsem_p -> mutex);
bsem_p -> v = 1;
pthread_cond_signal(&bsem_p -> cond);
pthread_mutex_unlock(&bsem_p -> mutex);
}
/* Post to all threads */
/* Sets the semaphore to 1 and broadcasts, waking every waiter.  Only
 * one of them will consume the value in bsem_wait; the rest go back
 * to sleep unless the flag is posted again. */
static void bsem_post_all(bsem *bsem_p) {
pthread_mutex_lock(&bsem_p -> mutex);
bsem_p -> v = 1;
pthread_cond_broadcast(&bsem_p -> cond);
pthread_mutex_unlock(&bsem_p -> mutex);
}
/* Wait on semaphore until semaphore has value 0 */
/* Blocks until the semaphore value becomes 1, then consumes it by
 * resetting it to 0 before returning.  The while loop guards against
 * spurious condition-variable wakeups. */
static void bsem_wait(bsem *bsem_p) {
pthread_mutex_lock(&bsem_p -> mutex);
while (bsem_p -> v != 1) {
pthread_cond_wait(&bsem_p -> cond, &bsem_p -> mutex);
}
/* consume the post so the next waiter blocks again */
bsem_p -> v = 0;
pthread_mutex_unlock(&bsem_p -> mutex);
}
//========================================================================================
//
// $HeadURL: svn://localhost/izine/iZinePlns/Codebase/trunk/iZinePublishUI/Source/Controls/TreeViewHeader/IZPTVColumnsInfo.h $
// $Revision: 2644 $
// $Date: 2011-03-31 09:47:39 +0200 (Thu, 31 Mar 2011) $
// $Author: rajkumar.sehrawat $
//
// Creator: Raj Kumar Sehrawat
// Created: 11-10-2010
// Copyright: 2008-2010 iZine Publish. All rights reserved.
//
// Description:
//========================================================================================
#ifndef _h_IZPTVColumnsInfo_
#define _h_IZPTVColumnsInfo_
#pragma once
/* Widget kind rendered in a tree-view column cell. */
enum enTVColumnType
{
eTVColType_Empty, //No widget created, used for indentation.
eTVColType_Label,
eTVColType_Chkbox,
eTVColType_Icon,
eTVColType_RolloverIconButton,
eTVColTypeCount // number of entries; keep last
};
/* Static description of one tree-view column: identity, size limits,
 * user-interaction capabilities, widget type and display names. */
struct ZPTVColumnInfo
{
typedef object_type data_type; //To support K2Vector
int mColID; //Logical ID of column.
int mMinWidth; //0 Means no limit
int mMaxWidth; //0 Means no limit
int mMinHeight; //0 Means no limit
int mMaxHeight; //0 Means no limit
bool mCanHide; //false if always visible, like Task Subject
bool mCanChangePosition; //true if DnD is allowed.
enTVColumnType mColType; //Widget kind used to render the cell.
PMString mColDispName; //Display name for the column in header/context menu.
PMString mContextMenuDispName; //If empty then mColDispName is used
// Default: unconstrained hideable/movable label column with ID 0.
ZPTVColumnInfo()
: mColID(0), mMinWidth( 0 ), mMaxWidth( 0 ), mMinHeight( 0 ), mMaxHeight( 0 )
, mCanHide( true ), mCanChangePosition( true ), mColType( eTVColType_Label )
{}
};
/* Interface exposing the column layout of a tree view (read access by
 * index or by logical column ID), plus static builders for the two
 * supported tree kinds. */
class IZPTVColumnsInfo : public IPMUnknown
{
public:
enum { kDefaultIID = IID_IZPTVCOLUMNSINFO };
typedef K2Vector<ZPTVColumnInfo> ZPTVColumnInfoArr;
// Populate the column set for the given tree kind. (Name has an
// upstream typo -- "Coumns" -- kept for binary/source compatibility.)
virtual void InitCoumnsInfo(
int inTreeType ) = 0; //1 Tasks, 2 Assets
virtual int GetColumnCount() const = 0;
// Lookup by position in the column array.
virtual const ZPTVColumnInfo & GetNthColumnInfo(
const int inColumnIndex ) const = 0;
// Lookup by logical column ID (ZPTVColumnInfo::mColID).
virtual const ZPTVColumnInfo & GetColumnInfoForColID(
const int inColID ) const = 0;
virtual bool ContainsColumnInfoForColID(
const int inColID ) const = 0;
// First widget ID used for the column widgets of this tree.
virtual const WidgetID & GetColumnWidgetStartID() const = 0;
// Static column-set builders for the two tree kinds.
static void GetTaskTreeColumnsInfo(
ZPTVColumnInfoArr & oColumnsInfo );
static void GetAssetsTreeColumnsInfo(
ZPTVColumnInfoArr & oColumnsInfo );
};
extern const ZPTVColumnInfo kZPEmptyTVColumnInfo;
#endif //_h_IZPTVColumnsInfo_
| izine-publish/izine-plugins | src/iZinePublishUI/Source/Controls/TreeViewHeader/IZPTVColumnsInfo.h | C | gpl-2.0 | 2,468 |
/*
* ntfs-3g_common.h - Common declarations for ntfs-3g and lowntfs-3g.
*
* Copyright (c) 2010-2011 Jean-Pierre Andre
* Copyright (c) 2010 Erik Larsson
*
* This program/include file is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as published
* by the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program/include file is distributed in the hope that it will be
* useful, but WITHOUT ANY WARRANTY; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program (in the main directory of the NTFS-3G
* distribution in the file COPYING); if not, write to the Free Software
* Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef _NTFS_3G_COMMON_H
#define _NTFS_3G_COMMON_H
#include "inode.h"
/* Raw mount parameters gathered from the command line, shared by
 * ntfs-3g and lowntfs-3g before they are parsed into a context. */
struct ntfs_options {
char *mnt_point; /* Mount point */
char *options; /* Mount options */
char *device; /* Device to mount */
#if !defined(__AROS__) && !defined(AMIGA)
char *arg_device; /* Device requested in argv */
#endif
} ;
/* How NTFS named data streams are exposed through FUSE. */
typedef enum {
NF_STREAMS_INTERFACE_NONE, /* No access to named data streams. */
NF_STREAMS_INTERFACE_XATTR, /* Map named data streams to xattrs. */
NF_STREAMS_INTERFACE_OPENXATTR, /* Same, not limited to "user." */
NF_STREAMS_INTERFACE_WINDOWS, /* "file:stream" interface. */
} ntfs_fuse_streams_interface;
/* One entry of the mount-option table: keyword text, its OPT_*
 * identifier (type) and its FLGOPT_* argument-format flags. */
struct DEFOPTION {
const char *name; /* option keyword as written in the option string */
int type; /* OPT_* identifier */
int flags; /* bitwise OR of FLGOPT_* values */
} ;
/* Options, order not significant */
/* Identifiers for every recognized mount option; used as the 'type'
 * field of struct DEFOPTION.  NOTE(review): the actual keyword-to-
 * identifier table lives in the corresponding .c file -- keep the two
 * in sync when adding entries. */
enum {
OPT_RO,
OPT_NOATIME,
OPT_ATIME,
OPT_RELATIME,
OPT_DMTIME,
OPT_FAKE_RW,
OPT_FSNAME,
OPT_NO_DEF_OPTS,
OPT_DEFAULT_PERMISSIONS,
OPT_PERMISSIONS,
OPT_ACL,
OPT_UMASK,
OPT_FMASK,
OPT_DMASK,
OPT_UID,
OPT_GID,
OPT_SHOW_SYS_FILES,
OPT_HIDE_HID_FILES,
OPT_HIDE_DOT_FILES,
OPT_IGNORE_CASE,
OPT_WINDOWS_NAMES,
OPT_COMPRESSION,
OPT_NOCOMPRESSION,
OPT_SILENT,
OPT_RECOVER,
OPT_NORECOVER,
OPT_REMOVE_HIBERFILE,
OPT_SYNC,
OPT_BIG_WRITES,
OPT_LOCALE,
OPT_NFCONV,
OPT_NONFCONV,
OPT_STREAMS_INTERFACE,
OPT_USER_XATTR,
OPT_NOAUTO,
OPT_DEBUG,
OPT_NO_DETACH,
OPT_REMOUNT,
OPT_BLKSIZE,
OPT_INHERIT,
OPT_ADDSECURIDS,
OPT_STATICGRPS,
OPT_USERMAPPING,
OPT_XATTRMAPPING,
OPT_EFS_RAW,
} ;
/* Option flags */
/* Bit-mask values (powers of two) describing how a mount option's
 * argument is parsed; OR-combined in DEFOPTION.flags. */
enum {
FLGOPT_BOGUS = 1,
FLGOPT_STRING = 2,
FLGOPT_OCTAL = 4,
FLGOPT_DECIMAL = 8,
FLGOPT_APPEND = 16,
FLGOPT_NOSUPPORT = 32,
FLGOPT_OPTIONAL = 64
} ;
/* Access-time update policy, selected by the atime/noatime/relatime
 * mount options (OPT_ATIME, OPT_NOATIME, OPT_RELATIME above). */
typedef enum {
ATIME_ENABLED,
ATIME_DISABLED,
ATIME_RELATIVE
} ntfs_atime_t;
/* Fully parsed mount state shared across the FUSE callbacks: the open
 * volume, ownership/permission settings, feature toggles and the
 * security/permissions caches. */
typedef struct {
ntfs_volume *vol; /* the mounted NTFS volume */
unsigned int uid; /* forced owner uid (uid= option) -- TODO confirm */
unsigned int gid; /* forced owner gid (gid= option) -- TODO confirm */
unsigned int fmask; /* permission mask applied to files */
unsigned int dmask; /* permission mask applied to directories */
ntfs_fuse_streams_interface streams; /* named-stream exposure mode */
ntfs_atime_t atime; /* access-time update policy */
u64 dmtime; /* value of the dmtime option -- semantics in parser */
BOOL ro; /* mounted read-only */
BOOL show_sys_files;
BOOL hide_hid_files;
BOOL hide_dot_files;
BOOL windows_names;
BOOL ignore_case;
BOOL compression;
BOOL acl;
BOOL silent;
BOOL recover;
BOOL hiberfile;
BOOL sync;
BOOL big_writes;
BOOL debug;
BOOL no_detach;
BOOL blkdev; /* device is a block device */
BOOL mounted;
#ifdef HAVE_SETXATTR /* extended attributes interface required */
BOOL efs_raw;
#ifdef XATTR_MAPPINGS
char *xattrmap_path;
#endif /* XATTR_MAPPINGS */
#endif /* HAVE_SETXATTR */
struct fuse_chan *fc;
BOOL inherit;
unsigned int secure_flags;
char *usermap_path; /* path to the user mapping file */
char *abs_mnt_point; /* absolute mount point path */
struct PERMISSIONS_CACHE *seccache;
struct SECURITY_CONTEXT security;
struct open_file *open_files; /* only defined in lowntfs-3g */
u64 latest_ghost;
} ntfs_fuse_context_t;
extern const char *EXEC_NAME;
#ifdef FUSE_INTERNAL
#define FUSE_TYPE "integrated FUSE"
#else
#define FUSE_TYPE "external FUSE"
#endif
extern const char xattr_ntfs_3g[];
extern const char nf_ns_user_prefix[];
extern const int nf_ns_user_prefix_len;
extern const char nf_ns_system_prefix[];
extern const int nf_ns_system_prefix_len;
extern const char nf_ns_security_prefix[];
extern const int nf_ns_security_prefix_len;
extern const char nf_ns_trusted_prefix[];
extern const int nf_ns_trusted_prefix_len;
int ntfs_strappend(char **dest, const char *append);
int ntfs_strinsert(char **dest, const char *append);
char *parse_mount_options(ntfs_fuse_context_t *ctx,
const struct ntfs_options *popts, BOOL low_fuse);
#if !defined(__AROS__) && !defined(AMIGA)
int ntfs_parse_options(struct ntfs_options *popts, void (*usage)(void),
int argc, char *argv[]);
#endif
int ntfs_fuse_listxattr_common(ntfs_inode *ni, ntfs_attr_search_ctx *actx,
char *list, size_t size, BOOL prefixing);
#endif /* _NTFS_3G_COMMON_H */
| salass00/ntfs-3g | src/ntfs-3g_common.h | C | gpl-2.0 | 4,673 |
//Added by qt3to4:
#include <q3mimefactory.h>
#include <QCloseEvent>
#include <Q3PopupMenu>
/****************************************************************************
** ui.h extension file, included from the uic-generated form implementation.
**
** If you want to add, delete, or rename functions or slots, use
** Qt Designer to update this file, preserving your code.
**
** You should not define a constructor or destructor in this file.
** Instead, write your code in functions called init() and destroy().
** These will automatically be called by the form's constructor and
** destructor.
*****************************************************************************/
/*
Copyright (C) 2005-2009 Michel de Boer <michel@twinklephone.com>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
// One-time setup called from the form's generated constructor:
// configures list-view sorting/column widths, enables all filter
// check boxes, records the application start time as the "last
// viewed" timestamp, and builds the right-click context menu.
void HistoryForm::init()
{
// Newest calls first: sort on the timestamp column, descending.
historyListView->setSorting(HISTCOL_TIMESTAMP, false);
historyListView->setColumnWidthMode(HISTCOL_FROMTO, Q3ListView::Manual);
historyListView->setColumnWidth(HISTCOL_FROMTO, 200);
historyListView->setColumnWidthMode(HISTCOL_SUBJECT, Q3ListView::Manual);
historyListView->setColumnWidth(HISTCOL_SUBJECT, 200);
// Start with every filter enabled so all records are visible.
inCheckBox->setChecked(true);
outCheckBox->setChecked(true);
successCheckBox->setChecked(true);
missedCheckBox->setChecked(true);
profileCheckBox->setChecked(true);
timeLastViewed = phone->get_startup_time();
QIcon inviteIcon(qPixmapFromMimeSource("invite.png"));
QIcon deleteIcon(qPixmapFromMimeSource("editdelete.png"));
// Context menu for history entries; MEMMAN_NEW registers the
// allocation with the project's memory tracker.
histPopupMenu = new Q3PopupMenu(this);
MEMMAN_NEW(histPopupMenu);
// Keep the "Call..." item id so popupMenu() can enable/disable it.
itemCall = histPopupMenu->insertItem(inviteIcon, tr("Call..."), this, SLOT(call()));
histPopupMenu->insertItem(deleteIcon, tr("Delete"), this, SLOT(deleteEntry()));
}
// Counterpart of init(), called from the generated destructor:
// releases the context menu.  MEMMAN_DELETE presumably unregisters
// the pointer from the memory tracker before the delete.
void HistoryForm::destroy()
{
MEMMAN_DELETE(histPopupMenu);
delete histPopupMenu;
}
// Rebuild the history table from the global call_history, applying
// the direction/result/profile filter check boxes, and update the
// call-count and total-duration summary labels.
void HistoryForm::loadHistory()
{
// Create list of all active profile names
QStringList profile_name_list;
list<t_user *>user_list = phone->ref_users();
for (list<t_user *>::iterator i = user_list.begin(); i != user_list.end(); i++) {
profile_name_list.append((*i)->get_profile_name().c_str());
}
// Fill the history table
unsigned long numberOfCalls = 0;
unsigned long totalCallDuration = 0;
unsigned long totalConversationDuration = 0;
historyListView->clear();
list<t_call_record> history;
call_history->get_history(history);
for (list<t_call_record>::iterator i = history.begin(); i != history.end(); i++) {
// Skip records excluded by the filter check boxes.
if (i->direction == t_call_record::DIR_IN && !inCheckBox->isChecked()) {
continue;
}
if (i->direction == t_call_record::DIR_OUT && !outCheckBox->isChecked()) {
continue;
}
// Response codes < 300 count as successful, >= 300 as missed/failed.
if (i->invite_resp_code < 300 && !successCheckBox->isChecked()) {
continue;
}
if (i->invite_resp_code >= 300 && !missedCheckBox->isChecked()) {
continue;
}
// When the profile filter is on, only show calls of active profiles.
if (!profile_name_list.contains(i->user_profile.c_str()) &&
profileCheckBox->isChecked())
{
continue;
}
numberOfCalls++;
// Calculate total duration
totalCallDuration += i->time_end - i->time_start;
// time_answer == 0 means the call was never answered.
if (i->time_answer != 0) {
totalConversationDuration += i->time_end - i->time_answer;
}
t_user *user_config = phone->ref_user_profile(i->user_profile);
// If the user profile is not active, then use the
// first user profile for formatting
if (!user_config) {
user_config = phone->ref_users().front();
}
new HistoryListViewItem(historyListView,
*i, user_config, timeLastViewed);
}
numberCallsValueTextLabel->setText(QString().setNum(numberOfCalls));
// Total call duration formatting
QString durationText = duration2str(totalCallDuration).c_str();
durationText += " (";
durationText += tr("conversation");
durationText += ": ";
durationText += duration2str(totalConversationDuration).c_str();
durationText += ")";
totalDurationValueTextLabel->setText(durationText);
// Make the first entry the selected entry.
Q3ListViewItem *first = historyListView->firstChild();
if (first) {
historyListView->setSelected(first, true);
showCallDetails(first);
} else {
// Empty table: also clear the details pane.
cdrTextEdit->clear();
}
}
// Refresh the history table; triggered by a callback from the user
// interface core whenever the call history changes.
void HistoryForm::update()
{
	// While hidden there is nothing to refresh: show() rebuilds
	// the table the next time the window appears.
	if (!isShown()) {
		return;
	}
	loadHistory();
}
// Show the history window.  If it is already visible, just bring it
// to the front; otherwise (re)load the table first and then show it.
void HistoryForm::show()
{
if (isShown()) {
raise();
setActiveWindow();
return;
}
loadHistory();
QDialog::show();
raise();
}
// On close, remember the current time so newer entries can be marked
// on the next view, and clear the missed-calls counter.
void HistoryForm::closeEvent( QCloseEvent *e )
{
struct timeval t;
gettimeofday(&t, NULL);
timeLastViewed = t.tv_sec;
// If Twinkle is terminated while the history window is
// shown, then the call_history object is destroyed, before this
// window is closed.
if (call_history) {
call_history->clear_num_missed_calls();
}
QDialog::closeEvent(e);
}
// Render the full call detail record of the selected history entry
// as a two-column HTML table (labels left, values right) into the
// details pane.
void HistoryForm::showCallDetails(Q3ListViewItem *item)
{
QString s;
t_call_record cr = ((HistoryListViewItem *)item)->get_call_record();
cdrTextEdit->clear();
t_user *user_config = phone->ref_user_profile(cr.user_profile);
// If the user profile is not active, then use the
// first user profile for formatting
if (!user_config) {
user_config = phone->ref_users().front();
}
s = "<table>";
// Left column: header names
s += "<tr><td><b>";
s += tr("Call start:") + "<br>";
s += tr("Call answer:") + "<br>";
s += tr("Call end:") + "<br>";
s += tr("Call duration:") + "<br>";
s += tr("Direction:") + "<br>";
s += tr("From:") + "<br>";
s += tr("To:") + "<br>";
// Optional rows must match the optional values emitted below.
if (cr.reply_to_uri.is_valid()) s += tr("Reply to:") + "<br>";
if (cr.referred_by_uri.is_valid()) s += tr("Referred by:") + "<br>";
s += tr("Subject:") + "<br>";
s += tr("Released by:") + "<br>";
s += tr("Status:") + "<br>";
if (!cr.far_end_device.empty()) s += tr("Far end device:") + "<br>";
s += tr("User profile:");
s += "</b></td>";
// Right column: values
s += "<td>";
s += time2str(cr.time_start, "%d %b %Y %H:%M:%S").c_str();
s += "<br>";
// time_answer == 0 means unanswered; leave the cell empty then.
if (cr.time_answer != 0) {
s += time2str(cr.time_answer, "%d %b %Y %H:%M:%S").c_str();
}
s += "<br>";
s += time2str(cr.time_end, "%d %b %Y %H:%M:%S").c_str();
s += "<br>";
s += duration2str((unsigned long)(cr.time_end - cr.time_start)).c_str();
if (cr.time_answer != 0) {
s += " (";
s += tr("conversation");
s += ": ";
s += duration2str((unsigned long)(cr.time_end - cr.time_answer)).c_str();
s += ")";
}
s += "<br>";
s += cr.get_direction().c_str();
s += "<br>";
// str2html escapes user-supplied text before embedding it in HTML.
s += str2html(ui->format_sip_address(user_config, cr.from_display, cr.from_uri).c_str());
if (cr.from_organization != "") {
s += ", ";
s += str2html(cr.from_organization.c_str());
}
s += "<br>";
s += str2html(ui->format_sip_address(user_config, cr.to_display, cr.to_uri).c_str());
if (cr.to_organization != "") {
s += ", ";
s += str2html(cr.to_organization.c_str());
}
s += "<br>";
if (cr.reply_to_uri.is_valid()) {
s += str2html(ui->format_sip_address(user_config,
cr.reply_to_display, cr.reply_to_uri).c_str());
s += "<br>";
}
if (cr.referred_by_uri.is_valid()) {
s += str2html(ui->format_sip_address(user_config,
cr.referred_by_display, cr.referred_by_uri).c_str());
s += "<br>";
}
s += str2html(cr.subject.c_str());
s += "<br>";
s += cr.get_rel_cause().c_str();
s += "<br>";
// Final SIP status: "<code> <reason>".
s += int2str(cr.invite_resp_code).c_str();
s += ' ';
s += str2html(cr.invite_resp_reason.c_str());
s += "<br>";
if (!cr.far_end_device.empty()) {
s += str2html(cr.far_end_device.c_str());
s += "<br>";
}
s += str2html(cr.user_profile.c_str());
s += "</td></tr>";
s += "</table>";
cdrTextEdit->setText(s);
}
// Show the context menu for a history entry.
// The "call back" item is disabled for anonymous incoming calls, since
// there is no usable address to call back.
void HistoryForm::popupMenu(Q3ListViewItem *item, const QPoint &pos)
{
    if (!item) return;

    HistoryListViewItem *entry = dynamic_cast<HistoryListViewItem *>(item);
    if (!entry) return;

    t_call_record rec = entry->get_call_record();

    // An anonymous caller cannot be called
    bool anonymousIncoming = (rec.direction == t_call_record::DIR_IN &&
                              rec.from_uri.encode() == ANONYMOUS_URI);

    histPopupMenu->setItemEnabled(itemCall, !anonymousIncoming);
    histPopupMenu->popup(pos);
}
/**
 * Call back the party from a history entry.
 * For incoming calls the From contact (or the Reply-To contact when one
 * is present) is called; for outgoing calls the To contact is redialed.
 * @param item  The selected history list view item.
 */
void HistoryForm::call(Q3ListViewItem *item)
{
    if (!item) return;

    // Use dynamic_cast with a null check instead of an unchecked C-style
    // cast, consistent with popupMenu()/deleteEntry().
    HistoryListViewItem *histItem = dynamic_cast<HistoryListViewItem *>(item);
    if (!histItem) return;

    t_call_record cr = histItem->get_call_record();
    t_user *user_config = phone->ref_user_profile(cr.user_profile);

    // If the user profile is not active, then use the first profile
    if (!user_config) {
        user_config = phone->ref_users().front();
    }

    // Determine subject: prefix incoming subjects with "Re:" unless the
    // subject already starts with it.
    QString subject;
    if (cr.direction == t_call_record::DIR_IN) {
        if (!cr.subject.empty()) {
            if (cr.subject.substr(0, tr("Re:").length()) != tr("Re:").ascii()) {
                subject = tr("Re:").append(" ");
                subject += cr.subject.c_str();
            } else {
                subject = cr.subject.c_str();
            }
        }
    } else {
        subject = cr.subject.c_str();
    }

    // Send call signal
    if (cr.direction == t_call_record::DIR_IN && cr.reply_to_uri.is_valid()) {
        // Call to the Reply-To contact
        emit call(user_config,
                  ui->format_sip_address(user_config,
                          cr.reply_to_display, cr.reply_to_uri).c_str(),
                  subject, false);
    } else {
        // For incoming calls, call to the From contact
        // For outgoing calls, call to the To contact
        // Keep hiding the user identity if the original call was anonymous.
        bool hide_user = (cr.direction == t_call_record::DIR_OUT &&
                          cr.from_uri.encode() == ANONYMOUS_URI);
        emit call(user_config, item->text(HISTCOL_FROMTO), subject, hide_user);
    }
}
void HistoryForm::call(void)
{
Q3ListViewItem *item = historyListView->currentItem();
if (item) call(item);
}
void HistoryForm::deleteEntry(void)
{
Q3ListViewItem *item = historyListView->currentItem();
HistoryListViewItem *histItem = dynamic_cast<HistoryListViewItem *>(item);
if (!histItem) return;
call_history->delete_call_record(histItem->get_call_record().get_id());
}
// Remove all call records from the call history.
void HistoryForm::clearHistory()
{
    call_history->clear();
}
| mahoshin/twinkle | src/gui/historyform.ui.h | C | gpl-2.0 | 10,523 |
/*
* Copyright (C) 2001-2003 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of Libav.
*
* Libav is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* Libav is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with Libav; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <inttypes.h>
#include "config.h"
#include "libswscale/swscale.h"
#include "libswscale/swscale_internal.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/x86_cpu.h"
#include "libavutil/cpu.h"
#include "libavutil/pixdesc.h"
#define DITHER1XBPP

/* Byte/word masks and rounding constants referenced by the MMX assembly. */
DECLARE_ASM_CONST(8, uint64_t, bF8)=       0xF8F8F8F8F8F8F8F8LL;
DECLARE_ASM_CONST(8, uint64_t, bFC)=       0xFCFCFCFCFCFCFCFCLL;
DECLARE_ASM_CONST(8, uint64_t, w10)=       0x0010001000100010LL;
DECLARE_ASM_CONST(8, uint64_t, w02)=       0x0002000200020002LL;

/* Ordered-dither row patterns; the row is selected by output-line parity
 * (see updateMMXDitherTables). */
const DECLARE_ALIGNED(8, uint64_t, ff_dither4)[2] = {
    0x0103010301030103LL,
    0x0200020002000200LL,};

const DECLARE_ALIGNED(8, uint64_t, ff_dither8)[2] = {
    0x0602060206020602LL,
    0x0004000400040004LL,};

/* Per-channel masks for packing RGB565 (16 bit) and RGB555 (15 bit). */
DECLARE_ASM_CONST(8, uint64_t, b16Mask)=   0x001F001F001F001FLL;
DECLARE_ASM_CONST(8, uint64_t, g16Mask)=   0x07E007E007E007E0LL;
DECLARE_ASM_CONST(8, uint64_t, r16Mask)=   0xF800F800F800F800LL;
DECLARE_ASM_CONST(8, uint64_t, b15Mask)=   0x001F001F001F001FLL;
DECLARE_ASM_CONST(8, uint64_t, g15Mask)=   0x03E003E003E003E0LL;
DECLARE_ASM_CONST(8, uint64_t, r15Mask)=   0x7C007C007C007C00LL;

/* Masks for 24 bit RGB/BGR packing. */
DECLARE_ALIGNED(8, const uint64_t, ff_M24A)         = 0x00FF0000FF0000FFLL;
DECLARE_ALIGNED(8, const uint64_t, ff_M24B)         = 0xFF0000FF0000FF00LL;
DECLARE_ALIGNED(8, const uint64_t, ff_M24C)         = 0x0000FF0000FF0000LL;

/* Fixed-point BGR -> YUV coefficients; the FAST variant presumably trades
 * accuracy for speed (NOTE(review): exact precision difference not visible
 * here -- confirm against the template code). */
#ifdef FAST_BGR2YV12
DECLARE_ALIGNED(8, const uint64_t, ff_bgr2YCoeff)   = 0x000000210041000DULL;
DECLARE_ALIGNED(8, const uint64_t, ff_bgr2UCoeff)   = 0x0000FFEEFFDC0038ULL;
DECLARE_ALIGNED(8, const uint64_t, ff_bgr2VCoeff)   = 0x00000038FFD2FFF8ULL;
#else
DECLARE_ALIGNED(8, const uint64_t, ff_bgr2YCoeff)   = 0x000020E540830C8BULL;
DECLARE_ALIGNED(8, const uint64_t, ff_bgr2UCoeff)   = 0x0000ED0FDAC23831ULL;
DECLARE_ALIGNED(8, const uint64_t, ff_bgr2VCoeff)   = 0x00003831D0E6F6EAULL;
#endif /* FAST_BGR2YV12 */
DECLARE_ALIGNED(8, const uint64_t, ff_bgr2YOffset)  = 0x1010101010101010ULL;
DECLARE_ALIGNED(8, const uint64_t, ff_bgr2UVOffset) = 0x8080808080808080ULL;
DECLARE_ALIGNED(8, const uint64_t, ff_w1111)        = 0x0001000100010001ULL;
//MMX versions
#if HAVE_MMX
#undef RENAME
#define COMPILE_TEMPLATE_MMX2 0
#define RENAME(a) a ## _MMX
#include "swscale_template.c"
#endif
//MMX2 versions
#if HAVE_MMX2
#undef RENAME
#undef COMPILE_TEMPLATE_MMX2
#define COMPILE_TEMPLATE_MMX2 1
#define RENAME(a) a ## _MMX2
#include "swscale_template.c"
#endif
/**
 * Rebuild the per-output-line state used by the MMX vertical scaler:
 * select the dither rows for this line's parity and pack the vertical
 * filter tables (lumMmxFilter / chrMmxFilter / alpMmxFilter) that pair
 * source-line pointers with their filter coefficients.
 */
void updateMMXDitherTables(SwsContext *c, int dstY, int lumBufIndex, int chrBufIndex,
                           int lastInLumBuf, int lastInChrBuf)
{
    const int dstH= c->dstH;
    const int flags= c->flags;
    int16_t **lumPixBuf= c->lumPixBuf;
    int16_t **chrUPixBuf= c->chrUPixBuf;
    int16_t **alpPixBuf= c->alpPixBuf;
    const int vLumBufSize= c->vLumBufSize;
    const int vChrBufSize= c->vChrBufSize;
    int32_t *vLumFilterPos= c->vLumFilterPos;
    int32_t *vChrFilterPos= c->vChrFilterPos;
    int16_t *vLumFilter= c->vLumFilter;
    int16_t *vChrFilter= c->vChrFilter;
    int32_t *lumMmxFilter= c->lumMmxFilter;
    int32_t *chrMmxFilter= c->chrMmxFilter;
    int32_t av_unused *alpMmxFilter= c->alpMmxFilter;
    const int vLumFilterSize= c->vLumFilterSize;
    const int vChrFilterSize= c->vChrFilterSize;
    const int chrDstY= dstY>>c->chrDstVSubSample;
    const int firstLumSrcY= vLumFilterPos[dstY]; //First line needed as input
    const int firstChrSrcY= vChrFilterPos[chrDstY]; //First line needed as input

    /* Dither rows alternate with output-line parity; RGB555/BGR555 use the
     * coarser pattern for green, everything else the finer one. */
    c->blueDither= ff_dither8[dstY&1];
    if (c->dstFormat == PIX_FMT_RGB555 || c->dstFormat == PIX_FMT_BGR555)
        c->greenDither= ff_dither8[dstY&1];
    else
        c->greenDither= ff_dither4[dstY&1];
    c->redDither= ff_dither8[(dstY+1)&1];
    if (dstY < dstH - 2) {
        const int16_t **lumSrcPtr= (const int16_t **) lumPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize;
        const int16_t **chrUSrcPtr= (const int16_t **) chrUPixBuf + chrBufIndex + firstChrSrcY - lastInChrBuf + vChrBufSize;
        const int16_t **alpSrcPtr= (CONFIG_SWSCALE_ALPHA && alpPixBuf) ? (const int16_t **) alpPixBuf + lumBufIndex + firstLumSrcY - lastInLumBuf + vLumBufSize : NULL;
        int i;
        /* If the luma filter window reaches outside the source image, build
         * a temporary pointer list that replicates the first/last available
         * line for the out-of-range taps. */
        if (firstLumSrcY < 0 || firstLumSrcY + vLumFilterSize > c->srcH) {
            const int16_t **tmpY = (const int16_t **) lumPixBuf + 2 * vLumBufSize;
            int neg = -firstLumSrcY, i, end = FFMIN(c->srcH - firstLumSrcY, vLumFilterSize);
            for (i = 0; i < neg; i++)
                tmpY[i] = lumSrcPtr[neg];
            for ( ; i < end; i++)
                tmpY[i] = lumSrcPtr[i];
            for ( ; i < vLumFilterSize; i++)
                tmpY[i] = tmpY[i-1];
            lumSrcPtr = tmpY;

            if (alpSrcPtr) {
                const int16_t **tmpA = (const int16_t **) alpPixBuf + 2 * vLumBufSize;
                for (i = 0; i < neg; i++)
                    tmpA[i] = alpSrcPtr[neg];
                for ( ; i < end; i++)
                    tmpA[i] = alpSrcPtr[i];
                for ( ; i < vLumFilterSize; i++)
                    tmpA[i] = tmpA[i - 1];
                alpSrcPtr = tmpA;
            }
        }
        /* Same edge replication for the chroma filter window. */
        if (firstChrSrcY < 0 || firstChrSrcY + vChrFilterSize > c->chrSrcH) {
            const int16_t **tmpU = (const int16_t **) chrUPixBuf + 2 * vChrBufSize;
            int neg = -firstChrSrcY, i, end = FFMIN(c->chrSrcH - firstChrSrcY, vChrFilterSize);
            for (i = 0; i < neg; i++) {
                tmpU[i] = chrUSrcPtr[neg];
            }
            for ( ; i < end; i++) {
                tmpU[i] = chrUSrcPtr[i];
            }
            for ( ; i < vChrFilterSize; i++) {
                tmpU[i] = tmpU[i - 1];
            }
            chrUSrcPtr = tmpU;
        }
        if (flags & SWS_ACCURATE_RND) {
            /* Accurate rounding: each APCK entry packs pointers to two
             * source lines plus their two coefficients (second coefficient
             * in the high 16 bits). */
            int s= APCK_SIZE / 8;
            for (i=0; i<vLumFilterSize; i+=2) {
                *(const void**)&lumMmxFilter[s*i ]= lumSrcPtr[i ];
                *(const void**)&lumMmxFilter[s*i+APCK_PTR2/4 ]= lumSrcPtr[i+(vLumFilterSize>1)];
                lumMmxFilter[s*i+APCK_COEF/4 ]=
                lumMmxFilter[s*i+APCK_COEF/4+1]= vLumFilter[dstY*vLumFilterSize + i ]
                    + (vLumFilterSize>1 ? vLumFilter[dstY*vLumFilterSize + i + 1]<<16 : 0);
                if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
                    *(const void**)&alpMmxFilter[s*i ]= alpSrcPtr[i ];
                    *(const void**)&alpMmxFilter[s*i+APCK_PTR2/4 ]= alpSrcPtr[i+(vLumFilterSize>1)];
                    alpMmxFilter[s*i+APCK_COEF/4 ]=
                    alpMmxFilter[s*i+APCK_COEF/4+1]= lumMmxFilter[s*i+APCK_COEF/4 ];
                }
            }
            for (i=0; i<vChrFilterSize; i+=2) {
                *(const void**)&chrMmxFilter[s*i ]= chrUSrcPtr[i ];
                *(const void**)&chrMmxFilter[s*i+APCK_PTR2/4 ]= chrUSrcPtr[i+(vChrFilterSize>1)];
                chrMmxFilter[s*i+APCK_COEF/4 ]=
                chrMmxFilter[s*i+APCK_COEF/4+1]= vChrFilter[chrDstY*vChrFilterSize + i ]
                    + (vChrFilterSize>1 ? vChrFilter[chrDstY*vChrFilterSize + i + 1]<<16 : 0);
            }
        } else {
            /* Fast path: one source-line pointer per entry and the 16 bit
             * coefficient duplicated into both halves via *0x10001. */
            for (i=0; i<vLumFilterSize; i++) {
                *(const void**)&lumMmxFilter[4*i+0]= lumSrcPtr[i];
                lumMmxFilter[4*i+2]=
                lumMmxFilter[4*i+3]=
                    ((uint16_t)vLumFilter[dstY*vLumFilterSize + i])*0x10001;
                if (CONFIG_SWSCALE_ALPHA && alpPixBuf) {
                    *(const void**)&alpMmxFilter[4*i+0]= alpSrcPtr[i];
                    alpMmxFilter[4*i+2]=
                    alpMmxFilter[4*i+3]= lumMmxFilter[4*i+2];
                }
            }
            for (i=0; i<vChrFilterSize; i++) {
                *(const void**)&chrMmxFilter[4*i+0]= chrUSrcPtr[i];
                chrMmxFilter[4*i+2]=
                chrMmxFilter[4*i+3]=
                    ((uint16_t)vChrFilter[chrDstY*vChrFilterSize + i])*0x10001;
            }
        }
    }
}
/* Prototype generators for the external (Yasm) horizontal scalers,
 * parameterized on filter size, source bpc, destination bpc and ISA. */
#define SCALE_FUNC(filter_n, from_bpc, to_bpc, opt) \
extern void ff_hscale ## from_bpc ## to ## to_bpc ## _ ## filter_n ## _ ## opt( \
                                                SwsContext *c, int16_t *data, \
                                                int dstW, const uint8_t *src, \
                                                const int16_t *filter, \
                                                const int32_t *filterPos, int filterSize)
#define SCALE_FUNCS(filter_n, opt) \
    SCALE_FUNC(filter_n, 8, 15, opt); \
    SCALE_FUNC(filter_n, 9, 15, opt); \
    SCALE_FUNC(filter_n, 10, 15, opt); \
    SCALE_FUNC(filter_n, 16, 15, opt); \
    SCALE_FUNC(filter_n, 8, 19, opt); \
    SCALE_FUNC(filter_n, 9, 19, opt); \
    SCALE_FUNC(filter_n, 10, 19, opt); \
    SCALE_FUNC(filter_n, 16, 19, opt)
#define SCALE_FUNCS_MMX(opt) \
    SCALE_FUNCS(4, opt); \
    SCALE_FUNCS(8, opt); \
    SCALE_FUNCS(X, opt)
#define SCALE_FUNCS_SSE(opt) \
    SCALE_FUNCS(4, opt); \
    SCALE_FUNCS(8, opt); \
    SCALE_FUNCS(X4, opt); \
    SCALE_FUNCS(X8, opt)
#if ARCH_X86_32
SCALE_FUNCS_MMX(mmx);
#endif
SCALE_FUNCS_SSE(sse2);
SCALE_FUNCS_SSE(ssse3);
SCALE_FUNCS_SSE(sse4);

/* Prototype generators for the vertical scalers with a multi-tap filter
 * (yuv2planeX), per output bit depth and ISA. */
#define VSCALEX_FUNC(size, opt) \
extern void ff_yuv2planeX_ ## size ## _ ## opt(const int16_t *filter, int filterSize, \
                                               const int16_t **src, uint8_t *dest, int dstW, \
                                               const uint8_t *dither, int offset)
#define VSCALEX_FUNCS(opt) \
    VSCALEX_FUNC(8,  opt); \
    VSCALEX_FUNC(9,  opt); \
    VSCALEX_FUNC(10, opt)
#if ARCH_X86_32
VSCALEX_FUNCS(mmx2);
#endif
VSCALEX_FUNCS(sse2);
VSCALEX_FUNCS(sse4);
VSCALEX_FUNC(16, sse4);
VSCALEX_FUNCS(avx);

/* Prototype generators for the single-tap vertical scalers (yuv2plane1). */
#define VSCALE_FUNC(size, opt) \
extern void ff_yuv2plane1_ ## size ## _ ## opt(const int16_t *src, uint8_t *dst, int dstW, \
                                               const uint8_t *dither, int offset)
#define VSCALE_FUNCS(opt1, opt2) \
    VSCALE_FUNC(8,  opt1); \
    VSCALE_FUNC(9,  opt2); \
    VSCALE_FUNC(10, opt2); \
    VSCALE_FUNC(16, opt1)
#if ARCH_X86_32
VSCALE_FUNCS(mmx, mmx2);
#endif
VSCALE_FUNCS(sse2, sse2);
VSCALE_FUNC(16, sse4);
VSCALE_FUNCS(avx, avx);

/* Prototype generators for the packed-input unpackers (to planar Y / UV). */
#define INPUT_Y_FUNC(fmt, opt) \
extern void ff_ ## fmt ## ToY_ ## opt(uint8_t *dst, const uint8_t *src, \
                                      int w, uint32_t *unused)
#define INPUT_UV_FUNC(fmt, opt) \
extern void ff_ ## fmt ## ToUV_ ## opt(uint8_t *dstU, uint8_t *dstV, \
                                       const uint8_t *src, const uint8_t *unused1, \
                                       int w, uint32_t *unused2)
#define INPUT_FUNC(fmt, opt) \
    INPUT_Y_FUNC(fmt, opt); \
    INPUT_UV_FUNC(fmt, opt)
#define INPUT_FUNCS(opt) \
    INPUT_FUNC(uyvy, opt); \
    INPUT_FUNC(yuyv, opt); \
    INPUT_UV_FUNC(nv12, opt); \
    INPUT_UV_FUNC(nv21, opt); \
    INPUT_FUNC(rgba, opt); \
    INPUT_FUNC(bgra, opt); \
    INPUT_FUNC(argb, opt); \
    INPUT_FUNC(abgr, opt); \
    INPUT_FUNC(rgb24, opt); \
    INPUT_FUNC(bgr24, opt)
#if ARCH_X86_32
INPUT_FUNCS(mmx);
#endif
INPUT_FUNCS(sse2);
INPUT_FUNCS(ssse3);
INPUT_FUNCS(avx);
/**
 * Runtime dispatch: install the fastest available x86 implementations
 * (MMX/MMX2 inline-asm templates plus Yasm SSE2/SSSE3/SSE4/AVX kernels)
 * into the SwsContext function pointers, keyed on av_get_cpu_flags().
 * Later, more capable ISA branches overwrite earlier assignments.
 */
void ff_sws_init_swScale_mmx(SwsContext *c)
{
    int cpu_flags = av_get_cpu_flags();

    if (cpu_flags & AV_CPU_FLAG_MMX)
        sws_init_swScale_MMX(c);
#if HAVE_MMX2
    if (cpu_flags & AV_CPU_FLAG_MMX2)
        sws_init_swScale_MMX2(c);
#endif

#if HAVE_YASM
/* Pick the horizontal scaler matching the source bpc and destination bpc;
 * opt2 is used for the 15 bit output variants, opt1 for 19 bit. */
#define ASSIGN_SCALE_FUNC2(hscalefn, filtersize, opt1, opt2) do { \
    if (c->srcBpc == 8) { \
        hscalefn = c->dstBpc <= 10 ? ff_hscale8to15_ ## filtersize ## _ ## opt2 : \
                                     ff_hscale8to19_ ## filtersize ## _ ## opt1; \
    } else if (c->srcBpc == 9) { \
        hscalefn = c->dstBpc <= 10 ? ff_hscale9to15_ ## filtersize ## _ ## opt2 : \
                                     ff_hscale9to19_ ## filtersize ## _ ## opt1; \
    } else if (c->srcBpc == 10) { \
        hscalefn = c->dstBpc <= 10 ? ff_hscale10to15_ ## filtersize ## _ ## opt2 : \
                                     ff_hscale10to19_ ## filtersize ## _ ## opt1; \
    } else /* c->srcBpc == 16 */ { \
        hscalefn = c->dstBpc <= 10 ? ff_hscale16to15_ ## filtersize ## _ ## opt2 : \
                                     ff_hscale16to19_ ## filtersize ## _ ## opt1; \
    } \
} while (0)
/* MMX only has specialized kernels for filter sizes 4 and 8; everything
 * else falls back to the generic size-X kernel. */
#define ASSIGN_MMX_SCALE_FUNC(hscalefn, filtersize, opt1, opt2) \
    switch (filtersize) { \
    case 4:  ASSIGN_SCALE_FUNC2(hscalefn, 4, opt1, opt2); break; \
    case 8:  ASSIGN_SCALE_FUNC2(hscalefn, 8, opt1, opt2); break; \
    default: ASSIGN_SCALE_FUNC2(hscalefn, X, opt1, opt2); break; \
    }
/* 16 bit output has no generic macro slot; callers pass it as do_16_case.
 * 9/10 bit kernels only exist for little-endian destinations. */
#define ASSIGN_VSCALEX_FUNC(vscalefn, opt, do_16_case) \
switch(c->dstBpc){ \
    case 16:                          do_16_case;                          break; \
    case 10: if (!isBE(c->dstFormat)) vscalefn = ff_yuv2planeX_10_ ## opt; break; \
    case 9:  if (!isBE(c->dstFormat)) vscalefn = ff_yuv2planeX_9_  ## opt; break; \
    default:                          vscalefn = ff_yuv2planeX_8_  ## opt; break; \
    }
#define ASSIGN_VSCALE_FUNC(vscalefn, opt1, opt2, opt2chk) \
    switch(c->dstBpc){ \
    case 16: if (!isBE(c->dstFormat))            vscalefn = ff_yuv2plane1_16_ ## opt1; break; \
    case 10: if (!isBE(c->dstFormat) && opt2chk) vscalefn = ff_yuv2plane1_10_ ## opt2; break; \
    case 9:  if (!isBE(c->dstFormat) && opt2chk) vscalefn = ff_yuv2plane1_9_  ## opt2;  break; \
    default:                                     vscalefn = ff_yuv2plane1_8_  ## opt1;  break; \
    }
/* Hook up the packed-RGB unpackers; chroma only when not horizontally
 * subsampled at the source. */
#define case_rgb(x, X, opt) \
        case PIX_FMT_ ## X: \
            c->lumToYV12 = ff_ ## x ## ToY_ ## opt; \
            if (!c->chrSrcHSubSample) \
                c->chrToYV12 = ff_ ## x ## ToUV_ ## opt; \
            break
#if ARCH_X86_32
    if (cpu_flags & AV_CPU_FLAG_MMX) {
        ASSIGN_MMX_SCALE_FUNC(c->hyScale, c->hLumFilterSize, mmx, mmx);
        ASSIGN_MMX_SCALE_FUNC(c->hcScale, c->hChrFilterSize, mmx, mmx);
        ASSIGN_VSCALE_FUNC(c->yuv2plane1, mmx, mmx2, cpu_flags & AV_CPU_FLAG_MMX2);

        switch (c->srcFormat) {
        case PIX_FMT_Y400A:
            c->lumToYV12 = ff_yuyvToY_mmx;
            if (c->alpPixBuf)
                c->alpToYV12 = ff_uyvyToY_mmx;
            break;
        case PIX_FMT_YUYV422:
            c->lumToYV12 = ff_yuyvToY_mmx;
            c->chrToYV12 = ff_yuyvToUV_mmx;
            break;
        case PIX_FMT_UYVY422:
            c->lumToYV12 = ff_uyvyToY_mmx;
            c->chrToYV12 = ff_uyvyToUV_mmx;
            break;
        case PIX_FMT_NV12:
            c->chrToYV12 = ff_nv12ToUV_mmx;
            break;
        case PIX_FMT_NV21:
            c->chrToYV12 = ff_nv21ToUV_mmx;
            break;
        case_rgb(rgb24, RGB24, mmx);
        case_rgb(bgr24, BGR24, mmx);
        case_rgb(bgra,  BGRA,  mmx);
        case_rgb(rgba,  RGBA,  mmx);
        case_rgb(abgr,  ABGR,  mmx);
        case_rgb(argb,  ARGB,  mmx);
        default:
            break;
        }
    }
    if (cpu_flags & AV_CPU_FLAG_MMX2) {
        ASSIGN_VSCALEX_FUNC(c->yuv2planeX, mmx2,);
    }
#endif
/* SSE has specialized kernels for filter sizes 4, 8 and multiples of 4/8. */
#define ASSIGN_SSE_SCALE_FUNC(hscalefn, filtersize, opt1, opt2) \
    switch (filtersize) { \
    case 4:  ASSIGN_SCALE_FUNC2(hscalefn, 4, opt1, opt2); break; \
    case 8:  ASSIGN_SCALE_FUNC2(hscalefn, 8, opt1, opt2); break; \
    default: if (filtersize & 4) ASSIGN_SCALE_FUNC2(hscalefn, X4, opt1, opt2); \
             else                ASSIGN_SCALE_FUNC2(hscalefn, X8, opt1, opt2); \
             break; \
    }
    if (cpu_flags & AV_CPU_FLAG_SSE2) {
        ASSIGN_SSE_SCALE_FUNC(c->hyScale, c->hLumFilterSize, sse2, sse2);
        ASSIGN_SSE_SCALE_FUNC(c->hcScale, c->hChrFilterSize, sse2, sse2);
        ASSIGN_VSCALEX_FUNC(c->yuv2planeX, sse2,);
        ASSIGN_VSCALE_FUNC(c->yuv2plane1, sse2, sse2, 1);

        switch (c->srcFormat) {
        case PIX_FMT_Y400A:
            c->lumToYV12 = ff_yuyvToY_sse2;
            if (c->alpPixBuf)
                c->alpToYV12 = ff_uyvyToY_sse2;
            break;
        case PIX_FMT_YUYV422:
            c->lumToYV12 = ff_yuyvToY_sse2;
            c->chrToYV12 = ff_yuyvToUV_sse2;
            break;
        case PIX_FMT_UYVY422:
            c->lumToYV12 = ff_uyvyToY_sse2;
            c->chrToYV12 = ff_uyvyToUV_sse2;
            break;
        case PIX_FMT_NV12:
            c->chrToYV12 = ff_nv12ToUV_sse2;
            break;
        case PIX_FMT_NV21:
            c->chrToYV12 = ff_nv21ToUV_sse2;
            break;
        case_rgb(rgb24, RGB24, sse2);
        case_rgb(bgr24, BGR24, sse2);
        case_rgb(bgra,  BGRA,  sse2);
        case_rgb(rgba,  RGBA,  sse2);
        case_rgb(abgr,  ABGR,  sse2);
        case_rgb(argb,  ARGB,  sse2);
        default:
            break;
        }
    }
    if (cpu_flags & AV_CPU_FLAG_SSSE3) {
        ASSIGN_SSE_SCALE_FUNC(c->hyScale, c->hLumFilterSize, ssse3, ssse3);
        ASSIGN_SSE_SCALE_FUNC(c->hcScale, c->hChrFilterSize, ssse3, ssse3);
        switch (c->srcFormat) {
        case_rgb(rgb24, RGB24, ssse3);
        case_rgb(bgr24, BGR24, ssse3);
        default:
            break;
        }
    }
    if (cpu_flags & AV_CPU_FLAG_SSE4) {
        /* Xto15 don't need special sse4 functions */
        ASSIGN_SSE_SCALE_FUNC(c->hyScale, c->hLumFilterSize, sse4, ssse3);
        ASSIGN_SSE_SCALE_FUNC(c->hcScale, c->hChrFilterSize, sse4, ssse3);
        ASSIGN_VSCALEX_FUNC(c->yuv2planeX, sse4,
                            if (!isBE(c->dstFormat)) c->yuv2planeX = ff_yuv2planeX_16_sse4);
        if (c->dstBpc == 16 && !isBE(c->dstFormat))
            c->yuv2plane1 = ff_yuv2plane1_16_sse4;
    }

    if (cpu_flags & AV_CPU_FLAG_AVX) {
        ASSIGN_VSCALEX_FUNC(c->yuv2planeX, avx,);
        ASSIGN_VSCALE_FUNC(c->yuv2plane1, avx, avx, 1);

        switch (c->srcFormat) {
        case PIX_FMT_YUYV422:
            c->chrToYV12 = ff_yuyvToUV_avx;
            break;
        case PIX_FMT_UYVY422:
            c->chrToYV12 = ff_uyvyToUV_avx;
            break;
        case PIX_FMT_NV12:
            c->chrToYV12 = ff_nv12ToUV_avx;
            break;
        case PIX_FMT_NV21:
            c->chrToYV12 = ff_nv21ToUV_avx;
            break;
        case_rgb(rgb24, RGB24, avx);
        case_rgb(bgr24, BGR24, avx);
        case_rgb(bgra,  BGRA,  avx);
        case_rgb(rgba,  RGBA,  avx);
        case_rgb(abgr,  ABGR,  avx);
        case_rgb(argb,  ARGB,  avx);
        default:
            break;
        }
    }
#endif
}
| jeeb/ffdshow-tryouts | src/ffmpeg/libswscale/x86/swscale_mmx.c | C | gpl-2.0 | 19,272 |
<?php
if (!defined('ABSPATH')) {
exit;
}
/**
 * Declare l10n strings used in JavaScript so a gettext scanner can find them.
 *
 * NB: never called at runtime! Only the literal __() calls below matter to
 * string extractors (e.g. makepot); the array they build is discarded.
 */
function justDeclare() {
	// Intentionally unused variable: it only anchors the __() calls.
	$strings = array(
		__('Click for details', 'wp-flexible-map'),
		__('Directions', 'wp-flexible-map'),
		__('From', 'wp-flexible-map'),
		__('Get directions', 'wp-flexible-map'),
	);
}
| kaiakonsap/helimix | wp-content/plugins/wp-flexible-map/includes/declare-strings.php | PHP | gpl-2.0 | 360 |
import Controllers.GameController;
import Controllers.GraphicController;
/**
* @author zamil.majdy
* @version 1.10
*
* Main controller class
*/
/**
 * Application entry point for the Gomoku game.
 *
 * <p>Boots the game-logic controller first and the graphics controller
 * second (same order as the original implementation).</p>
 */
public class Main {

    public static void main(String[] args) {
        final GameController gameController = new GameController();
        final GraphicController graphicController = new GraphicController();
    }
}
| zmajdy/Gomoku-game | src/Main.java | Java | gpl-2.0 | 271 |
27 mtime=1195673360.655405
27 atime=1396915204.227922
30 ctime=1396915204.263921838
| MarkRobertJohnson/Ketarin | XmlRpc/PaxHeaders.9759/IHttpRequest.cs | C# | gpl-2.0 | 84 |
# Shared registry of mods; other modules access and mutate this list in place.
# NOTE(review): the original file had a module-level ``global mods`` statement,
# which is a no-op outside a function body and has been removed.
mods = []
| TheCherry/ark-server-manager | src/config.py | Python | gpl-2.0 | 22 |
<!-- HTML header for doxygen 1.8.13-->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<link rel="shortcut icon" href="favicon.ico" type="image/x-icon" />
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen 1.8.13"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<title>Power System Platform: Project/GeneratorStabForm.h File Reference</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="search/search.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="search/searchdata.js"></script>
<script type="text/javascript" src="search/search.js"></script>
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
extensions: ["tex2jax.js"],
jax: ["input/TeX","output/HTML-CSS"],
});
</script><script type="text/javascript" src="https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.2/MathJax.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
<tbody>
<tr style="height: 56px;">
<td id="projectlogo"><img alt="Logo" src="logoHeader.png"/></td>
<td id="projectalign" style="padding-left: 0.5em;">
<div id="projectname">Power System Platform
 <span id="projectnumber">2018w15a</span>
</div>
</td>
</tr>
</tbody>
</table>
</div>
<!-- end header part -->
<!-- Generated by Doxygen 1.8.13 -->
<script type="text/javascript">
var searchBox = new SearchBox("searchBox", "search",false,'Search');
</script>
<script type="text/javascript" src="menudata.js"></script>
<script type="text/javascript" src="menu.js"></script>
<script type="text/javascript">
$(function() {
initMenu('',true,false,'search.php','Search');
$(document).ready(function() { init_search(); });
});
</script>
<div id="main-nav"></div>
<!-- window showing the filter options -->
<div id="MSearchSelectWindow"
onmouseover="return searchBox.OnSearchSelectShow()"
onmouseout="return searchBox.OnSearchSelectHide()"
onkeydown="return searchBox.OnSearchSelectKey(event)">
</div>
<!-- iframe showing the search results (closed by default) -->
<div id="MSearchResultsWindow">
<iframe src="javascript:void(0)" frameborder="0"
name="MSearchResults" id="MSearchResults">
</iframe>
</div>
<div id="nav-path" class="navpath">
<ul>
<li class="navelem"><a class="el" href="dir_ffd1f789ec7bd0a45fc6ad92579c5070.html">Project</a></li> </ul>
</div>
</div><!-- top -->
<div class="header">
<div class="summary">
<a href="#nested-classes">Classes</a> </div>
<div class="headertitle">
<div class="title">GeneratorStabForm.h File Reference</div> </div>
</div><!--header-->
<div class="contents">
<div class="textblock"><code>#include "ElementFormBase.h"</code><br />
</div><div class="textblock"><div class="dynheader">
Include dependency graph for GeneratorStabForm.h:</div>
<div class="dyncontent">
<div class="center"><img src="_generator_stab_form_8h__incl.png" border="0" usemap="#_project_2_generator_stab_form_8h" alt=""/></div>
</div>
</div><table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="nested-classes"></a>
Classes</h2></td></tr>
<tr class="memitem:"><td class="memItemLeft" align="right" valign="top">class  </td><td class="memItemRight" valign="bottom"><a class="el" href="class_generator_stab_form.html">GeneratorStabForm</a></td></tr>
<tr class="memdesc:"><td class="mdescLeft"> </td><td class="mdescRight">Form to edit the synchronous generator data for electromechanical studies. <a href="class_generator_stab_form.html#details">More...</a><br /></td></tr>
<tr class="separator:"><td class="memSeparator" colspan="2"> </td></tr>
</table>
</div><!-- contents -->
<!-- start footer part -->
<hr class="footer"/><address class="footer"><small>
Generated by  <a href="http://www.doxygen.org/index.html">
<img class="footer" src="doxygen.png" alt="doxygen"/>
</a> 1.8.13
</small></address>
</body>
</html>
| Thales1330/PSP | docs/doxygen/html/_generator_stab_form_8h.html | HTML | gpl-2.0 | 4,401 |
-------------------------------------------
--- Add "Forum Activation" to list view ---
-------------------------------------------
-- Relation between the geo-object type and the forum type, rendered as a
-- "Related Info"/"Related Form" field at ordinal position 390.
INSERT INTO Association VALUES ('at-relation', 1, 1, 'a-ka-118', '', 'tt-ka-geoobject', 1, 'tt-ka-forum', 1);
INSERT INTO AssociationProp VALUES ('a-ka-118', 1, 'Cardinality', 'one');
INSERT INTO AssociationProp VALUES ('a-ka-118', 1, 'Association Type ID', 'at-association');
INSERT INTO AssociationProp VALUES ('a-ka-118', 1, 'Web Info', 'Related Info');
INSERT INTO AssociationProp VALUES ('a-ka-118', 1, 'Web Form', 'Related Form');
INSERT INTO AssociationProp VALUES ('a-ka-118', 1, 'Ordinal Number', '390');

----------------------------------------
--- New Feature: Customizable Layout ---
----------------------------------------

--- topic type "Stylesheet" ---
INSERT INTO Topic VALUES ('tt-topictype', 1, 1, 'tt-ka-stylesheet', 'Stylesheet');
INSERT INTO TopicProp VALUES ('tt-ka-stylesheet', 1, 'Name', 'Stylesheet');
INSERT INTO TopicProp VALUES ('tt-ka-stylesheet', 1, 'Plural Name', 'Stylesheets');
INSERT INTO TopicProp VALUES ('tt-ka-stylesheet', 1, 'Description', '<html><body><p>Ein <i>Stylesheet</i> ist ...</p></body></html>');
INSERT INTO TopicProp VALUES ('tt-ka-stylesheet', 1, 'Description Query', 'Was ist ein Stylesheet?');
INSERT INTO TopicProp VALUES ('tt-ka-stylesheet', 1, 'Icon', 'description.gif');
-- INSERT INTO TopicProp VALUES ('tt-ka-stylesheet', 1, 'Creation Icon', 'createKompetenzstern.gif');
-- INSERT INTO TopicProp VALUES ('tt-ka-stylesheet', 1, 'Unique Topic Names', 'on');
-- INSERT INTO TopicProp VALUES ('tt-ka-stylesheet', 1, 'Custom Implementation', 'de.kiezatlas.deepamehta.topics.ColorTopic');
-- super type
-- INSERT INTO Association VALUES ('at-derivation', 1, 1, 'a-ka-79', '', 'tt-generic', 1, 'tt-ka-stylesheet', 1);
-- search type (container type used when searching for stylesheets)
INSERT INTO Topic VALUES ('tt-topictype', 1, 1, 'tt-ka-stylesheet-search', 'Stylesheet Suche');
INSERT INTO TopicProp VALUES ('tt-ka-stylesheet-search', 1, 'Name', 'Stylesheet Suche');
-- INSERT INTO TopicProp VALUES ('tt-ka-stylesheet-search', 1, 'Icon', 'KompetenzsternContainer.gif');
-- derive search type
INSERT INTO Association VALUES ('at-derivation', 1, 1, 'a-ka-106', '', 'tt-topiccontainer', 1, 'tt-ka-stylesheet-search', 1);
-- assign search type to type
INSERT INTO Association VALUES ('at-aggregation', 1, 1, 'a-ka-107', '', 'tt-ka-stylesheet-search', 1, 'tt-ka-stylesheet', 1);

--- property "CSS" ---
INSERT INTO Topic VALUES ('tt-property', 1, 1, 'pp-ka-css', 'CSS');
INSERT INTO TopicProp VALUES ('pp-ka-css', 1, 'Name', 'CSS');
INSERT INTO TopicProp VALUES ('pp-ka-css', 1, 'Visualization', 'Multiline Input Field');
-- assign property to "Stylesheet"
INSERT INTO Association VALUES ('at-composition', 1, 1, 'a-ka-108', '', 'tt-ka-stylesheet', 1, 'pp-ka-css', 1);
INSERT INTO AssociationProp VALUES ('a-ka-108', 1, 'Ordinal Number', '200');
-- assign "Name" property to "Stylesheet"
INSERT INTO Association VALUES ('at-composition', 1, 1, 'a-ka-109', '', 'tt-ka-stylesheet', 1, 'pp-name', 1);
INSERT INTO AssociationProp VALUES ('a-ka-109', 1, 'Ordinal Number', '100');

--- association type "Homepage Link" ---
INSERT INTO Topic VALUES ('tt-assoctype', 1, 1, 'at-ka-homepage-link', 'Homepage Link');
INSERT INTO TopicProp VALUES ('at-ka-homepage-link', 1, 'Name', 'Homepage Link');
INSERT INTO TopicProp VALUES ('at-ka-homepage-link', 1, 'Plural Name', 'Homepage Links');
INSERT INTO TopicProp VALUES ('at-ka-homepage-link', 1, 'Color', '#cc9933');
-- INSERT INTO TopicProp VALUES ('at-ka-homepage-link', 1, 'Custom Implementation', '...');
-- super type
-- INSERT INTO Association VALUES ('at-derivation', 1, 1, 'a-757', '', 'at-generic', 1, 'at-ka-homepage-link', 1);

--- association type "Impressum Link" ---
INSERT INTO Topic VALUES ('tt-assoctype', 1, 1, 'at-ka-impressum-link', 'Impressum Link');
INSERT INTO TopicProp VALUES ('at-ka-impressum-link', 1, 'Name', 'Impressum Link');
INSERT INTO TopicProp VALUES ('at-ka-impressum-link', 1, 'Plural Name', 'Impressum Links');
INSERT INTO TopicProp VALUES ('at-ka-impressum-link', 1, 'Color', '#9933cc');
-- INSERT INTO TopicProp VALUES ('at-ka-impressum-link', 1, 'Custom Implementation', '...');
-- super type
-- INSERT INTO Association VALUES ('at-derivation', 1, 1, 'a-757', '', 'at-generic', 1, 'at-ka-impressum-link', 1);

--- assign types to workspace "Kiez-Atlas" ---
-- Grant the workspace "create" permission for each new type.
-- "Stylesheet"
INSERT INTO Association VALUES ('at-uses', 1, 1, 'a-ka-111', '', 't-ka-workspace', 1, 'tt-ka-stylesheet', 1);
INSERT INTO AssociationProp VALUES ('a-ka-111', 1, 'Access Permission', 'create');
-- "Image"
INSERT INTO Association VALUES ('at-uses', 1, 1, 'a-ka-112', '', 't-ka-workspace', 1, 'tt-image', 1);
INSERT INTO AssociationProp VALUES ('a-ka-112', 1, 'Access Permission', 'create');
-- "Webpage"
INSERT INTO Association VALUES ('at-uses', 1, 1, 'a-ka-119', '', 't-ka-workspace', 1, 'tt-webpage', 1);
INSERT INTO AssociationProp VALUES ('a-ka-119', 1, 'Access Permission', 'create');
-- "Homepage Link"
INSERT INTO Association VALUES ('at-uses', 1, 1, 'a-ka-114', '', 't-ka-workspace', 1, 'at-ka-homepage-link', 1);
INSERT INTO AssociationProp VALUES ('a-ka-114', 1, 'Access Permission', 'create');
-- "Impressum Link"
INSERT INTO Association VALUES ('at-uses', 1, 1, 'a-ka-115', '', 't-ka-workspace', 1, 'at-ka-impressum-link', 1);
INSERT INTO AssociationProp VALUES ('a-ka-115', 1, 'Access Permission', 'create');

---
--- Default Content ---
---

--- default site logo ---
INSERT INTO Topic VALUES ('tt-image', 1, 1, 't-ka-logo', 'Kiezatlas Logo');
INSERT INTO TopicProp VALUES ('t-ka-logo', 1, 'Name', 'Kiezatlas Logo');
INSERT INTO TopicProp VALUES ('t-ka-logo', 1, 'File', 'kiezatlas-logo.png');
-- assign to "Kiez-Atlas" workspace
INSERT INTO Association VALUES ('at-association', 1, 1, 'a-ka-113', '', 't-ka-workspace', 1, 't-ka-logo', 1);

--- default homepage link ---
INSERT INTO Topic VALUES ('tt-webpage', 1, 1, 't-ka-website', 'Kiezatlas Website');
INSERT INTO TopicProp VALUES ('t-ka-website', 1, 'Name', 'Kiezatlas Website');
INSERT INTO TopicProp VALUES ('t-ka-website', 1, 'URL', 'http://www.kiezatlas.de/');
-- assign to "Kiez-Atlas" workspace
INSERT INTO Association VALUES ('at-ka-homepage-link', 1, 1, 'a-ka-116', '', 't-ka-workspace', 1, 't-ka-website', 1);

--- default impressum link ---
INSERT INTO Topic VALUES ('tt-webpage', 1, 1, 't-ka-impressum', 'Kiezatlas Impressum');
INSERT INTO TopicProp VALUES ('t-ka-impressum', 1, 'Name', 'Kiezatlas Impressum');
INSERT INTO TopicProp VALUES ('t-ka-impressum', 1, 'URL', 'http://www.kiezatlas.de/impressum.html');
-- assign to "Kiez-Atlas" workspace
INSERT INTO Association VALUES ('at-ka-impressum-link', 1, 1, 'a-ka-117', '', 't-ka-workspace', 1, 't-ka-impressum', 1);

--- default stylesheet ---
INSERT INTO Topic VALUES ('tt-ka-stylesheet', 1, 1, 't-ka-default-stylesheet', 'Kiezatlas Stylesheet');
INSERT INTO TopicProp VALUES ('t-ka-default-stylesheet', 1, 'Name', 'Kiezatlas Stylesheet');
-- assign to "Kiez-Atlas" workspace
INSERT INTO Association VALUES ('at-association', 1, 1, 'a-ka-110', '', 't-ka-workspace', 1, 't-ka-default-stylesheet', 1);
-- CSS
INSERT INTO TopicProp VALUES ('t-ka-default-stylesheet', 1, 'CSS', '\n
body, td {\n
\tfont-family: Verdana, Arial, Lucida Sans;\n
\tfont-size: 12px;\n
}\n
\n
body {\n
\tbackground-color: #FFFFFF;\n
\tmargin: 0px;\n
}\n
\n
/* Kiezatlas: header area of right frame */\n
.header-area {\n
\twidth: 95%;\n
\tbackground-color: #F3F3F3;\n
\tpadding: 8px;\n
}\n
\n
/* Kiezatlas: content area of right frame */\n
.content-area {\n
\twidth: 95%;\n
\tbackground-color: #FFFFFF;\n
\tpadding-top: 30px;\n
\tpadding-bottom: 40px;\n
\tpadding-left: 8px;\n
\tpadding-right: 8px;\n
}\n
\n
/* Kiezatlas: footer area of right frame */\n
.footer-area {\n
\twidth: 95%;\n
\tbackground-color: #F3F3F3;\n
\tpadding: 8px;\n
}\n
\n
/* Kiezatlas: the citymap name contained in the header */\n
.citymap-name {\n
\tfont-size: 14px;\n
\tfont-weight: bold;\n
}\n
\n
.small {\n
\tfont-size: 10px;\n
\tcolor: #666666;\n
}\n
\n
.secondary-text {\n
\tfont-size: 10px;\n
\tcolor: #666666;\n
}\n
\n
/* Kiezatlas (list interface): the heading citymap name */\n
.heading {\n
\tfont-size: 18px;\n
\tfont-weight: bold;\n
}\n
\n
/* DeepaMehta list generator: the highlighted row */\n
.list-highlight {\n
\tbackground-color: #FFE0E0;\n
}\n
\n
/* DeepaMehta list generator: an even row (use for zebra striping) */\n
.list-evenrow {\n
\tbackground-color: #E0E0FF;\n
}\n
\n
/* DeepaMehta list generator: an odd row (use for zebra striping) */\n
.list-oddrow {\n
\tbackground-color: #FFFFFF;\n
}\n
\n
/* DeepaMehta info generator: outmost container for label/content pairs */\n
.info-container {\n
}\n
\n
/* DeepaMehta info generator: a label/content pair */\n
.info {\n
}\n
\n
/* DeepaMehta info generator: the label part of a label/content pair */\n
.info-label {\n
\tfont-size: 10px;\n
\tfont-weight: bold;\n
\tcolor: #666666;\n
}\n
\n
/* DeepaMehta info generator: the content part of a label/content pair */\n
.info-content {\n
}\n
\n
/* Kiezatlas: outmost container for notifications */\n
.notification {\n
\tcolor: red;\n
}\n
\n
/* Kiezatlas: an "info" notifications */\n
.notification-info:before {\n
\tcontent: url("../images/notification-info.gif")" ";\n
}\n
\n
/* Kiezatlas: a "warning" notifications */\n
.notification-warning:before {\n
\tcontent: url("../images/notification-warning.gif")" ";\n
}\n
\n
/* Kiezatlas: an "error" notifications */\n
.notification-error:before {\n
\tcontent: url("../images/notification-error.gif")" ";\n
}\n
');
| mukil/kiezatlas1 | db/patches/ka-1.6.1.sql | SQL | gpl-2.0 | 9,552 |
SECTION code_driver
SECTION code_driver_tty
PUBLIC asm_tty_param_b_action
EXTERN asm_tty_state_get_1
EXTERN asm_tty_state_param_store
asm_tty_param_b_action:

   ; c = action code
   ; stack = & tty.action

   ; command code has one parameter and action is invoked

   ; de = continuation handed to asm_tty_state_param_store; presumably the
   ; state routine that collects the single parameter before the action is
   ; run (NOTE(review): confirm against asm_tty_state_param_store).

   ld de,asm_tty_state_get_1
   jp asm_tty_state_param_store        ; tail call - does not return here
| bitfixer/bitfixer | dg/z88dk/libsrc/_DEVELOPMENT/drivers/tty/state/asm_tty_param_b_action.asm | Assembly | gpl-2.0 | 332 |
using UnityEngine;
using System.Collections;
using System.Collections.Generic;
//helper stuff
namespace M8 {
    public struct NGUIExtUtil {
        /// <summary>
        /// Resize and reposition the widget so it encloses <paramref name="bounds"/>
        /// (expressed in the widget's local space) plus the given padding on every
        /// side. The widget's pivot is forced to TopLeft.
        /// </summary>
        public static void WidgetEncapsulateBoundsLocal(UIWidget widget, Vector2 padding, Bounds bounds) {
            widget.pivot = UIWidget.Pivot.TopLeft;

            Transform widgetTrans = widget.cachedTransform;

            // New top-left corner: the bounds extended outward by the padding.
            Vector3 newPos = widgetTrans.localPosition;
            newPos.x = bounds.min.x - padding.x;
            newPos.y = bounds.max.y + padding.y;

            // New size: the bounds size plus padding on both sides of each axis.
            Vector3 newScale = widgetTrans.localScale;
            newScale.x = bounds.size.x + padding.x * 2.0f;
            newScale.y = bounds.size.y + padding.y * 2.0f;

            widgetTrans.localPosition = newPos;
            widgetTrans.localScale = newScale;
        }
    }
}
| ddionisio/WreckingWhore | Assets/M8/Scripts/NGUIExt/NGUIExtUtil.cs | C# | gpl-2.0 | 874 |
<?php
/**
 * @package Eternal_Megamenu
 * @author Eternal Friend
 * @copyright Copyright 2014
 *
 * Installer script: registers the mega-menu attributes on catalog
 * categories.  Every attribute shares the same EAV options, so the shared
 * part is declared once and merged with the per-attribute overrides below
 * (previously each attribute repeated the full option array).
 */
$installer = $this;
$installer->startSetup();

// Options common to every mega-menu category attribute.
$menuAttributeDefaults = array(
    'group'                    => 'Menu',
    'visible'                  => true,
    'required'                 => false,
    'backend'                  => '',
    'frontend'                 => '',
    'searchable'               => false,
    'filterable'               => false,
    'comparable'               => false,
    'user_defined'             => true,
    'visible_on_front'         => true,
    'wysiwyg_enabled'          => false,
    'is_html_allowed_on_front' => false,
    'global'                   => Mage_Catalog_Model_Resource_Eav_Attribute::SCOPE_STORE,
);

// Per-attribute settings; keys here override the defaults above.
// Creation order is preserved (same order as the original script).
$menuAttributes = array(
    'sw_cat_block_type' => array(
        'label'  => 'Menu Type',
        'note'   => "This field is applicable only for top-level categories.",
        'type'   => 'varchar',
        'input'  => 'select',
        'source' => 'megamenu/category_attribute_source_type_style',
    ),
    'sw_cat_static_width' => array(
        'label' => 'Static Width',
        'note'  => "This field is applicable only for top-level categories and the item Menu Type is static width is applied.",
        'type'  => 'text',
        'input' => 'text',
    ),
    'sw_cat_block_position' => array(
        'label'  => 'Menu Position',
        'note'   => "This field is applicable only for top-level categories.",
        'type'   => 'varchar',
        'input'  => 'select',
        'source' => 'megamenu/category_attribute_source_type_position',
    ),
    'sw_cat_block_columns' => array(
        'label'  => 'Sub Category Menu Columns',
        'note'   => "The number of displayed subcategories' column. This field is applicable only for top-level categories.",
        'type'   => 'varchar',
        'input'  => 'select',
        'source' => 'megamenu/category_attribute_source_block_subcolumns',
    ),
    'sw_cat_block_top' => array(
        'label'                    => 'Block Top',
        'type'                     => 'text',
        'input'                    => 'textarea',
        'wysiwyg_enabled'          => true,
        'is_html_allowed_on_front' => true,
    ),
    'sw_cat_left_block_width' => array(
        'label'  => 'Block Left Width (%)',
        'note'   => "Proportions of Block Left. This field is applicable only for top-level categories.",
        'type'   => 'text',
        'input'  => 'select',
        'source' => 'megamenu/category_attribute_source_block_columns',
    ),
    'sw_cat_block_left' => array(
        'label'                    => 'Block Left',
        'note'                     => "This field is applicable only for top-level categories.",
        'type'                     => 'text',
        'input'                    => 'textarea',
        'wysiwyg_enabled'          => true,
        'is_html_allowed_on_front' => true,
    ),
    'sw_cat_right_block_width' => array(
        'label'  => 'Block Right Width (%)',
        'note'   => "Proportions Block Right. This field is applicable only for top-level categories.",
        'type'   => 'text',
        'input'  => 'select',
        'source' => 'megamenu/category_attribute_source_block_columns',
    ),
    'sw_cat_block_right' => array(
        'label'                    => 'Block Right',
        'note'                     => "This field is applicable only for top-level categories.",
        'type'                     => 'text',
        'input'                    => 'textarea',
        'wysiwyg_enabled'          => true,
        'is_html_allowed_on_front' => true,
    ),
    'sw_cat_block_bottom' => array(
        'label'                    => 'Block Bottom',
        'type'                     => 'text',
        'input'                    => 'textarea',
        'wysiwyg_enabled'          => true,
        'is_html_allowed_on_front' => true,
    ),
    'sw_cat_label' => array(
        'label'  => 'Category Label',
        'note'   => "Labels have to be defined in menu settings",
        'type'   => 'varchar',
        'input'  => 'select',
        'source' => 'megamenu/category_attribute_source_label_categorylabel',
    ),
);

foreach ($menuAttributes as $attributeCode => $overrides) {
    $installer->addAttribute(
        'catalog_category',
        $attributeCode,
        array_merge($menuAttributeDefaults, $overrides)
    );
}

$installer->endSetup();
$installer->endSetup(); | miguelangelramirez/magento.dev | app/code/local/Smartwave/Megamenu/sql/megamenu_setup/mysql4-install-1.0.0.php | PHP | gpl-2.0 | 9,246 |
/*
* LOSLocationCluster
*
* Created on 2009
*
* Copyright (c) 2009 LinogistiX GmbH. All rights reserved.
*
* <a href="http://www.linogistix.com/">browse for licence information</a>
*
*/
package de.linogistix.los.location.entityservice;
import javax.ejb.Stateless;
import javax.persistence.NoResultException;
import javax.persistence.Query;
import org.mywms.globals.ServiceExceptionKey;
import org.mywms.service.BasicServiceBean;
import org.mywms.service.EntityNotFoundException;
import de.linogistix.los.location.model.LOSLocationCluster;
/**
* @see de.linogistix.los.location.entityservice.LOSLocationClusterService
*
* @author krane
*/
@Stateless
public class LOSLocationClusterServiceBean
    extends BasicServiceBean<LOSLocationCluster>
    implements LOSLocationClusterService
{

	/**
	 * Creates a new {@link LOSLocationCluster} with the given name and
	 * persists it immediately; the persistence context is flushed so the
	 * row exists in the database when this method returns.
	 */
	public LOSLocationCluster createLocationCluster(String name) {
		final LOSLocationCluster newCluster = new LOSLocationCluster();
		newCluster.setName(name);
		manager.persist(newCluster);
		manager.flush();
		return newCluster;
	}

	/**
	 * Looks up the cluster with the given unique name.
	 *
	 * @throws EntityNotFoundException if no cluster with that name exists
	 */
	public LOSLocationCluster getByName(String name) throws EntityNotFoundException {
		final String jpql = "SELECT cl FROM " + LOSLocationCluster.class.getSimpleName()
				+ " cl WHERE cl.name=:name";
		final Query query = manager.createQuery(jpql);
		query.setParameter("name", name);
		try {
			return (LOSLocationCluster) query.getSingleResult();
		}
		catch (NoResultException ex) {
			throw new EntityNotFoundException(
					ServiceExceptionKey.NO_ENTITY_WITH_NAME);
		}
	}
}
| Jacksson/mywms | server.app/los.location-ejb/src/de/linogistix/los/location/entityservice/LOSLocationClusterServiceBean.java | Java | gpl-2.0 | 1,609 |
<?php
defined('_JEXEC') or die;

JToolBarHelper::cancel();
?>
<table class="adminlist">
	<thead>
		<tr>
			<th>Usuario</th>
			<th>Pregunta</th>
			<th>Respuesta</th>
			<th>Fecha</th>
		</tr>
	</thead>
	<tbody>
		<?php
		// Render one row per contest participant.  All user-supplied values
		// are escaped with htmlspecialchars() before being echoed (they were
		// previously printed raw, which allowed stored XSS), and data cells
		// now use <td> instead of <th>.
		foreach($arrData as $objData){
			// Questions are stored comma-separated; show one per line.
			$strQuestions = "";
			if(!empty($objData->question)){
				foreach(explode(',', $objData->question) as $strQuestion){
					$strQuestions .= htmlspecialchars($strQuestion, ENT_QUOTES, 'UTF-8').'<br/><br/>';
				}
			}
			// Answers are stored as a JSON array; nested objects are
			// re-encoded to JSON for display.
			$strAnswers = "";
			if(!empty($objData->answer)){
				foreach(json_decode($objData->answer) as $mixAnswer){
					if(is_object($mixAnswer)){
						$strAnswers .= htmlspecialchars(json_encode($mixAnswer), ENT_QUOTES, 'UTF-8').'<br/><br/>';
					}else{
						$strAnswers .= htmlspecialchars($mixAnswer, ENT_QUOTES, 'UTF-8').'<br/><br/>';
					}
				}
			}
			echo "<tr>"
					."<td>".htmlspecialchars(strtoupper($objData->username), ENT_QUOTES, 'UTF-8')."</td>"
					."<td>".$strQuestions."</td>"
					."<td>".$strAnswers."</td>"
					."<td>".htmlspecialchars($objData->date_answer, ENT_QUOTES, 'UTF-8')."</td>"
				."</tr>";
		}
		?>
	</tbody>
</table>
<?php
$doc = JFactory::getDocument();
$doc->addScript(JURI::base().'../templates/terminales_v1/js/jquery-1.9.0.min.js', "text/javascript");
$doc->addScript(JURI::base().'/components/com_contest/js/contest.js', "text/javascript");
?>
?> | pablopalillo/terminales-medellin | administrator/components/com_contest/tmpl/default_contestAnswer.php | PHP | gpl-2.0 | 1,339 |
#!/usr/bin/perl
#Edited by Jmo 11/09/2015 to use ghostKO/Koala KEGG files
#Edited by Jmo 07/18/14 to change in and parse the feature_acc to accept a KAAS file with multiple acc
# Loads KEGG Orthology (KO) annotations into feature_annotations, either
# from a two-column "feature_accession KO" file (-i) or, when no file is
# given, from existing 'KO' rows in feature_evidence.
# Options: -D database name, -i KO file, -u db user, -p db password.
use lib $ENV{SCRIPTS};
use ENV;
use DBI;
use Getopt::Std;
use strict;
my %arg;
&getopts('D:i:u:p:', \%arg);
# Connection parameters fall back to environment values where possible.
my $host = $ENV{DBSERVER} ? $ENV{DBSERVER} : 'localhost';
my $user = $arg{u} ? $arg{u} : $ENV{USER};
my $password = $arg{p} or die "No password provided.";
my $dbh = DBI->connect("dbi:mysql:host=$host;db=$arg{D}", $user, $password);
my $ko_file = $arg{i};
# Cache the whole egad.ko reference table in memory, keyed by KO accession;
# load_ko_annotation() reads product/gene_sym/ec from this hash.
my $ko_q = "SELECT * FROM egad.ko";
my $koref = $dbh->selectall_hashref($ko_q, 'ko');
if ($ko_file) {
    # File mode: each non-comment line is "feature_accession<ws>KO".
    print STDERR "Loading data from $ko_file...\n";
    open my $in, $ko_file or die "Can't open $ko_file: $!\n";
    ## 1. Read in line
    while (my $line = <$in>) {
	next if ($line =~ /^#/);
	chomp $line;
	my ($feat_acc,
	    $ko) = split/\s+/, $line;
	# Skip lines with no KO column; accession is resolved to a feature_id.
	&load_ko_annotation($dbh, &get_feature_id_by_accession($dbh,$feat_acc), $ko) if ($ko);
    }
} else {
    # Get ko hit info from database
    print STDERR "Retrieving KO hit data from database $arg{D}...\n";
    # Sanity check: compare CDS feature count against features with KO hits
    # and ask the operator to confirm before rewriting annotations.
    my $feat_count_q = "SELECT count(feature_id) from sequence_features where feat_type='CDS'";
    my $ko_feat_count_q = "SELECT count(distinct feature_id) from feature_evidence where ev_type='KO'";
    my ($fc) = $dbh->selectrow_array($feat_count_q);
    my ($hfc) = $dbh->selectrow_array($ko_feat_count_q);
    print STDERR "I see $hfc features with ko hits out of $fc features total.\nDoes that sound right?(Y/n) ";
    my $answer = <STDIN>;
    if ($answer =~ /^n/i) { print STDERR " Dying! "; die; }
    my $ko_q = "SELECT feature_id, ev_accession, score"
	. " FROM feature_evidence"
	. " WHERE ev_type='KO'";
    print "$ko_q\n";
    my $ko_ev_ref = $dbh->selectall_arrayref($ko_q);
    # One annotation load per KO evidence row.
    foreach my $row (@$ko_ev_ref) {
	my $ko_acc = $row->[1];
	print "$ko_acc\n";
	&load_ko_annotation($dbh, $row->[0], $ko_acc);
    }
}
sub load_ko_annotation {
    # Replace the rank-5 ghostKOALA annotations for one feature with the
    # product / gene symbol / EC number recorded for the given KO accession
    # in the global %$koref lookup table.
    my $dbh = shift;      # open DBI handle
    my $feat_id = shift;  # feature_id in feature_annotations
    my $ko_acc = shift;   # KO accession (key into %$koref)

    # clear out existing data -- feature_id is bound as a placeholder
    # rather than interpolated into the SQL string.
    my $del_q = "DELETE FROM feature_annotations"
	. " WHERE feature_id=?"
	. " AND rank = 5"
	. " AND source = 'ghostKOALA'";
    $dbh->do($del_q, undef, $feat_id);

    my $upd_q = "INSERT feature_annotations"
	. " (feature_id, data_type_id, value, rank, source, edit_by)"
	. " VALUES (?, ?, ?, 5,\"ghostKOALA\", USER())"
	;
    my $sth = $dbh->prepare($upd_q);

    # data_type_id 66 carries the product name, 35 the gene symbol,
    # 1 the EC number; each is inserted only when present in the KO table.
    if ($koref->{$ko_acc}->{product}) {
	$sth->execute($feat_id, 66, $koref->{$ko_acc}->{product});
    }
    if ($koref->{$ko_acc}->{gene_sym}) {
	$sth->execute($feat_id, 35, $koref->{$ko_acc}->{gene_sym});
    }
    if ($koref->{$ko_acc}->{ec}) {
	$sth->execute($feat_id, 1, $koref->{$ko_acc}->{ec});
    }
}
exit;
| wichne/ENVscripts | ann_by_ko.pl | Perl | gpl-2.0 | 2,731 |
/*
Neutrino-GUI - DBoxII-Project
Copyright (C) 2001 Steffen Hehn 'McClean'
Homepage: http://dbox.cyberphoria.org/
Kommentar:
Diese GUI wurde von Grund auf neu programmiert und sollte nun vom
Aufbau und auch den Ausbaumoeglichkeiten gut aussehen. Neutrino basiert
auf der Client-Server Idee, diese GUI ist also von der direkten DBox-
Steuerung getrennt. Diese wird dann von Daemons uebernommen.
License: GPL
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <system/localize.h>
#include <system/locals_intern.h>
#include <fstream>
#include <string>
#include <cstring>
// Tab-separated source file: "<2-letter code> <3-letter code> ... <name>".
static const char * iso639filename = "/share/iso-codes/iso-639.tab";
// At most this many "missing entry" messages are printed per locale load.
static const unsigned int max_error_messages = 10;
#if 1
#include <stdlib.h>
#include <stdio.h>
#define ISO639_TABLE_SIZE 489
/* One ISO 639 table entry: code plus its human-readable language name. */
typedef struct
{
	char * iso_639_2_code;
	char * name;
} iso639_t;
/* Fixed-size, qsort()ed lookup table filled by initialize_iso639_map(). */
iso639_t iso639[ISO639_TABLE_SIZE];
int mycompare(const void * a, const void * b)
{
return strcmp(((iso639_t *)a)->iso_639_2_code, ((iso639_t *)b)->iso_639_2_code);
}
/* Fills the global iso639[] table from iso639filename and sorts it for
 * bsearch().  Each input row yields one entry per code column (two when
 * the 2- and 3-letter codes differ); unused slots are padded with junk
 * so the table is always exactly ISO639_TABLE_SIZE entries. */
void initialize_iso639_map(void)
{
	unsigned i = 0;
	std::string s, t, v;
	std::ifstream in(iso639filename);
	if (in.is_open())
	{
		// Skip leading '#' comment lines.
		while (in.peek() == '#')
			getline(in, s);
		// s = first code, t = second code, rest of line = language name.
		while (in >> s >> t >> v >> std::ws)
		{
			getline(in, v);
			if (i == ISO639_TABLE_SIZE)
			{
				printf("ISO639 table overflow\n");
				goto do_sorting;
			}
			iso639[i].iso_639_2_code = strdup(s.c_str());
			iso639[i].name = strdup(v.c_str());
			i++;
			// A differing second code gets its own entry sharing the
			// same name pointer (no second strdup).
			if (s != t)
			{
				if (i == ISO639_TABLE_SIZE)
				{
					printf("ISO639 table overflow\n");
					goto do_sorting;
				}
				iso639[i].iso_639_2_code = strdup(t.c_str());
//				iso639[i].name = strdup(v.c_str());
				iso639[i].name = iso639[i - 1].name;
				i++;
			}
		}
		// Pad any remaining slots so bsearch() never sees NULL pointers.
		if (i != ISO639_TABLE_SIZE)
		{
			printf("ISO639 table underflow\n");
			while(i < ISO639_TABLE_SIZE)
			{
				iso639[i].iso_639_2_code = iso639[i].name = (char *)iso639filename; // fill with junk
				i++;
			}
		}
	do_sorting:
		qsort(iso639, ISO639_TABLE_SIZE, sizeof(iso639_t), mycompare);
	}
	else
		printf("Loading %s failed.\n", iso639filename);
}
const char * getISO639Description(const char * const iso)
{
iso639_t tmp;
tmp.iso_639_2_code = (char *)iso;
void * value = bsearch(&tmp, iso639, ISO639_TABLE_SIZE, sizeof(iso639_t), mycompare);
if (value == NULL)
return iso;
else
return ((iso639_t *)value)->name;
}
#else
/* Disabled alternative implementation: same interface as above, but backed
 * by a std::map instead of a sorted fixed-size array. */
#include <iostream>
#include <map>
static std::map<std::string, std::string> iso639;
/* Reads iso639filename into the map; both code columns map to the name. */
void initialize_iso639_map(void)
{
	std::string s, t, u, v;
	std::ifstream in(iso639filename);
	if (in.is_open())
	{
		// Skip leading '#' comment lines.
		while (in.peek() == '#')
			getline(in, s);
		while (in >> s >> t >> u >> std::ws)
		{
			getline(in, v);
			iso639[s] = v;
			if (s != t)
				iso639[t] = v;
		}
	}
	else
		std::cout << "Loading " << iso639filename << " failed." << std::endl;
}
/* Map lookup; unknown codes are returned unchanged. */
const char * getISO639Description(const char * const iso)
{
	std::map<std::string, std::string>::const_iterator it = iso639.find(std::string(iso));
	if (it == iso639.end())
		return iso;
	else
		return it->second.c_str();
}
#endif
// Allocate the translation table and point every entry at the built-in
// key name; loadLocale() later replaces matched entries with strdup()ed
// translations, so the destructor can tell owned strings apart.
CLocaleManager::CLocaleManager()
{
	localeData = new char * [sizeof(locale_real_names)/sizeof(const char *)];
	for (unsigned int i = 0; i < (sizeof(locale_real_names)/sizeof(const char *)); i++)
		localeData[i] = (char *)locale_real_names[i];
}
// Free only entries that were replaced by strdup()ed translations; entries
// still pointing into locale_real_names are statically allocated.
CLocaleManager::~CLocaleManager()
{
	for (unsigned j = 0; j < (sizeof(locale_real_names)/sizeof(const char *)); j++)
		if (localeData[j] != locale_real_names[j])
			free(localeData[j]);
	delete[] localeData;
}
/* Search order for locale files: user config dir first, then system dir. */
const char * path[2] = {CONFIGDIR "/locale/", DATADIR "/neutrino/locale/"};

/* Loads "<locale>.locale" (lines of "key value"), matching keys against
 * the sorted locale_real_names[] table in a single merge-style pass, and
 * fills localeData[] with strdup()ed translations.  Unmatched keys keep
 * their built-in English fallback.  Returns which font is required. */
CLocaleManager::loadLocale_ret_t CLocaleManager::loadLocale(const char * const locale)
{
	unsigned int i;
	FILE * fd;
	initialize_iso639_map();
	// Try each search path until the locale file opens.
	for (i = 0; i < 2; i++)
	{
		std::string filename = path[i];
		filename += locale;
		filename += ".locale";
		fd = fopen(filename.c_str(), "r");
		if (fd)
			break;
	}
	if (i == 2)
	{
		perror("cannot read locale");
		return NO_SUCH_LOCALE;
	}
	// Reset any previously loaded translations back to the built-in names.
	for (unsigned j = 0; j < (sizeof(locale_real_names)/sizeof(const char *)); j++)
		if (localeData[j] != locale_real_names[j])
		{
			free(localeData[j]);
			localeData[j] = (char *)locale_real_names[j];
		}
	char buf[1000];
	i = 1;
	unsigned int no_missing = 0;
	while(!feof(fd))
	{
		if(fgets(buf,sizeof(buf),fd)!=NULL)
		{
			// Split the line at the first space: buf = key, val = text.
			char * val = NULL;
			char * tmpptr = buf;
			for(; (*tmpptr!=10) && (*tmpptr!=13);tmpptr++)
			{
				if ((*tmpptr == ' ') && (val == NULL))
				{
					*tmpptr = 0;
					val = tmpptr + 1;
				}
			}
			*tmpptr = 0;
			std::string text = val ? val : "";
			// Unescape literal "\n" sequences into real newlines.
			int pos;
			do
			{
				pos = text.find("\\n");
				if ( pos!=-1 )
				{
					text.replace(pos, 2, "\n", 1);
				}
			} while ( ( pos != -1 ) );
			// Advance through locale_real_names until the key matches or
			// sorts after it; skipped names are reported as missing.
			int j;
			while (1)
			{
				j = (i >= (sizeof(locale_real_names)/sizeof(const char *))) ? -1 : strcmp(buf, locale_real_names[i]);
				if (j > 0)
				{
					if (no_missing++ < max_error_messages) {
						printf("[%s.locale] missing entry: %s\n", locale, locale_real_names[i]);
						if (no_missing == max_error_messages)
							printf("[%s.locale] messages for further missing entries will be suppressed\n", locale);
					}
					i++;
				}
				else
					break;
			}
			if (j == 0)
			{
				localeData[i] = strdup(text.c_str());
				i++;
			}
			else
			{
				printf("[%s.locale] superfluous entry: %s\n", locale, buf);
			}
		}
	}
	if (no_missing > 0)
		printf("[%s.locale] has %d missing entrys\n", locale, no_missing);
	fclose(fd);
#warning TODO: implement real check to determine whether we need a font with more than Basic Latin & Latin-1 Supplement characters
	// Hard-coded list of locales known to need a full Unicode font.
	return (
		(strcmp(locale, "bosanski") == 0) ||
		(strcmp(locale, "ellinika") == 0) ||
		(strcmp(locale, "russkij") == 0) ||
		(strcmp(locale, "utf8") == 0)
/* utf8.locale is a generic name that can be used for new locales which need characters outside the ISO-8859-1 character set */
		) ? UNICODE_FONT : ISO_8859_1_FONT;
}
/* Returns the string registered for the given locale key (translation if
 * loaded, otherwise the built-in fallback set by the constructor). */
const char * CLocaleManager::getText(const neutrino_locale_t keyName) const
{
	const char * const text = localeData[keyName];
	return text;
}
/* Locale ids for weekday names, indexed like struct tm's tm_wday
 * (0 = Sunday). */
static const neutrino_locale_t locale_weekday[7] =
{
	LOCALE_DATE_SUN,
	LOCALE_DATE_MON,
	LOCALE_DATE_TUE,
	LOCALE_DATE_WED,
	LOCALE_DATE_THU,
	LOCALE_DATE_FRI,
	LOCALE_DATE_SAT
};

/* Locale ids for month names, indexed like struct tm's tm_mon
 * (0 = January). */
static const neutrino_locale_t locale_month[12] =
{
	LOCALE_DATE_JAN,
	LOCALE_DATE_FEB,
	LOCALE_DATE_MAR,
	LOCALE_DATE_APR,
	LOCALE_DATE_MAY,
	LOCALE_DATE_JUN,
	LOCALE_DATE_JUL,
	LOCALE_DATE_AUG,
	LOCALE_DATE_SEP,
	LOCALE_DATE_OCT,
	LOCALE_DATE_NOV,
	LOCALE_DATE_DEC
};
/* Maps struct tm's zero-based month field (tm_mon) to its locale id. */
neutrino_locale_t CLocaleManager::getMonth(const struct tm * struct_tm_p)
{
	const int monthIndex = struct_tm_p->tm_mon;
	return locale_month[monthIndex];
}
/* Maps a zero-based month index to its locale id; any out-of-range value
 * yields LOCALE_MESSAGEBOX_ERROR. */
neutrino_locale_t CLocaleManager::getMonth(const int mon)
{
	if ((mon < 0) || (mon > 11))
		return LOCALE_MESSAGEBOX_ERROR;
	return locale_month[mon];
}
/* Maps struct tm's zero-based weekday field (tm_wday, 0 = Sunday) to its
 * locale id. */
neutrino_locale_t CLocaleManager::getWeekday(const struct tm * struct_tm_p)
{
	const int weekdayIndex = struct_tm_p->tm_wday;
	return locale_weekday[weekdayIndex];
}
/* Maps a zero-based weekday index (0 = Sunday) to its locale id; any
 * out-of-range value yields LOCALE_MESSAGEBOX_ERROR. */
neutrino_locale_t CLocaleManager::getWeekday(const int wday)
{
	if ((wday < 0) || (wday > 6))
		return LOCALE_MESSAGEBOX_ERROR;
	return locale_weekday[wday];
}
| UkCvs/commando | apps/tuxbox/neutrino/src/system/localize.cpp | C++ | gpl-2.0 | 7,799 |
(function(A,w){function ma(){if(!c.isReady){try{s.documentElement.doScroll("left")}catch(a){setTimeout(ma,1);return}c.ready()}}function Qa(a,b){b.src?c.ajax({url:b.src,async:false,dataType:"script"}):c.globalEval(b.text||b.textContent||b.innerHTML||"");b.parentNode&&b.parentNode.removeChild(b)}function X(a,b,d,f,e,j){var i=a.length;if(typeof b==="object"){for(var o in b)X(a,o,b[o],f,e,d);return a}if(d!==w){f=!j&&f&&c.isFunction(d);for(o=0;o<i;o++)e(a[o],b,f?d.call(a[o],o,e(a[o],b)):d,j);return a}return i?
e(a[0],b):w}function J(){return(new Date).getTime()}function Y(){return false}function Z(){return true}function na(a,b,d){d[0].type=a;return c.event.handle.apply(b,d)}function oa(a){var b,d=[],f=[],e=arguments,j,i,o,k,n,r;i=c.data(this,"events");if(!(a.liveFired===this||!i||!i.live||a.button&&a.type==="click")){a.liveFired=this;var u=i.live.slice(0);for(k=0;k<u.length;k++){i=u[k];i.origType.replace(O,"")===a.type?f.push(i.selector):u.splice(k--,1)}j=c(a.target).closest(f,a.currentTarget);n=0;for(r=
j.length;n<r;n++)for(k=0;k<u.length;k++){i=u[k];if(j[n].selector===i.selector){o=j[n].elem;f=null;if(i.preType==="mouseenter"||i.preType==="mouseleave")f=c(a.relatedTarget).closest(i.selector)[0];if(!f||f!==o)d.push({elem:o,handleObj:i})}}n=0;for(r=d.length;n<r;n++){j=d[n];a.currentTarget=j.elem;a.data=j.handleObj.data;a.handleObj=j.handleObj;if(j.handleObj.origHandler.apply(j.elem,e)===false){b=false;break}}return b}}function pa(a,b){return"live."+(a&&a!=="*"?a+".":"")+b.replace(/\./g,"`").replace(/ /g,
"&")}function qa(a){return!a||!a.parentNode||a.parentNode.nodeType===11}function ra(a,b){var d=0;b.each(function(){if(this.nodeName===(a[d]&&a[d].nodeName)){var f=c.data(a[d++]),e=c.data(this,f);if(f=f&&f.events){delete e.handle;e.events={};for(var j in f)for(var i in f[j])c.event.add(this,j,f[j][i],f[j][i].data)}}})}function sa(a,b,d){var f,e,j;b=b&&b[0]?b[0].ownerDocument||b[0]:s;if(a.length===1&&typeof a[0]==="string"&&a[0].length<512&&b===s&&!ta.test(a[0])&&(c.support.checkClone||!ua.test(a[0]))){e=
true;if(j=c.fragments[a[0]])if(j!==1)f=j}if(!f){f=b.createDocumentFragment();c.clean(a,b,f,d)}if(e)c.fragments[a[0]]=j?f:1;return{fragment:f,cacheable:e}}function K(a,b){var d={};c.each(va.concat.apply([],va.slice(0,b)),function(){d[this]=a});return d}function wa(a){return"scrollTo"in a&&a.document?a:a.nodeType===9?a.defaultView||a.parentWindow:false}var c=function(a,b){return new c.fn.init(a,b)},Ra=A.jQuery,Sa=A.$,s=A.document,T,Ta=/^[^<]*(<[\w\W]+>)[^>]*$|^#([\w-]+)$/,Ua=/^.[^:#\[\.,]*$/,Va=/\S/,
Wa=/^(\s|\u00A0)+|(\s|\u00A0)+$/g,Xa=/^<(\w+)\s*\/?>(?:<\/\1>)?$/,P=navigator.userAgent,xa=false,Q=[],L,$=Object.prototype.toString,aa=Object.prototype.hasOwnProperty,ba=Array.prototype.push,R=Array.prototype.slice,ya=Array.prototype.indexOf;c.fn=c.prototype={init:function(a,b){var d,f;if(!a)return this;if(a.nodeType){this.context=this[0]=a;this.length=1;return this}if(a==="body"&&!b){this.context=s;this[0]=s.body;this.selector="body";this.length=1;return this}if(typeof a==="string")if((d=Ta.exec(a))&&
(d[1]||!b))if(d[1]){f=b?b.ownerDocument||b:s;if(a=Xa.exec(a))if(c.isPlainObject(b)){a=[s.createElement(a[1])];c.fn.attr.call(a,b,true)}else a=[f.createElement(a[1])];else{a=sa([d[1]],[f]);a=(a.cacheable?a.fragment.cloneNode(true):a.fragment).childNodes}return c.merge(this,a)}else{if(b=s.getElementById(d[2])){if(b.id!==d[2])return T.find(a);this.length=1;this[0]=b}this.context=s;this.selector=a;return this}else if(!b&&/^\w+$/.test(a)){this.selector=a;this.context=s;a=s.getElementsByTagName(a);return c.merge(this,
a)}else return!b||b.jquery?(b||T).find(a):c(b).find(a);else if(c.isFunction(a))return T.ready(a);if(a.selector!==w){this.selector=a.selector;this.context=a.context}return c.makeArray(a,this)},selector:"",jquery:"1.4.2",length:0,size:function(){return this.length},toArray:function(){return R.call(this,0)},get:function(a){return a==null?this.toArray():a<0?this.slice(a)[0]:this[a]},pushStack:function(a,b,d){var f=c();c.isArray(a)?ba.apply(f,a):c.merge(f,a);f.prevObject=this;f.context=this.context;if(b===
"find")f.selector=this.selector+(this.selector?" ":"")+d;else if(b)f.selector=this.selector+"."+b+"("+d+")";return f},each:function(a,b){return c.each(this,a,b)},ready:function(a){c.bindReady();if(c.isReady)a.call(s,c);else Q&&Q.push(a);return this},eq:function(a){return a===-1?this.slice(a):this.slice(a,+a+1)},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},slice:function(){return this.pushStack(R.apply(this,arguments),"slice",R.call(arguments).join(","))},map:function(a){return this.pushStack(c.map(this,
function(b,d){return a.call(b,d,b)}))},end:function(){return this.prevObject||c(null)},push:ba,sort:[].sort,splice:[].splice};c.fn.init.prototype=c.fn;c.extend=c.fn.extend=function(){var a=arguments[0]||{},b=1,d=arguments.length,f=false,e,j,i,o;if(typeof a==="boolean"){f=a;a=arguments[1]||{};b=2}if(typeof a!=="object"&&!c.isFunction(a))a={};if(d===b){a=this;--b}for(;b<d;b++)if((e=arguments[b])!=null)for(j in e){i=a[j];o=e[j];if(a!==o)if(f&&o&&(c.isPlainObject(o)||c.isArray(o))){i=i&&(c.isPlainObject(i)||
c.isArray(i))?i:c.isArray(o)?[]:{};a[j]=c.extend(f,i,o)}else if(o!==w)a[j]=o}return a};c.extend({noConflict:function(a){A.$=Sa;if(a)A.jQuery=Ra;return c},isReady:false,ready:function(){if(!c.isReady){if(!s.body)return setTimeout(c.ready,13);c.isReady=true;if(Q){for(var a,b=0;a=Q[b++];)a.call(s,c);Q=null}c.fn.triggerHandler&&c(s).triggerHandler("ready")}},bindReady:function(){if(!xa){xa=true;if(s.readyState==="complete")return c.ready();if(s.addEventListener){s.addEventListener("DOMContentLoaded",
L,false);A.addEventListener("load",c.ready,false)}else if(s.attachEvent){s.attachEvent("onreadystatechange",L);A.attachEvent("onload",c.ready);var a=false;try{a=A.frameElement==null}catch(b){}s.documentElement.doScroll&&a&&ma()}}},isFunction:function(a){return $.call(a)==="[object Function]"},isArray:function(a){return $.call(a)==="[object Array]"},isPlainObject:function(a){if(!a||$.call(a)!=="[object Object]"||a.nodeType||a.setInterval)return false;if(a.constructor&&!aa.call(a,"constructor")&&!aa.call(a.constructor.prototype,
"isPrototypeOf"))return false;var b;for(b in a);return b===w||aa.call(a,b)},isEmptyObject:function(a){for(var b in a)return false;return true},error:function(a){throw a;},parseJSON:function(a){if(typeof a!=="string"||!a)return null;a=c.trim(a);if(/^[\],:{}\s]*$/.test(a.replace(/\\(?:["\\\/bfnrt]|u[0-9a-fA-F]{4})/g,"@").replace(/"[^"\\\n\r]*"|true|false|null|-?\d+(?:\.\d*)?(?:[eE][+\-]?\d+)?/g,"]").replace(/(?:^|:|,)(?:\s*\[)+/g,"")))return A.JSON&&A.JSON.parse?A.JSON.parse(a):(new Function("return "+
a))();else c.error("Invalid JSON: "+a)},noop:function(){},globalEval:function(a){if(a&&Va.test(a)){var b=s.getElementsByTagName("head")[0]||s.documentElement,d=s.createElement("script");d.type="text/javascript";if(c.support.scriptEval)d.appendChild(s.createTextNode(a));else d.text=a;b.insertBefore(d,b.firstChild);b.removeChild(d)}},nodeName:function(a,b){return a.nodeName&&a.nodeName.toUpperCase()===b.toUpperCase()},each:function(a,b,d){var f,e=0,j=a.length,i=j===w||c.isFunction(a);if(d)if(i)for(f in a){if(b.apply(a[f],
d)===false)break}else for(;e<j;){if(b.apply(a[e++],d)===false)break}else if(i)for(f in a){if(b.call(a[f],f,a[f])===false)break}else for(d=a[0];e<j&&b.call(d,e,d)!==false;d=a[++e]);return a},trim:function(a){return(a||"").replace(Wa,"")},makeArray:function(a,b){b=b||[];if(a!=null)a.length==null||typeof a==="string"||c.isFunction(a)||typeof a!=="function"&&a.setInterval?ba.call(b,a):c.merge(b,a);return b},inArray:function(a,b){if(b.indexOf)return b.indexOf(a);for(var d=0,f=b.length;d<f;d++)if(b[d]===
a)return d;return-1},merge:function(a,b){var d=a.length,f=0;if(typeof b.length==="number")for(var e=b.length;f<e;f++)a[d++]=b[f];else for(;b[f]!==w;)a[d++]=b[f++];a.length=d;return a},grep:function(a,b,d){for(var f=[],e=0,j=a.length;e<j;e++)!d!==!b(a[e],e)&&f.push(a[e]);return f},map:function(a,b,d){for(var f=[],e,j=0,i=a.length;j<i;j++){e=b(a[j],j,d);if(e!=null)f[f.length]=e}return f.concat.apply([],f)},guid:1,proxy:function(a,b,d){if(arguments.length===2)if(typeof b==="string"){d=a;a=d[b];b=w}else if(b&&
!c.isFunction(b)){d=b;b=w}if(!b&&a)b=function(){return a.apply(d||this,arguments)};if(a)b.guid=a.guid=a.guid||b.guid||c.guid++;return b},uaMatch:function(a){a=a.toLowerCase();a=/(webkit)[ \/]([\w.]+)/.exec(a)||/(opera)(?:.*version)?[ \/]([\w.]+)/.exec(a)||/(msie) ([\w.]+)/.exec(a)||!/compatible/.test(a)&&/(mozilla)(?:.*? rv:([\w.]+))?/.exec(a)||[];return{browser:a[1]||"",version:a[2]||"0"}},browser:{}});P=c.uaMatch(P);if(P.browser){c.browser[P.browser]=true;c.browser.version=P.version}if(c.browser.webkit)c.browser.safari=
true;if(ya)c.inArray=function(a,b){return ya.call(b,a)};T=c(s);if(s.addEventListener)L=function(){s.removeEventListener("DOMContentLoaded",L,false);c.ready()};else if(s.attachEvent)L=function(){if(s.readyState==="complete"){s.detachEvent("onreadystatechange",L);c.ready()}};(function(){c.support={};var a=s.documentElement,b=s.createElement("script"),d=s.createElement("div"),f="script"+J();d.style.display="none";d.innerHTML=" <link/><table></table><a href='/a' style='color:red;float:left;opacity:.55;'>a</a><input type='checkbox'/>";
var e=d.getElementsByTagName("*"),j=d.getElementsByTagName("a")[0];if(!(!e||!e.length||!j)){c.support={leadingWhitespace:d.firstChild.nodeType===3,tbody:!d.getElementsByTagName("tbody").length,htmlSerialize:!!d.getElementsByTagName("link").length,style:/red/.test(j.getAttribute("style")),hrefNormalized:j.getAttribute("href")==="/a",opacity:/^0.55$/.test(j.style.opacity),cssFloat:!!j.style.cssFloat,checkOn:d.getElementsByTagName("input")[0].value==="on",optSelected:s.createElement("select").appendChild(s.createElement("option")).selected,
parentNode:d.removeChild(d.appendChild(s.createElement("div"))).parentNode===null,deleteExpando:true,checkClone:false,scriptEval:false,noCloneEvent:true,boxModel:null};b.type="text/javascript";try{b.appendChild(s.createTextNode("window."+f+"=1;"))}catch(i){}a.insertBefore(b,a.firstChild);if(A[f]){c.support.scriptEval=true;delete A[f]}try{delete b.test}catch(o){c.support.deleteExpando=false}a.removeChild(b);if(d.attachEvent&&d.fireEvent){d.attachEvent("onclick",function k(){c.support.noCloneEvent=
false;d.detachEvent("onclick",k)});d.cloneNode(true).fireEvent("onclick")}d=s.createElement("div");d.innerHTML="<input type='radio' name='radiotest' checked='checked'/>";a=s.createDocumentFragment();a.appendChild(d.firstChild);c.support.checkClone=a.cloneNode(true).cloneNode(true).lastChild.checked;c(function(){var k=s.createElement("div");k.style.width=k.style.paddingLeft="1px";s.body.appendChild(k);c.boxModel=c.support.boxModel=k.offsetWidth===2;s.body.removeChild(k).style.display="none"});a=function(k){var n=
s.createElement("div");k="on"+k;var r=k in n;if(!r){n.setAttribute(k,"return;");r=typeof n[k]==="function"}return r};c.support.submitBubbles=a("submit");c.support.changeBubbles=a("change");a=b=d=e=j=null}})();c.props={"for":"htmlFor","class":"className",readonly:"readOnly",maxlength:"maxLength",cellspacing:"cellSpacing",rowspan:"rowSpan",colspan:"colSpan",tabindex:"tabIndex",usemap:"useMap",frameborder:"frameBorder"};var G="jQuery"+J(),Ya=0,za={};c.extend({cache:{},expando:G,noData:{embed:true,object:true,
applet:true},data:function(a,b,d){if(!(a.nodeName&&c.noData[a.nodeName.toLowerCase()])){a=a==A?za:a;var f=a[G],e=c.cache;if(!f&&typeof b==="string"&&d===w)return null;f||(f=++Ya);if(typeof b==="object"){a[G]=f;e[f]=c.extend(true,{},b)}else if(!e[f]){a[G]=f;e[f]={}}a=e[f];if(d!==w)a[b]=d;return typeof b==="string"?a[b]:a}},removeData:function(a,b){if(!(a.nodeName&&c.noData[a.nodeName.toLowerCase()])){a=a==A?za:a;var d=a[G],f=c.cache,e=f[d];if(b){if(e){delete e[b];c.isEmptyObject(e)&&c.removeData(a)}}else{if(c.support.deleteExpando)delete a[c.expando];
else a.removeAttribute&&a.removeAttribute(c.expando);delete f[d]}}}});c.fn.extend({data:function(a,b){if(typeof a==="undefined"&&this.length)return c.data(this[0]);else if(typeof a==="object")return this.each(function(){c.data(this,a)});var d=a.split(".");d[1]=d[1]?"."+d[1]:"";if(b===w){var f=this.triggerHandler("getData"+d[1]+"!",[d[0]]);if(f===w&&this.length)f=c.data(this[0],a);return f===w&&d[1]?this.data(d[0]):f}else return this.trigger("setData"+d[1]+"!",[d[0],b]).each(function(){c.data(this,
a,b)})},removeData:function(a){return this.each(function(){c.removeData(this,a)})}});c.extend({queue:function(a,b,d){if(a){b=(b||"fx")+"queue";var f=c.data(a,b);if(!d)return f||[];if(!f||c.isArray(d))f=c.data(a,b,c.makeArray(d));else f.push(d);return f}},dequeue:function(a,b){b=b||"fx";var d=c.queue(a,b),f=d.shift();if(f==="inprogress")f=d.shift();if(f){b==="fx"&&d.unshift("inprogress");f.call(a,function(){c.dequeue(a,b)})}}});c.fn.extend({queue:function(a,b){if(typeof a!=="string"){b=a;a="fx"}if(b===
w)return c.queue(this[0],a);return this.each(function(){var d=c.queue(this,a,b);a==="fx"&&d[0]!=="inprogress"&&c.dequeue(this,a)})},dequeue:function(a){return this.each(function(){c.dequeue(this,a)})},delay:function(a,b){a=c.fx?c.fx.speeds[a]||a:a;b=b||"fx";return this.queue(b,function(){var d=this;setTimeout(function(){c.dequeue(d,b)},a)})},clearQueue:function(a){return this.queue(a||"fx",[])}});var Aa=/[\n\t]/g,ca=/\s+/,Za=/\r/g,$a=/href|src|style/,ab=/(button|input)/i,bb=/(button|input|object|select|textarea)/i,
cb=/^(a|area)$/i,Ba=/radio|checkbox/;c.fn.extend({attr:function(a,b){return X(this,a,b,true,c.attr)},removeAttr:function(a){return this.each(function(){c.attr(this,a,"");this.nodeType===1&&this.removeAttribute(a)})},addClass:function(a){if(c.isFunction(a))return this.each(function(n){var r=c(this);r.addClass(a.call(this,n,r.attr("class")))});if(a&&typeof a==="string")for(var b=(a||"").split(ca),d=0,f=this.length;d<f;d++){var e=this[d];if(e.nodeType===1)if(e.className){for(var j=" "+e.className+" ",
i=e.className,o=0,k=b.length;o<k;o++)if(j.indexOf(" "+b[o]+" ")<0)i+=" "+b[o];e.className=c.trim(i)}else e.className=a}return this},removeClass:function(a){if(c.isFunction(a))return this.each(function(k){var n=c(this);n.removeClass(a.call(this,k,n.attr("class")))});if(a&&typeof a==="string"||a===w)for(var b=(a||"").split(ca),d=0,f=this.length;d<f;d++){var e=this[d];if(e.nodeType===1&&e.className)if(a){for(var j=(" "+e.className+" ").replace(Aa," "),i=0,o=b.length;i<o;i++)j=j.replace(" "+b[i]+" ",
" ");e.className=c.trim(j)}else e.className=""}return this},toggleClass:function(a,b){var d=typeof a,f=typeof b==="boolean";if(c.isFunction(a))return this.each(function(e){var j=c(this);j.toggleClass(a.call(this,e,j.attr("class"),b),b)});return this.each(function(){if(d==="string")for(var e,j=0,i=c(this),o=b,k=a.split(ca);e=k[j++];){o=f?o:!i.hasClass(e);i[o?"addClass":"removeClass"](e)}else if(d==="undefined"||d==="boolean"){this.className&&c.data(this,"__className__",this.className);this.className=
this.className||a===false?"":c.data(this,"__className__")||""}})},hasClass:function(a){a=" "+a+" ";for(var b=0,d=this.length;b<d;b++)if((" "+this[b].className+" ").replace(Aa," ").indexOf(a)>-1)return true;return false},val:function(a){if(a===w){var b=this[0];if(b){if(c.nodeName(b,"option"))return(b.attributes.value||{}).specified?b.value:b.text;if(c.nodeName(b,"select")){var d=b.selectedIndex,f=[],e=b.options;b=b.type==="select-one";if(d<0)return null;var j=b?d:0;for(d=b?d+1:e.length;j<d;j++){var i=
e[j];if(i.selected){a=c(i).val();if(b)return a;f.push(a)}}return f}if(Ba.test(b.type)&&!c.support.checkOn)return b.getAttribute("value")===null?"on":b.value;return(b.value||"").replace(Za,"")}return w}var o=c.isFunction(a);return this.each(function(k){var n=c(this),r=a;if(this.nodeType===1){if(o)r=a.call(this,k,n.val());if(typeof r==="number")r+="";if(c.isArray(r)&&Ba.test(this.type))this.checked=c.inArray(n.val(),r)>=0;else if(c.nodeName(this,"select")){var u=c.makeArray(r);c("option",this).each(function(){this.selected=
c.inArray(c(this).val(),u)>=0});if(!u.length)this.selectedIndex=-1}else this.value=r}})}});c.extend({attrFn:{val:true,css:true,html:true,text:true,data:true,width:true,height:true,offset:true},attr:function(a,b,d,f){if(!a||a.nodeType===3||a.nodeType===8)return w;if(f&&b in c.attrFn)return c(a)[b](d);f=a.nodeType!==1||!c.isXMLDoc(a);var e=d!==w;b=f&&c.props[b]||b;if(a.nodeType===1){var j=$a.test(b);if(b in a&&f&&!j){if(e){b==="type"&&ab.test(a.nodeName)&&a.parentNode&&c.error("type property can't be changed");
a[b]=d}if(c.nodeName(a,"form")&&a.getAttributeNode(b))return a.getAttributeNode(b).nodeValue;if(b==="tabIndex")return(b=a.getAttributeNode("tabIndex"))&&b.specified?b.value:bb.test(a.nodeName)||cb.test(a.nodeName)&&a.href?0:w;return a[b]}if(!c.support.style&&f&&b==="style"){if(e)a.style.cssText=""+d;return a.style.cssText}e&&a.setAttribute(b,""+d);a=!c.support.hrefNormalized&&f&&j?a.getAttribute(b,2):a.getAttribute(b);return a===null?w:a}return c.style(a,b,d)}});var O=/\.(.*)$/,db=function(a){return a.replace(/[^\w\s\.\|`]/g,
function(b){return"\\"+b})};c.event={add:function(a,b,d,f){if(!(a.nodeType===3||a.nodeType===8)){if(a.setInterval&&a!==A&&!a.frameElement)a=A;var e,j;if(d.handler){e=d;d=e.handler}if(!d.guid)d.guid=c.guid++;if(j=c.data(a)){var i=j.events=j.events||{},o=j.handle;if(!o)j.handle=o=function(){return typeof c!=="undefined"&&!c.event.triggered?c.event.handle.apply(o.elem,arguments):w};o.elem=a;b=b.split(" ");for(var k,n=0,r;k=b[n++];){j=e?c.extend({},e):{handler:d,data:f};if(k.indexOf(".")>-1){r=k.split(".");
k=r.shift();j.namespace=r.slice(0).sort().join(".")}else{r=[];j.namespace=""}j.type=k;j.guid=d.guid;var u=i[k],z=c.event.special[k]||{};if(!u){u=i[k]=[];if(!z.setup||z.setup.call(a,f,r,o)===false)if(a.addEventListener)a.addEventListener(k,o,false);else a.attachEvent&&a.attachEvent("on"+k,o)}if(z.add){z.add.call(a,j);if(!j.handler.guid)j.handler.guid=d.guid}u.push(j);c.event.global[k]=true}a=null}}},global:{},remove:function(a,b,d,f){if(!(a.nodeType===3||a.nodeType===8)){var e,j=0,i,o,k,n,r,u,z=c.data(a),
C=z&&z.events;if(z&&C){if(b&&b.type){d=b.handler;b=b.type}if(!b||typeof b==="string"&&b.charAt(0)==="."){b=b||"";for(e in C)c.event.remove(a,e+b)}else{for(b=b.split(" ");e=b[j++];){n=e;i=e.indexOf(".")<0;o=[];if(!i){o=e.split(".");e=o.shift();k=new RegExp("(^|\\.)"+c.map(o.slice(0).sort(),db).join("\\.(?:.*\\.)?")+"(\\.|$)")}if(r=C[e])if(d){n=c.event.special[e]||{};for(B=f||0;B<r.length;B++){u=r[B];if(d.guid===u.guid){if(i||k.test(u.namespace)){f==null&&r.splice(B--,1);n.remove&&n.remove.call(a,u)}if(f!=
null)break}}if(r.length===0||f!=null&&r.length===1){if(!n.teardown||n.teardown.call(a,o)===false)Ca(a,e,z.handle);delete C[e]}}else for(var B=0;B<r.length;B++){u=r[B];if(i||k.test(u.namespace)){c.event.remove(a,n,u.handler,B);r.splice(B--,1)}}}if(c.isEmptyObject(C)){if(b=z.handle)b.elem=null;delete z.events;delete z.handle;c.isEmptyObject(z)&&c.removeData(a)}}}}},trigger:function(a,b,d,f){var e=a.type||a;if(!f){a=typeof a==="object"?a[G]?a:c.extend(c.Event(e),a):c.Event(e);if(e.indexOf("!")>=0){a.type=
e=e.slice(0,-1);a.exclusive=true}if(!d){a.stopPropagation();c.event.global[e]&&c.each(c.cache,function(){this.events&&this.events[e]&&c.event.trigger(a,b,this.handle.elem)})}if(!d||d.nodeType===3||d.nodeType===8)return w;a.result=w;a.target=d;b=c.makeArray(b);b.unshift(a)}a.currentTarget=d;(f=c.data(d,"handle"))&&f.apply(d,b);f=d.parentNode||d.ownerDocument;try{if(!(d&&d.nodeName&&c.noData[d.nodeName.toLowerCase()]))if(d["on"+e]&&d["on"+e].apply(d,b)===false)a.result=false}catch(j){}if(!a.isPropagationStopped()&&
f)c.event.trigger(a,b,f,true);else if(!a.isDefaultPrevented()){f=a.target;var i,o=c.nodeName(f,"a")&&e==="click",k=c.event.special[e]||{};if((!k._default||k._default.call(d,a)===false)&&!o&&!(f&&f.nodeName&&c.noData[f.nodeName.toLowerCase()])){try{if(f[e]){if(i=f["on"+e])f["on"+e]=null;c.event.triggered=true;f[e]()}}catch(n){}if(i)f["on"+e]=i;c.event.triggered=false}}},handle:function(a){var b,d,f,e;a=arguments[0]=c.event.fix(a||A.event);a.currentTarget=this;b=a.type.indexOf(".")<0&&!a.exclusive;
if(!b){d=a.type.split(".");a.type=d.shift();f=new RegExp("(^|\\.)"+d.slice(0).sort().join("\\.(?:.*\\.)?")+"(\\.|$)")}e=c.data(this,"events");d=e[a.type];if(e&&d){d=d.slice(0);e=0;for(var j=d.length;e<j;e++){var i=d[e];if(b||f.test(i.namespace)){a.handler=i.handler;a.data=i.data;a.handleObj=i;i=i.handler.apply(this,arguments);if(i!==w){a.result=i;if(i===false){a.preventDefault();a.stopPropagation()}}if(a.isImmediatePropagationStopped())break}}}return a.result},props:"altKey attrChange attrName bubbles button cancelable charCode clientX clientY ctrlKey currentTarget data detail eventPhase fromElement handler keyCode layerX layerY metaKey newValue offsetX offsetY originalTarget pageX pageY prevValue relatedNode relatedTarget screenX screenY shiftKey srcElement target toElement view wheelDelta which".split(" "),
fix:function(a){if(a[G])return a;var b=a;a=c.Event(b);for(var d=this.props.length,f;d;){f=this.props[--d];a[f]=b[f]}if(!a.target)a.target=a.srcElement||s;if(a.target.nodeType===3)a.target=a.target.parentNode;if(!a.relatedTarget&&a.fromElement)a.relatedTarget=a.fromElement===a.target?a.toElement:a.fromElement;if(a.pageX==null&&a.clientX!=null){b=s.documentElement;d=s.body;a.pageX=a.clientX+(b&&b.scrollLeft||d&&d.scrollLeft||0)-(b&&b.clientLeft||d&&d.clientLeft||0);a.pageY=a.clientY+(b&&b.scrollTop||
d&&d.scrollTop||0)-(b&&b.clientTop||d&&d.clientTop||0)}if(!a.which&&(a.charCode||a.charCode===0?a.charCode:a.keyCode))a.which=a.charCode||a.keyCode;if(!a.metaKey&&a.ctrlKey)a.metaKey=a.ctrlKey;if(!a.which&&a.button!==w)a.which=a.button&1?1:a.button&2?3:a.button&4?2:0;return a},guid:1E8,proxy:c.proxy,special:{ready:{setup:c.bindReady,teardown:c.noop},live:{add:function(a){c.event.add(this,a.origType,c.extend({},a,{handler:oa}))},remove:function(a){var b=true,d=a.origType.replace(O,"");c.each(c.data(this,
"events").live||[],function(){if(d===this.origType.replace(O,""))return b=false});b&&c.event.remove(this,a.origType,oa)}},beforeunload:{setup:function(a,b,d){if(this.setInterval)this.onbeforeunload=d;return false},teardown:function(a,b){if(this.onbeforeunload===b)this.onbeforeunload=null}}}};var Ca=s.removeEventListener?function(a,b,d){a.removeEventListener(b,d,false)}:function(a,b,d){a.detachEvent("on"+b,d)};c.Event=function(a){if(!this.preventDefault)return new c.Event(a);if(a&&a.type){this.originalEvent=
a;this.type=a.type}else this.type=a;this.timeStamp=J();this[G]=true};c.Event.prototype={preventDefault:function(){this.isDefaultPrevented=Z;var a=this.originalEvent;if(a){a.preventDefault&&a.preventDefault();a.returnValue=false}},stopPropagation:function(){this.isPropagationStopped=Z;var a=this.originalEvent;if(a){a.stopPropagation&&a.stopPropagation();a.cancelBubble=true}},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=Z;this.stopPropagation()},isDefaultPrevented:Y,isPropagationStopped:Y,
isImmediatePropagationStopped:Y};var Da=function(a){var b=a.relatedTarget;try{for(;b&&b!==this;)b=b.parentNode;if(b!==this){a.type=a.data;c.event.handle.apply(this,arguments)}}catch(d){}},Ea=function(a){a.type=a.data;c.event.handle.apply(this,arguments)};c.each({mouseenter:"mouseover",mouseleave:"mouseout"},function(a,b){c.event.special[a]={setup:function(d){c.event.add(this,b,d&&d.selector?Ea:Da,a)},teardown:function(d){c.event.remove(this,b,d&&d.selector?Ea:Da)}}});if(!c.support.submitBubbles)c.event.special.submit=
{setup:function(){if(this.nodeName.toLowerCase()!=="form"){c.event.add(this,"click.specialSubmit",function(a){var b=a.target,d=b.type;if((d==="submit"||d==="image")&&c(b).closest("form").length)return na("submit",this,arguments)});c.event.add(this,"keypress.specialSubmit",function(a){var b=a.target,d=b.type;if((d==="text"||d==="password")&&c(b).closest("form").length&&a.keyCode===13)return na("submit",this,arguments)})}else return false},teardown:function(){c.event.remove(this,".specialSubmit")}};
if(!c.support.changeBubbles){var da=/textarea|input|select/i,ea,Fa=function(a){var b=a.type,d=a.value;if(b==="radio"||b==="checkbox")d=a.checked;else if(b==="select-multiple")d=a.selectedIndex>-1?c.map(a.options,function(f){return f.selected}).join("-"):"";else if(a.nodeName.toLowerCase()==="select")d=a.selectedIndex;return d},fa=function(a,b){var d=a.target,f,e;if(!(!da.test(d.nodeName)||d.readOnly)){f=c.data(d,"_change_data");e=Fa(d);if(a.type!=="focusout"||d.type!=="radio")c.data(d,"_change_data",
e);if(!(f===w||e===f))if(f!=null||e){a.type="change";return c.event.trigger(a,b,d)}}};c.event.special.change={filters:{focusout:fa,click:function(a){var b=a.target,d=b.type;if(d==="radio"||d==="checkbox"||b.nodeName.toLowerCase()==="select")return fa.call(this,a)},keydown:function(a){var b=a.target,d=b.type;if(a.keyCode===13&&b.nodeName.toLowerCase()!=="textarea"||a.keyCode===32&&(d==="checkbox"||d==="radio")||d==="select-multiple")return fa.call(this,a)},beforeactivate:function(a){a=a.target;c.data(a,
"_change_data",Fa(a))}},setup:function(){if(this.type==="file")return false;for(var a in ea)c.event.add(this,a+".specialChange",ea[a]);return da.test(this.nodeName)},teardown:function(){c.event.remove(this,".specialChange");return da.test(this.nodeName)}};ea=c.event.special.change.filters}s.addEventListener&&c.each({focus:"focusin",blur:"focusout"},function(a,b){function d(f){f=c.event.fix(f);f.type=b;return c.event.handle.call(this,f)}c.event.special[b]={setup:function(){this.addEventListener(a,
d,true)},teardown:function(){this.removeEventListener(a,d,true)}}});c.each(["bind","one"],function(a,b){c.fn[b]=function(d,f,e){if(typeof d==="object"){for(var j in d)this[b](j,f,d[j],e);return this}if(c.isFunction(f)){e=f;f=w}var i=b==="one"?c.proxy(e,function(k){c(this).unbind(k,i);return e.apply(this,arguments)}):e;if(d==="unload"&&b!=="one")this.one(d,f,e);else{j=0;for(var o=this.length;j<o;j++)c.event.add(this[j],d,i,f)}return this}});c.fn.extend({unbind:function(a,b){if(typeof a==="object"&&
!a.preventDefault)for(var d in a)this.unbind(d,a[d]);else{d=0;for(var f=this.length;d<f;d++)c.event.remove(this[d],a,b)}return this},delegate:function(a,b,d,f){return this.live(b,d,f,a)},undelegate:function(a,b,d){return arguments.length===0?this.unbind("live"):this.die(b,null,d,a)},trigger:function(a,b){return this.each(function(){c.event.trigger(a,b,this)})},triggerHandler:function(a,b){if(this[0]){a=c.Event(a);a.preventDefault();a.stopPropagation();c.event.trigger(a,b,this[0]);return a.result}},
toggle:function(a){for(var b=arguments,d=1;d<b.length;)c.proxy(a,b[d++]);return this.click(c.proxy(a,function(f){var e=(c.data(this,"lastToggle"+a.guid)||0)%d;c.data(this,"lastToggle"+a.guid,e+1);f.preventDefault();return b[e].apply(this,arguments)||false}))},hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)}});var Ga={focus:"focusin",blur:"focusout",mouseenter:"mouseover",mouseleave:"mouseout"};c.each(["live","die"],function(a,b){c.fn[b]=function(d,f,e,j){var i,o=0,k,n,r=j||this.selector,
u=j?this:c(this.context);if(c.isFunction(f)){e=f;f=w}for(d=(d||"").split(" ");(i=d[o++])!=null;){j=O.exec(i);k="";if(j){k=j[0];i=i.replace(O,"")}if(i==="hover")d.push("mouseenter"+k,"mouseleave"+k);else{n=i;if(i==="focus"||i==="blur"){d.push(Ga[i]+k);i+=k}else i=(Ga[i]||i)+k;b==="live"?u.each(function(){c.event.add(this,pa(i,r),{data:f,selector:r,handler:e,origType:i,origHandler:e,preType:n})}):u.unbind(pa(i,r),e)}}return this}});c.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error".split(" "),
function(a,b){c.fn[b]=function(d){return d?this.bind(b,d):this.trigger(b)};if(c.attrFn)c.attrFn[b]=true});A.attachEvent&&!A.addEventListener&&A.attachEvent("onunload",function(){for(var a in c.cache)if(c.cache[a].handle)try{c.event.remove(c.cache[a].handle.elem)}catch(b){}});(function(){function a(g){for(var h="",l,m=0;g[m];m++){l=g[m];if(l.nodeType===3||l.nodeType===4)h+=l.nodeValue;else if(l.nodeType!==8)h+=a(l.childNodes)}return h}function b(g,h,l,m,q,p){q=0;for(var v=m.length;q<v;q++){var t=m[q];
if(t){t=t[g];for(var y=false;t;){if(t.sizcache===l){y=m[t.sizset];break}if(t.nodeType===1&&!p){t.sizcache=l;t.sizset=q}if(t.nodeName.toLowerCase()===h){y=t;break}t=t[g]}m[q]=y}}}function d(g,h,l,m,q,p){q=0;for(var v=m.length;q<v;q++){var t=m[q];if(t){t=t[g];for(var y=false;t;){if(t.sizcache===l){y=m[t.sizset];break}if(t.nodeType===1){if(!p){t.sizcache=l;t.sizset=q}if(typeof h!=="string"){if(t===h){y=true;break}}else if(k.filter(h,[t]).length>0){y=t;break}}t=t[g]}m[q]=y}}}var f=/((?:\((?:\([^()]+\)|[^()]+)+\)|\[(?:\[[^[\]]*\]|['"][^'"]*['"]|[^[\]'"]+)+\]|\\.|[^ >+~,(\[\\]+)+|[>+~])(\s*,\s*)?((?:.|\r|\n)*)/g,
e=0,j=Object.prototype.toString,i=false,o=true;[0,0].sort(function(){o=false;return 0});var k=function(g,h,l,m){l=l||[];var q=h=h||s;if(h.nodeType!==1&&h.nodeType!==9)return[];if(!g||typeof g!=="string")return l;for(var p=[],v,t,y,S,H=true,M=x(h),I=g;(f.exec(""),v=f.exec(I))!==null;){I=v[3];p.push(v[1]);if(v[2]){S=v[3];break}}if(p.length>1&&r.exec(g))if(p.length===2&&n.relative[p[0]])t=ga(p[0]+p[1],h);else for(t=n.relative[p[0]]?[h]:k(p.shift(),h);p.length;){g=p.shift();if(n.relative[g])g+=p.shift();
t=ga(g,t)}else{if(!m&&p.length>1&&h.nodeType===9&&!M&&n.match.ID.test(p[0])&&!n.match.ID.test(p[p.length-1])){v=k.find(p.shift(),h,M);h=v.expr?k.filter(v.expr,v.set)[0]:v.set[0]}if(h){v=m?{expr:p.pop(),set:z(m)}:k.find(p.pop(),p.length===1&&(p[0]==="~"||p[0]==="+")&&h.parentNode?h.parentNode:h,M);t=v.expr?k.filter(v.expr,v.set):v.set;if(p.length>0)y=z(t);else H=false;for(;p.length;){var D=p.pop();v=D;if(n.relative[D])v=p.pop();else D="";if(v==null)v=h;n.relative[D](y,v,M)}}else y=[]}y||(y=t);y||k.error(D||
g);if(j.call(y)==="[object Array]")if(H)if(h&&h.nodeType===1)for(g=0;y[g]!=null;g++){if(y[g]&&(y[g]===true||y[g].nodeType===1&&E(h,y[g])))l.push(t[g])}else for(g=0;y[g]!=null;g++)y[g]&&y[g].nodeType===1&&l.push(t[g]);else l.push.apply(l,y);else z(y,l);if(S){k(S,q,l,m);k.uniqueSort(l)}return l};k.uniqueSort=function(g){if(B){i=o;g.sort(B);if(i)for(var h=1;h<g.length;h++)g[h]===g[h-1]&&g.splice(h--,1)}return g};k.matches=function(g,h){return k(g,null,null,h)};k.find=function(g,h,l){var m,q;if(!g)return[];
for(var p=0,v=n.order.length;p<v;p++){var t=n.order[p];if(q=n.leftMatch[t].exec(g)){var y=q[1];q.splice(1,1);if(y.substr(y.length-1)!=="\\"){q[1]=(q[1]||"").replace(/\\/g,"");m=n.find[t](q,h,l);if(m!=null){g=g.replace(n.match[t],"");break}}}}m||(m=h.getElementsByTagName("*"));return{set:m,expr:g}};k.filter=function(g,h,l,m){for(var q=g,p=[],v=h,t,y,S=h&&h[0]&&x(h[0]);g&&h.length;){for(var H in n.filter)if((t=n.leftMatch[H].exec(g))!=null&&t[2]){var M=n.filter[H],I,D;D=t[1];y=false;t.splice(1,1);if(D.substr(D.length-
1)!=="\\"){if(v===p)p=[];if(n.preFilter[H])if(t=n.preFilter[H](t,v,l,p,m,S)){if(t===true)continue}else y=I=true;if(t)for(var U=0;(D=v[U])!=null;U++)if(D){I=M(D,t,U,v);var Ha=m^!!I;if(l&&I!=null)if(Ha)y=true;else v[U]=false;else if(Ha){p.push(D);y=true}}if(I!==w){l||(v=p);g=g.replace(n.match[H],"");if(!y)return[];break}}}if(g===q)if(y==null)k.error(g);else break;q=g}return v};k.error=function(g){throw"Syntax error, unrecognized expression: "+g;};var n=k.selectors={order:["ID","NAME","TAG"],match:{ID:/#((?:[\w\u00c0-\uFFFF-]|\\.)+)/,
CLASS:/\.((?:[\w\u00c0-\uFFFF-]|\\.)+)/,NAME:/\[name=['"]*((?:[\w\u00c0-\uFFFF-]|\\.)+)['"]*\]/,ATTR:/\[\s*((?:[\w\u00c0-\uFFFF-]|\\.)+)\s*(?:(\S?=)\s*(['"]*)(.*?)\3|)\s*\]/,TAG:/^((?:[\w\u00c0-\uFFFF\*-]|\\.)+)/,CHILD:/:(only|nth|last|first)-child(?:\((even|odd|[\dn+-]*)\))?/,POS:/:(nth|eq|gt|lt|first|last|even|odd)(?:\((\d*)\))?(?=[^-]|$)/,PSEUDO:/:((?:[\w\u00c0-\uFFFF-]|\\.)+)(?:\((['"]?)((?:\([^\)]+\)|[^\(\)]*)+)\2\))?/},leftMatch:{},attrMap:{"class":"className","for":"htmlFor"},attrHandle:{href:function(g){return g.getAttribute("href")}},
relative:{"+":function(g,h){var l=typeof h==="string",m=l&&!/\W/.test(h);l=l&&!m;if(m)h=h.toLowerCase();m=0;for(var q=g.length,p;m<q;m++)if(p=g[m]){for(;(p=p.previousSibling)&&p.nodeType!==1;);g[m]=l||p&&p.nodeName.toLowerCase()===h?p||false:p===h}l&&k.filter(h,g,true)},">":function(g,h){var l=typeof h==="string";if(l&&!/\W/.test(h)){h=h.toLowerCase();for(var m=0,q=g.length;m<q;m++){var p=g[m];if(p){l=p.parentNode;g[m]=l.nodeName.toLowerCase()===h?l:false}}}else{m=0;for(q=g.length;m<q;m++)if(p=g[m])g[m]=
l?p.parentNode:p.parentNode===h;l&&k.filter(h,g,true)}},"":function(g,h,l){var m=e++,q=d;if(typeof h==="string"&&!/\W/.test(h)){var p=h=h.toLowerCase();q=b}q("parentNode",h,m,g,p,l)},"~":function(g,h,l){var m=e++,q=d;if(typeof h==="string"&&!/\W/.test(h)){var p=h=h.toLowerCase();q=b}q("previousSibling",h,m,g,p,l)}},find:{ID:function(g,h,l){if(typeof h.getElementById!=="undefined"&&!l)return(g=h.getElementById(g[1]))?[g]:[]},NAME:function(g,h){if(typeof h.getElementsByName!=="undefined"){var l=[];
h=h.getElementsByName(g[1]);for(var m=0,q=h.length;m<q;m++)h[m].getAttribute("name")===g[1]&&l.push(h[m]);return l.length===0?null:l}},TAG:function(g,h){return h.getElementsByTagName(g[1])}},preFilter:{CLASS:function(g,h,l,m,q,p){g=" "+g[1].replace(/\\/g,"")+" ";if(p)return g;p=0;for(var v;(v=h[p])!=null;p++)if(v)if(q^(v.className&&(" "+v.className+" ").replace(/[\t\n]/g," ").indexOf(g)>=0))l||m.push(v);else if(l)h[p]=false;return false},ID:function(g){return g[1].replace(/\\/g,"")},TAG:function(g){return g[1].toLowerCase()},
CHILD:function(g){if(g[1]==="nth"){var h=/(-?)(\d*)n((?:\+|-)?\d*)/.exec(g[2]==="even"&&"2n"||g[2]==="odd"&&"2n+1"||!/\D/.test(g[2])&&"0n+"+g[2]||g[2]);g[2]=h[1]+(h[2]||1)-0;g[3]=h[3]-0}g[0]=e++;return g},ATTR:function(g,h,l,m,q,p){h=g[1].replace(/\\/g,"");if(!p&&n.attrMap[h])g[1]=n.attrMap[h];if(g[2]==="~=")g[4]=" "+g[4]+" ";return g},PSEUDO:function(g,h,l,m,q){if(g[1]==="not")if((f.exec(g[3])||"").length>1||/^\w/.test(g[3]))g[3]=k(g[3],null,null,h);else{g=k.filter(g[3],h,l,true^q);l||m.push.apply(m,
g);return false}else if(n.match.POS.test(g[0])||n.match.CHILD.test(g[0]))return true;return g},POS:function(g){g.unshift(true);return g}},filters:{enabled:function(g){return g.disabled===false&&g.type!=="hidden"},disabled:function(g){return g.disabled===true},checked:function(g){return g.checked===true},selected:function(g){return g.selected===true},parent:function(g){return!!g.firstChild},empty:function(g){return!g.firstChild},has:function(g,h,l){return!!k(l[3],g).length},header:function(g){return/h\d/i.test(g.nodeName)},
text:function(g){return"text"===g.type},radio:function(g){return"radio"===g.type},checkbox:function(g){return"checkbox"===g.type},file:function(g){return"file"===g.type},password:function(g){return"password"===g.type},submit:function(g){return"submit"===g.type},image:function(g){return"image"===g.type},reset:function(g){return"reset"===g.type},button:function(g){return"button"===g.type||g.nodeName.toLowerCase()==="button"},input:function(g){return/input|select|textarea|button/i.test(g.nodeName)}},
setFilters:{first:function(g,h){return h===0},last:function(g,h,l,m){return h===m.length-1},even:function(g,h){return h%2===0},odd:function(g,h){return h%2===1},lt:function(g,h,l){return h<l[3]-0},gt:function(g,h,l){return h>l[3]-0},nth:function(g,h,l){return l[3]-0===h},eq:function(g,h,l){return l[3]-0===h}},filter:{PSEUDO:function(g,h,l,m){var q=h[1],p=n.filters[q];if(p)return p(g,l,h,m);else if(q==="contains")return(g.textContent||g.innerText||a([g])||"").indexOf(h[3])>=0;else if(q==="not"){h=
h[3];l=0;for(m=h.length;l<m;l++)if(h[l]===g)return false;return true}else k.error("Syntax error, unrecognized expression: "+q)},CHILD:function(g,h){var l=h[1],m=g;switch(l){case "only":case "first":for(;m=m.previousSibling;)if(m.nodeType===1)return false;if(l==="first")return true;m=g;case "last":for(;m=m.nextSibling;)if(m.nodeType===1)return false;return true;case "nth":l=h[2];var q=h[3];if(l===1&&q===0)return true;h=h[0];var p=g.parentNode;if(p&&(p.sizcache!==h||!g.nodeIndex)){var v=0;for(m=p.firstChild;m;m=
m.nextSibling)if(m.nodeType===1)m.nodeIndex=++v;p.sizcache=h}g=g.nodeIndex-q;return l===0?g===0:g%l===0&&g/l>=0}},ID:function(g,h){return g.nodeType===1&&g.getAttribute("id")===h},TAG:function(g,h){return h==="*"&&g.nodeType===1||g.nodeName.toLowerCase()===h},CLASS:function(g,h){return(" "+(g.className||g.getAttribute("class"))+" ").indexOf(h)>-1},ATTR:function(g,h){var l=h[1];g=n.attrHandle[l]?n.attrHandle[l](g):g[l]!=null?g[l]:g.getAttribute(l);l=g+"";var m=h[2];h=h[4];return g==null?m==="!=":m===
"="?l===h:m==="*="?l.indexOf(h)>=0:m==="~="?(" "+l+" ").indexOf(h)>=0:!h?l&&g!==false:m==="!="?l!==h:m==="^="?l.indexOf(h)===0:m==="$="?l.substr(l.length-h.length)===h:m==="|="?l===h||l.substr(0,h.length+1)===h+"-":false},POS:function(g,h,l,m){var q=n.setFilters[h[2]];if(q)return q(g,l,h,m)}}},r=n.match.POS;for(var u in n.match){n.match[u]=new RegExp(n.match[u].source+/(?![^\[]*\])(?![^\(]*\))/.source);n.leftMatch[u]=new RegExp(/(^(?:.|\r|\n)*?)/.source+n.match[u].source.replace(/\\(\d+)/g,function(g,
h){return"\\"+(h-0+1)}))}var z=function(g,h){g=Array.prototype.slice.call(g,0);if(h){h.push.apply(h,g);return h}return g};try{Array.prototype.slice.call(s.documentElement.childNodes,0)}catch(C){z=function(g,h){h=h||[];if(j.call(g)==="[object Array]")Array.prototype.push.apply(h,g);else if(typeof g.length==="number")for(var l=0,m=g.length;l<m;l++)h.push(g[l]);else for(l=0;g[l];l++)h.push(g[l]);return h}}var B;if(s.documentElement.compareDocumentPosition)B=function(g,h){if(!g.compareDocumentPosition||
!h.compareDocumentPosition){if(g==h)i=true;return g.compareDocumentPosition?-1:1}g=g.compareDocumentPosition(h)&4?-1:g===h?0:1;if(g===0)i=true;return g};else if("sourceIndex"in s.documentElement)B=function(g,h){if(!g.sourceIndex||!h.sourceIndex){if(g==h)i=true;return g.sourceIndex?-1:1}g=g.sourceIndex-h.sourceIndex;if(g===0)i=true;return g};else if(s.createRange)B=function(g,h){if(!g.ownerDocument||!h.ownerDocument){if(g==h)i=true;return g.ownerDocument?-1:1}var l=g.ownerDocument.createRange(),m=
h.ownerDocument.createRange();l.setStart(g,0);l.setEnd(g,0);m.setStart(h,0);m.setEnd(h,0);g=l.compareBoundaryPoints(Range.START_TO_END,m);if(g===0)i=true;return g};(function(){var g=s.createElement("div"),h="script"+(new Date).getTime();g.innerHTML="<a name='"+h+"'/>";var l=s.documentElement;l.insertBefore(g,l.firstChild);if(s.getElementById(h)){n.find.ID=function(m,q,p){if(typeof q.getElementById!=="undefined"&&!p)return(q=q.getElementById(m[1]))?q.id===m[1]||typeof q.getAttributeNode!=="undefined"&&
q.getAttributeNode("id").nodeValue===m[1]?[q]:w:[]};n.filter.ID=function(m,q){var p=typeof m.getAttributeNode!=="undefined"&&m.getAttributeNode("id");return m.nodeType===1&&p&&p.nodeValue===q}}l.removeChild(g);l=g=null})();(function(){var g=s.createElement("div");g.appendChild(s.createComment(""));if(g.getElementsByTagName("*").length>0)n.find.TAG=function(h,l){l=l.getElementsByTagName(h[1]);if(h[1]==="*"){h=[];for(var m=0;l[m];m++)l[m].nodeType===1&&h.push(l[m]);l=h}return l};g.innerHTML="<a href='#'></a>";
if(g.firstChild&&typeof g.firstChild.getAttribute!=="undefined"&&g.firstChild.getAttribute("href")!=="#")n.attrHandle.href=function(h){return h.getAttribute("href",2)};g=null})();s.querySelectorAll&&function(){var g=k,h=s.createElement("div");h.innerHTML="<p class='TEST'></p>";if(!(h.querySelectorAll&&h.querySelectorAll(".TEST").length===0)){k=function(m,q,p,v){q=q||s;if(!v&&q.nodeType===9&&!x(q))try{return z(q.querySelectorAll(m),p)}catch(t){}return g(m,q,p,v)};for(var l in g)k[l]=g[l];h=null}}();
(function(){var g=s.createElement("div");g.innerHTML="<div class='test e'></div><div class='test'></div>";if(!(!g.getElementsByClassName||g.getElementsByClassName("e").length===0)){g.lastChild.className="e";if(g.getElementsByClassName("e").length!==1){n.order.splice(1,0,"CLASS");n.find.CLASS=function(h,l,m){if(typeof l.getElementsByClassName!=="undefined"&&!m)return l.getElementsByClassName(h[1])};g=null}}})();var E=s.compareDocumentPosition?function(g,h){return!!(g.compareDocumentPosition(h)&16)}:
function(g,h){return g!==h&&(g.contains?g.contains(h):true)},x=function(g){return(g=(g?g.ownerDocument||g:0).documentElement)?g.nodeName!=="HTML":false},ga=function(g,h){var l=[],m="",q;for(h=h.nodeType?[h]:h;q=n.match.PSEUDO.exec(g);){m+=q[0];g=g.replace(n.match.PSEUDO,"")}g=n.relative[g]?g+"*":g;q=0;for(var p=h.length;q<p;q++)k(g,h[q],l);return k.filter(m,l)};c.find=k;c.expr=k.selectors;c.expr[":"]=c.expr.filters;c.unique=k.uniqueSort;c.text=a;c.isXMLDoc=x;c.contains=E})();var eb=/Until$/,fb=/^(?:parents|prevUntil|prevAll)/,
gb=/,/;R=Array.prototype.slice;var Ia=function(a,b,d){if(c.isFunction(b))return c.grep(a,function(e,j){return!!b.call(e,j,e)===d});else if(b.nodeType)return c.grep(a,function(e){return e===b===d});else if(typeof b==="string"){var f=c.grep(a,function(e){return e.nodeType===1});if(Ua.test(b))return c.filter(b,f,!d);else b=c.filter(b,f)}return c.grep(a,function(e){return c.inArray(e,b)>=0===d})};c.fn.extend({find:function(a){for(var b=this.pushStack("","find",a),d=0,f=0,e=this.length;f<e;f++){d=b.length;
c.find(a,this[f],b);if(f>0)for(var j=d;j<b.length;j++)for(var i=0;i<d;i++)if(b[i]===b[j]){b.splice(j--,1);break}}return b},has:function(a){var b=c(a);return this.filter(function(){for(var d=0,f=b.length;d<f;d++)if(c.contains(this,b[d]))return true})},not:function(a){return this.pushStack(Ia(this,a,false),"not",a)},filter:function(a){return this.pushStack(Ia(this,a,true),"filter",a)},is:function(a){return!!a&&c.filter(a,this).length>0},closest:function(a,b){if(c.isArray(a)){var d=[],f=this[0],e,j=
{},i;if(f&&a.length){e=0;for(var o=a.length;e<o;e++){i=a[e];j[i]||(j[i]=c.expr.match.POS.test(i)?c(i,b||this.context):i)}for(;f&&f.ownerDocument&&f!==b;){for(i in j){e=j[i];if(e.jquery?e.index(f)>-1:c(f).is(e)){d.push({selector:i,elem:f});delete j[i]}}f=f.parentNode}}return d}var k=c.expr.match.POS.test(a)?c(a,b||this.context):null;return this.map(function(n,r){for(;r&&r.ownerDocument&&r!==b;){if(k?k.index(r)>-1:c(r).is(a))return r;r=r.parentNode}return null})},index:function(a){if(!a||typeof a===
"string")return c.inArray(this[0],a?c(a):this.parent().children());return c.inArray(a.jquery?a[0]:a,this)},add:function(a,b){a=typeof a==="string"?c(a,b||this.context):c.makeArray(a);b=c.merge(this.get(),a);return this.pushStack(qa(a[0])||qa(b[0])?b:c.unique(b))},andSelf:function(){return this.add(this.prevObject)}});c.each({parent:function(a){return(a=a.parentNode)&&a.nodeType!==11?a:null},parents:function(a){return c.dir(a,"parentNode")},parentsUntil:function(a,b,d){return c.dir(a,"parentNode",
d)},next:function(a){return c.nth(a,2,"nextSibling")},prev:function(a){return c.nth(a,2,"previousSibling")},nextAll:function(a){return c.dir(a,"nextSibling")},prevAll:function(a){return c.dir(a,"previousSibling")},nextUntil:function(a,b,d){return c.dir(a,"nextSibling",d)},prevUntil:function(a,b,d){return c.dir(a,"previousSibling",d)},siblings:function(a){return c.sibling(a.parentNode.firstChild,a)},children:function(a){return c.sibling(a.firstChild)},contents:function(a){return c.nodeName(a,"iframe")?
a.contentDocument||a.contentWindow.document:c.makeArray(a.childNodes)}},function(a,b){c.fn[a]=function(d,f){var e=c.map(this,b,d);eb.test(a)||(f=d);if(f&&typeof f==="string")e=c.filter(f,e);e=this.length>1?c.unique(e):e;if((this.length>1||gb.test(f))&&fb.test(a))e=e.reverse();return this.pushStack(e,a,R.call(arguments).join(","))}});c.extend({filter:function(a,b,d){if(d)a=":not("+a+")";return c.find.matches(a,b)},dir:function(a,b,d){var f=[];for(a=a[b];a&&a.nodeType!==9&&(d===w||a.nodeType!==1||!c(a).is(d));){a.nodeType===
1&&f.push(a);a=a[b]}return f},nth:function(a,b,d){b=b||1;for(var f=0;a;a=a[d])if(a.nodeType===1&&++f===b)break;return a},sibling:function(a,b){for(var d=[];a;a=a.nextSibling)a.nodeType===1&&a!==b&&d.push(a);return d}});var Ja=/ jQuery\d+="(?:\d+|null)"/g,V=/^\s+/,Ka=/(<([\w:]+)[^>]*?)\/>/g,hb=/^(?:area|br|col|embed|hr|img|input|link|meta|param)$/i,La=/<([\w:]+)/,ib=/<tbody/i,jb=/<|&#?\w+;/,ta=/<script|<object|<embed|<option|<style/i,ua=/checked\s*(?:[^=]|=\s*.checked.)/i,Ma=function(a,b,d){return hb.test(d)?
a:b+"></"+d+">"},F={option:[1,"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],area:[1,"<map>","</map>"],_default:[0,"",""]};F.optgroup=F.option;F.tbody=F.tfoot=F.colgroup=F.caption=F.thead;F.th=F.td;if(!c.support.htmlSerialize)F._default=[1,"div<div>","</div>"];c.fn.extend({text:function(a){if(c.isFunction(a))return this.each(function(b){var d=
c(this);d.text(a.call(this,b,d.text()))});if(typeof a!=="object"&&a!==w)return this.empty().append((this[0]&&this[0].ownerDocument||s).createTextNode(a));return c.text(this)},wrapAll:function(a){if(c.isFunction(a))return this.each(function(d){c(this).wrapAll(a.call(this,d))});if(this[0]){var b=c(a,this[0].ownerDocument).eq(0).clone(true);this[0].parentNode&&b.insertBefore(this[0]);b.map(function(){for(var d=this;d.firstChild&&d.firstChild.nodeType===1;)d=d.firstChild;return d}).append(this)}return this},
wrapInner:function(a){if(c.isFunction(a))return this.each(function(b){c(this).wrapInner(a.call(this,b))});return this.each(function(){var b=c(this),d=b.contents();d.length?d.wrapAll(a):b.append(a)})},wrap:function(a){return this.each(function(){c(this).wrapAll(a)})},unwrap:function(){return this.parent().each(function(){c.nodeName(this,"body")||c(this).replaceWith(this.childNodes)}).end()},append:function(){return this.domManip(arguments,true,function(a){this.nodeType===1&&this.appendChild(a)})},
prepend:function(){return this.domManip(arguments,true,function(a){this.nodeType===1&&this.insertBefore(a,this.firstChild)})},before:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,false,function(b){this.parentNode.insertBefore(b,this)});else if(arguments.length){var a=c(arguments[0]);a.push.apply(a,this.toArray());return this.pushStack(a,"before",arguments)}},after:function(){if(this[0]&&this[0].parentNode)return this.domManip(arguments,false,function(b){this.parentNode.insertBefore(b,
this.nextSibling)});else if(arguments.length){var a=this.pushStack(this,"after",arguments);a.push.apply(a,c(arguments[0]).toArray());return a}},remove:function(a,b){for(var d=0,f;(f=this[d])!=null;d++)if(!a||c.filter(a,[f]).length){if(!b&&f.nodeType===1){c.cleanData(f.getElementsByTagName("*"));c.cleanData([f])}f.parentNode&&f.parentNode.removeChild(f)}return this},empty:function(){for(var a=0,b;(b=this[a])!=null;a++)for(b.nodeType===1&&c.cleanData(b.getElementsByTagName("*"));b.firstChild;)b.removeChild(b.firstChild);
return this},clone:function(a){var b=this.map(function(){if(!c.support.noCloneEvent&&!c.isXMLDoc(this)){var d=this.outerHTML,f=this.ownerDocument;if(!d){d=f.createElement("div");d.appendChild(this.cloneNode(true));d=d.innerHTML}return c.clean([d.replace(Ja,"").replace(/=([^="'>\s]+\/)>/g,'="$1">').replace(V,"")],f)[0]}else return this.cloneNode(true)});if(a===true){ra(this,b);ra(this.find("*"),b.find("*"))}return b},html:function(a){if(a===w)return this[0]&&this[0].nodeType===1?this[0].innerHTML.replace(Ja,
""):null;else if(typeof a==="string"&&!ta.test(a)&&(c.support.leadingWhitespace||!V.test(a))&&!F[(La.exec(a)||["",""])[1].toLowerCase()]){a=a.replace(Ka,Ma);try{for(var b=0,d=this.length;b<d;b++)if(this[b].nodeType===1){c.cleanData(this[b].getElementsByTagName("*"));this[b].innerHTML=a}}catch(f){this.empty().append(a)}}else c.isFunction(a)?this.each(function(e){var j=c(this),i=j.html();j.empty().append(function(){return a.call(this,e,i)})}):this.empty().append(a);return this},replaceWith:function(a){if(this[0]&&
this[0].parentNode){if(c.isFunction(a))return this.each(function(b){var d=c(this),f=d.html();d.replaceWith(a.call(this,b,f))});if(typeof a!=="string")a=c(a).detach();return this.each(function(){var b=this.nextSibling,d=this.parentNode;c(this).remove();b?c(b).before(a):c(d).append(a)})}else return this.pushStack(c(c.isFunction(a)?a():a),"replaceWith",a)},detach:function(a){return this.remove(a,true)},domManip:function(a,b,d){function f(u){return c.nodeName(u,"table")?u.getElementsByTagName("tbody")[0]||
u.appendChild(u.ownerDocument.createElement("tbody")):u}var e,j,i=a[0],o=[],k;if(!c.support.checkClone&&arguments.length===3&&typeof i==="string"&&ua.test(i))return this.each(function(){c(this).domManip(a,b,d,true)});if(c.isFunction(i))return this.each(function(u){var z=c(this);a[0]=i.call(this,u,b?z.html():w);z.domManip(a,b,d)});if(this[0]){e=i&&i.parentNode;e=c.support.parentNode&&e&&e.nodeType===11&&e.childNodes.length===this.length?{fragment:e}:sa(a,this,o);k=e.fragment;if(j=k.childNodes.length===
1?(k=k.firstChild):k.firstChild){b=b&&c.nodeName(j,"tr");for(var n=0,r=this.length;n<r;n++)d.call(b?f(this[n],j):this[n],n>0||e.cacheable||this.length>1?k.cloneNode(true):k)}o.length&&c.each(o,Qa)}return this}});c.fragments={};c.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){c.fn[a]=function(d){var f=[];d=c(d);var e=this.length===1&&this[0].parentNode;if(e&&e.nodeType===11&&e.childNodes.length===1&&d.length===1){d[b](this[0]);
return this}else{e=0;for(var j=d.length;e<j;e++){var i=(e>0?this.clone(true):this).get();c.fn[b].apply(c(d[e]),i);f=f.concat(i)}return this.pushStack(f,a,d.selector)}}});c.extend({clean:function(a,b,d,f){b=b||s;if(typeof b.createElement==="undefined")b=b.ownerDocument||b[0]&&b[0].ownerDocument||s;for(var e=[],j=0,i;(i=a[j])!=null;j++){if(typeof i==="number")i+="";if(i){if(typeof i==="string"&&!jb.test(i))i=b.createTextNode(i);else if(typeof i==="string"){i=i.replace(Ka,Ma);var o=(La.exec(i)||["",
""])[1].toLowerCase(),k=F[o]||F._default,n=k[0],r=b.createElement("div");for(r.innerHTML=k[1]+i+k[2];n--;)r=r.lastChild;if(!c.support.tbody){n=ib.test(i);o=o==="table"&&!n?r.firstChild&&r.firstChild.childNodes:k[1]==="<table>"&&!n?r.childNodes:[];for(k=o.length-1;k>=0;--k)c.nodeName(o[k],"tbody")&&!o[k].childNodes.length&&o[k].parentNode.removeChild(o[k])}!c.support.leadingWhitespace&&V.test(i)&&r.insertBefore(b.createTextNode(V.exec(i)[0]),r.firstChild);i=r.childNodes}if(i.nodeType)e.push(i);else e=
c.merge(e,i)}}if(d)for(j=0;e[j];j++)if(f&&c.nodeName(e[j],"script")&&(!e[j].type||e[j].type.toLowerCase()==="text/javascript"))f.push(e[j].parentNode?e[j].parentNode.removeChild(e[j]):e[j]);else{e[j].nodeType===1&&e.splice.apply(e,[j+1,0].concat(c.makeArray(e[j].getElementsByTagName("script"))));d.appendChild(e[j])}return e},cleanData:function(a){for(var b,d,f=c.cache,e=c.event.special,j=c.support.deleteExpando,i=0,o;(o=a[i])!=null;i++)if(d=o[c.expando]){b=f[d];if(b.events)for(var k in b.events)e[k]?
c.event.remove(o,k):Ca(o,k,b.handle);if(j)delete o[c.expando];else o.removeAttribute&&o.removeAttribute(c.expando);delete f[d]}}});var kb=/z-?index|font-?weight|opacity|zoom|line-?height/i,Na=/alpha\([^)]*\)/,Oa=/opacity=([^)]*)/,ha=/float/i,ia=/-([a-z])/ig,lb=/([A-Z])/g,mb=/^-?\d+(?:px)?$/i,nb=/^-?\d/,ob={position:"absolute",visibility:"hidden",display:"block"},pb=["Left","Right"],qb=["Top","Bottom"],rb=s.defaultView&&s.defaultView.getComputedStyle,Pa=c.support.cssFloat?"cssFloat":"styleFloat",ja=
function(a,b){return b.toUpperCase()};c.fn.css=function(a,b){return X(this,a,b,true,function(d,f,e){if(e===w)return c.curCSS(d,f);if(typeof e==="number"&&!kb.test(f))e+="px";c.style(d,f,e)})};c.extend({style:function(a,b,d){if(!a||a.nodeType===3||a.nodeType===8)return w;if((b==="width"||b==="height")&&parseFloat(d)<0)d=w;var f=a.style||a,e=d!==w;if(!c.support.opacity&&b==="opacity"){if(e){f.zoom=1;b=parseInt(d,10)+""==="NaN"?"":"alpha(opacity="+d*100+")";a=f.filter||c.curCSS(a,"filter")||"";f.filter=
Na.test(a)?a.replace(Na,b):b}return f.filter&&f.filter.indexOf("opacity=")>=0?parseFloat(Oa.exec(f.filter)[1])/100+"":""}if(ha.test(b))b=Pa;b=b.replace(ia,ja);if(e)f[b]=d;return f[b]},css:function(a,b,d,f){if(b==="width"||b==="height"){var e,j=b==="width"?pb:qb;function i(){e=b==="width"?a.offsetWidth:a.offsetHeight;f!=="border"&&c.each(j,function(){f||(e-=parseFloat(c.curCSS(a,"padding"+this,true))||0);if(f==="margin")e+=parseFloat(c.curCSS(a,"margin"+this,true))||0;else e-=parseFloat(c.curCSS(a,
"border"+this+"Width",true))||0})}a.offsetWidth!==0?i():c.swap(a,ob,i);return Math.max(0,Math.round(e))}return c.curCSS(a,b,d)},curCSS:function(a,b,d){var f,e=a.style;if(!c.support.opacity&&b==="opacity"&&a.currentStyle){f=Oa.test(a.currentStyle.filter||"")?parseFloat(RegExp.$1)/100+"":"";return f===""?"1":f}if(ha.test(b))b=Pa;if(!d&&e&&e[b])f=e[b];else if(rb){if(ha.test(b))b="float";b=b.replace(lb,"-$1").toLowerCase();e=a.ownerDocument.defaultView;if(!e)return null;if(a=e.getComputedStyle(a,null))f=
a.getPropertyValue(b);if(b==="opacity"&&f==="")f="1"}else if(a.currentStyle){d=b.replace(ia,ja);f=a.currentStyle[b]||a.currentStyle[d];if(!mb.test(f)&&nb.test(f)){b=e.left;var j=a.runtimeStyle.left;a.runtimeStyle.left=a.currentStyle.left;e.left=d==="fontSize"?"1em":f||0;f=e.pixelLeft+"px";e.left=b;a.runtimeStyle.left=j}}return f},swap:function(a,b,d){var f={};for(var e in b){f[e]=a.style[e];a.style[e]=b[e]}d.call(a);for(e in b)a.style[e]=f[e]}});if(c.expr&&c.expr.filters){c.expr.filters.hidden=function(a){var b=
a.offsetWidth,d=a.offsetHeight,f=a.nodeName.toLowerCase()==="tr";return b===0&&d===0&&!f?true:b>0&&d>0&&!f?false:c.curCSS(a,"display")==="none"};c.expr.filters.visible=function(a){return!c.expr.filters.hidden(a)}}var sb=J(),tb=/<script(.|\s)*?\/script>/gi,ub=/select|textarea/i,vb=/color|date|datetime|email|hidden|month|number|password|range|search|tel|text|time|url|week/i,N=/=\?(&|$)/,ka=/\?/,wb=/(\?|&)_=.*?(&|$)/,xb=/^(\w+:)?\/\/([^\/?#]+)/,yb=/%20/g,zb=c.fn.load;c.fn.extend({load:function(a,b,d){if(typeof a!==
"string")return zb.call(this,a);else if(!this.length)return this;var f=a.indexOf(" ");if(f>=0){var e=a.slice(f,a.length);a=a.slice(0,f)}f="GET";if(b)if(c.isFunction(b)){d=b;b=null}else if(typeof b==="object"){b=c.param(b,c.ajaxSettings.traditional);f="POST"}var j=this;c.ajax({url:a,type:f,dataType:"html",data:b,complete:function(i,o){if(o==="success"||o==="notmodified")j.html(e?c("<div />").append(i.responseText.replace(tb,"")).find(e):i.responseText);d&&j.each(d,[i.responseText,o,i])}});return this},
serialize:function(){return c.param(this.serializeArray())},serializeArray:function(){return this.map(function(){return this.elements?c.makeArray(this.elements):this}).filter(function(){return this.name&&!this.disabled&&(this.checked||ub.test(this.nodeName)||vb.test(this.type))}).map(function(a,b){a=c(this).val();return a==null?null:c.isArray(a)?c.map(a,function(d){return{name:b.name,value:d}}):{name:b.name,value:a}}).get()}});c.each("ajaxStart ajaxStop ajaxComplete ajaxError ajaxSuccess ajaxSend".split(" "),
function(a,b){c.fn[b]=function(d){return this.bind(b,d)}});c.extend({get:function(a,b,d,f){if(c.isFunction(b)){f=f||d;d=b;b=null}return c.ajax({type:"GET",url:a,data:b,success:d,dataType:f})},getScript:function(a,b){return c.get(a,null,b,"script")},getJSON:function(a,b,d){return c.get(a,b,d,"json")},post:function(a,b,d,f){if(c.isFunction(b)){f=f||d;d=b;b={}}return c.ajax({type:"POST",url:a,data:b,success:d,dataType:f})},ajaxSetup:function(a){c.extend(c.ajaxSettings,a)},ajaxSettings:{url:location.href,
global:true,type:"GET",contentType:"application/x-www-form-urlencoded",processData:true,async:true,xhr:A.XMLHttpRequest&&(A.location.protocol!=="file:"||!A.ActiveXObject)?function(){return new A.XMLHttpRequest}:function(){try{return new A.ActiveXObject("Microsoft.XMLHTTP")}catch(a){}},accepts:{xml:"application/xml, text/xml",html:"text/html",script:"text/javascript, application/javascript",json:"application/json, text/javascript",text:"text/plain",_default:"*/*"}},lastModified:{},etag:{},ajax:function(a){function b(){e.success&&
e.success.call(k,o,i,x);e.global&&f("ajaxSuccess",[x,e])}function d(){e.complete&&e.complete.call(k,x,i);e.global&&f("ajaxComplete",[x,e]);e.global&&!--c.active&&c.event.trigger("ajaxStop")}function f(q,p){(e.context?c(e.context):c.event).trigger(q,p)}var e=c.extend(true,{},c.ajaxSettings,a),j,i,o,k=a&&a.context||e,n=e.type.toUpperCase();if(e.data&&e.processData&&typeof e.data!=="string")e.data=c.param(e.data,e.traditional);if(e.dataType==="jsonp"){if(n==="GET")N.test(e.url)||(e.url+=(ka.test(e.url)?
"&":"?")+(e.jsonp||"callback")+"=?");else if(!e.data||!N.test(e.data))e.data=(e.data?e.data+"&":"")+(e.jsonp||"callback")+"=?";e.dataType="json"}if(e.dataType==="json"&&(e.data&&N.test(e.data)||N.test(e.url))){j=e.jsonpCallback||"jsonp"+sb++;if(e.data)e.data=(e.data+"").replace(N,"="+j+"$1");e.url=e.url.replace(N,"="+j+"$1");e.dataType="script";A[j]=A[j]||function(q){o=q;b();d();A[j]=w;try{delete A[j]}catch(p){}z&&z.removeChild(C)}}if(e.dataType==="script"&&e.cache===null)e.cache=false;if(e.cache===
false&&n==="GET"){var r=J(),u=e.url.replace(wb,"$1_="+r+"$2");e.url=u+(u===e.url?(ka.test(e.url)?"&":"?")+"_="+r:"")}if(e.data&&n==="GET")e.url+=(ka.test(e.url)?"&":"?")+e.data;e.global&&!c.active++&&c.event.trigger("ajaxStart");r=(r=xb.exec(e.url))&&(r[1]&&r[1]!==location.protocol||r[2]!==location.host);if(e.dataType==="script"&&n==="GET"&&r){var z=s.getElementsByTagName("head")[0]||s.documentElement,C=s.createElement("script");C.src=e.url;if(e.scriptCharset)C.charset=e.scriptCharset;if(!j){var B=
false;C.onload=C.onreadystatechange=function(){if(!B&&(!this.readyState||this.readyState==="loaded"||this.readyState==="complete")){B=true;b();d();C.onload=C.onreadystatechange=null;z&&C.parentNode&&z.removeChild(C)}}}z.insertBefore(C,z.firstChild);return w}var E=false,x=e.xhr();if(x){e.username?x.open(n,e.url,e.async,e.username,e.password):x.open(n,e.url,e.async);try{if(e.data||a&&a.contentType)x.setRequestHeader("Content-Type",e.contentType);if(e.ifModified){c.lastModified[e.url]&&x.setRequestHeader("If-Modified-Since",
c.lastModified[e.url]);c.etag[e.url]&&x.setRequestHeader("If-None-Match",c.etag[e.url])}r||x.setRequestHeader("X-Requested-With","XMLHttpRequest");x.setRequestHeader("Accept",e.dataType&&e.accepts[e.dataType]?e.accepts[e.dataType]+", */*":e.accepts._default)}catch(ga){}if(e.beforeSend&&e.beforeSend.call(k,x,e)===false){e.global&&!--c.active&&c.event.trigger("ajaxStop");x.abort();return false}e.global&&f("ajaxSend",[x,e]);var g=x.onreadystatechange=function(q){if(!x||x.readyState===0||q==="abort"){E||
d();E=true;if(x)x.onreadystatechange=c.noop}else if(!E&&x&&(x.readyState===4||q==="timeout")){E=true;x.onreadystatechange=c.noop;i=q==="timeout"?"timeout":!c.httpSuccess(x)?"error":e.ifModified&&c.httpNotModified(x,e.url)?"notmodified":"success";var p;if(i==="success")try{o=c.httpData(x,e.dataType,e)}catch(v){i="parsererror";p=v}if(i==="success"||i==="notmodified")j||b();else c.handleError(e,x,i,p);d();q==="timeout"&&x.abort();if(e.async)x=null}};try{var h=x.abort;x.abort=function(){x&&h.call(x);
g("abort")}}catch(l){}e.async&&e.timeout>0&&setTimeout(function(){x&&!E&&g("timeout")},e.timeout);try{x.send(n==="POST"||n==="PUT"||n==="DELETE"?e.data:null)}catch(m){c.handleError(e,x,null,m);d()}e.async||g();return x}},handleError:function(a,b,d,f){if(a.error)a.error.call(a.context||a,b,d,f);if(a.global)(a.context?c(a.context):c.event).trigger("ajaxError",[b,a,f])},active:0,httpSuccess:function(a){try{return!a.status&&location.protocol==="file:"||a.status>=200&&a.status<300||a.status===304||a.status===
1223||a.status===0}catch(b){}return false},httpNotModified:function(a,b){var d=a.getResponseHeader("Last-Modified"),f=a.getResponseHeader("Etag");if(d)c.lastModified[b]=d;if(f)c.etag[b]=f;return a.status===304||a.status===0},httpData:function(a,b,d){var f=a.getResponseHeader("content-type")||"",e=b==="xml"||!b&&f.indexOf("xml")>=0;a=e?a.responseXML:a.responseText;e&&a.documentElement.nodeName==="parsererror"&&c.error("parsererror");if(d&&d.dataFilter)a=d.dataFilter(a,b);if(typeof a==="string")if(b===
"json"||!b&&f.indexOf("json")>=0)a=c.parseJSON(a);else if(b==="script"||!b&&f.indexOf("javascript")>=0)c.globalEval(a);return a},param:function(a,b){function d(i,o){if(c.isArray(o))c.each(o,function(k,n){b||/\[\]$/.test(i)?f(i,n):d(i+"["+(typeof n==="object"||c.isArray(n)?k:"")+"]",n)});else!b&&o!=null&&typeof o==="object"?c.each(o,function(k,n){d(i+"["+k+"]",n)}):f(i,o)}function f(i,o){o=c.isFunction(o)?o():o;e[e.length]=encodeURIComponent(i)+"="+encodeURIComponent(o)}var e=[];if(b===w)b=c.ajaxSettings.traditional;
if(c.isArray(a)||a.jquery)c.each(a,function(){f(this.name,this.value)});else for(var j in a)d(j,a[j]);return e.join("&").replace(yb,"+")}});var la={},Ab=/toggle|show|hide/,Bb=/^([+-]=)?([\d+-.]+)(.*)$/,W,va=[["height","marginTop","marginBottom","paddingTop","paddingBottom"],["width","marginLeft","marginRight","paddingLeft","paddingRight"],["opacity"]];c.fn.extend({show:function(a,b){if(a||a===0)return this.animate(K("show",3),a,b);else{a=0;for(b=this.length;a<b;a++){var d=c.data(this[a],"olddisplay");
this[a].style.display=d||"";if(c.css(this[a],"display")==="none"){d=this[a].nodeName;var f;if(la[d])f=la[d];else{var e=c("<"+d+" />").appendTo("body");f=e.css("display");if(f==="none")f="block";e.remove();la[d]=f}c.data(this[a],"olddisplay",f)}}a=0;for(b=this.length;a<b;a++)this[a].style.display=c.data(this[a],"olddisplay")||"";return this}},hide:function(a,b){if(a||a===0)return this.animate(K("hide",3),a,b);else{a=0;for(b=this.length;a<b;a++){var d=c.data(this[a],"olddisplay");!d&&d!=="none"&&c.data(this[a],
"olddisplay",c.css(this[a],"display"))}a=0;for(b=this.length;a<b;a++)this[a].style.display="none";return this}},_toggle:c.fn.toggle,toggle:function(a,b){var d=typeof a==="boolean";if(c.isFunction(a)&&c.isFunction(b))this._toggle.apply(this,arguments);else a==null||d?this.each(function(){var f=d?a:c(this).is(":hidden");c(this)[f?"show":"hide"]()}):this.animate(K("toggle",3),a,b);return this},fadeTo:function(a,b,d){return this.filter(":hidden").css("opacity",0).show().end().animate({opacity:b},a,d)},
animate:function(a,b,d,f){var e=c.speed(b,d,f);if(c.isEmptyObject(a))return this.each(e.complete);return this[e.queue===false?"each":"queue"](function(){var j=c.extend({},e),i,o=this.nodeType===1&&c(this).is(":hidden"),k=this;for(i in a){var n=i.replace(ia,ja);if(i!==n){a[n]=a[i];delete a[i];i=n}if(a[i]==="hide"&&o||a[i]==="show"&&!o)return j.complete.call(this);if((i==="height"||i==="width")&&this.style){j.display=c.css(this,"display");j.overflow=this.style.overflow}if(c.isArray(a[i])){(j.specialEasing=
j.specialEasing||{})[i]=a[i][1];a[i]=a[i][0]}}if(j.overflow!=null)this.style.overflow="hidden";j.curAnim=c.extend({},a);c.each(a,function(r,u){var z=new c.fx(k,j,r);if(Ab.test(u))z[u==="toggle"?o?"show":"hide":u](a);else{var C=Bb.exec(u),B=z.cur(true)||0;if(C){u=parseFloat(C[2]);var E=C[3]||"px";if(E!=="px"){k.style[r]=(u||1)+E;B=(u||1)/z.cur(true)*B;k.style[r]=B+E}if(C[1])u=(C[1]==="-="?-1:1)*u+B;z.custom(B,u,E)}else z.custom(B,u,"")}});return true})},stop:function(a,b){var d=c.timers;a&&this.queue([]);
this.each(function(){for(var f=d.length-1;f>=0;f--)if(d[f].elem===this){b&&d[f](true);d.splice(f,1)}});b||this.dequeue();return this}});c.each({slideDown:K("show",1),slideUp:K("hide",1),slideToggle:K("toggle",1),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"}},function(a,b){c.fn[a]=function(d,f){return this.animate(b,d,f)}});c.extend({speed:function(a,b,d){var f=a&&typeof a==="object"?a:{complete:d||!d&&b||c.isFunction(a)&&a,duration:a,easing:d&&b||b&&!c.isFunction(b)&&b};f.duration=c.fx.off?0:typeof f.duration===
"number"?f.duration:c.fx.speeds[f.duration]||c.fx.speeds._default;f.old=f.complete;f.complete=function(){f.queue!==false&&c(this).dequeue();c.isFunction(f.old)&&f.old.call(this)};return f},easing:{linear:function(a,b,d,f){return d+f*a},swing:function(a,b,d,f){return(-Math.cos(a*Math.PI)/2+0.5)*f+d}},timers:[],fx:function(a,b,d){this.options=b;this.elem=a;this.prop=d;if(!b.orig)b.orig={}}});c.fx.prototype={update:function(){this.options.step&&this.options.step.call(this.elem,this.now,this);(c.fx.step[this.prop]||
c.fx.step._default)(this);if((this.prop==="height"||this.prop==="width")&&this.elem.style)this.elem.style.display="block"},cur:function(a){if(this.elem[this.prop]!=null&&(!this.elem.style||this.elem.style[this.prop]==null))return this.elem[this.prop];return(a=parseFloat(c.css(this.elem,this.prop,a)))&&a>-10000?a:parseFloat(c.curCSS(this.elem,this.prop))||0},custom:function(a,b,d){function f(j){return e.step(j)}this.startTime=J();this.start=a;this.end=b;this.unit=d||this.unit||"px";this.now=this.start;
this.pos=this.state=0;var e=this;f.elem=this.elem;if(f()&&c.timers.push(f)&&!W)W=setInterval(c.fx.tick,13)},show:function(){this.options.orig[this.prop]=c.style(this.elem,this.prop);this.options.show=true;this.custom(this.prop==="width"||this.prop==="height"?1:0,this.cur());c(this.elem).show()},hide:function(){this.options.orig[this.prop]=c.style(this.elem,this.prop);this.options.hide=true;this.custom(this.cur(),0)},step:function(a){var b=J(),d=true;if(a||b>=this.options.duration+this.startTime){this.now=
this.end;this.pos=this.state=1;this.update();this.options.curAnim[this.prop]=true;for(var f in this.options.curAnim)if(this.options.curAnim[f]!==true)d=false;if(d){if(this.options.display!=null){this.elem.style.overflow=this.options.overflow;a=c.data(this.elem,"olddisplay");this.elem.style.display=a?a:this.options.display;if(c.css(this.elem,"display")==="none")this.elem.style.display="block"}this.options.hide&&c(this.elem).hide();if(this.options.hide||this.options.show)for(var e in this.options.curAnim)c.style(this.elem,
e,this.options.orig[e]);this.options.complete.call(this.elem)}return false}else{e=b-this.startTime;this.state=e/this.options.duration;a=this.options.easing||(c.easing.swing?"swing":"linear");this.pos=c.easing[this.options.specialEasing&&this.options.specialEasing[this.prop]||a](this.state,e,0,1,this.options.duration);this.now=this.start+(this.end-this.start)*this.pos;this.update()}return true}};c.extend(c.fx,{tick:function(){for(var a=c.timers,b=0;b<a.length;b++)a[b]()||a.splice(b--,1);a.length||
c.fx.stop()},stop:function(){clearInterval(W);W=null},speeds:{slow:600,fast:200,_default:400},step:{opacity:function(a){c.style(a.elem,"opacity",a.now)},_default:function(a){if(a.elem.style&&a.elem.style[a.prop]!=null)a.elem.style[a.prop]=(a.prop==="width"||a.prop==="height"?Math.max(0,a.now):a.now)+a.unit;else a.elem[a.prop]=a.now}}});if(c.expr&&c.expr.filters)c.expr.filters.animated=function(a){return c.grep(c.timers,function(b){return a===b.elem}).length};c.fn.offset="getBoundingClientRect"in s.documentElement?
function(a){var b=this[0];if(a)return this.each(function(e){c.offset.setOffset(this,a,e)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return c.offset.bodyOffset(b);var d=b.getBoundingClientRect(),f=b.ownerDocument;b=f.body;f=f.documentElement;return{top:d.top+(self.pageYOffset||c.support.boxModel&&f.scrollTop||b.scrollTop)-(f.clientTop||b.clientTop||0),left:d.left+(self.pageXOffset||c.support.boxModel&&f.scrollLeft||b.scrollLeft)-(f.clientLeft||b.clientLeft||0)}}:function(a){var b=
this[0];if(a)return this.each(function(r){c.offset.setOffset(this,a,r)});if(!b||!b.ownerDocument)return null;if(b===b.ownerDocument.body)return c.offset.bodyOffset(b);c.offset.initialize();var d=b.offsetParent,f=b,e=b.ownerDocument,j,i=e.documentElement,o=e.body;f=(e=e.defaultView)?e.getComputedStyle(b,null):b.currentStyle;for(var k=b.offsetTop,n=b.offsetLeft;(b=b.parentNode)&&b!==o&&b!==i;){if(c.offset.supportsFixedPosition&&f.position==="fixed")break;j=e?e.getComputedStyle(b,null):b.currentStyle;
k-=b.scrollTop;n-=b.scrollLeft;if(b===d){k+=b.offsetTop;n+=b.offsetLeft;if(c.offset.doesNotAddBorder&&!(c.offset.doesAddBorderForTableAndCells&&/^t(able|d|h)$/i.test(b.nodeName))){k+=parseFloat(j.borderTopWidth)||0;n+=parseFloat(j.borderLeftWidth)||0}f=d;d=b.offsetParent}if(c.offset.subtractsBorderForOverflowNotVisible&&j.overflow!=="visible"){k+=parseFloat(j.borderTopWidth)||0;n+=parseFloat(j.borderLeftWidth)||0}f=j}if(f.position==="relative"||f.position==="static"){k+=o.offsetTop;n+=o.offsetLeft}if(c.offset.supportsFixedPosition&&
f.position==="fixed"){k+=Math.max(i.scrollTop,o.scrollTop);n+=Math.max(i.scrollLeft,o.scrollLeft)}return{top:k,left:n}};c.offset={initialize:function(){var a=s.body,b=s.createElement("div"),d,f,e,j=parseFloat(c.curCSS(a,"marginTop",true))||0;c.extend(b.style,{position:"absolute",top:0,left:0,margin:0,border:0,width:"1px",height:"1px",visibility:"hidden"});b.innerHTML="<div style='position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;'><div></div></div><table style='position:absolute;top:0;left:0;margin:0;border:5px solid #000;padding:0;width:1px;height:1px;' cellpadding='0' cellspacing='0'><tr><td></td></tr></table>";
a.insertBefore(b,a.firstChild);d=b.firstChild;f=d.firstChild;e=d.nextSibling.firstChild.firstChild;this.doesNotAddBorder=f.offsetTop!==5;this.doesAddBorderForTableAndCells=e.offsetTop===5;f.style.position="fixed";f.style.top="20px";this.supportsFixedPosition=f.offsetTop===20||f.offsetTop===15;f.style.position=f.style.top="";d.style.overflow="hidden";d.style.position="relative";this.subtractsBorderForOverflowNotVisible=f.offsetTop===-5;this.doesNotIncludeMarginInBodyOffset=a.offsetTop!==j;a.removeChild(b);
c.offset.initialize=c.noop},bodyOffset:function(a){var b=a.offsetTop,d=a.offsetLeft;c.offset.initialize();if(c.offset.doesNotIncludeMarginInBodyOffset){b+=parseFloat(c.curCSS(a,"marginTop",true))||0;d+=parseFloat(c.curCSS(a,"marginLeft",true))||0}return{top:b,left:d}},setOffset:function(a,b,d){if(/static/.test(c.curCSS(a,"position")))a.style.position="relative";var f=c(a),e=f.offset(),j=parseInt(c.curCSS(a,"top",true),10)||0,i=parseInt(c.curCSS(a,"left",true),10)||0;if(c.isFunction(b))b=b.call(a,
d,e);d={top:b.top-e.top+j,left:b.left-e.left+i};"using"in b?b.using.call(a,d):f.css(d)}};c.fn.extend({position:function(){if(!this[0])return null;var a=this[0],b=this.offsetParent(),d=this.offset(),f=/^body|html$/i.test(b[0].nodeName)?{top:0,left:0}:b.offset();d.top-=parseFloat(c.curCSS(a,"marginTop",true))||0;d.left-=parseFloat(c.curCSS(a,"marginLeft",true))||0;f.top+=parseFloat(c.curCSS(b[0],"borderTopWidth",true))||0;f.left+=parseFloat(c.curCSS(b[0],"borderLeftWidth",true))||0;return{top:d.top-
f.top,left:d.left-f.left}},offsetParent:function(){return this.map(function(){for(var a=this.offsetParent||s.body;a&&!/^body|html$/i.test(a.nodeName)&&c.css(a,"position")==="static";)a=a.offsetParent;return a})}});c.each(["Left","Top"],function(a,b){var d="scroll"+b;c.fn[d]=function(f){var e=this[0],j;if(!e)return null;if(f!==w)return this.each(function(){if(j=wa(this))j.scrollTo(!a?f:c(j).scrollLeft(),a?f:c(j).scrollTop());else this[d]=f});else return(j=wa(e))?"pageXOffset"in j?j[a?"pageYOffset":
"pageXOffset"]:c.support.boxModel&&j.document.documentElement[d]||j.document.body[d]:e[d]}});c.each(["Height","Width"],function(a,b){var d=b.toLowerCase();c.fn["inner"+b]=function(){return this[0]?c.css(this[0],d,false,"padding"):null};c.fn["outer"+b]=function(f){return this[0]?c.css(this[0],d,false,f?"margin":"border"):null};c.fn[d]=function(f){var e=this[0];if(!e)return f==null?null:this;if(c.isFunction(f))return this.each(function(j){var i=c(this);i[d](f.call(this,j,i[d]()))});return"scrollTo"in
e&&e.document?e.document.compatMode==="CSS1Compat"&&e.document.documentElement["client"+b]||e.document.body["client"+b]:e.nodeType===9?Math.max(e.documentElement["client"+b],e.body["scroll"+b],e.documentElement["scroll"+b],e.body["offset"+b],e.documentElement["offset"+b]):f===w?c.css(e,d):this.css(d,typeof f==="string"?f:f+"px")}});A.jQuery=A.$=c})(window);
// Fade in the #cpic element once the DOM is ready.
jQuery(document).ready(function () {
    jQuery("#cpic").fadeIn("slow");
});
| CASforever/CASforever.github.io | js/jquery.js | JavaScript | gpl-2.0 | 71,861 |
<?php namespace MpLoader\Admin;
use MpLoader\Admin\Menu ;
/**
* Description of PostTypeMenu
*
* @author studio
*/
class PostMenus {

    /** @var bool Render the published-posts submenu. */
    private $display_published = true;

    /** @var bool Render the pending-posts submenu. */
    private $display_pending = true;

    /** @var bool Render the draft-posts submenu. */
    private $display_draft = true;

    /** @var bool Render the scheduled-posts submenu. */
    private $display_schedule = true;

    /** @var array Map of post_type slug => parent menu title. */
    private $post_types = array('post' => 'Posts', 'page' => 'Pages');

    /** @var int Maximum number of items listed per submenu. */
    private $list_count = 5;

    /** @var array|null Extra attributes ('class', 'title', ...) for the parent node. */
    private $node_meta;

    public function __construct() {
    }

    /**
     * Toggle the published-posts submenu.
     *
     * @param bool $display_published
     * @return \MpLoader\Admin\PostMenus fluent interface
     */
    public function set_display_published($display_published) {
        $this->display_published = $display_published;
        return $this;
    }

    /**
     * Toggle the pending-posts submenu.
     *
     * @param bool $display_pending
     * @return \MpLoader\Admin\PostMenus fluent interface
     */
    public function set_display_pending($display_pending) {
        $this->display_pending = $display_pending;
        // Bug fix: this setter previously returned nothing, breaking
        // method chaining that every sibling setter supports.
        return $this;
    }

    /**
     * Toggle the draft-posts submenu.
     *
     * @param bool $display_draft
     * @return \MpLoader\Admin\PostMenus fluent interface
     */
    public function set_display_draft($display_draft) {
        $this->display_draft = $display_draft;
        return $this;
    }

    /**
     * Toggle the scheduled-posts submenu.
     *
     * @param bool $display_schedule
     * @return \MpLoader\Admin\PostMenus fluent interface
     */
    public function set_display_schedule($display_schedule) {
        $this->display_schedule = $display_schedule;
        return $this;
    }

    /**
     * Define which post types get a menu and their parent titles.
     *
     * @param array $post_types map of post_type slug => title
     * @return \MpLoader\Admin\PostMenus fluent interface
     */
    public function set_post_types($post_types) {
        $this->post_types = $post_types;
        return $this;
    }

    /**
     * Set how many posts are listed per submenu.
     *
     * @param int $list_count
     * @return \MpLoader\Admin\PostMenus fluent interface
     */
    public function set_list_count($list_count) {
        $this->list_count = $list_count;
        return $this;
    }

    /**
     * Override the auto-generated meta attributes of the parent node.
     *
     * @param array $node_meta
     * @return \MpLoader\Admin\PostMenus fluent interface
     */
    public function set_node_meta($node_meta) {
        $this->node_meta = $node_meta;
        return $this;
    }

    /**
     * Factory method.
     *
     * @return \MpLoader\Admin\PostMenus
     */
    public static function factory(){
        $factory = new PostMenus();
        return $factory;
    }

    /**
     * Create the admin-bar nodes for every configured post type,
     * one group per enabled post status.
     */
    public function create_nodes() {
        foreach ($this->post_types as $post_type => $title) {
            // TODO localize node titles
            if ($this->display_published):
                // NOTE(review): this overwrites any configured list count and,
                // because the property persists, applies to the pending /
                // scheduled / draft groups below as well — confirm intended.
                $this->list_count = 10;
                $this->nodes($post_type, 'publish', $title);
            endif;
            if ($this->display_pending):
                $this->nodes($post_type, 'pending', 'Pending', TRUE);
            endif;
            if ($this->display_schedule):
                $this->nodes($post_type, 'future', 'Scheduled', TRUE);
            endif;
            if ($this->display_draft):
                $this->nodes($post_type, 'draft', 'Draft', TRUE);
            endif;
        }
    }

    /**
     * Build one menu group: a parent node (optionally a per-status
     * separator child) plus one item per matching post.
     *
     * @param string $post_type    post type slug, e.g. 'post'
     * @param string $post_status  post status, e.g. 'publish'
     * @param string $node_title   title shown on the node
     * @param bool   $is_seperator create a separator child node grouping the items
     */
    public function nodes($post_type, $post_status, $node_title, $is_seperator = FALSE) {
        $menu_node = Menu::factory();
        $node_id   = $post_type . '-menu';
        $node_href = trailingslashit(admin_url()) . 'edit.php?post_type=' . $post_type;

        // Parent node linking to the post-type list table.
        $menu_node->set_node_id($node_id)
                ->set_node_href($node_href)
                ->set_node_title($node_title);

        // Auto-create meta properties when the caller supplied none.
        if (empty($this->node_meta)):
            $this->node_meta = array(
                'class' => $node_id,
                'title' => ucfirst($node_title)
            );
            $menu_node->set_node_meta($this->node_meta);
        endif;

        $seperator_id = $node_id;
        if ($is_seperator == TRUE):
            // Separator child node grouping items by status under the parent.
            $seperator_id = $post_status . '-' . $post_type;
            $menu_node->set_node_id($seperator_id)
                    ->set_node_parent($node_id)
                    ->set_node_href(NULL);
        endif;
        $menu_node->add_node();

        // One child item per post, linking to its edit screen.
        $post_data = $this->data($post_type, $post_status, $this->list_count);
        foreach ($post_data as $data):
            $item_href = esc_url(get_edit_post_link($data->ID));
            $item_id   = $seperator_id . '-' . $data->ID;
            Menu::factory()->set_node_parent($seperator_id)->node_item($item_id, $item_href, $data->post_title);
        endforeach;
    }

    /**
     * Fetch the newest posts of a given type and status.
     *
     * @param string $post_type   post type slug
     * @param string $post_status post status
     * @param int    $items       maximum number of rows
     * @return array of row objects with ID and post_title
     */
    private function data($post_type, $post_status, $items) {
        global $wpdb;
        // Parameterized via $wpdb->prepare() instead of direct string
        // interpolation, so values set through the public setters cannot
        // inject SQL.
        $sql = $wpdb->prepare(
            "SELECT {$wpdb->posts}.ID, {$wpdb->posts}.post_title
             FROM {$wpdb->posts}
             WHERE {$wpdb->posts}.post_status = %s
               AND {$wpdb->posts}.post_type = %s
             ORDER BY {$wpdb->posts}.ID DESC
             LIMIT %d",
            $post_status,
            $post_type,
            (int) $items
        );
        return $wpdb->get_results($sql, OBJECT);
    }
}
| CTSATLAS/wordpress | wp-content/plugins/wp.autoload/src/Admin/PostMenus.php | PHP | gpl-2.0 | 4,716 |
/*
* xHCI host controller driver
*
* Copyright (C) 2008 Intel Corp.
*
* Author: Sarah Sharp
* Some code borrowed from the Linux EHCI driver.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#ifndef __LINUX_XHCI_HCD_H
#define __LINUX_XHCI_HCD_H
#include <linux/usb.h>
#include <linux/timer.h>
#include <linux/kernel.h>
#include <linux/usb/hcd.h>
/* Code sharing between pci-quirks and xhci hcd */
#include "xhci-ext-caps.h"
#include "pci-quirks.h"
/* xHCI PCI Configuration Registers */
#define XHCI_SBRN_OFFSET (0x60)
/* Max number of USB devices for any host controller - limit in section 6.1 */
#define MAX_HC_SLOTS 256
/* Section 5.3.3 - MaxPorts */
#define MAX_HC_PORTS 127
/*
* xHCI register interface.
* This corresponds to the eXtensible Host Controller Interface (xHCI)
* Revision 0.95 specification
*/
/**
* struct xhci_cap_regs - xHCI Host Controller Capability Registers.
* @hc_capbase: length of the capabilities register and HC version number
* @hcs_params1: HCSPARAMS1 - Structural Parameters 1
* @hcs_params2: HCSPARAMS2 - Structural Parameters 2
* @hcs_params3: HCSPARAMS3 - Structural Parameters 3
* @hcc_params: HCCPARAMS - Capability Parameters
* @db_off: DBOFF - Doorbell array offset
* @run_regs_off: RTSOFF - Runtime register space offset
*/
/* Read-only capability register file; layout is fixed by the xHCI spec. */
struct xhci_cap_regs {
	__le32	hc_capbase;
	__le32	hcs_params1;
	__le32	hcs_params2;
	__le32	hcs_params3;
	__le32	hcc_params;
	__le32	db_off;
	__le32	run_regs_off;
	/* Reserved up to (CAPLENGTH - 0x1C) */
};
/* hc_capbase bitmasks */
/* bits 7:0 - how long is the Capabilities register */
#define HC_LENGTH(p) XHCI_HC_LENGTH(p)
/* bits 31:16 */
#define HC_VERSION(p) (((p) >> 16) & 0xffff)
/* HCSPARAMS1 - hcs_params1 - bitmasks */
/* bits 0:7, Max Device Slots */
#define HCS_MAX_SLOTS(p) (((p) >> 0) & 0xff)
#define HCS_SLOTS_MASK 0xff
/* bits 8:18, Max Interrupters */
#define HCS_MAX_INTRS(p) (((p) >> 8) & 0x7ff)
/* bits 24:31, Max Ports - max value is 0x7F = 127 ports */
#define HCS_MAX_PORTS(p) (((p) >> 24) & 0x7f)
/* HCSPARAMS2 - hcs_params2 - bitmasks */
/* bits 0:3, frames or uframes that SW needs to queue transactions
* ahead of the HW to meet periodic deadlines */
#define HCS_IST(p) (((p) >> 0) & 0xf)
/* bits 4:7, max number of Event Ring segments */
#define HCS_ERST_MAX(p) (((p) >> 4) & 0xf)
/* bit 26 Scratchpad restore - for save/restore HW state - not used yet */
/* bits 27:31 number of Scratchpad buffers SW must allocate for the HW */
#define HCS_MAX_SCRATCHPAD(p) (((p) >> 27) & 0x1f)
/* HCSPARAMS3 - hcs_params3 - bitmasks */
/* bits 0:7, Max U1 to U0 latency for the roothub ports */
#define HCS_U1_LATENCY(p) (((p) >> 0) & 0xff)
/* bits 16:31, Max U2 to U0 latency for the roothub ports */
#define HCS_U2_LATENCY(p) (((p) >> 16) & 0xffff)
/* HCCPARAMS - hcc_params - bitmasks */
/* true: HC can use 64-bit address pointers */
#define HCC_64BIT_ADDR(p) ((p) & (1 << 0))
/* true: HC can do bandwidth negotiation */
#define HCC_BANDWIDTH_NEG(p) ((p) & (1 << 1))
/* true: HC uses 64-byte Device Context structures
* FIXME 64-byte context structures aren't supported yet.
*/
#define HCC_64BYTE_CONTEXT(p) ((p) & (1 << 2))
/* true: HC has port power switches */
#define HCC_PPC(p) ((p) & (1 << 3))
/* true: HC has port indicators */
#define HCS_INDICATOR(p) ((p) & (1 << 4))
/* true: HC has Light HC Reset Capability */
#define HCC_LIGHT_RESET(p) ((p) & (1 << 5))
/* true: HC supports latency tolerance messaging */
#define HCC_LTC(p) ((p) & (1 << 6))
/* true: no secondary Stream ID Support */
#define HCC_NSS(p) ((p) & (1 << 7))
/* Max size for Primary Stream Arrays - 2^(n+1), where n is bits 12:15 */
#define HCC_MAX_PSA(p) (1 << ((((p) >> 12) & 0xf) + 1))
/* Extended Capabilities pointer from PCI base - section 5.3.6 */
#define HCC_EXT_CAPS(p) XHCI_HCC_EXT_CAPS(p)
/* db_off bitmask - bits 0:1 reserved */
#define DBOFF_MASK (~0x3)
/* run_regs_off bitmask - bits 0:4 reserved */
#define RTSOFF_MASK (~0x1f)
/* Number of registers per port */
#define NUM_PORT_REGS 4
/**
* struct xhci_op_regs - xHCI Host Controller Operational Registers.
* @command: USBCMD - xHC command register
* @status: USBSTS - xHC status register
* @page_size: This indicates the page size that the host controller
* supports. If bit n is set, the HC supports a page size
* of 2^(n+12), up to a 128MB page size.
* 4K is the minimum page size.
* @cmd_ring: CRP - 64-bit Command Ring Pointer
* @dcbaa_ptr: DCBAAP - 64-bit Device Context Base Address Array Pointer
* @config_reg: CONFIG - Configure Register
* @port_status_base: PORTSCn - base address for Port Status and Control
* Each port has a Port Status and Control register,
* followed by a Port Power Management Status and Control
* register, a Port Link Info register, and a reserved
* register.
* @port_power_base: PORTPMSCn - base address for
* Port Power Management Status and Control
* @port_link_base: PORTLIn - base address for Port Link Info (current
* Link PM state and control) for USB 2.1 and USB 3.0
* devices.
*/
struct xhci_op_regs {
	__le32	command;
	__le32	status;
	__le32	page_size;
	__le32	reserved1;
	__le32	reserved2;
	__le32	dev_notification;
	__le64	cmd_ring;
	/* rsvd: offset 0x20-2F */
	__le32	reserved3[4];
	__le64	dcbaa_ptr;
	__le32	config_reg;
	/* rsvd: offset 0x3C-3FF */
	__le32	reserved4[241];
	/* port 1 registers, which serve as a base address for other ports */
	__le32	port_status_base;
	__le32	port_power_base;
	__le32	port_link_base;
	__le32	reserved5;
	/* registers for ports 2-255 */
	/* each port occupies NUM_PORT_REGS (4) dwords, mirroring the set above */
	__le32	reserved6[NUM_PORT_REGS*254];
};
/* USBCMD - USB command - command bitmasks */
/* start/stop HC execution - do not write unless HC is halted*/
#define CMD_RUN XHCI_CMD_RUN
/* Reset HC - resets internal HC state machine and all registers (except
* PCI config regs). HC does NOT drive a USB reset on the downstream ports.
* The xHCI driver must reinitialize the xHC after setting this bit.
*/
#define CMD_RESET (1 << 1)
/* Event Interrupt Enable - a '1' allows interrupts from the host controller */
#define CMD_EIE XHCI_CMD_EIE
/* Host System Error Interrupt Enable - get out-of-band signal for HC errors */
#define CMD_HSEIE XHCI_CMD_HSEIE
/* bits 4:6 are reserved (and should be preserved on writes). */
/* light reset (port status stays unchanged) - reset completed when this is 0 */
#define CMD_LRESET (1 << 7)
/* host controller save/restore state. */
#define CMD_CSS (1 << 8)
#define CMD_CRS (1 << 9)
/* Enable Wrap Event - '1' means xHC generates an event when MFINDEX wraps. */
#define CMD_EWE XHCI_CMD_EWE
/* MFINDEX power management - '1' means xHC can stop MFINDEX counter if all root
* hubs are in U3 (selective suspend), disconnect, disabled, or powered-off.
* '0' means the xHC can power it off if all ports are in the disconnect,
* disabled, or powered-off state.
*/
#define CMD_PM_INDEX (1 << 11)
/* bits 12:31 are reserved (and should be preserved on writes). */
/* IMAN - Interrupt Management Register */
#define IMAN_IE (1 << 1)
#define IMAN_IP (1 << 0)
/* USBSTS - USB status - status bitmasks */
/* HC not running - set to 1 when run/stop bit is cleared. */
#define STS_HALT XHCI_STS_HALT
/* serious error, e.g. PCI parity error. The HC will clear the run/stop bit. */
#define STS_FATAL (1 << 2)
/* event interrupt - clear this prior to clearing any IP flags in IR set*/
#define STS_EINT (1 << 3)
/* port change detect */
#define STS_PORT (1 << 4)
/* bits 5:7 reserved and zeroed */
/* save state status - '1' means xHC is saving state */
#define STS_SAVE (1 << 8)
/* restore state status - '1' means xHC is restoring state */
#define STS_RESTORE (1 << 9)
/* true: save or restore error */
#define STS_SRE (1 << 10)
/* true: Controller Not Ready to accept doorbell or op reg writes after reset */
#define STS_CNR XHCI_STS_CNR
/* true: internal Host Controller Error - SW needs to reset and reinitialize */
#define STS_HCE (1 << 12)
/* bits 13:31 reserved and should be preserved */
/*
* DNCTRL - Device Notification Control Register - dev_notification bitmasks
* Generate a device notification event when the HC sees a transaction with a
* notification type that matches a bit set in this bit field.
*/
#define DEV_NOTE_MASK (0xffff)
#define ENABLE_DEV_NOTE(x) (1 << (x))
/* Most of the device notification types should only be used for debug.
* SW does need to pay attention to function wake notifications.
*/
#define DEV_NOTE_FWAKE ENABLE_DEV_NOTE(1)
/* CRCR - Command Ring Control Register - cmd_ring bitmasks */
/* bit 0 is the command ring cycle state */
/* stop ring operation after completion of the currently executing command */
#define CMD_RING_PAUSE (1 << 1)
/* stop ring immediately - abort the currently executing command */
#define CMD_RING_ABORT (1 << 2)
/* true: command ring is running */
#define CMD_RING_RUNNING (1 << 3)
/* bits 4:5 reserved and should be preserved */
/* Command Ring pointer - bit mask for the lower 32 bits. */
#define CMD_RING_RSVD_BITS (0x3f)
/* CONFIG - Configure Register - config_reg bitmasks */
/* bits 0:7 - maximum number of device slots enabled (NumSlotsEn) */
#define MAX_DEVS(p) ((p) & 0xff)
/* bits 8:31 - reserved and should be preserved */
/* PORTSC - Port Status and Control Register - port_status_base bitmasks */
/* true: device connected */
#define PORT_CONNECT (1 << 0)
/* true: port enabled */
#define PORT_PE (1 << 1)
/* bit 2 reserved and zeroed */
/* true: port has an over-current condition */
#define PORT_OC (1 << 3)
/* true: port reset signaling asserted */
#define PORT_RESET (1 << 4)
/* Port Link State - bits 5:8
* A read gives the current link PM state of the port,
* a write with Link State Write Strobe set sets the link state.
*/
#define PORT_PLS_MASK (0xf << 5)
#define XDEV_U0 (0x0 << 5)
#define XDEV_U2 (0x2 << 5)
#define XDEV_U3 (0x3 << 5)
#define XDEV_RESUME (0xf << 5)
/* true: port has power (see HCC_PPC) */
#define PORT_POWER (1 << 9)
/* bits 10:13 indicate device speed:
* 0 - undefined speed - port hasn't be initialized by a reset yet
* 1 - full speed
* 2 - low speed
* 3 - high speed
* 4 - super speed
* 5-15 reserved
*/
#define DEV_SPEED_MASK (0xf << 10)
#define XDEV_FS (0x1 << 10)
#define XDEV_LS (0x2 << 10)
#define XDEV_HS (0x3 << 10)
#define XDEV_SS (0x4 << 10)
#define DEV_UNDEFSPEED(p) (((p) & DEV_SPEED_MASK) == (0x0<<10))
#define DEV_FULLSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_FS)
#define DEV_LOWSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_LS)
#define DEV_HIGHSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_HS)
#define DEV_SUPERSPEED(p) (((p) & DEV_SPEED_MASK) == XDEV_SS)
/* Bits 20:23 in the Slot Context are the speed for the device */
#define SLOT_SPEED_FS (XDEV_FS << 10)
#define SLOT_SPEED_LS (XDEV_LS << 10)
#define SLOT_SPEED_HS (XDEV_HS << 10)
#define SLOT_SPEED_SS (XDEV_SS << 10)
/* Port Indicator Control */
#define PORT_LED_OFF (0 << 14)
#define PORT_LED_AMBER (1 << 14)
#define PORT_LED_GREEN (2 << 14)
#define PORT_LED_MASK (3 << 14)
/* Port Link State Write Strobe - set this when changing link state */
#define PORT_LINK_STROBE (1 << 16)
/* true: connect status change */
#define PORT_CSC (1 << 17)
/* true: port enable change */
#define PORT_PEC (1 << 18)
/* true: warm reset for a USB 3.0 device is done. A "hot" reset puts the port
* into an enabled state, and the device into the default state. A "warm" reset
* also resets the link, forcing the device through the link training sequence.
* SW can also look at the Port Reset register to see when warm reset is done.
*/
#define PORT_WRC (1 << 19)
/* true: over-current change */
#define PORT_OCC (1 << 20)
/* true: reset change - 1 to 0 transition of PORT_RESET */
#define PORT_RC (1 << 21)
/* port link status change - set on some port link state transitions:
* Transition Reason
* ------------------------------------------------------------------------------
* - U3 to Resume Wakeup signaling from a device
* - Resume to Recovery to U0 USB 3.0 device resume
* - Resume to U0 USB 2.0 device resume
* - U3 to Recovery to U0 Software resume of USB 3.0 device complete
* - U3 to U0 Software resume of USB 2.0 device complete
* - U2 to U0 L1 resume of USB 2.1 device complete
* - U0 to U0 (???) L1 entry rejection by USB 2.1 device
* - U0 to disabled L1 entry error with USB 2.1 device
* - Any state to inactive Error on USB 3.0 port
*/
#define PORT_PLC (1 << 22)
/* port configure error change - port failed to configure its link partner */
#define PORT_CEC (1 << 23)
/* Cold Attach Status - xHC can set this bit to report device attached during
 * Sx state. Warm port reset should be performed to clear this bit and move port
 * to connected state.
 */
#define PORT_CAS (1 << 24)
/* wake on connect (enable) */
#define PORT_WKCONN_E (1 << 25)
/* wake on disconnect (enable) */
#define PORT_WKDISC_E (1 << 26)
/* wake on over-current (enable) */
#define PORT_WKOC_E (1 << 27)
/* bits 28:29 reserved */
/* true: device is removable - for USB 3.0 roothub emulation */
#define PORT_DEV_REMOVE (1 << 30)
/* Initiate a warm port reset - complete when PORT_WRC is '1' */
#define PORT_WR (1 << 31)
/* We mark duplicate entries with -1 */
#define DUPLICATE_ENTRY ((u8)(-1))
/* Port Power Management Status and Control - port_power_base bitmasks */
/* Inactivity timer value for transitions into U1, in microseconds.
* Timeout can be up to 127us. 0xFF means an infinite timeout.
*/
#define PORT_U1_TIMEOUT(p) ((p) & 0xff)
/* Inactivity timer value for transitions into U2 */
#define PORT_U2_TIMEOUT(p) (((p) & 0xff) << 8)
/* Bits 24:31 for port testing */
/* USB2 Protocol PORTSPMSC */
#define PORT_L1S_MASK 7
#define PORT_L1S_SUCCESS 1
#define PORT_RWE (1 << 3)
#define PORT_HIRD(p) (((p) & 0xf) << 4)
#define PORT_HIRD_MASK (0xf << 4)
#define PORT_L1DS(p) (((p) & 0xff) << 8)
#define PORT_HLE (1 << 16)
/**
* struct xhci_intr_reg - Interrupt Register Set
* @irq_pending: IMAN - Interrupt Management Register. Used to enable
* interrupts and check for pending interrupts.
* @irq_control: IMOD - Interrupt Moderation Register.
* Used to throttle interrupts.
* @erst_size: Number of segments in the Event Ring Segment Table (ERST).
* @erst_base: ERST base address.
* @erst_dequeue: Event ring dequeue pointer.
*
* Each interrupter (defined by a MSI-X vector) has an event ring and an Event
* Ring Segment Table (ERST) associated with it. The event ring is comprised of
* multiple segments of the same size. The HC places events on the ring and
* "updates the Cycle bit in the TRBs to indicate to software the current
* position of the Enqueue Pointer." The HCD (Linux) processes those events and
* updates the dequeue pointer.
*/
/* One register set per interrupter (see xhci_run_regs.ir_set below). */
struct xhci_intr_reg {
	__le32	irq_pending;
	__le32	irq_control;
	__le32	erst_size;
	__le32	rsvd;
	__le64	erst_base;
	__le64	erst_dequeue;
};
/* irq_pending bitmasks */
#define ER_IRQ_PENDING(p) ((p) & 0x1)
/* bits 2:31 need to be preserved */
/* THIS IS BUGGY - FIXME - IP IS WRITE 1 TO CLEAR */
#define ER_IRQ_CLEAR(p) ((p) & 0xfffffffe)
#define ER_IRQ_ENABLE(p) ((ER_IRQ_CLEAR(p)) | 0x2)
#define ER_IRQ_DISABLE(p) ((ER_IRQ_CLEAR(p)) & ~(0x2))
/* irq_control bitmasks */
/* Minimum interval between interrupts (in 250ns intervals). The interval
* between interrupts will be longer if there are no events on the event ring.
* Default is 4000 (1 ms).
*/
#define ER_IRQ_INTERVAL_MASK (0xffff)
/* Counter used to count down the time to the next interrupt - HW use only */
#define ER_IRQ_COUNTER_MASK (0xffff << 16)
/* erst_size bitmasks */
/* Preserve bits 16:31 of erst_size */
#define ERST_SIZE_MASK (0xffff << 16)
/* erst_dequeue bitmasks */
/* Dequeue ERST Segment Index (DESI) - Segment number (or alias)
* where the current dequeue pointer lies. This is an optional HW hint.
*/
#define ERST_DESI_MASK (0x7)
/* Event Handler Busy (EHB) - is the event ring scheduled to be serviced by
* a work queue (or delayed service routine)?
*/
#define ERST_EHB (1 << 3)
#define ERST_PTR_MASK (0xf)
/**
* struct xhci_run_regs
* @microframe_index:
* MFINDEX - current microframe number
*
* Section 5.5 Host Controller Runtime Registers:
* "Software should read and write these registers using only Dword (32 bit)
* or larger accesses"
*/
struct xhci_run_regs {
	__le32			microframe_index;
	__le32			rsvd[7];
	/* register space provides up to 128 interrupter register sets */
	struct xhci_intr_reg	ir_set[128];
};
/**
* struct doorbell_array
*
* Bits 0 - 7: Endpoint target
* Bits 8 - 15: RsvdZ
* Bits 16 - 31: Stream ID
*
* Section 5.6
*/
struct xhci_doorbell_array {
	/* doorbell[0] is the host controller's (DB_VALUE_HOST); the rest
	 * presumably map to device slots — see DB_VALUE() below. */
	__le32	doorbell[256];
};
#define DB_VALUE(ep, stream) ((((ep) + 1) & 0xff) | ((stream) << 16))
#define DB_VALUE_HOST 0x00000000
/**
* struct xhci_protocol_caps
* @revision: major revision, minor revision, capability ID,
* and next capability pointer.
* @name_string: Four ASCII characters to say which spec this xHC
* follows, typically "USB ".
* @port_info: Port offset, count, and protocol-defined information.
*/
/* Decoded with the XHCI_EXT_PORT_* accessors directly below. */
struct xhci_protocol_caps {
	u32	revision;
	u32	name_string;
	u32	port_info;
};
#define XHCI_EXT_PORT_MAJOR(x) (((x) >> 24) & 0xff)
#define XHCI_EXT_PORT_OFF(x) ((x) & 0xff)
#define XHCI_EXT_PORT_COUNT(x) (((x) >> 8) & 0xff)
/**
* struct xhci_container_ctx
* @type: Type of context. Used to calculated offsets to contained contexts.
* @size: Size of the context data
* @bytes: The raw context data given to HW
* @dma: dma address of the bytes
*
* Represents either a Device or Input context. Holds a pointer to the raw
* memory used for the context (bytes) and dma address of it (dma).
*/
struct xhci_container_ctx {
	/* one of the two XHCI_CTX_TYPE_* values below */
	unsigned type;
#define XHCI_CTX_TYPE_DEVICE  0x1
#define XHCI_CTX_TYPE_INPUT   0x2

	int size;

	u8 *bytes;
	dma_addr_t dma;
};
/**
* struct xhci_slot_ctx
* @dev_info: Route string, device speed, hub info, and last valid endpoint
* @dev_info2: Max exit latency for device number, root hub port number
* @tt_info: tt_info is used to construct split transaction tokens
* @dev_state: slot state and device address
*
* Slot Context - section 6.2.1.1. This assumes the HC uses 32-byte context
* structures. If the HC uses 64-byte contexts, there is an additional 32 bytes
* reserved at the end of the slot context for HC internal use.
*/
struct xhci_slot_ctx {
	__le32	dev_info;
	__le32	dev_info2;
	__le32	tt_info;
	__le32	dev_state;
	/* offset 0x10 to 0x1f reserved for HC internal use */
	__le32	reserved[4];
};
/* Field bit layouts are decoded by the masks directly below. */
/* dev_info bitmasks */
/* Route String - 0:19 */
#define ROUTE_STRING_MASK (0xfffff)
/* Device speed - values defined by PORTSC Device Speed field - 20:23 */
#define DEV_SPEED (0xf << 20)
/* bit 24 reserved */
/* Is this LS/FS device connected through a HS hub? - bit 25 */
#define DEV_MTT (0x1 << 25)
/* Set if the device is a hub - bit 26 */
#define DEV_HUB (0x1 << 26)
/* Index of the last valid endpoint context in this device context - 27:31 */
#define LAST_CTX_MASK (0x1f << 27)
#define LAST_CTX(p) ((p) << 27)
#define LAST_CTX_TO_EP_NUM(p) (((p) >> 27) - 1)
#define SLOT_FLAG (1 << 0)
#define EP0_FLAG (1 << 1)
/* dev_info2 bitmasks */
/* Max Exit Latency (ms) - worst case time to wake up all links in dev path */
#define MAX_EXIT (0xffff)
/* Root hub port number that is needed to access the USB device */
#define ROOT_HUB_PORT(p) (((p) & 0xff) << 16)
#define DEVINFO_TO_ROOT_HUB_PORT(p) (((p) >> 16) & 0xff)
/* Maximum number of ports under a hub device */
#define XHCI_MAX_PORTS(p) (((p) & 0xff) << 24)
/* tt_info bitmasks */
/*
* TT Hub Slot ID - for low or full speed devices attached to a high-speed hub
* The Slot ID of the hub that isolates the high speed signaling from
* this low or full-speed device. '0' if attached to root hub port.
*/
#define TT_SLOT (0xff)
/*
* The number of the downstream facing port of the high-speed hub
* '0' if the device is not low or full speed.
*/
#define TT_PORT (0xff << 8)
#define TT_THINK_TIME(p) (((p) & 0x3) << 16)
/* dev_state bitmasks */
/* USB device address - assigned by the HC */
#define DEV_ADDR_MASK (0xff)
/* bits 8:26 reserved */
/* Slot state */
#define SLOT_STATE (0x1f << 27)
#define GET_SLOT_STATE(p) (((p) & (0x1f << 27)) >> 27)
#define SLOT_STATE_DISABLED 0
#define SLOT_STATE_ENABLED SLOT_STATE_DISABLED
#define SLOT_STATE_DEFAULT 1
#define SLOT_STATE_ADDRESSED 2
#define SLOT_STATE_CONFIGURED 3
/**
* struct xhci_ep_ctx
* @ep_info: endpoint state, streams, mult, and interval information.
* @ep_info2: information on endpoint type, max packet size, max burst size,
* error count, and whether the HC will force an event for all
* transactions.
* @deq: 64-bit ring dequeue pointer address. If the endpoint only
* defines one stream, this points to the endpoint transfer ring.
* Otherwise, it points to a stream context array, which has a
* ring pointer for each flow.
* @tx_info:
* Average TRB lengths for the endpoint ring and
* max payload within an Endpoint Service Interval Time (ESIT).
*
* Endpoint Context - section 6.2.1.2. This assumes the HC uses 32-byte context
* structures. If the HC uses 64-byte contexts, there is an additional 32 bytes
* reserved at the end of the endpoint context for HC internal use.
*/
struct xhci_ep_ctx {
	__le32	ep_info;
	__le32	ep_info2;
	/* 64-bit dequeue pointer; bit 0 is the cycle state (EP_CTX_CYCLE_MASK) */
	__le64	deq;
	__le32	tx_info;
	/* offset 0x14 - 0x1f reserved for HC internal use */
	__le32	reserved[3];
};
/* ep_info bitmasks */
/*
* Endpoint State - bits 0:2
* 0 - disabled
* 1 - running
* 2 - halted due to halt condition - ok to manipulate endpoint ring
* 3 - stopped
* 4 - TRB error
* 5-7 - reserved
*/
#define EP_STATE_MASK (0xf)
#define EP_STATE_DISABLED 0
#define EP_STATE_RUNNING 1
#define EP_STATE_HALTED 2
#define EP_STATE_STOPPED 3
#define EP_STATE_ERROR 4
/* Mult - Max number of bursts within an interval, in EP companion desc. */
#define EP_MULT(p) (((p) & 0x3) << 8)
#define CTX_TO_EP_MULT(p) (((p) >> 8) & 0x3)
/* bits 10:14 are Max Primary Streams */
/* bit 15 is Linear Stream Array */
/* Interval - period between requests to an endpoint - 125u increments. */
#define EP_INTERVAL(p) (((p) & 0xff) << 16)
#define EP_INTERVAL_TO_UFRAMES(p) (1 << (((p) >> 16) & 0xff))
#define CTX_TO_EP_INTERVAL(p) (((p) >> 16) & 0xff)
#define EP_MAXPSTREAMS_MASK (0x1f << 10)
#define EP_MAXPSTREAMS(p) (((p) << 10) & EP_MAXPSTREAMS_MASK)
/* Endpoint is set up with a Linear Stream Array (vs. Secondary Stream Array) */
#define EP_HAS_LSA (1 << 15)
/* ep_info2 bitmasks */
/*
* Force Event - generate transfer events for all TRBs for this endpoint
* This will tell the HC to ignore the IOC and ISP flags (for debugging only).
*/
#define FORCE_EVENT (0x1)
#define ERROR_COUNT(p) (((p) & 0x3) << 1)
#define CTX_TO_EP_TYPE(p) (((p) >> 3) & 0x7)
#define EP_TYPE(p) ((p) << 3)
#define ISOC_OUT_EP 1
#define BULK_OUT_EP 2
#define INT_OUT_EP 3
#define CTRL_EP 4
#define ISOC_IN_EP 5
#define BULK_IN_EP 6
#define INT_IN_EP 7
/* bit 6 reserved */
/* bit 7 is Host Initiate Disable - for disabling stream selection */
#define MAX_BURST(p) (((p)&0xff) << 8)
#define CTX_TO_MAX_BURST(p) (((p) >> 8) & 0xff)
#define MAX_PACKET(p) (((p)&0xffff) << 16)
#define MAX_PACKET_MASK (0xffff << 16)
#define MAX_PACKET_DECODED(p) (((p) >> 16) & 0xffff)
/* Get max packet size from ep desc. Bit 10..0 specify the max packet size.
* USB2.0 spec 9.6.6.
*/
#define GET_MAX_PACKET(p) ((p) & 0x7ff)
/* tx_info bitmasks */
#define AVG_TRB_LENGTH_FOR_EP(p) ((p) & 0xffff)
#define MAX_ESIT_PAYLOAD_FOR_EP(p) (((p) & 0xffff) << 16)
#define CTX_TO_MAX_ESIT_PAYLOAD(p) (((p) >> 16) & 0xffff)
/* deq bitmasks */
#define EP_CTX_CYCLE_MASK (1 << 0)
/**
* struct xhci_input_control_context
* Input control context; see section 6.2.5.
*
* @drop_context: set the bit of the endpoint context you want to disable
* @add_context: set the bit of the endpoint context you want to enable
*/
struct xhci_input_control_ctx {
	/* bit 0 = slot context, bit 1 = ep0; endpoint i maps to bit (i + 1),
	 * see EP_IS_ADDED()/EP_IS_DROPPED() below */
	__le32	drop_flags;
	__le32	add_flags;
	__le32	rsvd2[6];
};
#define EP_IS_ADDED(ctrl_ctx, i) \
(le32_to_cpu(ctrl_ctx->add_flags) & (1 << (i + 1)))
#define EP_IS_DROPPED(ctrl_ctx, i) \
(le32_to_cpu(ctrl_ctx->drop_flags) & (1 << (i + 1)))
/* Represents everything that is needed to issue a command on the command ring.
* It's useful to pre-allocate these for commands that cannot fail due to
* out-of-memory errors, like freeing streams.
*/
struct xhci_command {
	/* Input context for changing device state */
	struct xhci_container_ctx	*in_ctx;
	/* completion status of the command — NOTE(review): presumably a TRB
	 * completion code; confirm against the command-event handler */
	u32				status;
	/* If completion is null, no one is waiting on this command
	 * and the structure can be freed after the command completes.
	 */
	struct completion		*completion;
	union xhci_trb			*command_trb;
	struct list_head		cmd_list;
};
/* drop context bitmasks */
#define DROP_EP(x) (0x1 << x)
/* add context bitmasks */
#define ADD_EP(x) (0x1 << x)
struct xhci_stream_ctx {
	/* 64-bit stream ring address, cycle state, and stream type */
	__le64	stream_ring;
	/* offset 0x8 - 0xf reserved for HC internal use */
	__le32	reserved[2];
};
/* Stream Context Types (section 6.4.1) - bits 3:1 of stream ctx deq ptr */
#define SCT_FOR_CTX(p) (((p) << 1) & 0x7)
/* Secondary stream array type, dequeue pointer is to a transfer ring */
#define SCT_SEC_TR 0
/* Primary stream array type, dequeue pointer is to a transfer ring */
#define SCT_PRI_TR 1
/* Dequeue pointer is for a secondary stream array (SSA) with 8 entries */
#define SCT_SSA_8 2
#define SCT_SSA_16 3
#define SCT_SSA_32 4
#define SCT_SSA_64 5
#define SCT_SSA_128 6
#define SCT_SSA_256 7
/* Assume no secondary streams for now */
/* Assume no secondary streams for now */
struct xhci_stream_info {
	/* one ring per stream ID */
	struct xhci_ring		**stream_rings;
	/* Number of streams, including stream 0 (which drivers can't use) */
	unsigned int			num_streams;
	/* The stream context array may be bigger than
	 * the number of streams the driver asked for
	 */
	struct xhci_stream_ctx		*stream_ctx_array;
	unsigned int			num_stream_ctxs;
	dma_addr_t			ctx_array_dma;
	/* For mapping physical TRB addresses to segments in stream rings */
	struct radix_tree_root		trb_address_map;
	/* pre-allocated so freeing streams cannot fail on OOM */
	struct xhci_command		*free_streams_command;
};
#define SMALL_STREAM_ARRAY_SIZE 256
#define MEDIUM_STREAM_ARRAY_SIZE 1024
/* Some Intel xHCI host controllers need software to keep track of the bus
* bandwidth. Keep track of endpoint info here. Each root port is allocated
* the full bus bandwidth. We must also treat TTs (including each port under a
* multi-TT hub) as a separate bandwidth domain. The direct memory interface
* (DMI) also limits the total bandwidth (across all domains) that can be used.
*/
struct xhci_bw_info {
	/* ep_interval is zero-based */
	unsigned int		ep_interval;
	/* mult and num_packets are one-based */
	unsigned int		mult;
	unsigned int		num_packets;
	unsigned int		max_packet_size;
	unsigned int		max_esit_payload;
	/* endpoint type (see the EP_TYPE values defined above) */
	unsigned int		type;
};
/* "Block" sizes in bytes the hardware uses for different device speeds.
* The logic in this part of the hardware limits the number of bits the hardware
* can use, so must represent bandwidth in a less precise manner to mimic what
* the scheduler hardware computes.
*/
#define FS_BLOCK 1
#define HS_BLOCK 4
#define SS_BLOCK 16
#define DMI_BLOCK 32
/* Each device speed has a protocol overhead (CRC, bit stuffing, etc) associated
* with each byte transferred. SuperSpeed devices have an initial overhead to
* set up bursts. These are in blocks, see above. LS overhead has already been
* translated into FS blocks.
*/
#define DMI_OVERHEAD 8
#define DMI_OVERHEAD_BURST 4
#define SS_OVERHEAD 8
#define SS_OVERHEAD_BURST 32
#define HS_OVERHEAD 26
#define FS_OVERHEAD 20
#define LS_OVERHEAD 128
/* The TTs need to claim roughly twice as much bandwidth (94 bytes per
* microframe ~= 24Mbps) of the HS bus as the devices can actually use because
* of overhead associated with split transfers crossing microframe boundaries.
* 31 blocks is pure protocol overhead.
*/
#define TT_HS_OVERHEAD (31 + 94)
#define TT_DMI_OVERHEAD (25 + 12)
/* Bandwidth limits in blocks */
#define FS_BW_LIMIT 1285
#define TT_BW_LIMIT 1320
#define HS_BW_LIMIT 1607
#define SS_BW_LIMIT_IN 3906
#define DMI_BW_LIMIT_IN 3906
#define SS_BW_LIMIT_OUT 3906
#define DMI_BW_LIMIT_OUT 3906
/* Percentage of bus bandwidth reserved for non-periodic transfers */
#define FS_BW_RESERVED 10
#define HS_BW_RESERVED 20
#define SS_BW_RESERVED 10
struct xhci_virt_ep {
	struct xhci_ring		*ring;
	/* Related to endpoints that are configured to use stream IDs only */
	struct xhci_stream_info		*stream_info;
	/* Temporary storage in case the configure endpoint command fails and we
	 * have to restore the device state to the previous state
	 */
	struct xhci_ring		*new_ring;
	/* bitmask of the flags defined directly below */
	unsigned int			ep_state;
#define SET_DEQ_PENDING		(1 << 0)
#define EP_HALTED		(1 << 1)	/* For stall handling */
#define EP_HALT_PENDING	(1 << 2)	/* For URB cancellation */
/* Transitioning the endpoint to using streams, don't enqueue URBs */
#define EP_GETTING_STREAMS	(1 << 3)
#define EP_HAS_STREAMS		(1 << 4)
/* Transitioning the endpoint to not using streams, don't enqueue URBs */
#define EP_GETTING_NO_STREAMS	(1 << 5)
	/* ---- Related to URB cancellation ---- */
	struct list_head	cancelled_td_list;
	/* The TRB that was last reported in a stopped endpoint ring */
	union xhci_trb		*stopped_trb;
	struct xhci_td		*stopped_td;
	unsigned int		stopped_stream;
	/* Watchdog timer for stop endpoint command to cancel URBs */
	struct timer_list	stop_cmd_timer;
	int			stop_cmds_pending;
	/* back-pointer to the owning host controller */
	struct xhci_hcd		*xhci;
	/* Dequeue pointer and dequeue segment for a submitted Set TR Dequeue
	 * command.  We'll need to update the ring's dequeue segment and dequeue
	 * pointer after the command completes.
	 */
	struct xhci_segment	*queued_deq_seg;
	union xhci_trb		*queued_deq_ptr;
	/*
	 * Sometimes the xHC can not process isochronous endpoint ring quickly
	 * enough, and it will miss some isoc tds on the ring and generate
	 * a Missed Service Error Event.
	 * Set skip flag when receive a Missed Service Error Event and
	 * process the missed tds on the endpoint ring.
	 */
	bool			skip;
	/* Bandwidth checking storage */
	struct xhci_bw_info	bw_info;
	struct list_head	bw_endpoint_list;
};
/* Indexes into xhci_interval_bw.overhead[] (one slot per device speed). */
enum xhci_overhead_type {
	LS_OVERHEAD_TYPE = 0,
	FS_OVERHEAD_TYPE,
	HS_OVERHEAD_TYPE,
};
struct xhci_interval_bw {
	unsigned int		num_packets;
	/* Sorted by max packet size.
	 * Head of the list is the greatest max packet size.
	 */
	struct list_head	endpoints;
	/* How many endpoints of each speed are present. */
	/* indexed by enum xhci_overhead_type (LS/FS/HS) */
	unsigned int		overhead[3];
};
#define XHCI_MAX_INTERVAL 16
struct xhci_interval_bw_table {
unsigned int interval0_esit_payload;
struct xhci_interval_bw interval_bw[XHCI_MAX_INTERVAL];
/* Includes reserved bandwidth for async endpoints */
unsigned int bw_used;
unsigned int ss_bw_in;
unsigned int ss_bw_out;
};
/*
 * Software mirror of one xHC device slot: the device's contexts, cached
 * rings, per-endpoint state, and bandwidth domain membership.
 */
struct xhci_virt_device {
	struct usb_device		*udev;
	/*
	 * Commands to the hardware are passed an "input context" that
	 * tells the hardware what to change in its data structures.
	 * The hardware will return changes in an "output context" that
	 * software must allocate for the hardware.  We need to keep
	 * track of input and output contexts separately because
	 * these commands might fail and we don't trust the hardware.
	 */
	struct xhci_container_ctx       *out_ctx;
	/* Used for addressing devices and configuration changes */
	struct xhci_container_ctx       *in_ctx;
	/* Rings saved to ensure old alt settings can be re-instated */
	struct xhci_ring		**ring_cache;
	int				num_rings_cached;
	/* Store xHC assigned device address */
	int				address;
	/* NOTE(review): eps[31] below uses the literal 31 rather than this
	 * macro; they appear to be the same bound -- confirm before reuse. */
#define	XHCI_MAX_RINGS_CACHED	31
	struct xhci_virt_ep		eps[31];
	struct completion		cmd_completion;
	/* Status of the last command issued for this device */
	u32				cmd_status;
	struct list_head		cmd_list;
	/* Fake (software-indexed) vs. real (hardware) roothub port numbers. */
	u8				fake_port;
	u8				real_port;
	struct xhci_interval_bw_table	*bw_table;
	struct xhci_tt_bw_info		*tt_info;
};

/*
 * For each roothub, keep track of the bandwidth information for each periodic
 * interval.
 *
 * If a high speed hub is attached to the roothub, each TT associated with that
 * hub is a separate bandwidth domain.  The interval information for the
 * endpoints on the devices under that TT will appear in the TT structure.
 */
struct xhci_root_port_bw_info {
	struct list_head		tts;
	unsigned int			num_active_tts;
	struct xhci_interval_bw_table	bw_table;
};

/* Bandwidth domain for one Transaction Translator on a high-speed hub. */
struct xhci_tt_bw_info {
	struct list_head		tt_list;
	int				slot_id;
	int				ttport;
	struct xhci_interval_bw_table	bw_table;
	int				active_eps;
};
/**
 * struct xhci_device_context_array
 * @dev_context_ptr	array of 64-bit DMA addresses for device contexts
 */
struct xhci_device_context_array {
	/* 64-bit device addresses; we only write 32-bit addresses */
	__le64			dev_context_ptrs[MAX_HC_SLOTS];
	/* private xHCD pointers */
	dma_addr_t	dma;
};
/* TODO: write function to set the 64-bit device DMA address */
/*
 * TODO: change this to be dynamically sized at HC mem init time since the HC
 * might not be able to handle the maximum number of devices possible.
 */

/* Transfer Event TRB, as delivered on the event ring by the xHC. */
struct xhci_transfer_event {
	/* 64-bit buffer address, or immediate data */
	__le64	buffer;
	__le32	transfer_len;
	/* This field is interpreted differently based on the type of TRB */
	__le32	flags;
};

/* Transfer event TRB length bit mask */
/* bits 0:23 */
#define	EVENT_TRB_LEN(p)		((p) & 0xffffff)

/** Transfer Event bit fields **/
#define	TRB_TO_EP_ID(p)	(((p) >> 16) & 0x1f)

/* Completion Code - only applicable for some types of TRBs */
#define	COMP_CODE_MASK		(0xff << 24)
#define GET_COMP_CODE(p)	(((p) & COMP_CODE_MASK) >> 24)
#define COMP_SUCCESS	1
/* Data Buffer Error */
#define COMP_DB_ERR	2
/* Babble Detected Error */
#define COMP_BABBLE	3
/* USB Transaction Error */
#define COMP_TX_ERR	4
/* TRB Error - some TRB field is invalid */
#define COMP_TRB_ERR	5
/* Stall Error - USB device is stalled */
#define COMP_STALL	6
/* Resource Error - HC doesn't have memory for that device configuration */
#define COMP_ENOMEM	7
/* Bandwidth Error - not enough room in schedule for this dev config */
#define COMP_BW_ERR	8
/* No Slots Available Error - HC ran out of device slots */
#define COMP_ENOSLOTS	9
/* Invalid Stream Type Error */
#define COMP_STREAM_ERR	10
/* Slot Not Enabled Error - doorbell rung for disabled device slot */
#define COMP_EBADSLT	11
/* Endpoint Not Enabled Error */
#define COMP_EBADEP	12
/* Short Packet */
#define COMP_SHORT_TX	13
/* Ring Underrun - doorbell rung for an empty isoc OUT ep ring */
#define COMP_UNDERRUN	14
/* Ring Overrun - isoc IN ep ring is empty when ep is scheduled to RX */
#define COMP_OVERRUN	15
/* Virtual Function Event Ring Full Error */
#define COMP_VF_FULL	16
/* Parameter Error - Context parameter is invalid */
#define COMP_EINVAL	17
/* Bandwidth Overrun Error - isoc ep exceeded its allocated bandwidth */
#define COMP_BW_OVER	18
/* Context State Error - illegal context state transition requested */
#define COMP_CTX_STATE	19
/* No Ping Response Error - HC didn't get PING_RESPONSE in time to TX */
#define COMP_PING_ERR	20
/* Event Ring is full */
#define COMP_ER_FULL	21
/* Incompatible Device Error */
#define COMP_DEV_ERR	22
/* Missed Service Error - HC couldn't service an isoc ep within interval */
#define COMP_MISSED_INT	23
/* Successfully stopped command ring */
#define COMP_CMD_STOP	24
/* Successfully aborted current command and stopped command ring */
#define COMP_CMD_ABORT	25
/* Stopped - transfer was terminated by a stop endpoint command */
#define COMP_STOP	26
/* Same as COMP_STOP, but the transferred length in the event is invalid */
#define COMP_STOP_INVAL	27
/* Control Abort Error - Debug Capability - control pipe aborted */
#define COMP_DBG_ABORT	28
/* Max Exit Latency Too Large Error */
#define COMP_MEL_ERR	29
/* TRB type 30 reserved */
/* Isoc Buffer Overrun - an isoc IN ep sent more data than could fit in TD */
#define COMP_BUFF_OVER	31
/* Event Lost Error - xHC has an "internal event overrun condition" */
#define COMP_ISSUES	32
/* Undefined Error - reported when other error codes don't apply */
#define COMP_UNKNOWN	33
/* Invalid Stream ID Error */
#define COMP_STRID_ERR	34
/* Secondary Bandwidth Error - may be returned by a Configure Endpoint cmd */
#define COMP_2ND_BW_ERR	35
/* Split Transaction Error */
#define	COMP_SPLIT_ERR	36
/* Link TRB: points at the next ring segment. */
struct xhci_link_trb {
	/* 64-bit segment pointer*/
	__le64 segment_ptr;
	__le32 intr_target;
	__le32 control;
};

/* control bitfields */
#define LINK_TOGGLE	(0x1<<1)

/* Command completion event TRB */
struct xhci_event_cmd {
	/* Pointer to command TRB, or the value passed by the event data trb */
	__le64 cmd_trb;
	__le32 status;
	__le32 flags;
};

/* flags bitmasks */
/* bits 16:23 are the virtual function ID */
/* bits 24:31 are the slot ID */
#define TRB_TO_SLOT_ID(p)	(((p) & (0xff<<24)) >> 24)
#define SLOT_ID_FOR_TRB(p)	(((p) & 0xff) << 24)

/* Stop Endpoint TRB - ep_index to endpoint ID for this TRB */
#define TRB_TO_EP_INDEX(p)		((((p) & (0x1f << 16)) >> 16) - 1)
#define	EP_ID_FOR_TRB(p)		((((p) + 1) & 0x1f) << 16)

#define SUSPEND_PORT_FOR_TRB(p)		(((p) & 1) << 23)
#define TRB_TO_SUSPEND_PORT(p)		(((p) & (1 << 23)) >> 23)
#define LAST_EP_INDEX			30

/* Set TR Dequeue Pointer command TRB fields */
#define TRB_TO_STREAM_ID(p)		((((p) & (0xffff << 16)) >> 16))
#define STREAM_ID_FOR_TRB(p)		((((p)) & 0xffff) << 16)

/* Port Status Change Event TRB fields */
/* Port ID - bits 31:24 */
#define GET_PORT_ID(p)		(((p) & (0xff << 24)) >> 24)

/* Normal TRB fields */
/* transfer_len bitmasks - bits 0:16 */
#define	TRB_LEN(p)		((p) & 0x1ffff)
/* Interrupter Target - which MSI-X vector to target the completion event at */
#define TRB_INTR_TARGET(p)	(((p) & 0x3ff) << 22)
#define GET_INTR_TARGET(p)	(((p) >> 22) & 0x3ff)
#define TRB_TBC(p)		(((p) & 0x3) << 7)
#define TRB_TLBPC(p)		(((p) & 0xf) << 16)

/* Cycle bit - indicates TRB ownership by HC or HCD */
#define TRB_CYCLE		(1<<0)
/*
 * Force next event data TRB to be evaluated before task switch.
 * Used to pass OS data back after a TD completes.
 */
#define TRB_ENT			(1<<1)
/* Interrupt on short packet */
#define TRB_ISP			(1<<2)
/* Set PCIe no snoop attribute */
#define TRB_NO_SNOOP		(1<<3)
/* Chain multiple TRBs into a TD */
#define TRB_CHAIN		(1<<4)
/* Interrupt on completion */
#define TRB_IOC			(1<<5)
/* The buffer pointer contains immediate data */
#define TRB_IDT			(1<<6)

/* Block Event Interrupt */
#define	TRB_BEI			(1<<9)

/* Control transfer TRB specific fields */
#define TRB_DIR_IN		(1<<16)
#define	TRB_TX_TYPE(p)		((p) << 16)
#define	TRB_DATA_OUT		2
#define	TRB_DATA_IN		3

/* Isochronous TRB specific fields */
#define TRB_SIA			(1<<31)

/* Untyped view of a TRB's four 32-bit words. */
struct xhci_generic_trb {
	__le32 field[4];
};

/* Every TRB is 16 bytes; this union lets one slot be viewed as any type. */
union xhci_trb {
	struct xhci_link_trb		link;
	struct xhci_transfer_event	trans_event;
	struct xhci_event_cmd		event_cmd;
	struct xhci_generic_trb		generic;
};
/* TRB bit mask */
#define	TRB_TYPE_BITMASK	(0xfc00)
#define TRB_TYPE(p)		((p) << 10)
#define TRB_FIELD_TO_TYPE(p)	(((p) & TRB_TYPE_BITMASK) >> 10)

/* TRB type IDs */
/* bulk, interrupt, isoc scatter/gather, and control data stage */
#define TRB_NORMAL		1
/* setup stage for control transfers */
#define TRB_SETUP		2
/* data stage for control transfers */
#define TRB_DATA		3
/* status stage for control transfers */
#define TRB_STATUS		4
/* isoc transfers */
#define TRB_ISOC		5
/* TRB for linking ring segments */
#define TRB_LINK		6
#define TRB_EVENT_DATA		7
/* Transfer Ring No-op (not for the command ring) */
#define TRB_TR_NOOP		8

/* Command TRBs */
/* Enable Slot Command */
#define TRB_ENABLE_SLOT		9
/* Disable Slot Command */
#define TRB_DISABLE_SLOT	10
/* Address Device Command */
#define TRB_ADDR_DEV		11
/* Configure Endpoint Command */
#define TRB_CONFIG_EP		12
/* Evaluate Context Command */
#define TRB_EVAL_CONTEXT	13
/* Reset Endpoint Command */
#define TRB_RESET_EP		14
/* Stop Transfer Ring Command */
#define TRB_STOP_RING		15
/* Set Transfer Ring Dequeue Pointer Command */
#define TRB_SET_DEQ		16
/* Reset Device Command */
#define TRB_RESET_DEV		17
/* Force Event Command (opt) */
#define TRB_FORCE_EVENT		18
/* Negotiate Bandwidth Command (opt) */
#define TRB_NEG_BANDWIDTH	19
/* Set Latency Tolerance Value Command (opt) */
#define TRB_SET_LT		20
/* Get port bandwidth Command */
#define TRB_GET_BW		21
/* Force Header Command - generate a transaction or link management packet */
#define TRB_FORCE_HEADER	22
/* No-op Command - not for transfer rings */
#define TRB_CMD_NOOP		23
/* TRB IDs 24-31 reserved */

/* Event TRBS */
/* Transfer Event */
#define TRB_TRANSFER		32
/* Command Completion Event */
#define TRB_COMPLETION		33
/* Port Status Change Event */
#define TRB_PORT_STATUS		34
/* Bandwidth Request Event (opt) */
#define TRB_BANDWIDTH_EVENT	35
/* Doorbell Event (opt) */
#define TRB_DOORBELL		36
/* Host Controller Event */
#define TRB_HC_EVENT		37
/* Device Notification Event - device sent function wake notification */
#define TRB_DEV_NOTE		38
/* MFINDEX Wrap Event - microframe counter wrapped */
#define TRB_MFINDEX_WRAP	39
/* TRB IDs 40-47 reserved, 48-63 is vendor-defined */

/* Nec vendor-specific command completion event. */
#define	TRB_NEC_CMD_COMP	48
/* Get NEC firmware revision. */
#define	TRB_NEC_GET_FW		49

#define TRB_TYPE_LINK(x)	(((x) & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
/* Above, but for __le32 types -- can avoid work by swapping constants: */
#define TRB_TYPE_LINK_LE32(x)	(((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
				 cpu_to_le32(TRB_TYPE(TRB_LINK)))
#define TRB_TYPE_NOOP_LE32(x)	(((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \
				 cpu_to_le32(TRB_TYPE(TRB_TR_NOOP)))

#define NEC_FW_MINOR(p)		(((p) >> 0) & 0xff)
#define NEC_FW_MAJOR(p)		(((p) >> 8) & 0xff)

/*
 * TRBS_PER_SEGMENT must be a multiple of 4,
 * since the command ring is 64-byte aligned.
 * It must also be greater than 16.
 */
#define TRBS_PER_SEGMENT	64
/* Allow two commands + a link TRB, along with any reserved command TRBs */
#define MAX_RSVD_CMD_TRBS	(TRBS_PER_SEGMENT - 3)
/* Each TRB is 16 bytes, so one segment is TRBS_PER_SEGMENT * 16 bytes. */
#define SEGMENT_SIZE		(TRBS_PER_SEGMENT*16)
#define SEGMENT_SHIFT		(__ffs(SEGMENT_SIZE))
/* TRB buffer pointers can't cross 64KB boundaries */
#define TRB_MAX_BUFF_SHIFT		16
#define TRB_MAX_BUFF_SIZE	(1 << TRB_MAX_BUFF_SHIFT)
/* One physically contiguous segment of a TRB ring. */
struct xhci_segment {
	union xhci_trb		*trbs;
	/* private to HCD */
	struct xhci_segment	*next;
	dma_addr_t		dma;
};

/* Software bookkeeping for one transfer descriptor (one URB's TRBs). */
struct xhci_td {
	struct list_head	td_list;
	struct list_head	cancelled_td_list;
	struct urb		*urb;
	struct xhci_segment	*start_seg;
	union xhci_trb		*first_trb;
	union xhci_trb		*last_trb;
};

/* xHCI command default timeout value */
#define XHCI_CMD_DEFAULT_TIMEOUT	(5 * HZ)

/* command descriptor */
struct xhci_cd {
	struct list_head	cancel_cmd_list;
	struct xhci_command	*command;
	union xhci_trb		*cmd_trb;
};

/* Target state for a Set TR Dequeue Pointer command. */
struct xhci_dequeue_state {
	struct xhci_segment *new_deq_seg;
	union xhci_trb *new_deq_ptr;
	int new_cycle_state;
};

enum xhci_ring_type {
	TYPE_CTRL = 0,
	TYPE_ISOC,
	TYPE_BULK,
	TYPE_INTR,
	TYPE_STREAM,
	TYPE_COMMAND,
	TYPE_EVENT,
};

/* A producer/consumer TRB ring built from linked segments. */
struct xhci_ring {
	struct xhci_segment	*first_seg;
	struct xhci_segment	*last_seg;
	union  xhci_trb		*enqueue;
	struct xhci_segment	*enq_seg;
	unsigned int		enq_updates;
	union  xhci_trb		*dequeue;
	struct xhci_segment	*deq_seg;
	unsigned int		deq_updates;
	struct list_head	td_list;
	/*
	 * Write the cycle state into the TRB cycle field to give ownership of
	 * the TRB to the host controller (if we are the producer), or to check
	 * if we own the TRB (if we are the consumer).  See section 4.9.1.
	 */
	u32			cycle_state;
	unsigned int		stream_id;
	unsigned int		num_segs;
	unsigned int		num_trbs_free;
	unsigned int		num_trbs_free_temp;
	enum xhci_ring_type	type;
	bool			last_td_was_short;
};
/* One entry of the Event Ring Segment Table, as read by the xHC. */
struct xhci_erst_entry {
	/* 64-bit event ring segment address */
	__le64	seg_addr;
	__le32	seg_size;
	/* Set to zero */
	__le32	rsvd;
};

/* Software wrapper around the DMA-visible ERST. */
struct xhci_erst {
	struct xhci_erst_entry	*entries;
	unsigned int		num_entries;
	/* xhci->event_ring keeps track of segment dma addresses */
	dma_addr_t		erst_dma_addr;
	/* Num entries the ERST can contain */
	unsigned int		erst_size;
};

/* Scratchpad buffers the xHC requires for its private use (xHCI 4.20). */
struct xhci_scratchpad {
	u64 *sp_array;
	dma_addr_t sp_dma;
	void **sp_buffers;
	dma_addr_t *sp_dma_buffers;
};

/* Per-URB private data: the TDs backing one URB. */
struct urb_priv {
	int	length;
	int	td_cnt;
	struct	xhci_td	*td[0];
};

/*
 * Each segment table entry is 4*32bits long.  1K seems like an ok size:
 * (1024 bytes) / (4*32 bits = 16 bytes per entry) = 64 segment entries in
 * the table, meaning 64 ring segments.
 */
/* Initial number of event ring segments to allocate */
#define	ERST_NUM_SEGS	1
/* Initial allocated size of the ERST, in number of entries */
#define	ERST_SIZE	64
/* Initial number of event segment rings allocated */
#define	ERST_ENTRIES	1
/* Poll every 60 seconds */
#define	POLL_TIMEOUT	60
/* Stop endpoint command timeout (secs) for URB cancellation watchdog timer */
#define XHCI_STOP_EP_CMD_TIMEOUT	5
/* XXX: Make these module parameters */

/* Operational register snapshot saved across system suspend (S3). */
struct s3_save {
	u32	command;
	u32	dev_nt;
	u64	dcbaa_ptr;
	u32	config_reg;
	u32	irq_pending;
	u32	irq_control;
	u32	erst_size;
	u64	erst_base;
	u64	erst_dequeue;
};

/* Use for lpm */
struct dev_info {
	u32			dev_id;
	struct	list_head	list;
};

/* Per-roothub suspend/resume state (one instance per bus in xhci_hcd). */
struct xhci_bus_state {
	unsigned long		bus_suspended;
	unsigned long		next_statechange;

	/* Port suspend arrays are indexed by the portnum of the fake roothub */
	/* ports suspend status arrays - max 31 ports for USB2, 15 for USB3 */
	u32			port_c_suspend;
	u32			suspended_ports;
	u32			port_remote_wakeup;
	unsigned long		resume_done[USB_MAXCHILDREN];
	/* which ports have started to resume */
	unsigned long		resuming_ports;
};
/*
 * Map an HCD to its index into xhci->bus_state[]: the USB 3.0 roothub is
 * slot 0, the USB 2.0 roothub slot 1.
 */
static inline unsigned int hcd_index(struct usb_hcd *hcd)
{
	return (hcd->speed == HCD_USB3) ? 0 : 1;
}
/* There is one xhci_hcd structure per controller */
struct xhci_hcd {
	struct usb_hcd *main_hcd;
	struct usb_hcd *shared_hcd;
	/* glue to PCI and HCD framework */
	struct xhci_cap_regs __iomem *cap_regs;
	struct xhci_op_regs __iomem *op_regs;
	struct xhci_run_regs __iomem *run_regs;
	struct xhci_doorbell_array __iomem *dba;
	/* Our HCD's current interrupter register set */
	struct	xhci_intr_reg __iomem *ir_set;

	/* Cached register copies of read-only HC data */
	__u32		hcs_params1;
	__u32		hcs_params2;
	__u32		hcs_params3;
	__u32		hcc_params;

	spinlock_t	lock;

	/* packed release number */
	u8		sbrn;
	u16		hci_version;
	u8		max_slots;
	u8		max_interrupters;
	u8		max_ports;
	u8		isoc_threshold;
	int		event_ring_max;
	int		addr_64;
	/* 4KB min, 128MB max */
	int		page_size;
	/* Valid values are 12 to 20, inclusive */
	int		page_shift;
	/* msi-x vectors */
	int		msix_count;
	struct msix_entry	*msix_entries;
	/* data structures */
	struct xhci_device_context_array *dcbaa;
	struct xhci_ring	*cmd_ring;
	unsigned int		cmd_ring_state;
#define CMD_RING_STATE_RUNNING         (1 << 0)
#define CMD_RING_STATE_ABORTED         (1 << 1)
#define CMD_RING_STATE_STOPPED         (1 << 2)
	struct list_head        cancel_cmd_list;
	unsigned int		cmd_ring_reserved_trbs;
	struct xhci_ring	*event_ring;
	struct xhci_erst	erst;
	/* Scratchpad */
	struct xhci_scratchpad  *scratchpad;
	/* Store LPM test failed devices' information */
	struct list_head	lpm_failed_devs;

	/* slot enabling and address device helpers */
	struct completion	addr_dev;
	int slot_id;
	/* Internal mirror of the HW's dcbaa */
	struct xhci_virt_device	*devs[MAX_HC_SLOTS];
	/* For keeping track of bandwidth domains per roothub. */
	struct xhci_root_port_bw_info	*rh_bw;

	/* DMA pools */
	struct dma_pool	*device_pool;
	struct dma_pool	*segment_pool;
	struct dma_pool	*small_streams_pool;
	struct dma_pool	*medium_streams_pool;

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Poll the rings - for debugging */
	struct timer_list	event_ring_timer;
	int			zombie;
#endif
	/* Host controller watchdog timer structures */
	unsigned int		xhc_state;

	u32			command;
	struct s3_save		s3;
/* Host controller is dying - not responding to commands. "I'm not dead yet!"
 *
 * xHC interrupts have been disabled and a watchdog timer will (or has already)
 * halt the xHCI host, and complete all URBs with an -ESHUTDOWN code.  Any code
 * that sees this status (other than the timer that set it) should stop touching
 * hardware immediately.  Interrupt handlers should return immediately when
 * they see this status (any time they drop and re-acquire xhci->lock).
 * xhci_urb_dequeue() should call usb_hcd_check_unlink_urb() and return without
 * putting the TD on the canceled list, etc.
 *
 * There are no reports of xHCI host controllers that display this issue.
 */
#define XHCI_STATE_DYING	(1 << 0)
#define XHCI_STATE_HALTED	(1 << 1)
	/* Statistics */
	int			error_bitmask;
	unsigned int		quirks;
#define	XHCI_LINK_TRB_QUIRK	(1 << 0)
#define XHCI_RESET_EP_QUIRK	(1 << 1)
#define XHCI_NEC_HOST		(1 << 2)
#define XHCI_AMD_PLL_FIX	(1 << 3)
#define XHCI_SPURIOUS_SUCCESS	(1 << 4)
/*
 * Certain Intel host controllers have a limit to the number of endpoint
 * contexts they can handle.  Ideally, they would signal that they can't handle
 * anymore endpoint contexts by returning a Resource Error for the Configure
 * Endpoint command, but they don't.  Instead they expect software to keep track
 * of the number of active endpoints for them, across configure endpoint
 * commands, reset device commands, disable slot commands, and address device
 * commands.
 */
#define XHCI_EP_LIMIT_QUIRK	(1 << 5)
#define XHCI_BROKEN_MSI		(1 << 6)
#define XHCI_RESET_ON_RESUME	(1 << 7)
#define	XHCI_SW_BW_CHECKING	(1 << 8)
#define XHCI_AMD_0x96_HOST	(1 << 9)
#define XHCI_TRUST_TX_LENGTH	(1 << 10)
#define XHCI_SPURIOUS_REBOOT	(1 << 13)
#define XHCI_COMP_MODE_QUIRK	(1 << 14)
#define XHCI_AVOID_BEI		(1 << 15)
#define XHCI_PLAT		(1 << 16)
#define XHCI_SLOW_SUSPEND	(1 << 17)
#define XHCI_SPURIOUS_WAKEUP	(1 << 18)
/*
 * In Synopsis DWC3 controller, PORTSC register access involves multiple clock
 * domains. When the software does a PORTSC write, handshakes are needed
 * across these clock domains. This results in long access times, especially
 * for USB 2.0 ports. In order to solve this issue, when the PORTSC write
 * operations happen on the system bus, the command is latched and system bus
 * is released immediately. However, the real PORTSC write access will take
 * some time internally to complete. If the software quickly does a read to the
 * PORTSC, some fields (port status change related fields like OCC, etc.) may
 * not have correct value due to the current way of handling these bits.
 *
 * The workaround is to give some delay (5 mac2_clk -> UTMI clock = 60 MHz ->
 * (16.66 ns x 5 = 84ns) ~100ns after writing to the PORTSC register.
 *
 * NOTE(review): this quirk was previously defined as (1 << 10), colliding
 * with XHCI_TRUST_TX_LENGTH above, so setting either quirk silently enabled
 * both behaviors.  Moved to the unused bit 12.
 */
#define XHCI_PORTSC_DELAY	(1 << 12)
/*
 * In Synopsis DWC3 controller, XHCI RESET takes some time complete.  If PIPE
 * RESET is not complete by the time USBCMD.RUN bit is set then HC fails to
 * carry out SS transfers.
 *
 * The workaround is to give worst case pipe delay ~350us after resetting HC
 */
#define XHCI_RESET_DELAY	(1 << 11)
	unsigned int		num_active_eps;
	unsigned int		limit_active_eps;
	/* There are two roothubs to keep track of bus suspend info for */
	struct xhci_bus_state   bus_state[2];
	/* Is each xHCI roothub port a USB 3.0, USB 2.0, or USB 1.1 port? */
	u8			*port_array;
	/* Array of pointers to USB 3.0 PORTSC registers */
	__le32 __iomem		**usb3_ports;
	unsigned int		num_usb3_ports;
	/* Array of pointers to USB 2.0 PORTSC registers */
	__le32 __iomem		**usb2_ports;
	unsigned int		num_usb2_ports;
	/* support xHCI 0.96 spec USB2 software LPM */
	unsigned		sw_lpm_support:1;
	/* support xHCI 1.0 spec USB2 hardware LPM */
	unsigned		hw_lpm_support:1;
	/* Compliance Mode Recovery Data */
	struct timer_list	comp_mode_recovery_timer;
	u32			port_status_u0;
/* Compliance Mode Timer Triggered every 2 seconds */
#define COMP_MODE_RCVRY_MSECS	2000
};
/* convert between an HCD pointer and the corresponding EHCI_HCD */
static inline struct xhci_hcd *hcd_to_xhci(struct usb_hcd *hcd)
{
	/* hcd_priv stores a pointer to the shared xhci_hcd instance. */
	return *((struct xhci_hcd **) (hcd->hcd_priv));
}

static inline struct usb_hcd *xhci_to_hcd(struct xhci_hcd *xhci)
{
	/* Both roothubs share one xhci_hcd; main_hcd is the primary HCD. */
	return xhci->main_hcd;
}

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
#define XHCI_DEBUG	1
#else
#define XHCI_DEBUG	0
#endif

/* dbg/info messages compile to no-ops unless XHCI_DEBUG is set;
 * err/warn are always emitted. */
#define xhci_dbg(xhci, fmt, args...) \
	do { if (XHCI_DEBUG) dev_dbg(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
#define xhci_info(xhci, fmt, args...) \
	do { if (XHCI_DEBUG) dev_info(xhci_to_hcd(xhci)->self.controller , fmt , ## args); } while (0)
#define xhci_err(xhci, fmt, args...) \
	dev_err(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
#define xhci_warn(xhci, fmt, args...) \
	dev_warn(xhci_to_hcd(xhci)->self.controller , fmt , ## args)
/* TODO: copied from ehci.h - can be refactored? */
/* xHCI spec says all registers are little endian */
static inline unsigned int xhci_readl(const struct xhci_hcd *xhci,
		__le32 __iomem *regs)
{
	/* xhci is unused; kept for symmetry with the other accessors. */
	return readl(regs);
}
static inline void xhci_writel(struct xhci_hcd *xhci,
		const unsigned int val, __le32 __iomem *regs)
{
	writel(val, regs);
}
/*
 * Registers should always be accessed with double word or quad word accesses.
 *
 * Some xHCI implementations may support 64-bit address pointers.  Registers
 * with 64-bit address pointers should be written to with dword accesses by
 * writing the low dword first (ptr[0]), then the high dword (ptr[1]) second.
 * xHCI implementations that do not support 64-bit address pointers will ignore
 * the high dword, and write order is irrelevant.
 */
static inline u64 xhci_read_64(const struct xhci_hcd *xhci,
		__le64 __iomem *regs)
{
	__u32 __iomem *lo_reg = (__u32 __iomem *) regs;

	/* Read the low dword first, then the high dword. */
	u64 lo = readl(lo_reg);
	u64 hi = readl(lo_reg + 1);

	return lo | (hi << 32);
}
/*
 * Write a 64-bit register as two dword writes, low dword first as the
 * xHCI spec requires (see the access-order comment above xhci_read_64).
 */
static inline void xhci_write_64(struct xhci_hcd *xhci,
		const u64 val, __le64 __iomem *regs)
{
	__u32 __iomem *lo_reg = (__u32 __iomem *) regs;

	writel(lower_32_bits(val), lo_reg);
	writel(upper_32_bits(val), lo_reg + 1);
}
/* Nonzero iff this host needs the link-TRB chain-bit quirk. */
static inline int xhci_link_trb_quirk(struct xhci_hcd *xhci)
{
	return (xhci->quirks & XHCI_LINK_TRB_QUIRK) != 0;
}
/*
 * Function prototypes below are grouped by subsystem; presumably the
 * implementations live in the companion xhci*.c files of this driver --
 * verify against the build for this tree.
 */

/* xHCI debugging */
void xhci_print_ir_set(struct xhci_hcd *xhci, int set_num);
void xhci_print_registers(struct xhci_hcd *xhci);
void xhci_dbg_regs(struct xhci_hcd *xhci);
void xhci_print_run_regs(struct xhci_hcd *xhci);
void xhci_print_trb_offsets(struct xhci_hcd *xhci, union xhci_trb *trb);
void xhci_debug_trb(struct xhci_hcd *xhci, union xhci_trb *trb);
void xhci_debug_segment(struct xhci_hcd *xhci, struct xhci_segment *seg);
void xhci_debug_ring(struct xhci_hcd *xhci, struct xhci_ring *ring);
void xhci_dbg_erst(struct xhci_hcd *xhci, struct xhci_erst *erst);
void xhci_dbg_cmd_ptrs(struct xhci_hcd *xhci);
void xhci_dbg_ring_ptrs(struct xhci_hcd *xhci, struct xhci_ring *ring);
void xhci_dbg_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int last_ep);
char *xhci_get_slot_state(struct xhci_hcd *xhci,
		struct xhci_container_ctx *ctx);
void xhci_dbg_ep_rings(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_virt_ep *ep);

/* xHCI memory management */
void xhci_mem_cleanup(struct xhci_hcd *xhci);
int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags);
void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id);
int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, struct usb_device *udev, gfp_t flags);
int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev);
void xhci_copy_ep0_dequeue_into_input_ctx(struct xhci_hcd *xhci,
		struct usb_device *udev);
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc);
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc);
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index);
unsigned int xhci_last_valid_endpoint(u32 added_ctxs);
void xhci_endpoint_zero(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, struct usb_host_endpoint *ep);
void xhci_drop_ep_from_interval_table(struct xhci_hcd *xhci,
		struct xhci_bw_info *ep_bw,
		struct xhci_interval_bw_table *bw_table,
		struct usb_device *udev,
		struct xhci_virt_ep *virt_ep,
		struct xhci_tt_bw_info *tt_info);
void xhci_update_tt_active_eps(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		int old_active_eps);
void xhci_clear_endpoint_bw_info(struct xhci_bw_info *bw_info);
void xhci_update_bw_info(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_input_control_ctx *ctrl_ctx,
		struct xhci_virt_device *virt_dev);
void xhci_endpoint_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		unsigned int ep_index);
void xhci_slot_copy(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx);
int xhci_endpoint_init(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev,
		struct usb_device *udev, struct usb_host_endpoint *ep,
		gfp_t mem_flags);
void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring);
int xhci_ring_expansion(struct xhci_hcd *xhci, struct xhci_ring *ring,
		unsigned int num_trbs, gfp_t flags);
void xhci_free_or_cache_endpoint_ring(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		unsigned int ep_index);
struct xhci_stream_info *xhci_alloc_stream_info(struct xhci_hcd *xhci,
		unsigned int num_stream_ctxs,
		unsigned int num_streams, gfp_t flags);
void xhci_free_stream_info(struct xhci_hcd *xhci,
		struct xhci_stream_info *stream_info);
void xhci_setup_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_stream_info *stream_info);
void xhci_setup_no_streams_ep_input_ctx(struct xhci_hcd *xhci,
		struct xhci_ep_ctx *ep_ctx,
		struct xhci_virt_ep *ep);
void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
	struct xhci_virt_device *virt_dev, bool drop_control_ep);
struct xhci_ring *xhci_dma_to_transfer_ring(
		struct xhci_virt_ep *ep,
		u64 address);
struct xhci_ring *xhci_stream_id_to_ring(
		struct xhci_virt_device *dev,
		unsigned int ep_index,
		unsigned int stream_id);
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
		bool allocate_in_ctx, bool allocate_completion,
		gfp_t mem_flags);
void xhci_urb_free_priv(struct xhci_hcd *xhci, struct urb_priv *urb_priv);
void xhci_free_command(struct xhci_hcd *xhci,
		struct xhci_command *command);

#ifdef CONFIG_PCI
/* xHCI PCI glue */
int xhci_register_pci(void);
void xhci_unregister_pci(void);
#else
/* No-op stubs when PCI support is compiled out. */
static inline int xhci_register_pci(void) { return 0; }
static inline void xhci_unregister_pci(void) {}
#endif

/* Platform-bus identification data passed to the platform glue. */
struct xhci_plat_data {
	unsigned vendor;
	unsigned revision;
};

#if defined(CONFIG_USB_XHCI_PLATFORM) \
	|| defined(CONFIG_USB_XHCI_PLATFORM_MODULE)
int xhci_register_plat(void);
void xhci_unregister_plat(void);
#else
/* No-op stubs when platform support is compiled out. */
static inline int xhci_register_plat(void)
{ return 0; }
static inline void xhci_unregister_plat(void)
{ }
#endif

/* xHCI host controller glue */
typedef void (*xhci_get_quirks_t)(struct device *, struct xhci_hcd *);
int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec);
void xhci_quiesce(struct xhci_hcd *xhci);
int xhci_halt(struct xhci_hcd *xhci);
int xhci_reset(struct xhci_hcd *xhci);
int xhci_init(struct usb_hcd *hcd);
int xhci_run(struct usb_hcd *hcd);
void xhci_stop(struct usb_hcd *hcd);
void xhci_shutdown(struct usb_hcd *hcd);
int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);

#ifdef	CONFIG_PM
int xhci_suspend(struct xhci_hcd *xhci);
int xhci_resume(struct xhci_hcd *xhci, bool hibernated);
#else
#define	xhci_suspend	NULL
#define	xhci_resume	NULL
#endif

int xhci_get_frame(struct usb_hcd *hcd);
irqreturn_t xhci_irq(struct usb_hcd *hcd);
irqreturn_t xhci_msi_irq(int irq, struct usb_hcd *hcd);
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev);
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_alloc_tt_info(struct xhci_hcd *xhci,
		struct xhci_virt_device *virt_dev,
		struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags);
int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int num_streams, gfp_t mem_flags);
int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		gfp_t mem_flags);
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_update_device(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_set_usb2_hardware_lpm(struct usb_hcd *hcd,
		struct usb_device *udev, int enable);
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
		struct usb_tt *tt, gfp_t mem_flags);
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags);
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status);
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, struct usb_host_endpoint *ep);
void xhci_endpoint_reset(struct usb_hcd *hcd, struct usb_host_endpoint *ep);
int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);

/* xHCI ring, segment, TRB, and TD functions */
dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg, union xhci_trb *trb);
struct xhci_segment *trb_in_td(struct xhci_segment *start_seg,
		union xhci_trb *start_trb, union xhci_trb *end_trb,
		dma_addr_t suspect_dma);
int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code);
void xhci_ring_cmd_db(struct xhci_hcd *xhci);
int xhci_queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id);
int xhci_queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id);
int xhci_queue_vendor_command(struct xhci_hcd *xhci,
		u32 field1, u32 field2, u32 field3, u32 field4);
int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index, int suspend);
int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
		int slot_id, unsigned int ep_index);
int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
		int slot_id, unsigned int ep_index);
int xhci_queue_intr_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb,
		int slot_id, unsigned int ep_index);
int xhci_queue_isoc_tx_prepare(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index);
int xhci_queue_configure_endpoint(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id, bool command_must_succeed);
int xhci_queue_evaluate_context(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr,
		u32 slot_id);
int xhci_queue_reset_ep(struct xhci_hcd *xhci, int slot_id,
		unsigned int ep_index);
int xhci_queue_reset_device(struct xhci_hcd *xhci, u32 slot_id);
void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id, struct xhci_td *cur_td,
		struct xhci_dequeue_state *state);
void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		unsigned int stream_id,
		struct xhci_dequeue_state *deq_state);
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
		struct usb_device *udev, unsigned int ep_index);
void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state);
void xhci_stop_endpoint_command_watchdog(unsigned long arg);
int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command,
		union xhci_trb *cmd_trb);
void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, unsigned int stream_id);
union xhci_trb *xhci_find_next_enqueue(struct xhci_ring *ring);

/* xHCI roothub code */
void xhci_set_link_state(struct xhci_hcd *xhci, __le32 __iomem **port_array,
		int port_id, u32 link_state);
void xhci_test_and_clear_bit(struct xhci_hcd *xhci, __le32 __iomem **port_array,
		int port_id, u32 port_bit);
int xhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue, u16 wIndex,
		char *buf, u16 wLength);
int xhci_hub_status_data(struct usb_hcd *hcd, char *buf);

#ifdef CONFIG_PM
int xhci_bus_suspend(struct usb_hcd *hcd);
int xhci_bus_resume(struct usb_hcd *hcd);
#else
#define	xhci_bus_suspend	NULL
#define	xhci_bus_resume		NULL
#endif	/* CONFIG_PM */

u32 xhci_port_state_to_neutral(u32 state);
int xhci_find_slot_id_by_port(struct usb_hcd *hcd, struct xhci_hcd *xhci,
		u16 port);
void xhci_ring_device(struct xhci_hcd *xhci, int slot_id);

/* xHCI contexts */
struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx);
struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, struct xhci_container_ctx *ctx, unsigned int ep_index);

#endif /* __LINUX_XHCI_HCD_H */
| cnexus/NexTKernel-d2spr | drivers/usb/host/xhci.h | C | gpl-2.0 | 65,720 |
<?php
/***********************************************************************************
************************************************************************************
*** ***
*** XTC Template Framework helper 1.3.1 ***
*** ***
*** Copyright (c) 2010, 2011, 2012, 2013, 2014 ***
*** Monev Software LLC, All Rights Reserved ***
*** ***
*** This program is free software; you can redistribute it and/or modify ***
*** it under the terms of the GNU General Public License as published by ***
*** the Free Software Foundation; either version 2 of the License, or ***
*** (at your option) any later version. ***
*** ***
*** This program is distributed in the hope that it will be useful, ***
*** but WITHOUT ANY WARRANTY; without even the implied warranty of ***
*** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ***
*** GNU General Public License for more details. ***
*** ***
*** You should have received a copy of the GNU General Public License ***
*** along with this program; if not, write to the Free Software ***
*** Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA ***
*** ***
*** See COPYRIGHT.txt for more information. ***
*** See LICENSE.txt for more information. ***
*** ***
*** www.joomlaxtc.com ***
*** ***
************************************************************************************
***********************************************************************************/
defined('_JEXEC') or die;
jimport( 'joomla.application.component.view');
/**
 * View class for the "About" screen of the com_jxtc admin component.
 *
 * No view-specific data is prepared here; rendering relies entirely on the
 * default JViewLegacy template lookup.
 */
class xtcViewAbout extends JViewLegacy {
	/**
	 * Render the view by delegating to the standard Joomla display logic.
	 *
	 * @param string|null $tpl Name of the template sub-layout, or null for the default.
	 */
	function display( $tpl = null ) {
		parent::display($tpl);
	}
}
//Copyright Paul Reiche, Fred Ford. 1992-2002
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
/****************************************************************************
* FILE: random.h
* DESC: definitions and externs for random number generators
*
* HISTORY: Created 6/ 6/1989
* LAST CHANGED:
*
* Copyright (c) 1989, Robert Leyland and Scott Anderson
****************************************************************************/
/* ----------------------------DEFINES------------------------------------ */

#ifndef SLOW_N_STUPID
/* default build: _FAST_ calls the generator function each time */
#define TABLE_SIZE 1117 /* a "nice" prime number */
#define _FAST_ fast_random()
#else /* SLOW_N_STUPID defined: _FAST_ cycles through a precomputed table */
/* TABLE_SIZE must be (2^k - 1) here so it can double as the wrap-around mask */
#define TABLE_SIZE ( (1 << 10) - 1 )
#define _FAST_ ( random_table[ fast_index++ & TABLE_SIZE ] )
#endif

/*
 * FASTRAND family draws from the cheap _FAST_ source above; the RAND family
 * calls Random() every time.
 *   XRAND(n)     : unsigned modulo, result in [0, n)
 *   SXRAND(n)    : signed modulo, may be negative
 *   AND_XRAND(n) : bitmask with n; only valid when n == 2^k - 1
 */
#define FASTRAND(n) ( (int) ( (unsigned int)_FAST_ % (n) ) )
#define SFASTRAND(n) ( (int)_FAST_ % (n) )
#define AND_FASTRAND(n) ( (int)_FAST_ & (n) )
#define RAND(n) ( (int) ( (unsigned int)Random() % (n) ) )
#define SRAND(n) ( (int)Random() % (n) )
#define AND_RAND(n) ( (int)Random() & (n) )
#define INDEXED_RANDOM(x) (random_table[x])

/* ----------------------------GLOBALS/EXTERNS---------------------------- */
extern DWORD random_table[TABLE_SIZE];
extern COUNT fast_index; /* fast random cycling index */
| videogamepreservation/starcontrol2 | src/sc2code/libs/math/random.h | C | gpl-2.0 | 1,951 |
/*
* Copyright (C) 2005-2015 Junjiro R. Okajima
*
* This program, aufs is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
/*
* handling file/dir, and address_space operation
*/
#ifdef CONFIG_AUFS_DEBUG
#include <linux/migrate.h>
#endif
#include <linux/pagemap.h>
#include "aufs.h"
/* drop flags for writing */
unsigned int au_file_roflags(unsigned int flags)
{
	/* every open flag that implies or enables writing */
	const unsigned int wr_mask = O_WRONLY | O_RDWR | O_APPEND
		| O_CREAT | O_TRUNC;

	/* strip write intent, force a read-only open that preserves atime */
	return (flags & ~wr_mask) | O_RDONLY | O_NOATIME;
}
/* common functions to regular file and dir */
/*
 * Open the lower (branch) file backing @dentry on branch @bindex with open
 * flags @flags.  Returns the opened struct file or an ERR_PTR.
 *
 * On a read-only branch the write-related flags are dropped via
 * au_file_roflags(); when @force_wr is set and O_WRONLY was requested, the
 * write mode is kept instead and -EROFS is returned if the lower inode is
 * natively read-only or append-only.  A reference on the branch
 * (br_count) is held while the lower file remains open.
 */
struct file *au_h_open(struct dentry *dentry, aufs_bindex_t bindex, int flags,
		       struct file *file, int force_wr)
{
	struct file *h_file;
	struct dentry *h_dentry;
	struct inode *h_inode;
	struct super_block *sb;
	struct au_branch *br;
	struct path h_path;
	int err;

	/* a race condition can happen between open and unlink/rmdir */
	h_file = ERR_PTR(-ENOENT);
	h_dentry = au_h_dptr(dentry, bindex);
	if (au_test_nfsd() && (!h_dentry || d_is_negative(h_dentry)))
		goto out;
	h_inode = d_inode(h_dentry);
	spin_lock(&h_dentry->d_lock);
	err = (!d_unhashed(dentry) && d_unlinked(h_dentry))
		/* || !d_inode(dentry)->i_nlink */
		;
	spin_unlock(&h_dentry->d_lock);
	if (unlikely(err))
		goto out;

	sb = dentry->d_sb;
	br = au_sbr(sb, bindex);
	err = au_br_test_oflag(flags, br);
	h_file = ERR_PTR(err);
	if (unlikely(err))
		goto out;

	/* drop flags for writing */
	if (au_test_ro(sb, bindex, d_inode(dentry))) {
		if (force_wr && !(flags & O_WRONLY))
			force_wr = 0;
		flags = au_file_roflags(flags);
		if (force_wr) {
			h_file = ERR_PTR(-EROFS);
			/* NOTE(review): au_file_roflags() is applied twice on
			 * this path; redundant but harmless (idempotent) */
			flags = au_file_roflags(flags);
			if (unlikely(vfsub_native_ro(h_inode)
				     || IS_APPEND(h_inode)))
				goto out;
			/* re-enable write-only access despite the ro branch */
			flags &= ~O_ACCMODE;
			flags |= O_WRONLY;
		}
	}
	flags &= ~O_CREAT;
	atomic_inc(&br->br_count);
	h_path.dentry = h_dentry;
	h_path.mnt = au_br_mnt(br);
	h_file = vfsub_dentry_open(&h_path, flags);
	if (IS_ERR(h_file))
		goto out_br;

	if (flags & __FMODE_EXEC) {
		/* deny writers while the file is opened for execution */
		err = deny_write_access(h_file);
		if (unlikely(err)) {
			fput(h_file);
			h_file = ERR_PTR(err);
			goto out_br;
		}
	}
	fsnotify_open(h_file);
	goto out; /* success */

out_br:
	atomic_dec(&br->br_count);
out:
	return h_file;
}
/*
 * Copy-up / Move-up On Open (CMOO).
 * If the branch currently holding @dentry carries a coo/moo attribute,
 * copy the file up to an upper writable branch before the open proceeds;
 * for "moo" the lower copy is additionally unlinked afterwards.
 * Skipped for the root dentry, for files already on the top branch, and
 * for the FHSM daemon (and its child) to avoid recursing on its own I/O.
 * Returns 0 on success or when there is nothing to do.
 */
static int au_cmoo(struct dentry *dentry)
{
	int err, cmoo;
	unsigned int udba;
	struct path h_path;
	struct au_pin pin;
	struct au_cp_generic cpg = {
		.dentry = dentry,
		.bdst = -1,
		.bsrc = -1,
		.len = -1,
		.pin = &pin,
		.flags = AuCpup_DTIME | AuCpup_HOPEN
	};
	struct inode *delegated;
	struct super_block *sb;
	struct au_sbinfo *sbinfo;
	struct au_fhsm *fhsm;
	pid_t pid;
	struct au_branch *br;
	struct dentry *parent;
	struct au_hinode *hdir;

	DiMustWriteLock(dentry);
	IiMustWriteLock(d_inode(dentry));

	err = 0;
	if (IS_ROOT(dentry))
		goto out;
	cpg.bsrc = au_dbstart(dentry);
	if (!cpg.bsrc)
		goto out;

	sb = dentry->d_sb;
	sbinfo = au_sbi(sb);
	fhsm = &sbinfo->si_fhsm;
	pid = au_fhsm_pid(fhsm);
	/* do not CMOO on behalf of the FHSM daemon itself */
	if (pid
	    && (current->pid == pid
		|| current->real_parent->pid == pid))
		goto out;

	br = au_sbr(sb, cpg.bsrc);
	cmoo = au_br_cmoo(br->br_perm);
	if (!cmoo)
		goto out;
	if (!d_is_reg(dentry))
		cmoo &= AuBrAttr_COO_ALL;
	if (!cmoo)
		goto out;

	parent = dget_parent(dentry);
	di_write_lock_parent(parent);
	err = au_wbr_do_copyup_bu(dentry, cpg.bsrc - 1);
	cpg.bdst = err;
	if (unlikely(err < 0)) {
		err = 0;	/* there is no upper writable branch */
		goto out_dgrade;
	}
	AuDbg("bsrc %d, bdst %d\n", cpg.bsrc, cpg.bdst);

	/* do not respect the coo attrib for the target branch */
	err = au_cpup_dirs(dentry, cpg.bdst);
	if (unlikely(err))
		goto out_dgrade;

	di_downgrade_lock(parent, AuLock_IR);
	udba = au_opt_udba(sb);
	err = au_pin(&pin, dentry, cpg.bdst, udba,
		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
	if (unlikely(err))
		goto out_parent;

	err = au_sio_cpup_simple(&cpg);
	au_unpin(&pin);
	if (unlikely(err))
		goto out_parent;
	if (!(cmoo & AuBrWAttr_MOO))
		goto out_parent; /* success */

	/* "moo": remove the lower copy after the copy-up */
	err = au_pin(&pin, dentry, cpg.bsrc, udba,
		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
	if (unlikely(err))
		goto out_parent;

	h_path.mnt = au_br_mnt(br);
	h_path.dentry = au_h_dptr(dentry, cpg.bsrc);
	hdir = au_hi(d_inode(parent), cpg.bsrc);
	delegated = NULL;
	err = vfsub_unlink(hdir->hi_inode, &h_path, &delegated, /*force*/1);
	au_unpin(&pin);
	/* todo: keep h_dentry or not? */
	if (unlikely(err == -EWOULDBLOCK)) {
		pr_warn("cannot retry for NFSv4 delegation"
			" for an internal unlink\n");
		iput(delegated);
	}
	if (unlikely(err)) {
		/* the copy-up succeeded; a failed moo-unlink is non-fatal */
		pr_err("unlink %pd after coo failed (%d), ignored\n",
		       dentry, err);
		err = 0;
	}
	goto out_parent; /* success */

out_dgrade:
	di_downgrade_lock(parent, AuLock_IR);
out_parent:
	di_read_unlock(parent, AuLock_IR);
	dput(parent);
out:
	AuTraceErr(err);
	return err;
}
/*
 * Common body of ->open() for regular files and directories: initialize
 * the per-file aufs info, run CMOO when applicable, then call the
 * type-specific @args->open callback.  @args->no_lock selects the
 * lockdep-off variant for callers that already hold conflicting locks.
 * On failure the finfo is torn down again.
 */
int au_do_open(struct file *file, struct au_do_open_args *args)
{
	int err, no_lock = args->no_lock;
	struct dentry *dentry;
	struct au_finfo *finfo;

	if (!no_lock)
		err = au_finfo_init(file, args->fidir);
	else {
		lockdep_off();
		err = au_finfo_init(file, args->fidir);
		lockdep_on();
	}
	if (unlikely(err))
		goto out;

	dentry = file->f_path.dentry;
	AuDebugOn(IS_ERR_OR_NULL(dentry));
	if (!no_lock) {
		di_write_lock_child(dentry);
		err = au_cmoo(dentry);
		di_downgrade_lock(dentry, AuLock_IR);
		if (!err)
			err = args->open(file, vfsub_file_flags(file), NULL);
		di_read_unlock(dentry, AuLock_IR);
	} else {
		err = au_cmoo(dentry);
		if (!err)
			err = args->open(file, vfsub_file_flags(file),
					 args->h_file);
		if (!err && au_fbstart(file) != au_dbstart(dentry))
			/*
			 * cmoo happens after h_file was opened.
			 * need to refresh file later.
			 */
			atomic_dec(&au_fi(file)->fi_generation);
	}

	finfo = au_fi(file);
	if (!err) {
		finfo->fi_file = file;
		/* register this file on the superblock's open-file list */
		au_sphl_add(&finfo->fi_hlist,
			    &au_sbi(file->f_path.dentry->d_sb)->si_files);
	}
	if (!no_lock)
		fi_write_unlock(file);
	else {
		lockdep_off();
		fi_write_unlock(file);
		lockdep_on();
	}
	if (unlikely(err)) {
		finfo->fi_hdir = NULL;
		au_finfo_fin(file);
	}

out:
	return err;
}
/*
 * Re-open the lower file of @file on the current top branch of its dentry,
 * typically after a copy-up moved the backing file to another branch.
 * A temporary reference on the previous lower file is kept so it can be
 * restored if the new open fails.  Returns 0 on success.
 */
int au_reopen_nondir(struct file *file)
{
	int err;
	aufs_bindex_t bstart;
	struct dentry *dentry;
	struct file *h_file, *h_file_tmp;

	dentry = file->f_path.dentry;
	bstart = au_dbstart(dentry);
	h_file_tmp = NULL;
	if (au_fbstart(file) == bstart) {
		h_file = au_hf_top(file);
		if (file->f_mode == h_file->f_mode)
			return 0; /* success */
		h_file_tmp = h_file;
		get_file(h_file_tmp);
		au_set_h_fptr(file, bstart, NULL);
	}
	AuDebugOn(au_fi(file)->fi_hdir);
	/*
	 * it can happen
	 * file exists on both of rw and ro
	 * open --> dbstart and fbstart are both 0
	 * prepend a branch as rw, "rw" become ro
	 * remove rw/file
	 * delete the top branch, "rw" becomes rw again
	 *	--> dbstart is 1, fbstart is still 0
	 * write --> fbstart is 0 but dbstart is 1
	 */
	/* AuDebugOn(au_fbstart(file) < bstart); */

	h_file = au_h_open(dentry, bstart, vfsub_file_flags(file) & ~O_TRUNC,
			   file, /*force_wr*/0);
	err = PTR_ERR(h_file);
	if (IS_ERR(h_file)) {
		/* roll back: put the old lower file (and its branch ref) back */
		if (h_file_tmp) {
			atomic_inc(&au_sbr(dentry->d_sb, bstart)->br_count);
			au_set_h_fptr(file, bstart, h_file_tmp);
			h_file_tmp = NULL;
		}
		goto out; /* todo: close all? */
	}

	err = 0;
	au_set_fbstart(file, bstart);
	au_set_h_fptr(file, bstart, h_file);
	au_update_figen(file);
	/* todo: necessary? */
	/* file->f_ra = h_file->f_ra; */

out:
	if (h_file_tmp)
		fput(h_file_tmp);
	return err;
}
/* ---------------------------------------------------------------------- */
/*
 * Re-open @file on branch @btgt through the copied-up whiteout'ed dentry
 * @hi_wh: temporarily graft @hi_wh into the dentry info, reopen, then
 * restore the original entries.  Requires dinfo write-locked.
 */
static int au_reopen_wh(struct file *file, aufs_bindex_t btgt,
			struct dentry *hi_wh)
{
	int err;
	aufs_bindex_t bstart;
	struct au_dinfo *dinfo;
	struct dentry *h_dentry;
	struct au_hdentry *hdp;

	dinfo = au_di(file->f_path.dentry);
	AuRwMustWriteLock(&dinfo->di_rwsem);

	bstart = dinfo->di_bstart;
	dinfo->di_bstart = btgt;
	hdp = dinfo->di_hdentry;
	h_dentry = hdp[0 + btgt].hd_dentry;
	hdp[0 + btgt].hd_dentry = hi_wh;
	err = au_reopen_nondir(file);
	hdp[0 + btgt].hd_dentry = h_dentry;
	dinfo->di_bstart = bstart;

	return err;
}

/*
 * Prepare an already-unlinked (whiteout'ed) file for writing: copy it up
 * to branch @bcpup as a whiteout-backed file, or reopen the copy made by
 * an earlier copy-up, and record a pseudo-link when the inode is
 * hard-linked (or marked I_LINKABLE) and plink is enabled.
 */
static int au_ready_to_write_wh(struct file *file, loff_t len,
				aufs_bindex_t bcpup, struct au_pin *pin)
{
	int err;
	struct inode *inode, *h_inode;
	struct dentry *h_dentry, *hi_wh;
	struct au_cp_generic cpg = {
		.dentry = file->f_path.dentry,
		.bdst = bcpup,
		.bsrc = -1,
		.len = len,
		.pin = pin
	};

	au_update_dbstart(cpg.dentry);
	inode = d_inode(cpg.dentry);
	h_inode = NULL;
	if (au_dbstart(cpg.dentry) <= bcpup
	    && au_dbend(cpg.dentry) >= bcpup) {
		h_dentry = au_h_dptr(cpg.dentry, bcpup);
		if (h_dentry && d_is_positive(h_dentry))
			h_inode = d_inode(h_dentry);
	}
	hi_wh = au_hi_wh(inode, bcpup);
	if (!hi_wh && !h_inode)
		err = au_sio_cpup_wh(&cpg, file);
	else
		/* already copied-up after unlink */
		err = au_reopen_wh(file, bcpup, hi_wh);

	if (!err
	    && (inode->i_nlink > 1
		|| (inode->i_state & I_LINKABLE))
	    && au_opt_test(au_mntflags(cpg.dentry->d_sb), PLINK))
		au_plink_append(inode, bcpup, au_h_dptr(cpg.dentry, bcpup));

	return err;
}
/*
 * prepare the @file for writing.
 * Pin the branch and, when the file currently lives on a read-only branch
 * (or was opened without FMODE_WRITE on its lower file), copy it up
 * (at least @len bytes) to a writable branch and reopen it.  On success
 * the pin is left held for the caller to release after the write.
 */
int au_ready_to_write(struct file *file, loff_t len, struct au_pin *pin)
{
	int err;
	aufs_bindex_t dbstart;
	struct dentry *parent;
	struct inode *inode;
	struct super_block *sb;
	struct file *h_file;
	struct au_cp_generic cpg = {
		.dentry = file->f_path.dentry,
		.bdst = -1,
		.bsrc = -1,
		.len = len,
		.pin = pin,
		.flags = AuCpup_DTIME
	};

	sb = cpg.dentry->d_sb;
	inode = d_inode(cpg.dentry);
	cpg.bsrc = au_fbstart(file);
	err = au_test_ro(sb, cpg.bsrc, inode);
	if (!err && (au_hf_top(file)->f_mode & FMODE_WRITE)) {
		/* already writable on its current branch: just pin it */
		err = au_pin(pin, cpg.dentry, cpg.bsrc, AuOpt_UDBA_NONE,
			     /*flags*/0);
		goto out;
	}

	/* need to cpup or reopen */
	parent = dget_parent(cpg.dentry);
	di_write_lock_parent(parent);
	err = AuWbrCopyup(au_sbi(sb), cpg.dentry);
	cpg.bdst = err;
	if (unlikely(err < 0))
		goto out_dgrade;
	err = 0;

	if (!d_unhashed(cpg.dentry) && !au_h_dptr(parent, cpg.bdst)) {
		err = au_cpup_dirs(cpg.dentry, cpg.bdst);
		if (unlikely(err))
			goto out_dgrade;
	}

	err = au_pin(pin, cpg.dentry, cpg.bdst, AuOpt_UDBA_NONE,
		     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
	if (unlikely(err))
		goto out_dgrade;

	dbstart = au_dbstart(cpg.dentry);
	if (dbstart <= cpg.bdst)
		cpg.bsrc = cpg.bdst;

	if (dbstart <= cpg.bdst		/* just reopen */
	    || !d_unhashed(cpg.dentry)	/* copyup and reopen */
		) {
		h_file = au_h_open_pre(cpg.dentry, cpg.bsrc, /*force_wr*/0);
		if (IS_ERR(h_file))
			err = PTR_ERR(h_file);
		else {
			di_downgrade_lock(parent, AuLock_IR);
			if (dbstart > cpg.bdst)
				err = au_sio_cpup_simple(&cpg);
			if (!err)
				err = au_reopen_nondir(file);
			au_h_open_post(cpg.dentry, cpg.bsrc, h_file);
		}
	} else {			/* copyup as wh and reopen */
		/*
		 * since writable hfsplus branch is not supported,
		 * h_open_pre/post() are unnecessary.
		 */
		err = au_ready_to_write_wh(file, len, cpg.bdst, pin);
		di_downgrade_lock(parent, AuLock_IR);
	}

	if (!err) {
		au_pin_set_parent_lflag(pin, /*lflag*/0);
		goto out_dput; /* success */
	}
	au_unpin(pin);
	goto out_unlock;

out_dgrade:
	di_downgrade_lock(parent, AuLock_IR);
out_unlock:
	di_read_unlock(parent, AuLock_IR);
out_dput:
	dput(parent);
out:
	return err;
}
/* ---------------------------------------------------------------------- */
/*
 * Common ->flush() body for regular files and directories: take the aufs
 * read locks, call the type-specific @flush callback, then refresh the
 * inode's timestamps and sizes from the lower inode.
 */
int au_do_flush(struct file *file, fl_owner_t id,
		int (*flush)(struct file *file, fl_owner_t id))
{
	int err;
	struct super_block *sb;
	struct inode *inode;

	inode = file_inode(file);
	sb = inode->i_sb;
	si_noflush_read_lock(sb);
	fi_read_lock(file);
	ii_read_lock_child(inode);

	err = flush(file, id);
	au_cpup_attr_timesizes(inode);

	ii_read_unlock(inode);
	fi_read_unlock(file);
	si_read_unlock(sb);
	return err;
}
/* ---------------------------------------------------------------------- */
/*
 * Bring @file in line with its inode after another path copied the file
 * up: copy it up ourselves (pseudo-linked case) or reopen it via the
 * whiteout'ed dentry.  *@need_reopen tells the caller whether a plain
 * reopen is still required afterwards.  Requires finfo write-locked.
 */
static int au_file_refresh_by_inode(struct file *file, int *need_reopen)
{
	int err;
	struct au_pin pin;
	struct au_finfo *finfo;
	struct dentry *parent, *hi_wh;
	struct inode *inode;
	struct super_block *sb;
	struct au_cp_generic cpg = {
		.dentry = file->f_path.dentry,
		.bdst = -1,
		.bsrc = -1,
		.len = -1,
		.pin = &pin,
		.flags = AuCpup_DTIME
	};

	FiMustWriteLock(file);

	err = 0;
	finfo = au_fi(file);
	sb = cpg.dentry->d_sb;
	inode = d_inode(cpg.dentry);
	cpg.bdst = au_ibstart(inode);
	/* nothing to do when the file already sits on the inode's top branch */
	if (cpg.bdst == finfo->fi_btop || IS_ROOT(cpg.dentry))
		goto out;

	parent = dget_parent(cpg.dentry);
	if (au_test_ro(sb, cpg.bdst, inode)) {
		di_read_lock_parent(parent, !AuLock_IR);
		err = AuWbrCopyup(au_sbi(sb), cpg.dentry);
		cpg.bdst = err;
		di_read_unlock(parent, !AuLock_IR);
		if (unlikely(err < 0))
			goto out_parent;
		err = 0;
	}

	di_read_lock_parent(parent, AuLock_IR);
	hi_wh = au_hi_wh(inode, cpg.bdst);
	if (!S_ISDIR(inode->i_mode)
	    && au_opt_test(au_mntflags(sb), PLINK)
	    && au_plink_test(inode)
	    && !d_unhashed(cpg.dentry)
	    && cpg.bdst < au_dbstart(cpg.dentry)) {
		err = au_test_and_cpup_dirs(cpg.dentry, cpg.bdst);
		if (unlikely(err))
			goto out_unlock;

		/* always superio. */
		err = au_pin(&pin, cpg.dentry, cpg.bdst, AuOpt_UDBA_NONE,
			     AuPin_DI_LOCKED | AuPin_MNT_WRITE);
		if (!err) {
			err = au_sio_cpup_simple(&cpg);
			au_unpin(&pin);
		}
	} else if (hi_wh) {
		/* already copied-up after unlink */
		err = au_reopen_wh(file, cpg.bdst, hi_wh);
		*need_reopen = 0;
	}

out_unlock:
	di_read_unlock(parent, AuLock_IR);
out_parent:
	dput(parent);
out:
	return err;
}
/*
 * After branches were added/removed/reordered, remap the opened lower
 * files of directory @file to the new branch indices, dropping the ones
 * whose branch disappeared, and recompute fi_btop/fd_bbot.
 * Requires finfo write-locked.
 */
static void au_do_refresh_dir(struct file *file)
{
	aufs_bindex_t bindex, bend, new_bindex, brid;
	struct au_hfile *p, tmp, *q;
	struct au_finfo *finfo;
	struct super_block *sb;
	struct au_fidir *fidir;

	FiMustWriteLock(file);

	sb = file->f_path.dentry->d_sb;
	finfo = au_fi(file);
	fidir = finfo->fi_hdir;
	AuDebugOn(!fidir);
	p = fidir->fd_hfile + finfo->fi_btop;
	brid = p->hf_br->br_id;
	bend = fidir->fd_bbot;
	for (bindex = finfo->fi_btop; bindex <= bend; bindex++, p++) {
		if (!p->hf_file)
			continue;

		/* look the branch's current index up by its stable id */
		new_bindex = au_br_index(sb, p->hf_br->br_id);
		if (new_bindex == bindex)
			continue;
		if (new_bindex < 0) {
			/* the branch was removed; drop the lower file */
			au_set_h_fptr(file, bindex, NULL);
			continue;
		}

		/* swap two lower inode, and loop again */
		q = fidir->fd_hfile + new_bindex;
		tmp = *q;
		*q = *p;
		*p = tmp;
		if (tmp.hf_file) {
			/* revisit this slot, it now holds a displaced entry */
			bindex--;
			p--;
		}
	}

	p = fidir->fd_hfile;
	if (!au_test_mmapped(file) && !d_unlinked(file->f_path.dentry)) {
		bend = au_sbend(sb);
		for (finfo->fi_btop = 0; finfo->fi_btop <= bend;
		     finfo->fi_btop++, p++)
			if (p->hf_file) {
				if (file_inode(p->hf_file))
					break;
				au_hfput(p, file);
			}
	} else {
		bend = au_br_index(sb, brid);
		for (finfo->fi_btop = 0; finfo->fi_btop < bend;
		     finfo->fi_btop++, p++)
			if (p->hf_file)
				au_hfput(p, file);
		bend = au_sbend(sb);
	}

	p = fidir->fd_hfile + bend;
	for (fidir->fd_bbot = bend; fidir->fd_bbot >= finfo->fi_btop;
	     fidir->fd_bbot--, p--)
		if (p->hf_file) {
			if (file_inode(p->hf_file))
				break;
			au_hfput(p, file);
		}
	AuDebugOn(fidir->fd_bbot < finfo->fi_btop);
}
/*
 * after branch manipulating, refresh the file.
 * Remap the lower file(s) to the new branch layout (single file or
 * directory), sync with the inode, and reopen via @reopen if still
 * needed.  On error all lower files of a directory are closed.
 */
static int refresh_file(struct file *file, int (*reopen)(struct file *file))
{
	int err, need_reopen;
	aufs_bindex_t bend, bindex;
	struct dentry *dentry;
	struct au_finfo *finfo;
	struct au_hfile *hfile;

	dentry = file->f_path.dentry;
	finfo = au_fi(file);
	if (!finfo->fi_hdir) {
		/* non-dir: a single lower file; just fix its branch index */
		hfile = &finfo->fi_htop;
		AuDebugOn(!hfile->hf_file);
		bindex = au_br_index(dentry->d_sb, hfile->hf_br->br_id);
		AuDebugOn(bindex < 0);
		if (bindex != finfo->fi_btop)
			au_set_fbstart(file, bindex);
	} else {
		err = au_fidir_realloc(finfo, au_sbend(dentry->d_sb) + 1);
		if (unlikely(err))
			goto out;
		au_do_refresh_dir(file);
	}

	err = 0;
	need_reopen = 1;
	if (!au_test_mmapped(file))
		err = au_file_refresh_by_inode(file, &need_reopen);
	if (!err && need_reopen && !d_unlinked(dentry))
		err = reopen(file);
	if (!err) {
		au_update_figen(file);
		goto out; /* success */
	}

	/* error, close all lower files */
	if (finfo->fi_hdir) {
		bend = au_fbend_dir(file);
		for (bindex = au_fbstart(file); bindex <= bend; bindex++)
			au_set_h_fptr(file, bindex, NULL);
	}

out:
	return err;
}
/*
 * common function to regular file and dir.
 * Revalidate @file against the current superblock generation; when
 * branches changed, refresh/reopen via refresh_file()/@reopen.
 * On success the finfo and dinfo are left locked: write-locked when
 * @wlock is set, downgraded to read locks otherwise.  On failure both
 * locks are released.
 */
int au_reval_and_lock_fdi(struct file *file, int (*reopen)(struct file *file),
			  int wlock)
{
	int err;
	unsigned int sigen, figen;
	aufs_bindex_t bstart;
	unsigned char pseudo_link;
	struct dentry *dentry;
	struct inode *inode;

	err = 0;
	dentry = file->f_path.dentry;
	inode = d_inode(dentry);
	sigen = au_sigen(dentry->d_sb);
	fi_write_lock(file);
	figen = au_figen(file);
	di_write_lock_child(dentry);
	bstart = au_dbstart(dentry);
	pseudo_link = (bstart != au_ibstart(inode));
	if (sigen == figen && !pseudo_link && au_fbstart(file) == bstart) {
		/* nothing changed since the file was opened/refreshed */
		if (!wlock) {
			di_downgrade_lock(dentry, AuLock_IR);
			fi_downgrade_lock(file);
		}
		goto out; /* success */
	}

	AuDbg("sigen %d, figen %d\n", sigen, figen);
	if (au_digen_test(dentry, sigen)) {
		err = au_reval_dpath(dentry, sigen);
		AuDebugOn(!err && au_digen_test(dentry, sigen));
	}

	if (!err)
		err = refresh_file(file, reopen);
	if (!err) {
		if (!wlock) {
			di_downgrade_lock(dentry, AuLock_IR);
			fi_downgrade_lock(file);
		}
	} else {
		di_write_unlock(dentry);
		fi_write_unlock(file);
	}

out:
	return err;
}
/* ---------------------------------------------------------------------- */
/*
 * Address space operations.  aufs does not cache file data itself; only
 * readpage and direct_IO are wired up for real, and under
 * CONFIG_AUFS_DEBUG every other handler traps with AuUnsupport() to catch
 * unexpected calls.
 */

/* cf. aufs_nopage() */
/* for madvise(2) */
static int aufs_readpage(struct file *file __maybe_unused, struct page *page)
{
	unlock_page(page);
	return 0;
}

/* it will never be called, but necessary to support O_DIRECT */
static ssize_t aufs_direct_IO(struct kiocb *iocb, struct iov_iter *iter,
			      loff_t offset)
{ BUG(); return 0; }

/* they will never be called. */
#ifdef CONFIG_AUFS_DEBUG
static int aufs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned len, unsigned flags,
			    struct page **pagep, void **fsdata)
{ AuUnsupport(); return 0; }
static int aufs_write_end(struct file *file, struct address_space *mapping,
			  loff_t pos, unsigned len, unsigned copied,
			  struct page *page, void *fsdata)
{ AuUnsupport(); return 0; }
static int aufs_writepage(struct page *page, struct writeback_control *wbc)
{ AuUnsupport(); return 0; }

static int aufs_set_page_dirty(struct page *page)
{ AuUnsupport(); return 0; }
static void aufs_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{ AuUnsupport(); }
static int aufs_releasepage(struct page *page, gfp_t gfp)
{ AuUnsupport(); return 0; }
static int aufs_migratepage(struct address_space *mapping, struct page *newpage,
			    struct page *page, enum migrate_mode mode)
{ AuUnsupport(); return 0; }
static int aufs_launder_page(struct page *page)
{ AuUnsupport(); return 0; }
static int aufs_is_partially_uptodate(struct page *page,
				      unsigned long from,
				      unsigned long count)
{ AuUnsupport(); return 0; }
static void aufs_is_dirty_writeback(struct page *page, bool *dirty,
				    bool *writeback)
{ AuUnsupport(); }
static int aufs_error_remove_page(struct address_space *mapping,
				  struct page *page)
{ AuUnsupport(); return 0; }
static int aufs_swap_activate(struct swap_info_struct *sis, struct file *file,
			      sector_t *span)
{ AuUnsupport(); return 0; }
static void aufs_swap_deactivate(struct file *file)
{ AuUnsupport(); }
#endif /* CONFIG_AUFS_DEBUG */

const struct address_space_operations aufs_aop = {
	.readpage		= aufs_readpage,
	.direct_IO		= aufs_direct_IO,
#ifdef CONFIG_AUFS_DEBUG
	.writepage		= aufs_writepage,
	/* no writepages, because of writepage */
	.set_page_dirty		= aufs_set_page_dirty,
	/* no readpages, because of readpage */
	.write_begin		= aufs_write_begin,
	.write_end		= aufs_write_end,
	/* no bmap, no block device */
	.invalidatepage		= aufs_invalidatepage,
	.releasepage		= aufs_releasepage,
	.migratepage		= aufs_migratepage,
	.launder_page		= aufs_launder_page,
	.is_partially_uptodate	= aufs_is_partially_uptodate,
	.is_dirty_writeback	= aufs_is_dirty_writeback,
	.error_remove_page	= aufs_error_remove_page,
	.swap_activate		= aufs_swap_activate,
	.swap_deactivate	= aufs_swap_deactivate
#endif /* CONFIG_AUFS_DEBUG */
};
| loxdegio/linux-patched | fs/aufs/file.c | C | gpl-2.0 | 20,768 |
const
gulp = require('gulp'),
rename = require('gulp-rename'),
sketch = require('gulp-sketch'),
iconfont = require('gulp-iconfont'),
imagemin = require('gulp-imagemin'),
consolidate = require('gulp-consolidate')
/**
 * Font settings — the values below are consumed by the 'symbols' task.
 */
const
  // set name of your symbol font (used for the generated font files and CSS)
  fontName = 'floating-top-link',
  // set class name in your CSS
  className = 'FloatingTopLink__icon',
  // lodash template basename under templates/; you can also choose 'foundation-style'
  template = 'fontawesome-style',
  // source Sketch file; you can also choose 'symbol-font-16px.sketch'
  // NOTE(review): identifier is misspelled ("skethc"); renaming would also
  // touch its use in the 'symbols' task below
  skethcFileName = 'floating-top-link-font-14px.sketch'

/**
 * Recommended to get consistent builds when watching files
 * See https://github.com/nfroidure/gulp-iconfont
 */
const timestamp = Math.round(Date.now() / 1000)
/*
 * 'symbols' task: export each artboard of the Sketch file as an SVG,
 * minify the SVGs, assemble them into an icon font (ttf/eot/woff/woff2/svg),
 * then render the CSS and sample HTML from the lodash templates using the
 * glyph/codepoint map emitted by gulp-iconfont's 'glyphs' event.
 */
gulp.task('symbols', () =>
  gulp.src(skethcFileName)
    .pipe(sketch({
      export: 'artboards',
      formats: 'svg'
    }))
    .pipe(imagemin())
    .pipe(iconfont({
      fontName,
      formats: ['ttf', 'eot', 'woff', 'woff2', 'svg'],
      timestamp,
      log: () => {} // suppress unnecessary logging
    }))
    .on('glyphs', (glyphs) => {
      const options = {
        className,
        fontName,
        fontPath: '../fonts/', // set path to font (from your CSS file if relative)
        glyphs: glyphs.map(mapGlyphs)
      }
      gulp.src(`templates/${ template }.css`)
        .pipe(consolidate('lodash', options))
        .pipe(rename({ basename: fontName }))
        .pipe(gulp.dest('./css/')) // set path to export your CSS

      // if you don't need sample.html, remove next 4 lines
      gulp.src(`templates/${ template }.html`)
        .pipe(consolidate('lodash', options))
        .pipe(rename({ basename: 'sample' }))
        .pipe(gulp.dest('./')) // set path to export your sample HTML
    })
    .pipe(gulp.dest('../fonts/')) // set path to export your fonts
)

// Rebuild the font whenever any Sketch file in this directory changes.
gulp.task('watch', () => gulp.watch('*.sketch', ['symbols']))
/**
 * Convert a gulp-iconfont glyph descriptor into the { name, codepoint }
 * shape consumed by the CSS/HTML lodash templates.
 */
function mapGlyphs(glyph) {
  const [firstChar] = glyph.unicode
  return {
    name: glyph.name,
    codepoint: firstChar.charCodeAt(0)
  }
}
| littlebirdjp/floating-top-link | symbols-for-sketch/gulpfile.js | JavaScript | gpl-2.0 | 2,069 |
/*
Copyright (c) 2004-2011, The Dojo Foundation All Rights Reserved.
Available via Academic Free License >= 2.1 OR the modified BSD license.
see: http://dojotoolkit.org/license for details
*/
if(!dojo._hasResource["dijit.form._FormWidget"]){ //_hasResource checks added by build. Do not use _hasResource directly in your code.
dojo._hasResource["dijit.form._FormWidget"] = true;
dojo.provide("dijit.form._FormWidget");
dojo.require("dojo.window");
dojo.require("dijit._Widget");
dojo.require("dijit._Templated");
dojo.require("dijit._CssStateMixin");
dojo.declare("dijit.form._FormWidget", [dijit._Widget, dijit._Templated, dijit._CssStateMixin],
{
// summary:
// Base class for widgets corresponding to native HTML elements such as <checkbox> or <button>,
// which can be children of a <form> node or a `dijit.form.Form` widget.
//
// description:
// Represents a single HTML element.
// All these widgets should have these attributes just like native HTML input elements.
// You can set them during widget construction or afterwards, via `dijit._Widget.attr`.
//
// They also share some common methods.
// name: [const] String
// Name used when submitting form; same as "name" attribute or plain HTML elements
name: "",
// alt: String
// Corresponds to the native HTML <input> element's attribute.
alt: "",
// value: String
// Corresponds to the native HTML <input> element's attribute.
value: "",
// type: String
// Corresponds to the native HTML <input> element's attribute.
type: "text",
// tabIndex: Integer
// Order fields are traversed when user hits the tab key
tabIndex: "0",
// disabled: Boolean
// Should this widget respond to user input?
// In markup, this is specified as "disabled='disabled'", or just "disabled".
disabled: false,
// intermediateChanges: Boolean
// Fires onChange for each value change or only on demand
intermediateChanges: false,
// scrollOnFocus: Boolean
// On focus, should this widget scroll into view?
scrollOnFocus: true,
// These mixins assume that the focus node is an INPUT, as many but not all _FormWidgets are.
attributeMap: dojo.delegate(dijit._Widget.prototype.attributeMap, {
value: "focusNode",
id: "focusNode",
tabIndex: "focusNode",
alt: "focusNode",
title: "focusNode"
}),
postMixInProperties: function(){
// Setup name=foo string to be referenced from the template (but only if a name has been specified)
// Unfortunately we can't use attributeMap to set the name due to IE limitations, see #8660
// Regarding escaping, see heading "Attribute values" in
// http://www.w3.org/TR/REC-html40/appendix/notes.html#h-B.3.2
this.nameAttrSetting = this.name ? ('name="' + this.name.replace(/'/g, """) + '"') : '';
this.inherited(arguments);
},
postCreate: function(){
this.inherited(arguments);
this.connect(this.domNode, "onmousedown", "_onMouseDown");
},
_setDisabledAttr: function(/*Boolean*/ value){
this._set("disabled", value);
dojo.attr(this.focusNode, 'disabled', value);
if(this.valueNode){
dojo.attr(this.valueNode, 'disabled', value);
}
dijit.setWaiState(this.focusNode, "disabled", value);
if(value){
// reset these, because after the domNode is disabled, we can no longer receive
// mouse related events, see #4200
this._set("hovering", false);
this._set("active", false);
// clear tab stop(s) on this widget's focusable node(s) (ComboBox has two focusable nodes)
var attachPointNames = "tabIndex" in this.attributeMap ? this.attributeMap.tabIndex : "focusNode";
dojo.forEach(dojo.isArray(attachPointNames) ? attachPointNames : [attachPointNames], function(attachPointName){
var node = this[attachPointName];
// complex code because tabIndex=-1 on a <div> doesn't work on FF
if(dojo.isWebKit || dijit.hasDefaultTabStop(node)){ // see #11064 about webkit bug
node.setAttribute('tabIndex', "-1");
}else{
node.removeAttribute('tabIndex');
}
}, this);
}else{
if(this.tabIndex != ""){
this.focusNode.setAttribute('tabIndex', this.tabIndex);
}
}
},
setDisabled: function(/*Boolean*/ disabled){
// summary:
// Deprecated. Use set('disabled', ...) instead.
dojo.deprecated("setDisabled("+disabled+") is deprecated. Use set('disabled',"+disabled+") instead.", "", "2.0");
this.set('disabled', disabled);
},
_onFocus: function(e){
if(this.scrollOnFocus){
dojo.window.scrollIntoView(this.domNode);
}
this.inherited(arguments);
},
isFocusable: function(){
// summary:
// Tells if this widget is focusable or not. Used internally by dijit.
// tags:
// protected
return !this.disabled && this.focusNode && (dojo.style(this.domNode, "display") != "none");
},
focus: function(){
// summary:
// Put focus on this widget
if(!this.disabled){
dijit.focus(this.focusNode);
}
},
compare: function(/*anything*/ val1, /*anything*/ val2){
// summary:
// Compare 2 values (as returned by get('value') for this widget).
// tags:
// protected
if(typeof val1 == "number" && typeof val2 == "number"){
return (isNaN(val1) && isNaN(val2)) ? 0 : val1 - val2;
}else if(val1 > val2){
return 1;
}else if(val1 < val2){
return -1;
}else{
return 0;
}
},
	onChange: function(newValue){
		// summary:
		//		Callback when this widget's value is changed.
		// tags:
		//		callback
	},
	// _onChangeActive: [private] Boolean
	//		Indicates that changes to the value should call onChange() callback.
	//		This is false during widget initialization, to avoid calling onChange()
	//		when the initial value is set.
	_onChangeActive: false,
	_handleOnChange: function(/*anything*/ newValue, /*Boolean?*/ priorityChange){
		// summary:
		//		Called when the value of the widget is set.  Calls onChange() if appropriate
		// newValue:
		//		the new value
		// priorityChange:
		//		For a slider, for example, dragging the slider is priorityChange==false,
		//		but on mouse up, it's priorityChange==true.  If intermediateChanges==false,
		//		onChange is only called form priorityChange=true events.
		// tags:
		//		private
		if(this._lastValueReported == undefined && (priorityChange === null || !this._onChangeActive)){
			// this block executes not for a change, but during initialization,
			// and is used to store away the original value (or for ToggleButton, the original checked state)
			this._resetValue = this._lastValueReported = newValue;
		}
		// A change is pending if one was already pending, or the type changed,
		// or compare() reports the values differ.
		this._pendingOnChange = this._pendingOnChange
			|| (typeof newValue != typeof this._lastValueReported)
			|| (this.compare(newValue, this._lastValueReported) != 0);
		if((this.intermediateChanges || priorityChange || priorityChange === undefined) && this._pendingOnChange){
			this._lastValueReported = newValue;
			this._pendingOnChange = false;
			if(this._onChangeActive){
				if(this._onChangeHandle){
					clearTimeout(this._onChangeHandle);
				}
				// setTimout allows hidden value processing to run and
				// also the onChange handler can safely adjust focus, etc
				this._onChangeHandle = setTimeout(dojo.hitch(this,
					function(){
						this._onChangeHandle = null;
						this.onChange(newValue);
					}), 0); // try to collapse multiple onChange's fired faster than can be processed
			}
		}
	},
	create: function(){
		// Overrides _Widget.create().  Once creation is complete, enable
		// onChange() notifications (suppressed during initialization so that
		// setting the initial value does not fire onChange).
		this.inherited(arguments);
		this._onChangeActive = true;
	},
	destroy: function(){
		// Flush any onChange() still queued via setTimeout so listeners see the
		// final value before teardown.
		if(this._onChangeHandle){ // destroy called before last onChange has fired
			clearTimeout(this._onChangeHandle);
			this.onChange(this._lastValueReported);
		}
		this.inherited(arguments);
	},
	setValue: function(/*String*/ value){
		// summary:
		//		Deprecated.  Use set('value', ...) instead.
		dojo.deprecated("dijit.form._FormWidget:setValue("+value+") is deprecated. Use set('value',"+value+") instead.", "", "2.0");
		this.set('value', value);
	},
	getValue: function(){
		// summary:
		//		Deprecated.  Use get('value') instead.
		dojo.deprecated(this.declaredClass+"::getValue() is deprecated. Use get('value') instead.", "", "2.0");
		return this.get('value');
	},
	_onMouseDown: function(e){
		// If user clicks on the button, even if the mouse is released outside of it,
		// this button should get focus (to mimics native browser buttons).
		// This is also needed on chrome because otherwise buttons won't get focus at all,
		// which leads to bizarre focus restore on Dialog close etc.
		if(!e.ctrlKey && dojo.mouseButtons.isLeft(e) && this.isFocusable()){ // !e.ctrlKey to ignore right-click on mac
			// Set a global event to handle mouseup, so it fires properly
			// even if the cursor leaves this.domNode before the mouse up event.
			var mouseUpConnector = this.connect(dojo.body(), "onmouseup", function(){
				// Re-check focusability: the widget may have been disabled
				// between mousedown and mouseup.
				if (this.isFocusable()) {
					this.focus();
				}
				this.disconnect(mouseUpConnector);
			});
		}
	}
});
dojo.declare("dijit.form._FormValueWidget", dijit.form._FormWidget,
{
	// summary:
	//		Base class for widgets corresponding to native HTML elements such as <input> or <select> that have user changeable values.
	// description:
	//		Each _FormValueWidget represents a single input value, and has a (possibly hidden) <input> element,
	//		to which it serializes it's input value, so that form submission (either normal submission or via FormBind?)
	//		works as expected.
	// Don't attempt to mixin the 'type', 'name' attributes here programatically -- they must be declared
	// directly in the template as read by the parser in order to function. IE is known to specifically
	// require the 'name' attribute at element creation time. See #8484, #8660.
	// TODO: unclear what that {value: ""} is for; FormWidget.attributeMap copies value to focusNode,
	// so maybe {value: ""} is so the value *doesn't* get copied to focusNode?
	// Seems like we really want value removed from attributeMap altogether
	// (although there's no easy way to do that now)
	// readOnly: Boolean
	//		Should this widget respond to user input?
	//		In markup, this is specified as "readOnly".
	//		Similar to disabled except readOnly form values are submitted.
	readOnly: false,
	attributeMap: dojo.delegate(dijit.form._FormWidget.prototype.attributeMap, {
		value: "",
		readOnly: "focusNode"
	}),
	_setReadOnlyAttr: function(/*Boolean*/ value){
		// Hook for set('readOnly', ...): mirror the flag onto the focus node
		// and the matching ARIA state for assistive technology.
		dojo.attr(this.focusNode, 'readOnly', value);
		dijit.setWaiState(this.focusNode, "readonly", value);
		this._set("readOnly", value);
	},
	postCreate: function(){
		this.inherited(arguments);
		// On old IE, ESC must be intercepted at keydown (keypress is too late
		// to cancel the default behavior) — see _onKeyDown below.
		if(dojo.isIE < 9 || (dojo.isIE && dojo.isQuirks)){ // IE won't stop the event with keypress
			this.connect(this.focusNode || this.domNode, "onkeydown", this._onKeyDown);
		}
		// Update our reset value if it hasn't yet been set (because this.set()
		// is only called when there *is* a value)
		if(this._resetValue === undefined){
			this._lastValueReported = this._resetValue = this.value;
		}
	},
	_setValueAttr: function(/*anything*/ newValue, /*Boolean?*/ priorityChange){
		// summary:
		//		Hook so set('value', value) works.
		// description:
		//		Sets the value of the widget.
		//		If the value has changed, then fire onChange event, unless priorityChange
		//		is specified as null (or false?)
		this._handleOnChange(newValue, priorityChange);
	},
	_handleOnChange: function(/*anything*/ newValue, /*Boolean?*/ priorityChange){
		// summary:
		//		Called when the value of the widget has changed.  Saves the new value in this.value,
		//		and calls onChange() if appropriate.  See _FormWidget._handleOnChange() for details.
		this._set("value", newValue);
		this.inherited(arguments);
	},
	undo: function(){
		// summary:
		//		Restore the value to the last value passed to onChange
		this._setValueAttr(this._lastValueReported, false);
	},
	reset: function(){
		// summary:
		//		Reset the widget's value to what it was at initialization time
		this._hasBeenBlurred = false;
		this._setValueAttr(this._resetValue, true);
	},
	_onKeyDown: function(e){
		// On unmodified ESC under IE, cancel the default keydown behavior and
		// synthesize an equivalent keypress event instead (presumably so the
		// regular keypress-based ESC handling still runs — see postCreate wiring).
		if(e.keyCode == dojo.keys.ESCAPE && !(e.ctrlKey || e.altKey || e.metaKey)){
			var te;
			if(dojo.isIE){
				e.preventDefault(); // default behavior needs to be stopped here since keypress is too late
				te = document.createEventObject();
				te.keyCode = dojo.keys.ESCAPE;
				te.shiftKey = e.shiftKey;
				e.srcElement.fireEvent('onkeypress', te);
			}
		}
	},
	_layoutHackIE7: function(){
		// summary:
		//		Work around table sizing bugs on IE7 by forcing redraw
		if(dojo.isIE == 7){ // fix IE7 layout bug when the widget is scrolled out of sight
			var domNode = this.domNode;
			var parent = domNode.parentNode;
			var pingNode = domNode.firstChild || domNode; // target node most unlikely to have a custom filter
			var origFilter = pingNode.style.filter; // save custom filter, most likely nothing
			var _this = this;
			// For every zero-height ancestor (i.e. not yet rendered), poke the
			// filter style once on its first scroll event to force a redraw.
			while(parent && parent.clientHeight == 0){ // search for parents that haven't rendered yet
				(function ping(){
					var disconnectHandle = _this.connect(parent, "onscroll",
						function(e){
							_this.disconnect(disconnectHandle); // only call once
							pingNode.style.filter = (new Date()).getMilliseconds(); // set to anything that's unique
							setTimeout(function(){ pingNode.style.filter = origFilter }, 0); // restore custom filter, if any
						}
					);
				})();
				parent = parent.parentNode;
			}
		}
	}
});
}
| johnkeeping/tt-rss | lib/dijit/form/_FormWidget.js | JavaScript | gpl-2.0 | 13,325 |
<?php
defined( '_JEXEC' ) or die( 'Restricted access' );
class ThontosViewThongke extends JViewLegacy {

	/**
	 * Entry point: selects the layout and the data-preparation method from the
	 * request's "task" variable, then lets JViewLegacy render it.
	 *
	 * Note: the task names are preserved exactly as sent by the client (some
	 * carry a leading underscore, some do not) for backwards compatibility.
	 *
	 * @param string|null $tpl optional template name forwarded to parent::display()
	 */
	function display($tpl = null) {
		$task = JRequest::getVar('task');
		switch($task){
			// Appendix 1 (phu luc 1)
			case 'getPhuluc1':
				$this->setLayout('phuluc1');
				$this->getPhuluc1();
				break;
			case 'default_phuluc1':
				$this->setLayout('default_phuluc1');
				$this->_default_phuluc1();
				break;
			// Appendix 3 (phu luc 3)
			case 'getPhuluc3':
				$this->setLayout('phuluc3');
				$this->getPhuluc3();
				break;
			case '_default_phuluc3':
				$this->setLayout('default_phuluc3');
				$this->_default_phuluc3();
				break;
			// Appendix 4 (phu luc 4)
			case '_default_phuluc4':
				$this->setLayout('default_phuluc4');
				$this->_default_phuluc4();
				break;
			case 'getPhuluc4':
				$this->setLayout('phuluc4');
				$this->getPhuluc4();
				break;
			// Appendix 5 (phu luc 5)
			case '_default_phuluc5':
				$this->setLayout('default_phuluc5');
				$this->_default_phuluc5();
				break;
			case 'getPhuluc5':
				$this->setLayout('phuluc5');
				$this->getPhuluc5();
				break;
			// Appendix 6 (phu luc 6)
			case '_default_phuluc6':
				$this->setLayout('default_phuluc6');
				$this->_default_phuluc6();
				break;
			case 'getPhuluc6':
				$this->setLayout('phuluc6');
				$this->getPhuluc6();
				break;
		}
		parent::display($tpl);
	}

	/**
	 * Build the root-organisation info array shared by the _default_phuluc3/4/5/6
	 * views (this block was previously copy-pasted verbatim in four methods).
	 *
	 * @return array|null array with root_id/root_name/root_showlist keys, or
	 *                    null when no root id is available (the layout is
	 *                    switched to 'hoso_404' in that case).
	 */
	private function _getRootInfo(){
		$model = Core::model('Thonto/Thongke');
		// NOTE(review): the root id was hard-coded to 1 in the original code,
		// so the null branch below is effectively dead; kept for safety.
		$idRoot = 1;
		if($idRoot == null){
			$this->setLayout('hoso_404');
			return null;
		}
		$root = array('root_id' => $idRoot);
		$tmp = $model->getThongtin(array('ten, kieu'), 'thonto_tochuc', null, array('id='.$root['root_id']), null);
		$root['root_name'] = $tmp[0]->ten;
		$root['root_showlist'] = $tmp[0]->kieu;
		return $root;
	}

	// Appendix 1: reporting units (levels 2,3) for the requested unit ids.
	private function getPhuluc1(){
		$ids_donvi = JRequest::getVar('donvi_id');
		$model = Core::model('Thonto/Thongke');
		$ds_donvi = $model->getDonviBaocao($ids_donvi, '2,3');
		$this->assignRef('ds_donvi', $ds_donvi);
	}

	// Appendix 1 default view: root tree info for the current user.
	public function _default_phuluc1(){
		$model = Core::model('Thonto/Hoso');
		$idUser = JFactory::getUser()->id;
		$idRoot = '1';
		if($idRoot == null){
			// Effectively dead branch (root id is hard-coded); kept for safety.
			$this->setLayout('hoso_404');
		}else{
			$id_donvi = $model->getInfoOfRootTree($idRoot);
			$this->assignRef('id_donvi', $id_donvi);
			$this->assignRef('idUser', $idUser);
		}
	}

	// Appendix 3: single report row for the requested id.
	function getPhuluc3(){
		// Use JRequest instead of raw $_GET for consistency with the rest of
		// the class (equivalent for GET requests, with basic input washing).
		$id = JRequest::getVar('id');
		$model = Core::model('Thonto/Thongke');
		$this->assignRef('row', $model->getmau3($id));
	}

	public function _default_phuluc3(){
		$root = $this->_getRootInfo();
		$this->assignRef('root_info', $root);
	}

	// Appendix 4
	function _default_phuluc4(){
		$root = $this->_getRootInfo();
		$this->assignRef('root_info', $root);
	}

	function getPhuluc4(){
		$ids_donvi = JRequest::getVar('id');
		$model = Core::model('Thonto/Thongke');
		$ds_donvi = $model->getDonviBaocao($ids_donvi, '2,3');
		$this->assignRef('ds_donvi', $ds_donvi);
	}

	// Appendix 5
	public function _default_phuluc5(){
		$root = $this->_getRootInfo();
		$this->assignRef('root_info', $root);
	}

	function getPhuluc5(){
		$ids_donvi = JRequest::getVar('id');
		$model = Core::model('Thonto/Thongke');
		// Appendix 5 also covers unit levels 4 and 5.
		$ds_donvi = $model->getDonviBaocao($ids_donvi, '2,3,4,5');
		$row_kiennghi = $model->getThongtin('id, ten', 'thonto_danhmuckiennghi', null, 'trangthai=1', 'sapxep asc');
		$this->assignRef('row_kiennghi', $row_kiennghi);
		$this->assignRef('ds_donvi', $ds_donvi);
	}

	// Appendix 6
	function _default_phuluc6(){
		$root = $this->_getRootInfo();
		$this->assignRef('root_info', $root);
	}

	function getPhuluc6(){
		$ids_donvi = JRequest::getVar('id');
		$model = Core::model('Thonto/Thongke');
		$ds_donvi = $model->getDonviBaocao($ids_donvi, '2,3');
		$row_noidunghop = $model->getThongtin('id, ten', 'thonto_danhmucnoidunghop', null, 'trangthai=1', 'sapxep asc');
		$this->assignRef('row_noidunghop', $row_noidunghop);
		$this->assignRef('ds_donvi', $ds_donvi);
	}
}
?> | phucdnict/cbcc_05062015 | components/com_thonto/views/thongke/view.raw.php | PHP | gpl-2.0 | 5,161 |
<?php
/**
* @version $Id: default.php 2721 2010-10-27 00:58:51Z johanjanssens $
* @category Nooku
* @package Nooku_Components
* @subpackage Default
* @copyright Copyright (C) 2007 - 2010 Johan Janssens. All rights reserved.
* @license GNU GPLv3 <http://www.gnu.org/licenses/gpl.html>
* @link http://www.nooku.org
*/
/**
* Default Html View
.*
* @author Johan Janssens <johan@nooku.org>
* @category Nooku
* @package Nooku_Components
* @subpackage Default
*/
class ComDefaultViewHtml extends KViewDefault
{
	/**
	 * Associatives array of view names
	 *
	 * @var array
	 */
	public $views;
	/**
	 * Constructor
	 *
	 * Registers the editor template helper alias and adds the Joomla template
	 * override path for this view.
	 *
	 * @param object An optional KConfig object with configuration options
	 */
	public function __construct(KConfig $config)
	{
		parent::__construct($config);
		$this->views = $config->views;
		//Add alias filter for editor helper
		$this->getTemplate()->getFilter('alias')->append(array(
			'@editor(' => '$this->loadHelper(\'admin::com.default.template.helper.editor.display\', ')
		);
		//Add the template override path
		// Build the sub-path from this view's identifier: drop the first path
		// segment, pluralize the next one, and end with the lowercased view name.
		$parts = $this->_identifier->path;
		array_shift($parts);
		if(count($parts) > 1)
		{
			$path = KInflector::pluralize(array_shift($parts));
			$path .= count($parts) ? DS.implode(DS, $parts) : '';
			$path .= DS.strtolower($this->getName());
		}
		else $path = strtolower($this->getName());
		// Overrides live under the active application template:
		// <themes>/<template>/html/com_<package>/<path>
		$template = KFactory::get('lib.koowa.application')->getTemplate();
		$path = JPATH_THEMES.'/'.$template.'/html/com_'.$this->_identifier->package.DS.$path;
		$this->getTemplate()->addPath($path);
	}
	/**
	 * Initializes the configuration for the object
	 *
	 * Called from {@link __construct()} as a first step of object instantiation.
	 *
	 * @param array Configuration settings
	 */
	protected function _initialize(KConfig $config)
	{
		$config->append(array(
			'views' => array(),
			// Singular view names (item forms) default to the 'form' layout,
			// plural ones (lists) to 'default'.
			'layout_default' => KInflector::isSingular($this->getName()) ? 'form' : 'default'
		));
		parent::_initialize($config);
	}
	/**
	 * Get the identifier for the toolbar with the same name
	 *
	 * @return KIdentifierInterface
	 */
	public function getToolbar()
	{
		// Reuse this view's identifier but repoint its path at 'toolbar',
		// keeping the same name, then resolve it through the factory.
		$identifier = clone $this->_identifier;
		$identifier->path = array('toolbar');
		$identifier->name = $this->getName();
		return KFactory::get($identifier);
	}
}
/*
	Sapling embed-media dialog (registered with CKEditor as the 'embed' dialog)
*/
CKEDITOR.dialog.add( 'embed', function( editor )
{
	// Dialog for inserting or editing an embedded-media <span class="plugin embed">
	// whose text content is the raw embed code.  Relies on jQuery ($/jQuery)
	// being available globally.
	// (Removed unused locals: plugin, pagelink_plugin.)
	return {
		title : 'Embed media',
		minWidth : 300,
		minHeight : 150,
		contents : [
			{
				id : 'info',
				label : 'Embed media',
				title : 'Embed media',
				elements :
				[
					{
						type : 'textarea',
						id : 'code',
						label : 'Paste the embed code below:',
						required: true,
						validate : function()
						{
							// The field must not be empty.
							var func = CKEDITOR.dialog.validate.notEmpty( 'Please enter the embed code' );
							return func.apply( this );
						},
						setup : function( data )
						{
							if ( data.code )
								this.setValue( data.code );
						},
						commit : function( data )
						{
							data.code = this.getValue();
						}
					}
				]
			}
		],
		onShow : function()
		{
			// If the caret is inside an existing embed <span>, select it so that
			// OK edits it in place instead of inserting a new one.
			var editor = this.getParentEditor(),
				selection = editor.getSelection(),
				element = null,
				data = { code : '' };

			if ( ( element = selection.getStartElement() )
					&& element.is( 'span' ) )
				selection.selectElement( element );
			else
				element = null;

			if( element )
			{
				this._.selectedElement = element;
				data.code = $(element.$).text();
			}

			this.setupContent( data );
		},
		onOk : function()
		{
			var attributes = {},
				data = {},
				editor = this.getParentEditor();

			this.commitContent( data );

			attributes['class'] = 'plugin embed';

			// Copy the embed code's declared width/height (if any) onto the
			// wrapper span so it reserves the right amount of space.
			var style = [];
			var node = $(data.code),
				width = node.attr('width'),
				height = node.attr('height');
			if(width)
				style.push('width: ' + width + 'px;');
			if(height)
				style.push('height: ' + height + 'px;');
			if(style.length)
				attributes['style'] = style.join(' ');

			if ( !this._.selectedElement )
			{
				// Nothing to insert for a blank embed code.
				if(jQuery.trim(data.code) == '')
					return;

				// Create element if current selection is collapsed.
				var selection = editor.getSelection(),
					ranges = selection.getRanges( true );

				var text = new CKEDITOR.dom.text( data.code, editor.document );
				ranges[0].insertNode( text );
				ranges[0].selectNodeContents( text );
				selection.selectRanges( ranges );

				// Apply style.
				var style = new CKEDITOR.style( { element : 'span', attributes : attributes } );
				style.apply( editor.document );

				// Collapse the selection to just after the new span.
				var selected = selection.getStartElement();
				ranges[0].setStartAfter( selected );
				ranges[0].setEndAfter( selected );
				selection.selectRanges( ranges );
			}
			else
			{
				// We're only editing an existing embed, so just overwrite the
				// attributes and the embed-code text.
				var element = this._.selectedElement;
				element.setAttributes( attributes );
				element.setText( data.code );
			}
		},
		onLoad : function()
		{
		},
		// Initial focus on the embed-code field.
		onFocus : function()
		{
			var pageField = this.getContentElement( 'info', 'code' );
			pageField.select();
		}
	};
});
| mivanov/editkit | editkit/pages/static/js/ckeditor/_source/plugins/embed/dialogs/embed.js | JavaScript | gpl-2.0 | 2,966 |
/** \file progress.h */    // -*-c++-*-
// Copyright (C) 2010 Daniel Burrows
//
// This program is free software; you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of the
// License, or (at your option) any later version.
//
// This program is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; see the file COPYING. If not, write to
// the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
// Boston, MA 02111-1307, USA.
#ifndef APTITUDE_GENERIC_VIEWS_PROGRESS_H
#define APTITUDE_GENERIC_VIEWS_PROGRESS_H
namespace aptitude
{
namespace util
{
class progress_info;
}
namespace views
{
    /** \brief A general class for displaying a single line of
     *  progress information.
     *
     *  The progress information is delivered as a progress_info
     *  object.  A blank progress_info causes the display to be
     *  erased.  A "pulse" mode progress_info displays a message with
     *  no percent indicator.  And a "bar" mode progress_info displays
     *  a message with a percent indicator.
     *
     *  This is a pure interface; concrete front-end implementations
     *  are defined elsewhere.
     */
    class progress
    {
    public:
      virtual ~progress();

      /** \brief Set the currently displayed progress.
       *
       *  \param progress The new progress information to display.
       */
      virtual void set_progress(const aptitude::util::progress_info &progress) = 0;

      /** \brief Mark the currently displayed progress (if any) as done. */
      virtual void done() = 0;
    };
}
}
#endif // APTITUDE_GENERIC_VIEWS_PROGRESS_H
| iamduyu/aptitude | src/generic/views/progress.h | C | gpl-2.0 | 1,899 |
# -*- coding: utf-8 -*-
## This file is part of Invenio.
## Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
# pylint: disable=C0301
"""Invenio Search Engine in mod_python."""
__lastupdated__ = """$Date$"""
__revision__ = "$Id$"
## import general modules:
import cgi
import cStringIO
import copy
import string
import os
import re
import time
import urllib
import urlparse
import zlib
import sys
if sys.hexversion < 0x2040000:
# pylint: disable=W0622
from sets import Set as set
# pylint: enable=W0622
## import Invenio stuff:
from invenio.config import \
CFG_CERN_SITE, \
CFG_INSPIRE_SITE, \
CFG_OAI_ID_FIELD, \
CFG_WEBCOMMENT_ALLOW_REVIEWS, \
CFG_WEBSEARCH_CALL_BIBFORMAT, \
CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX, \
CFG_WEBSEARCH_FIELDS_CONVERT, \
CFG_WEBSEARCH_NB_RECORDS_TO_SORT, \
CFG_WEBSEARCH_SEARCH_CACHE_SIZE, \
CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS, \
CFG_WEBSEARCH_USE_ALEPH_SYSNOS, \
CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, \
CFG_WEBSEARCH_FULLTEXT_SNIPPETS, \
CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS, \
CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE, \
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG, \
CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS, \
CFG_WEBSEARCH_WILDCARD_LIMIT, \
CFG_WEBSEARCH_SYNONYM_KBRS, \
CFG_SITE_LANG, \
CFG_SITE_NAME, \
CFG_LOGDIR, \
CFG_BIBFORMAT_HIDDEN_TAGS, \
CFG_SITE_URL, \
CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS, \
CFG_SOLR_URL, \
CFG_SITE_RECORD, \
CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT, \
CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY, \
CFG_BIBSORT_BUCKETS
from invenio.search_engine_config import InvenioWebSearchUnknownCollectionError, InvenioWebSearchWildcardLimitError
from invenio.search_engine_utils import get_fieldvalues
from invenio.bibrecord import create_record
from invenio.bibrank_record_sorter import get_bibrank_methods, is_method_valid, rank_records as rank_records_bibrank
from invenio.bibrank_downloads_similarity import register_page_view_event, calculate_reading_similarity_list
from invenio.bibindex_engine_stemmer import stem
from invenio.bibindex_engine_tokenizer import wash_author_name, author_name_requires_phrase_search
from invenio.bibformat import format_record, format_records, get_output_format_content_type, create_excel
from invenio.bibformat_config import CFG_BIBFORMAT_USE_OLD_BIBFORMAT
from invenio.bibrank_downloads_grapher import create_download_history_graph_and_box
from invenio.bibknowledge import get_kbr_values
from invenio.data_cacher import DataCacher
from invenio.websearch_external_collections import print_external_results_overview, perform_external_collection_search
from invenio.access_control_admin import acc_get_action_id
from invenio.access_control_config import VIEWRESTRCOLL, \
CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS
from invenio.websearchadminlib import get_detailed_page_tabs, get_detailed_page_tabs_counts
from invenio.intbitset import intbitset
from invenio.dbquery import DatabaseError, deserialize_via_marshal, InvenioDbQueryWildcardLimitError
from invenio.access_control_engine import acc_authorize_action
from invenio.errorlib import register_exception
from invenio.textutils import encode_for_xml, wash_for_utf8, strip_accents
from invenio.htmlutils import get_mathjax_header
from invenio.htmlutils import nmtoken_from_string
import invenio.template
webstyle_templates = invenio.template.load('webstyle')
webcomment_templates = invenio.template.load('webcomment')
from invenio.bibrank_citation_searcher import calculate_cited_by_list, \
calculate_co_cited_with_list, get_records_with_num_cites, get_self_cited_by, \
get_refersto_hitset, get_citedby_hitset
from invenio.bibrank_citation_grapher import create_citation_history_graph_and_box
from invenio.dbquery import run_sql, run_sql_with_limit, \
get_table_update_time, Error
from invenio.webuser import getUid, collect_user_info, session_param_set
from invenio.webpage import pageheaderonly, pagefooteronly, create_error_box
from invenio.messages import gettext_set_language
from invenio.search_engine_query_parser import SearchQueryParenthesisedParser, \
SpiresToInvenioSyntaxConverter
from invenio import webinterface_handler_config as apache
from invenio.solrutils import solr_get_bitset
try:
import invenio.template
websearch_templates = invenio.template.load('websearch')
except:
pass
from invenio.websearch_external_collections import calculate_hosted_collections_results, do_calculate_hosted_collections_results
from invenio.websearch_external_collections_config import CFG_HOSTED_COLLECTION_TIMEOUT_ANTE_SEARCH
from invenio.websearch_external_collections_config import CFG_HOSTED_COLLECTION_TIMEOUT_POST_SEARCH
from invenio.websearch_external_collections_config import CFG_EXTERNAL_COLLECTION_MAXRESULTS
VIEWRESTRCOLL_ID = acc_get_action_id(VIEWRESTRCOLL)
## global vars:
cfg_nb_browse_seen_records = 100 # limit of the number of records to check when browsing certain collection
cfg_nicely_ordered_collection_list = 0 # do we propose collection list nicely ordered or alphabetical?
## precompile some often-used regexp for speed reasons:
re_word = re.compile('[\s]')
re_quotes = re.compile('[\'\"]')
re_doublequote = re.compile('\"')
re_equal = re.compile('\=')
re_logical_and = re.compile('\sand\s', re.I)
re_logical_or = re.compile('\sor\s', re.I)
re_logical_not = re.compile('\snot\s', re.I)
re_operators = re.compile(r'\s([\+\-\|])\s')
re_pattern_wildcards_after_spaces = re.compile(r'(\s)[\*\%]+')
re_pattern_single_quotes = re.compile("'(.*?)'")
re_pattern_double_quotes = re.compile("\"(.*?)\"")
re_pattern_regexp_quotes = re.compile("\/(.*?)\/")
re_pattern_spaces_after_colon = re.compile(r'(:\s+)')
re_pattern_short_words = re.compile(r'([\s\"]\w{1,3})[\*\%]+')
re_pattern_space = re.compile("__SPACE__")
re_pattern_today = re.compile("\$TODAY\$")
re_pattern_parens = re.compile(r'\([^\)]+\s+[^\)]+\)')
class RestrictedCollectionDataCacher(DataCacher):
    # Caches the list of restricted collection names, i.e. collections that
    # appear as the 'collection' keyword argument of a VIEWRESTRCOLL
    # authorization rule.  Use collection_restricted_p() instead of reading
    # the cache directly.
    def __init__(self):
        def cache_filler():
            # Collect every distinct collection name attached to the
            # VIEWRESTRCOLL action; on database trouble fall back to an empty
            # cache rather than failing.
            ret = []
            try:
                res = run_sql("""SELECT DISTINCT ar.value
                    FROM accROLE_accACTION_accARGUMENT raa JOIN accARGUMENT ar ON raa.id_accARGUMENT = ar.id
                    WHERE ar.keyword = 'collection' AND raa.id_accACTION = %s""", (VIEWRESTRCOLL_ID,))
            except Exception:
                # database problems, return empty cache
                return []
            for coll in res:
                ret.append(coll[0])
            return ret
        def timestamp_verifier():
            # The cache is stale whenever either ACL table changed.
            return max(get_table_update_time('accROLE_accACTION_accARGUMENT'), get_table_update_time('accARGUMENT'))
        DataCacher.__init__(self, cache_filler, timestamp_verifier)
def collection_restricted_p(collection, recreate_cache_if_needed=True):
    """Return True if the named collection is restricted.

    :param collection: collection name to test
    :param recreate_cache_if_needed: when True, refresh the restricted-
        collection cache first if the underlying ACL tables changed
    """
    if recreate_cache_if_needed:
        restricted_collection_cache.recreate_cache_if_needed()
    return collection in restricted_collection_cache.cache

# Lazily (re)create the module-level cache singleton: if the name is missing
# or the existing instance is unusable, build a fresh one.
try:
    restricted_collection_cache.is_ok_p
except Exception:
    restricted_collection_cache = RestrictedCollectionDataCacher()
def ziplist(*lists):
    """Just like zip(), but returns lists of lists instead of lists of tuples,
    padding shorter inputs with None so every input is fully consumed.

    Example:
        zip([f1, f2, f3], [p1, p2, p3], [op1, op2, '']) =>
           [(f1, p1, op1), (f2, p2, op2), (f3, p3, '')]

        ziplist([f1, f2, f3], [p1, p2, p3], [op1, op2, '']) =>
           [[f1, p1, op1], [f2, p2, op2], [f3, p3, '']]

    FIXME: This is handy to have, and should live somewhere else, like
    miscutil.really_useful_functions or something.

    Note: the previous implementation relied on Python 2's map() padding
    behaviour (map(f, a, b) pads the shorter sequence with None); this
    version implements the padding explicitly, so it behaves identically on
    Python 2 and Python 3 (equivalent to itertools zip_longest with list
    rows).  Called with no arguments it returns [].
    """
    seqs = [list(l) for l in lists]
    if not seqs:
        return []
    length = max(len(s) for s in seqs)
    return [[s[i] if i < len(s) else None for s in seqs]
            for i in range(length)]
def get_permitted_restricted_collections(user_info, recreate_cache_if_needed=True):
    """Return a list of collection that are restricted but for which the user
    is authorized."""
    if recreate_cache_if_needed:
        restricted_collection_cache.recreate_cache_if_needed()
    # Keep every restricted collection for which the authorization engine
    # grants the 'viewrestrcoll' action (auth code 0 means granted).
    ret = []
    for collection in restricted_collection_cache.cache:
        if acc_authorize_action(user_info, 'viewrestrcoll', collection=collection)[0] == 0:
            ret.append(collection)
    return ret
def get_all_restricted_recids():
    """Return an intbitset with the ids of all restricted records, i.e.
    records belonging to at least one restricted collection."""
    restricted_recids = intbitset()
    for collection_name in restricted_collection_cache.cache:
        restricted_recids |= get_collection_reclist(collection_name)
    return restricted_recids
def get_restricted_collections_for_recid(recid, recreate_cache_if_needed=True):
    """
    Return the list of restricted collection names to which recid belongs.
    """
    if recreate_cache_if_needed:
        # Refresh both caches up front: the set of restricted collections and
        # the per-collection record lists.
        restricted_collection_cache.recreate_cache_if_needed()
        collection_reclist_cache.recreate_cache_if_needed()
    # recreate_cache_if_needed=False below avoids re-validating the cache once
    # per collection inside the loop.
    return [collection for collection in restricted_collection_cache.cache if recid in get_collection_reclist(collection, recreate_cache_if_needed=False)]
def is_user_owner_of_record(user_info, recid):
    """
    Check if the user is owner of the record, i.e. he is the submitter
    and/or belongs to a owner-like group authorized to 'see' the record.

    @param user_info: the user_info dictionary that describe the user.
    @type user_info: user_info dictionary
    @param recid: the record identifier.
    @type recid: positive integer
    @return: True if the user is 'owner' of the record; False otherwise
    @rtype: bool
    """
    # Gather every email/group value stored in the record's owner tags.
    candidates = []
    for tag in CFG_ACC_GRANT_AUTHOR_RIGHTS_TO_EMAILS_IN_TAGS:
        candidates.extend(get_fieldvalues(recid, tag))
    for candidate in candidates:
        # The raw value may name one of the user's groups...
        if candidate in user_info['group']:
            return True
        # ...or (normalized) match the user's own email address.
        if user_info['email'].strip().lower() == candidate.strip().lower():
            return True
    return False
def check_user_can_view_record(user_info, recid):
    """
    Check if the user is authorized to view the given recid. The function
    grants access in two cases: either user has author rights on this
    record, or he has view rights to the primary collection this record
    belongs to.

    @param user_info: the user_info dictionary that describe the user.
    @type user_info: user_info dictionary
    @param recid: the record identifier.
    @type recid: positive integer
    @return: (0, ''), when authorization is granted, (>0, 'message') when
    authorization is not granted
    @rtype: (int, string)
    """
    # Policy decides how multiple restricted collections combine: 'ANY' grants
    # access if at least one collection authorizes the user; any other value
    # requires authorization on every collection the record belongs to.
    policy = CFG_WEBSEARCH_VIEWRESTRCOLL_POLICY.strip().upper()
    if isinstance(recid, str):
        recid = int(recid)
    if record_public_p(recid):
        ## The record is already known to be public.
        return (0, '')
    ## At this point, either webcoll has not yet run or there are some
    ## restricted collections. Let's see first if the user own the record.
    if is_user_owner_of_record(user_info, recid):
        ## Perfect! It's authorized then!
        return (0, '')
    restricted_collections = get_restricted_collections_for_recid(recid, recreate_cache_if_needed=False)
    if restricted_collections:
        ## If there are restricted collections the user must be authorized to all/any of them (depending on the policy)
        auth_code, auth_msg = 0, ''
        # Reuse the already-computed list instead of re-querying it (the
        # original code called get_restricted_collections_for_recid() twice).
        for collection in restricted_collections:
            (auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=collection)
            if auth_code and policy != 'ANY':
                ## Ouch! the user is not authorized to this collection
                return (auth_code, auth_msg)
            elif auth_code == 0 and policy == 'ANY':
                ## Good! At least one collection is authorized
                return (0, '')
        ## Depending on the policy, the user will be either authorized or not
        return auth_code, auth_msg
    if is_record_in_any_collection(recid, recreate_cache_if_needed=False):
        ## the record is not in any restricted collection
        return (0, '')
    elif record_exists(recid) > 0:
        ## We are in the case where webcoll has not run.
        ## Let's authorize SUPERADMIN
        (auth_code, auth_msg) = acc_authorize_action(user_info, VIEWRESTRCOLL, collection=None)
        if auth_code == 0:
            return (0, '')
        else:
            ## Too bad. Let's print a nice message:
            return (1, """The record you are trying to access has just been
submitted to the system and needs to be assigned to the
proper collections. It is currently restricted for security reasons
until the assignment will be fully completed. Please come back later to
properly access this record.""")
    else:
        ## The record either does not exists or has been deleted.
        ## Let's handle these situations outside of this code.
        return (0, '')
class IndexStemmingDataCacher(DataCacher):
    """
    Provides cache for stemming information for word/phrase indexes.
    This class is not to be used directly; use function
    get_index_stemming_language() instead.

    The cache maps idxINDEX row id -> stemming language code and is
    invalidated whenever the idxINDEX table changes.
    """
    def __init__(self):
        def cache_filler():
            # Load the full id -> stemming_language mapping in one query.
            try:
                res = run_sql("""SELECT id, stemming_language FROM idxINDEX""")
            except DatabaseError:
                # database problems, return empty cache
                return {}
            return dict(res)
        def timestamp_verifier():
            # The cache is stale as soon as idxINDEX was updated.
            return get_table_update_time('idxINDEX')
        DataCacher.__init__(self, cache_filler, timestamp_verifier)
# Instantiate the module-level singleton on first import: the attribute
# probe raises NameError when `index_stemming_cache` does not exist yet
# (first import, or a previously failed initialization), which is caught
# to (re)create the cacher.  On a module reload the existing instance is
# kept, preserving its cached data.
try:
    index_stemming_cache.is_ok_p
except Exception:
    index_stemming_cache = IndexStemmingDataCacher()
def get_index_stemming_language(index_id, recreate_cache_if_needed=True):
    """Return stemming language for the index with id INDEX_ID.

    When RECREATE_CACHE_IF_NEEDED is True the cache is first revalidated
    against the idxINDEX table update time.  Raises KeyError when
    INDEX_ID is not a known index id.
    """
    if recreate_cache_if_needed:
        index_stemming_cache.recreate_cache_if_needed()
    return index_stemming_cache.cache[index_id]
class CollectionRecListDataCacher(DataCacher):
    """
    Provides cache for collection reclist hitsets. This class is not
    to be used directly; use function get_collection_reclist() instead.

    The cache maps collection name -> intbitset of recIDs.  Values are
    filled lazily: the filler only records the collection names (value
    None) and get_collection_reclist() computes each hitset on first use.
    """
    def __init__(self):
        def cache_filler():
            ret = {}
            try:
                res = run_sql("SELECT name,reclist FROM collection")
            except Exception:
                # database problems, return empty cache
                return {}
            for name, reclist in res:
                ret[name] = None # this will be filled later during runtime by calling get_collection_reclist(coll)
            return ret
        def timestamp_verifier():
            # Invalidate as soon as the collection table changes.
            return get_table_update_time('collection')
        DataCacher.__init__(self, cache_filler, timestamp_verifier)
# Instantiate the module-level singleton on first import: referencing the
# not-yet-defined name raises NameError (caught below), and an unhealthy
# existing instance (is_ok_p false) is likewise replaced.
try:
    if not collection_reclist_cache.is_ok_p:
        raise Exception
except Exception:
    collection_reclist_cache = CollectionRecListDataCacher()
def get_collection_reclist(coll, recreate_cache_if_needed=True):
    """Return hitset (intbitset) of recIDs that belong to the collection COLL.

    The hitset is computed lazily on first access and then memoized in
    collection_reclist_cache.  When RECREATE_CACHE_IF_NEEDED is True the
    cache is first revalidated against the collection table update time.
    Raises KeyError when COLL is not a known collection name.
    """
    if recreate_cache_if_needed:
        collection_reclist_cache.recreate_cache_if_needed()
    if not collection_reclist_cache.cache[coll]:
        # not yet in the cache, so calculate it and fill the cache:
        # (note: an empty hitset is falsy, so empty collections are
        # recomputed on every call -- cheap, and preserves behaviour)
        reclist = intbitset()  # renamed from `set` to avoid shadowing the builtin
        query = "SELECT nbrecs,reclist FROM collection WHERE name=%s"
        res = run_sql(query, (coll, ), 1)
        if res:
            try:
                reclist = intbitset(res[0][1])
            except Exception:
                # corrupted or NULL serialized reclist in the DB:
                # fall back to an empty hitset.  (Was a bare `except:`,
                # which also swallowed KeyboardInterrupt/SystemExit.)
                pass
        collection_reclist_cache.cache[coll] = reclist
    # finally, return reclist:
    return collection_reclist_cache.cache[coll]
def get_available_output_formats(visible_only=False):
    """
    Return the list of available output formats as dicts with keys
    'value' (format code) and 'text' (format name).  When VISIBLE_ONLY
    is True, only formats whose visibility flag is 1 are returned.
    Falls back to a single 'HTML brief' entry when the DB defines none.
    """
    sql = "SELECT code,name FROM format"
    if visible_only:
        sql += " WHERE visibility='1'"
    sql += " ORDER BY name ASC"
    rows = run_sql(sql)
    if not rows:
        # nothing defined in the DB: propose the default brief HTML format
        return [{'value' : 'hb',
                 'text' : "HTML brief"
                }]
    # propose found formats:
    return [{'value' : code,
             'text' : name
            } for code, name in rows]
class SearchResultsCache(DataCacher):
    """
    Provides temporary lazy cache for Search Results.
    Useful when users click on `next page'.

    The filler returns an empty dict: entries are added at runtime by the
    search machinery, and the constant 1970 timestamp means the cache is
    never considered stale -- eviction is governed solely by
    CFG_WEBSEARCH_SEARCH_CACHE_SIZE.
    """
    def __init__(self):
        def cache_filler():
            return {}
        def timestamp_verifier():
            return '1970-01-01 00:00:00' # lazy cache is always okay;
                                         # its filling is governed by
                                         # CFG_WEBSEARCH_SEARCH_CACHE_SIZE
        DataCacher.__init__(self, cache_filler, timestamp_verifier)
# Instantiate the module-level singleton on first import: referencing the
# not-yet-defined name raises NameError (caught below), and an unhealthy
# existing instance (is_ok_p false) is likewise replaced.
try:
    if not search_results_cache.is_ok_p:
        raise Exception
except Exception:
    search_results_cache = SearchResultsCache()
class CollectionI18nNameDataCacher(DataCacher):
    """
    Provides cache for I18N collection names. This class is not to be
    used directly; use function get_coll_i18nname() instead.

    The cache maps collection name -> {language code -> translated long
    name}; only non-empty translations of type 'ln' are stored.
    """
    def __init__(self):
        def cache_filler():
            ret = {}
            try:
                res = run_sql("SELECT c.name,cn.ln,cn.value FROM collectionname AS cn, collection AS c WHERE cn.id_collection=c.id AND cn.type='ln'") # ln=long name
            except Exception:
                # database problems
                return {}
            for c, ln, i18nname in res:
                if i18nname:
                    if not ret.has_key(c):
                        ret[c] = {}
                    ret[c][ln] = i18nname
            return ret
        def timestamp_verifier():
            # Invalidate as soon as the collectionname table changes.
            return get_table_update_time('collectionname')
        DataCacher.__init__(self, cache_filler, timestamp_verifier)
# Instantiate the module-level singleton on first import: referencing the
# not-yet-defined name raises NameError (caught below), and an unhealthy
# existing instance (is_ok_p false) is likewise replaced.
try:
    if not collection_i18nname_cache.is_ok_p:
        raise Exception
except Exception:
    collection_i18nname_cache = CollectionI18nNameDataCacher()
def get_coll_i18nname(c, ln=CFG_SITE_LANG, verify_cache_timestamp=True):
    """
    Return the I18N long name of collection C in language LN, falling
    back to the internal name C itself when no translation exists.

    By default the I18N cache is revalidated against the DB table update
    time on every call, which is slow when invoked many times in a row
    (each call performs a table-update-time check).  Pass
    VERIFY_CACHE_TIMESTAMP=False to trust the current cache, e.g. when
    generating collection lists for the search results page.
    """
    if verify_cache_timestamp:
        collection_i18nname_cache.recreate_cache_if_needed()
    # missing collection or missing translation in LN both fall back to C:
    translations = collection_i18nname_cache.cache.get(c, {})
    return translations.get(ln, c)
class FieldI18nNameDataCacher(DataCacher):
    """
    Provides cache for I18N field names. This class is not to be used
    directly; use function get_field_i18nname() instead.

    The cache maps field name -> {language code -> translated long
    name}; only non-empty translations of type 'ln' are stored.
    """
    def __init__(self):
        def cache_filler():
            ret = {}
            try:
                res = run_sql("SELECT f.name,fn.ln,fn.value FROM fieldname AS fn, field AS f WHERE fn.id_field=f.id AND fn.type='ln'") # ln=long name
            except Exception:
                # database problems, return empty cache
                return {}
            for f, ln, i18nname in res:
                if i18nname:
                    if not ret.has_key(f):
                        ret[f] = {}
                    ret[f][ln] = i18nname
            return ret
        def timestamp_verifier():
            # Invalidate as soon as the fieldname table changes.
            return get_table_update_time('fieldname')
        DataCacher.__init__(self, cache_filler, timestamp_verifier)
# Instantiate the module-level singleton on first import: referencing the
# not-yet-defined name raises NameError (caught below), and an unhealthy
# existing instance (is_ok_p false) is likewise replaced.
try:
    if not field_i18nname_cache.is_ok_p:
        raise Exception
except Exception:
    field_i18nname_cache = FieldI18nNameDataCacher()
def get_field_i18nname(f, ln=CFG_SITE_LANG, verify_cache_timestamp=True):
    """
    Return the I18N long name of field F in language LN, falling back to
    the internal field name F itself when no translation exists.

    When VERIFY_CACHE_TIMESTAMP is True (default), the DB table update
    time is checked and the cache refreshed if stale; set it to False
    when calling this inside a loop to avoid repeated timestamp checks.
    """
    if verify_cache_timestamp:
        field_i18nname_cache.recreate_cache_if_needed()
    # missing field or missing translation in LN both fall back to F:
    translations = field_i18nname_cache.cache.get(f, {})
    return translations.get(ln, f)
def get_alphabetically_ordered_collection_list(level=0, ln=CFG_SITE_LANG):
    """Return an alphabetically ordered list of collections as
    [collection name, printable collection name] pairs, suitable for
    create_search_box().  LEVEL > 0 prefixes each printable name with
    that many dashes to suggest tree depth."""
    out = []
    for c_id, c_name in run_sql("SELECT id,name FROM collection ORDER BY name ASC"):
        # build a nice printable name, truncating overly long
        # translated collection names:
        fullname = get_coll_i18nname(c_name, ln, False)
        printable = wash_index_term(fullname, 30, False)
        if printable != fullname:
            printable += "..."
        if level:
            printable = " " + level * '-' + " " + printable
        out.append([c_name, printable])
    return out
def get_nicely_ordered_collection_list(collid=1, level=0, ln=CFG_SITE_LANG):
    """Returns nicely ordered (score respected) list of collections, more exactly list of tuples
       (collection name, printable collection name).
       Suitable for create_search_box().

       Walks the collection tree depth-first from COLLID, ordering the
       sons of each node by descending score; LEVEL dashes prefix each
       printable name to show the depth.  Recurses into each son, so
       deep or cyclic collection trees would be expensive -- presumably
       the tree is small and acyclic in practice (TODO confirm)."""
    colls_nicely_ordered = []
    res = run_sql("""SELECT c.name,cc.id_son FROM collection_collection AS cc, collection AS c
                     WHERE c.id=cc.id_son AND cc.id_dad=%s ORDER BY score DESC""", (collid, ))
    for c, cid in res:
        # make a nice printable name (e.g. truncate c_printable for
        # long collection names in given language):
        c_printable_fullname = get_coll_i18nname(c, ln, False)
        c_printable = wash_index_term(c_printable_fullname, 30, False)
        if c_printable != c_printable_fullname:
            c_printable = c_printable + "..."
        if level:
            c_printable = " " + level * '-' + " " + c_printable
        colls_nicely_ordered.append([c, c_printable])
        # recurse into the subtree rooted at this son, one level deeper:
        colls_nicely_ordered = colls_nicely_ordered + get_nicely_ordered_collection_list(cid, level+1, ln=ln)
    return colls_nicely_ordered
def get_index_id_from_field(field):
    """
    Return index id with name corresponding to FIELD, or the first
    index id where the logical field code named FIELD is indexed.
    Return zero in case there is no index defined for this field.
    Example: field='author', output=4.
    """
    if not field:
        field = 'global' # empty string field means 'global' index (field 'anyfield')
    # 1) direct match on the index name:
    res = run_sql("""SELECT id FROM idxINDEX WHERE name=%s""", (field,))
    if res:
        return res[0][0]
    # 2) no index of that name: take the first index covering the
    #    logical field code FIELD, if any:
    res = run_sql("""SELECT w.id FROM idxINDEX AS w, idxINDEX_field AS wf, field AS f
                      WHERE f.code=%s AND wf.id_field=f.id AND w.id=wf.id_idxINDEX
                      LIMIT 1""", (field,))
    if res:
        return res[0][0]
    return 0
def get_words_from_pattern(pattern):
    """Return the list of unique whitespace-separated words from PATTERN.

    Duplicates are removed; the order of the returned words is not
    guaranteed (the original exposed dict-key order).
    """
    # Replaced the removed-in-Python-3 idioms `string.split(pattern)`
    # and `dict.has_key` with their portable equivalents.
    words = {}
    for word in pattern.split():
        if word not in words:
            words[word] = 1
    return list(words)
def create_basic_search_units(req, p, f, m=None, of='hb'):
    """Split the search pattern P in field F into independently searchable units.

    Returns a list of basic search units [operator, pattern, field, type]:
      - operator: set union '|', set intersection '+' or set exclusion '-';
      - pattern:  a word (e.g. muon*) or a phrase (e.g. 'nuclear physics');
      - field:    a logical field code like 'title' or a MARC tag like '100__a';
      - type:     'w' = word-index search, 'a' = access-file (phrase) search,
                  'r' = regular-expression search.

    The optional match type M forces the matching mode (e.g. when coming
    from the advanced search interface); when unset, the mode is guessed
    heuristically from the pattern.  M may be: 'a'='all of the words',
    'o'='any of the words', 'p'='phrase/substring', 'r'='regular
    expression', 'e'='exact value'.

    Warnings are printed on REQ (when not None) for HTML output formats OF.
    NOTE(review): the heuristic branch indexes p[0]/p[-1], so P is assumed
    non-empty when M is unset -- callers appear to guarantee it; confirm.
    """
    opfts = [] # will hold (o,p,f,t,h) units
    # FIXME: quick hack for the journal index
    if f == 'journal':
        opfts.append(['+', p, f, 'w'])
        return opfts
    ## check arguments: is desired matching type set?
    if m:
        ## A - matching type is known; good!
        if m == 'e':
            # A1 - exact value:
            opfts.append(['+', p, f, 'a']) # '+' since we have only one unit
        elif m == 'p':
            # A2 - phrase/substring:
            opfts.append(['+', "%" + p + "%", f, 'a']) # '+' since we have only one unit
        elif m == 'r':
            # A3 - regular expression:
            opfts.append(['+', p, f, 'r']) # '+' since we have only one unit
        elif m == 'a' or m == 'w':
            # A4 - all of the words:
            p = strip_accents(p) # strip accents for 'w' mode, FIXME: delete when not needed
            for word in get_words_from_pattern(p):
                opfts.append(['+', word, f, 'w']) # '+' in all units
        elif m == 'o':
            # A5 - any of the words:
            p = strip_accents(p) # strip accents for 'w' mode, FIXME: delete when not needed
            for word in get_words_from_pattern(p):
                if len(opfts)==0:
                    opfts.append(['+', word, f, 'w']) # '+' in the first unit
                else:
                    opfts.append(['|', word, f, 'w']) # '|' in further units
        else:
            # unknown matching type: warn (HTML formats only) and fall
            # back to a substring word search
            if of.startswith("h"):
                print_warning(req, "Matching type '%s' is not implemented yet." % cgi.escape(m), "Warning")
            opfts.append(['+', "%" + p + "%", f, 'w'])
    else:
        ## B - matching type is not known: let us try to determine it by some heuristics
        if f and p[0] == '"' and p[-1] == '"':
            ## B0 - does 'p' start and end by double quote, and is 'f' defined? => doing ACC search
            opfts.append(['+', p[1:-1], f, 'a'])
        elif f in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor') and author_name_requires_phrase_search(p):
            ## B1 - do we search in author, and does 'p' contain space/comma/dot/etc?
            ## => doing washed ACC search
            opfts.append(['+', p, f, 'a'])
        elif f and p[0] == "'" and p[-1] == "'":
            ## B0bis - does 'p' start and end by single quote, and is 'f' defined? => doing ACC search
            opfts.append(['+', '%' + p[1:-1] + '%', f, 'a'])
        elif f and p[0] == "/" and p[-1] == "/":
            ## B0ter - does 'p' start and end by a slash, and is 'f' defined? => doing regexp search
            opfts.append(['+', p[1:-1], f, 'r'])
        elif f and string.find(p, ',') >= 0:
            ## B1 - does 'p' contain comma, and is 'f' defined? => doing ACC search
            opfts.append(['+', p, f, 'a'])
        elif f and str(f[0:2]).isdigit():
            ## B2 - does 'f' exist and starts by two digits? => doing ACC search
            opfts.append(['+', p, f, 'a'])
        else:
            ## B3 - doing WRD search, but maybe ACC too
            # search units are separated by spaces unless the space is within single or double quotes
            # so, let us replace temporarily any space within quotes by '__SPACE__'
            p = re_pattern_single_quotes.sub(lambda x: "'"+string.replace(x.group(1), ' ', '__SPACE__')+"'", p)
            p = re_pattern_double_quotes.sub(lambda x: "\""+string.replace(x.group(1), ' ', '__SPACE__')+"\"", p)
            p = re_pattern_regexp_quotes.sub(lambda x: "/"+string.replace(x.group(1), ' ', '__SPACE__')+"/", p)
            # and spaces after colon as well:
            p = re_pattern_spaces_after_colon.sub(lambda x: string.replace(x.group(1), ' ', '__SPACE__'), p)
            # wash argument:
            p = re_equal.sub(":", p)
            p = re_logical_and.sub(" ", p)
            p = re_logical_or.sub(" |", p)
            p = re_logical_not.sub(" -", p)
            p = re_operators.sub(r' \1', p)
            for pi in string.split(p): # iterate through separated units (or items, as "pi" stands for "p item")
                pi = re_pattern_space.sub(" ", pi) # replace back '__SPACE__' by ' '
                # firstly, determine set operator
                if pi[0] == '+' or pi[0] == '-' or pi[0] == '|':
                    oi = pi[0]
                    pi = pi[1:]
                else:
                    # okay, there is no operator, so let us decide what to do by default
                    oi = '+' # by default we are doing set intersection...
                # secondly, determine search pattern and field:
                if string.find(pi, ":") > 0:
                    fi, pi = string.split(pi, ":", 1)
                    fi = wash_field(fi)
                    # test whether fi is a real index code or a MARC-tag defined code:
                    if fi in get_fieldcodes() or '00' <= fi[:2] <= '99':
                        pass
                    else:
                        # it is not, so join it back:
                        fi, pi = f, fi + ":" + pi
                else:
                    fi, pi = f, pi
                # wash 'fi' argument:
                fi = wash_field(fi)
                # wash 'pi' argument:
                pi = pi.strip() # strip eventual spaces
                if re_quotes.match(pi):
                    # B3a - quotes are found => do ACC search (phrase search)
                    if pi[0] == '"' and pi[-1] == '"':
                        pi = string.replace(pi, '"', '') # remove quote signs
                        opfts.append([oi, pi, fi, 'a'])
                    elif pi[0] == "'" and pi[-1] == "'":
                        pi = string.replace(pi, "'", "") # remove quote signs
                        opfts.append([oi, "%" + pi + "%", fi, 'a'])
                    else: # unbalanced quotes, so fall back to WRD query:
                        opfts.append([oi, pi, fi, 'w'])
                elif pi.startswith('/') and pi.endswith('/'):
                    # B3b - pi has slashes around => do regexp search
                    opfts.append([oi, pi[1:-1], fi, 'r'])
                elif fi and len(fi) > 1 and str(fi[0]).isdigit() and str(fi[1]).isdigit():
                    # B3c - fi exists and starts by two digits => do ACC search
                    opfts.append([oi, pi, fi, 'a'])
                elif fi and not get_index_id_from_field(fi) and get_field_name(fi):
                    # B3d - logical field fi exists but there is no WRD index for fi => try ACC search
                    opfts.append([oi, pi, fi, 'a'])
                else:
                    # B3e - general case => do WRD search
                    pi = strip_accents(pi) # strip accents for 'w' mode, FIXME: delete when not needed
                    for pii in get_words_from_pattern(pi):
                        opfts.append([oi, pii, fi, 'w'])
    ## sanity check:
    # NOTE(review): this loop deletes from `opfts` while iterating over
    # range(len(opfts)); the resulting IndexError after a deletion is
    # silenced by the bare except, so later units may escape the check.
    for i in range(0, len(opfts)):
        try:
            pi = opfts[i][1]
            if pi == '*':
                if of.startswith("h"):
                    print_warning(req, "Ignoring standalone wildcard word.", "Warning")
                del opfts[i]
            if pi == '' or pi == ' ':
                fi = opfts[i][2]
                if fi:
                    if of.startswith("h"):
                        print_warning(req, "Ignoring empty <em>%s</em> search term." % fi, "Warning")
                del opfts[i]
        except:
            pass
    ## replace old logical field names if applicable:
    if CFG_WEBSEARCH_FIELDS_CONVERT:
        opfts = [[o,p,wash_field(f),t] for o,p,f,t in opfts]
    ## return search units:
    return opfts
def page_start(req, of, cc, aas, ln, uid, title_message=None,
               description='', keywords='', recID=-1, tab='', p=''):
    """
    Start page according to given output format.

    Writes the appropriate content-type header and page opening to REQ:
    RSS/XML declaration for 'x*' formats, plain text for 't*'/numeric
    formats, nothing for 'id', and the full HTML header (navtrail, RSS
    link, optional MathJax) for HTML formats.  Returns None in all cases.

    @param title_message: title of the page, not escaped for HTML
    @param description: description of the page, not escaped for HTML
    @param keywords: keywords of the page, not escaped for HTML
    """
    _ = gettext_set_language(ln)
    if not req or isinstance(req, cStringIO.OutputType):
        return # we were called from CLI
    if not title_message:
        title_message = _("Search Results")
    content_type = get_output_format_content_type(of)
    if of.startswith('x'):
        if of == 'xr':
            # we are doing RSS output
            req.content_type = "application/rss+xml"
            req.send_http_header()
            req.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
        else:
            # we are doing XML output:
            req.content_type = "text/xml"
            req.send_http_header()
            req.write("""<?xml version="1.0" encoding="UTF-8"?>\n""")
    elif of.startswith('t') or str(of[0:3]).isdigit():
        # we are doing plain text output:
        req.content_type = "text/plain"
        req.send_http_header()
    elif of == "id":
        pass # nothing to do, we shall only return list of recIDs
    elif content_type == 'text/html':
        # we are doing HTML output:
        req.content_type = "text/html"
        req.send_http_header()
        if not description:
            description = "%s %s." % (cc, _("Search Results"))
        if not keywords:
            keywords = "%s, WebSearch, %s" % (get_coll_i18nname(CFG_SITE_NAME, ln, False), get_coll_i18nname(cc, ln, False))
        ## generate RSS URL:
        argd = {}
        if req.args:
            argd = cgi.parse_qs(req.args)
        rssurl = websearch_templates.build_rss_url(argd)
        ## add MathJax if displaying single records (FIXME: find
        ## eventual better place to this code)
        if of.lower() in CFG_WEBSEARCH_USE_MATHJAX_FOR_FORMATS:
            metaheaderadd = get_mathjax_header(req.is_https())
        else:
            metaheaderadd = ''
        ## generate navtrail:
        navtrail = create_navtrail_links(cc, aas, ln)
        if navtrail != '':
            navtrail += ' > '
        # NOTE(review): `(of != '' or of.lower() != 'hd')` is True for
        # every value of `of` (any non-empty `of` satisfies the first
        # test, empty `of` satisfies the second) -- presumably `and` was
        # intended; confirm before changing, as the same expression is
        # repeated below.
        if (tab != '' or ((of != '' or of.lower() != 'hd') and of != 'hb')) and \
               recID != -1:
            # If we are not in information tab in HD format, customize
            # the nav. trail to have a link back to main record. (Due
            # to the way perform_request_search() works, hb
            # (lowercase) is equal to hd)
            navtrail += ' <a class="navtrail" href="%s/%s/%s">%s</a>' % \
                            (CFG_SITE_URL, CFG_SITE_RECORD, recID, cgi.escape(title_message))
            if (of != '' or of.lower() != 'hd') and of != 'hb':
                # Export
                format_name = of
                query = "SELECT name FROM format WHERE code=%s"
                res = run_sql(query, (of,))
                if res:
                    format_name = res[0][0]
                navtrail += ' > ' + format_name
            else:
                # Discussion, citations, etc. tabs
                tab_label = get_detailed_page_tabs(cc, ln=ln)[tab]['label']
                navtrail += ' > ' + _(tab_label)
        else:
            navtrail += cgi.escape(title_message)
        if p:
            # we are serving search/browse results pages, so insert pattern:
            navtrail += ": " + cgi.escape(p)
            title_message = p + " - " + title_message
        body_css_classes = []
        if cc:
            # we know the collection, lets allow page styles based on cc
            #collection names may not satisfy rules for css classes which
            #are something like: -?[_a-zA-Z]+[_a-zA-Z0-9-]*
            #however it isn't clear what we should do about cases with
            #numbers, so we leave them to fail. Everything else becomes "_"
            css = nmtoken_from_string(cc).replace('.','_').replace('-','_').replace(':','_')
            body_css_classes.append(css)
        ## finally, print page header:
        req.write(pageheaderonly(req=req, title=title_message,
                                 navtrail=navtrail,
                                 description=description,
                                 keywords=keywords,
                                 metaheaderadd=metaheaderadd,
                                 uid=uid,
                                 language=ln,
                                 navmenuid='search',
                                 navtrail_append_title_p=0,
                                 rssurl=rssurl,
                                 body_css_classes=body_css_classes))
        req.write(websearch_templates.tmpl_search_pagestart(ln=ln))
    #else:
    #    req.send_http_header()
def page_end(req, of="hb", ln=CFG_SITE_LANG):
    """End the page for output format OF: close the search page body and
    append the HTML footer for 'h*' formats.  For of='id' an empty recID
    list is returned; when REQ is None (CLI invocation) nothing is done."""
    if of == "id":
        return []  # caller expects an (empty) list of recIDs
    if not req:
        return  # we were called from CLI
    if of.startswith('h'):
        # close the page body, then emit the standard footer:
        req.write(websearch_templates.tmpl_search_pageend(ln=ln))
        req.write(pagefooteronly(lastupdated=__lastupdated__, language=ln, req=req))
    return
def create_page_title_search_pattern_info(p, p1, p2, p3):
    """Build the search-pattern fragment for the page <title> header.

    When the Simple Search pattern P is non-empty it wins outright;
    otherwise the Advanced Search patterns P1..P3 are joined with single
    spaces, skipping empty ones (P1 is always taken as the base)."""
    if p:
        return p
    out = p1
    if p2:
        out += ' ' + p2
    if p3:
        out += ' ' + p3
    return out
def create_inputdate_box(name="d1", selected_year=0, selected_month=0, selected_day=0, ln=CFG_SITE_LANG):
    """Produce 'From Date'/'Until Date' style HTML select boxes for search options.

    Emits three <select> elements named NAMEd, NAMEm and NAMEy (day,
    month, year); the SELECTED_* values pre-select the matching option,
    and 0 (or any non-match) leaves the localized 'any ...' option
    active.  The year range covers the last 20 years up to the current
    year.
    """
    _ = gettext_set_language(ln)
    box = ""
    # day
    box += """<select name="%sd">""" % name
    box += """<option value="">%s""" % _("any day")
    for day in range(1, 32):
        box += """<option value="%02d"%s>%02d""" % (day, is_selected(day, selected_day), day)
    box += """</select>"""
    # month
    box += """<select name="%sm">""" % name
    box += """<option value="">%s""" % _("any month")
    # trailing space in May distinguishes short/long form of the month name
    # (the translation key "May " differs from short "May"); strip() below
    # removes it again for display.
    for mm, month in [(1, _("January")), (2, _("February")), (3, _("March")), (4, _("April")), \
                      (5, _("May ")), (6, _("June")), (7, _("July")), (8, _("August")), \
                      (9, _("September")), (10, _("October")), (11, _("November")), (12, _("December"))]:
        box += """<option value="%02d"%s>%s""" % (mm, is_selected(mm, selected_month), month.strip())
    box += """</select>"""
    # year
    box += """<select name="%sy">""" % name
    box += """<option value="">%s""" % _("any year")
    this_year = int(time.strftime("%Y", time.localtime()))
    for year in range(this_year-20, this_year+1):
        box += """<option value="%d"%s>%d""" % (year, is_selected(year, selected_year), year)
    box += """</select>"""
    return box
def create_search_box(cc, colls, p, f, rg, sf, so, sp, rm, of, ot, aas,
                      ln, p1, f1, m1, op1, p2, f2, m2, op2, p3, f3,
                      m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec,
                      action=""):
    """Create search box for 'search again in the results page' functionality.

    CC is the current collection and COLLS the collections searched; the
    remaining arguments echo the current query state (pattern p/p1..p3,
    fields f/f1..f3, match types m1..m3, operators op1/op2, ranking rm,
    sorting sf/so, output of/rg/jrec, date limits d1*/d2*/dt, etc.) so
    the rendered box reproduces the user's previous choices.  Returns
    the HTML produced by websearch_templates.tmpl_search_box().
    """
    # load the right message language
    _ = gettext_set_language(ln)
    # some computations
    cc_intl = get_coll_i18nname(cc, ln, False)
    cc_colID = get_colID(cc)
    # build the list of [name, printable name] pairs for the collection menus:
    if cfg_nicely_ordered_collection_list:
        colls_nicely_ordered = get_nicely_ordered_collection_list(ln=ln)
    else:
        colls_nicely_ordered = get_alphabetically_ordered_collection_list(ln=ln)
    colls_nice = []
    for (cx, cx_printable) in colls_nicely_ordered:
        if not cx.startswith("Unnamed collection"):
            colls_nice.append({ 'value' : cx,
                                'text' : cx_printable
                              })
    coll_selects = []
    if colls and colls[0] != CFG_SITE_NAME:
        # some collections are defined, so print these first, and only then print 'add another collection' heading:
        for c in colls:
            if c:
                temp = []
                temp.append({ 'value' : CFG_SITE_NAME,
                              'text' : '*** %s ***' % _("any public collection")
                            })
                # this field is used to remove the current collection from the ones to be searched.
                temp.append({ 'value' : '',
                              'text' : '*** %s ***' % _("remove this collection")
                            })
                for val in colls_nice:
                    # print collection:
                    # BUGFIX: the original tested the stale variable `cx`
                    # (left over from the colls_nice-building loop above)
                    # instead of the current entry; colls_nice is already
                    # filtered, but test the right value anyway.
                    if not val['value'].startswith("Unnamed collection"):
                        temp.append({ 'value' : val['value'],
                                      'text' : val['text'],
                                      'selected' : (c == re.sub("^[\s\-]*","", val['value']))
                                    })
                coll_selects.append(temp)
        coll_selects.append([{ 'value' : '',
                               'text' : '*** %s ***' % _("add another collection")
                             }] + colls_nice)
    else: # we searched in CFG_SITE_NAME, so print 'any public collection' heading
        coll_selects.append([{ 'value' : CFG_SITE_NAME,
                               'text' : '*** %s ***' % _("any public collection")
                             }] + colls_nice)
    ## ranking methods
    ranks = [{
        'value' : '',
        'text' : "- %s %s -" % (_("OR").lower (), _("rank by")),
    }]
    for (code, name) in get_bibrank_methods(cc_colID, ln):
        # propose found rank methods:
        ranks.append({
            'value' : code,
            'text' : name,
        })
    formats = get_available_output_formats(visible_only=True)
    # show collections in the search box? (not if there is only one
    # collection defined, and not if we are in light search)
    show_colls = True
    show_title = True
    if len(collection_reclist_cache.cache.keys()) == 1 or \
           aas == -1:
        show_colls = False
        show_title = False
    if cc == CFG_SITE_NAME:
        show_title = False
    if CFG_INSPIRE_SITE:
        show_title = False
    return websearch_templates.tmpl_search_box(
             ln = ln,
             aas = aas,
             cc_intl = cc_intl,
             cc = cc,
             ot = ot,
             sp = sp,
             action = action,
             fieldslist = get_searchwithin_fields(ln=ln, colID=cc_colID),
             f1 = f1,
             f2 = f2,
             f3 = f3,
             m1 = m1,
             m2 = m2,
             m3 = m3,
             p1 = p1,
             p2 = p2,
             p3 = p3,
             op1 = op1,
             op2 = op2,
             rm = rm,
             p = p,
             f = f,
             coll_selects = coll_selects,
             d1y = d1y, d2y = d2y, d1m = d1m, d2m = d2m, d1d = d1d, d2d = d2d,
             dt = dt,
             sort_fields = get_sortby_fields(ln=ln, colID=cc_colID),
             sf = sf,
             so = so,
             ranks = ranks,
             sc = sc,
             rg = rg,
             formats = formats,
             of = of,
             pl = pl,
             jrec = jrec,
             ec = ec,
             show_colls = show_colls,
             show_title = show_title,
           )
def create_navtrail_links(cc=CFG_SITE_NAME, aas=0, ln=CFG_SITE_LANG, self_p=1, tab=''):
    """Creates navigation trail links, i.e. links to collection
    ancestors (except Home collection). If aas==1, then links to
    Advanced Search interfaces; otherwise Simple Search.
    When SELF_P is true, CC itself is appended as the last trail entry.
    """
    # pair every non-Home ancestor with its translated name:
    dads = [(dad, get_coll_i18nname(dad, ln, False))
            for dad in get_coll_ancestors(cc)
            if dad != CFG_SITE_NAME]
    if self_p and cc != CFG_SITE_NAME:
        dads.append((cc, get_coll_i18nname(cc, ln, False)))
    return websearch_templates.tmpl_navtrail_links(
        aas=aas, ln=ln, dads=dads)
def get_searchwithin_fields(ln='en', colID=None):
    """Return the field entries for the 'search within' selection box of
    collection COLID, as dicts with 'value' (code) and 'text'
    (translated name); an 'any field' entry always comes first."""
    res = None
    if colID:
        res = run_sql("""SELECT f.code,f.name FROM field AS f, collection_field_fieldvalue AS cff
                         WHERE cff.type='sew' AND cff.id_collection=%s AND cff.id_field=f.id
                         ORDER BY cff.score DESC, f.name ASC""", (colID,))
    if not res:
        # no per-collection configuration: fall back to all known fields
        res = run_sql("SELECT code,name FROM field ORDER BY name ASC")
    fields = [{
        'value' : '',
        'text' : get_field_i18nname("any field", ln, False)
    }]
    fields.extend({'value' : code,
                   'text' : get_field_i18nname(name, ln, False)}
                  for code, name in res
                  if code and code != "anyfield")
    return fields
def get_sortby_fields(ln='en', colID=None):
    """Return the field entries for the 'sort by' selection box of
    collection COLID, as dicts with 'value' (code) and 'text'
    (translated name); a 'latest first' entry always comes first.
    Falls back to the Home collection's sort fields, then to every
    configured sort field, when COLID defines none."""
    _ = gettext_set_language(ln)
    res = None
    if colID:
        res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
                         WHERE cff.type='soo' AND cff.id_collection=%s AND cff.id_field=f.id
                         ORDER BY cff.score DESC, f.name ASC""", (colID,))
    if not res:
        # no sort fields defined for this colID, try to take Home collection:
        res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
                         WHERE cff.type='soo' AND cff.id_collection=%s AND cff.id_field=f.id
                         ORDER BY cff.score DESC, f.name ASC""", (1,))
    if not res:
        # no sort fields defined for the Home collection, take all sort fields defined wherever they are:
        res = run_sql("""SELECT DISTINCT(f.code),f.name FROM field AS f, collection_field_fieldvalue AS cff
                         WHERE cff.type='soo' AND cff.id_field=f.id
                         ORDER BY cff.score DESC, f.name ASC""",)
    fields = [{
        'value' : '',
        'text' : _("latest first")
    }]
    fields.extend({'value' : code,
                   'text' : get_field_i18nname(name, ln, False)}
                  for code, name in res
                  if code and code != "anyfield")
    return fields
def create_andornot_box(name='op', value='', ln='en'):
    """Return HTML for the AND/OR/AND-NOT operator selection box named NAME,
    with VALUE ('a'/'o'/'n') pre-selected."""
    _ = gettext_set_language(ln)
    template = """
    <select name="%s">
    <option value="a"%s>%s
    <option value="o"%s>%s
    <option value="n"%s>%s
    </select>
    """
    args = [name]
    for code, label in (('a', _("AND")),
                        ('o', _("OR")),
                        ('n', _("AND NOT"))):
        args.append(is_selected(code, value))
        args.append(label)
    return template % tuple(args)
def create_matchtype_box(name='m', value='', ln='en'):
    """Return HTML for the 'match type' selection box named NAME
    (all/any of the words, exact/partial phrase, regexp), with VALUE
    pre-selected."""
    _ = gettext_set_language(ln)
    template = """
    <select name="%s">
    <option value="a"%s>%s
    <option value="o"%s>%s
    <option value="e"%s>%s
    <option value="p"%s>%s
    <option value="r"%s>%s
    </select>
    """
    args = [name]
    for code, label in (('a', _("All of the words:")),
                        ('o', _("Any of the words:")),
                        ('e', _("Exact phrase:")),
                        ('p', _("Partial phrase:")),
                        ('r', _("Regular expression:"))):
        args.append(is_selected(code, value))
        args.append(label)
    return template % tuple(args)
def is_selected(var, fld):
    """Return ' selected' when VAR matches FLD, else ''; for HTML select boxes.

    Matching rules: two ints compare numerically; otherwise the string
    forms are compared; finally a legacy 3-char FLD of the form 'wXY'
    matches VAR == 'XY'.
    """
    if type(var) is int and type(fld) is int:
        # strict int/int comparison (no string fallback for unequal ints)
        if var == fld:
            return " selected"
        return ""
    if str(var) == str(fld):
        return " selected"
    if fld and len(fld) == 3 and fld[0] == "w" and var == fld[1:]:
        # legacy 'w'-prefixed field value
        return " selected"
    return ""
def wash_colls(cc, c, split_colls=0, verbose=0):
"""Wash collection list by checking whether user has deselected
anything under 'Narrow search'. Checks also if cc is a list or not.
Return list of cc, colls_to_display, colls_to_search since the list
of collections to display is different from that to search in.
This is because users might have chosen 'split by collection'
functionality.
The behaviour of "collections to display" depends solely whether
user has deselected a particular collection: e.g. if it started
from 'Articles and Preprints' page, and deselected 'Preprints',
then collection to display is 'Articles'. If he did not deselect
anything, then collection to display is 'Articles & Preprints'.
The behaviour of "collections to search in" depends on the
'split_colls' parameter:
* if is equal to 1, then we can wash the colls list down
and search solely in the collection the user started from;
* if is equal to 0, then we are splitting to the first level
of collections, i.e. collections as they appear on the page
we started to search from;
The function raises exception
InvenioWebSearchUnknownCollectionError
if cc or one of c collections is not known.
"""
colls_out = []
colls_out_for_display = []
# list to hold the hosted collections to be searched and displayed
hosted_colls_out = []
debug = ""
if verbose:
debug += "<br />"
debug += "<br />1) --- initial parameters ---"
debug += "<br />cc : %s" % cc
debug += "<br />c : %s" % c
debug += "<br />"
# check what type is 'cc':
if type(cc) is list:
for ci in cc:
if collection_reclist_cache.cache.has_key(ci):
# yes this collection is real, so use it:
cc = ci
break
else:
# check once if cc is real:
if not collection_reclist_cache.cache.has_key(cc):
if cc:
raise InvenioWebSearchUnknownCollectionError(cc)
else:
cc = CFG_SITE_NAME # cc is not set, so replace it with Home collection
# check type of 'c' argument:
if type(c) is list:
colls = c
else:
colls = [c]
if verbose:
debug += "<br />2) --- after check for the integrity of cc and the being or not c a list ---"
debug += "<br />cc : %s" % cc
debug += "<br />c : %s" % c
debug += "<br />"
# remove all 'unreal' collections:
colls_real = []
for coll in colls:
if collection_reclist_cache.cache.has_key(coll):
colls_real.append(coll)
else:
if coll:
raise InvenioWebSearchUnknownCollectionError(coll)
colls = colls_real
if verbose:
debug += "<br />3) --- keeping only the real colls of c ---"
debug += "<br />colls : %s" % colls
debug += "<br />"
# check if some real collections remain:
if len(colls)==0:
colls = [cc]
if verbose:
debug += "<br />4) --- in case no colls were left we use cc directly ---"
debug += "<br />colls : %s" % colls
debug += "<br />"
# then let us check the list of non-restricted "real" sons of 'cc' and compare it to 'coll':
res = run_sql("""SELECT c.name FROM collection AS c,
collection_collection AS cc,
collection AS ccc
WHERE c.id=cc.id_son AND cc.id_dad=ccc.id
AND ccc.name=%s AND cc.type='r'""", (cc,))
# list that holds all the non restricted sons of cc that are also not hosted collections
l_cc_nonrestricted_sons_and_nonhosted_colls = []
res_hosted = run_sql("""SELECT c.name FROM collection AS c,
collection_collection AS cc,
collection AS ccc
WHERE c.id=cc.id_son AND cc.id_dad=ccc.id
AND ccc.name=%s AND cc.type='r'
AND (c.dbquery NOT LIKE 'hostedcollection:%%' OR c.dbquery IS NULL)""", (cc,))
for row_hosted in res_hosted:
l_cc_nonrestricted_sons_and_nonhosted_colls.append(row_hosted[0])
l_cc_nonrestricted_sons_and_nonhosted_colls.sort()
l_cc_nonrestricted_sons = []
l_c = colls[:]
for row in res:
if not collection_restricted_p(row[0]):
l_cc_nonrestricted_sons.append(row[0])
l_c.sort()
l_cc_nonrestricted_sons.sort()
if l_cc_nonrestricted_sons == l_c:
colls_out_for_display = [cc] # yep, washing permitted, it is sufficient to display 'cc'
# the following elif is a hack that preserves the above funcionality when we start searching from
# the frontpage with some hosted collections deselected (either by default or manually)
elif set(l_cc_nonrestricted_sons_and_nonhosted_colls).issubset(set(l_c)):
colls_out_for_display = colls
split_colls = 0
else:
colls_out_for_display = colls # nope, we need to display all 'colls' successively
# remove duplicates:
#colls_out_for_display_nondups=filter(lambda x, colls_out_for_display=colls_out_for_display: colls_out_for_display[x-1] not in colls_out_for_display[x:], range(1, len(colls_out_for_display)+1))
#colls_out_for_display = map(lambda x, colls_out_for_display=colls_out_for_display:colls_out_for_display[x-1], colls_out_for_display_nondups)
#colls_out_for_display = list(set(colls_out_for_display))
#remove duplicates while preserving the order
set_out = set()
colls_out_for_display = [coll for coll in colls_out_for_display if coll not in set_out and not set_out.add(coll)]
if verbose:
debug += "<br />5) --- decide whether colls_out_for_diplay should be colls or is it sufficient for it to be cc; remove duplicates ---"
debug += "<br />colls_out_for_display : %s" % colls_out_for_display
debug += "<br />"
# FIXME: The below quoted part of the code has been commented out
# because it prevents searching in individual restricted daughter
# collections when both parent and all its public daughter
# collections were asked for, in addition to some restricted
# daughter collections. The removal was introduced for hosted
# collections, so we may want to double check in this context.
# the following piece of code takes care of removing collections whose ancestors are going to be searched anyway
# list to hold the collections to be removed
#colls_to_be_removed = []
# first calculate the collections that can safely be removed
#for coll in colls_out_for_display:
# for ancestor in get_coll_ancestors(coll):
# #if ancestor in colls_out_for_display: colls_to_be_removed.append(coll)
# if ancestor in colls_out_for_display and not is_hosted_collection(coll): colls_to_be_removed.append(coll)
# secondly remove the collections
#for coll in colls_to_be_removed:
# colls_out_for_display.remove(coll)
if verbose:
debug += "<br />6) --- remove collections that have ancestors about to be search, unless they are hosted ---"
debug += "<br />colls_out_for_display : %s" % colls_out_for_display
debug += "<br />"
# calculate the hosted collections to be searched.
if colls_out_for_display == [cc]:
if is_hosted_collection(cc):
hosted_colls_out.append(cc)
else:
for coll in get_coll_sons(cc):
if is_hosted_collection(coll):
hosted_colls_out.append(coll)
else:
for coll in colls_out_for_display:
if is_hosted_collection(coll):
hosted_colls_out.append(coll)
if verbose:
debug += "<br />7) --- calculate the hosted_colls_out ---"
debug += "<br />hosted_colls_out : %s" % hosted_colls_out
debug += "<br />"
# second, let us decide on collection splitting:
if split_colls == 0:
# type A - no sons are wanted
colls_out = colls_out_for_display
else:
# type B - sons (first-level descendants) are wanted
for coll in colls_out_for_display:
coll_sons = get_coll_sons(coll)
if coll_sons == []:
colls_out.append(coll)
else:
for coll_son in coll_sons:
if not is_hosted_collection(coll_son):
colls_out.append(coll_son)
#else:
# colls_out = colls_out + coll_sons
# remove duplicates:
#colls_out_nondups=filter(lambda x, colls_out=colls_out: colls_out[x-1] not in colls_out[x:], range(1, len(colls_out)+1))
#colls_out = map(lambda x, colls_out=colls_out:colls_out[x-1], colls_out_nondups)
#colls_out = list(set(colls_out))
#remove duplicates while preserving the order
set_out = set()
colls_out = [coll for coll in colls_out if coll not in set_out and not set_out.add(coll)]
if verbose:
debug += "<br />8) --- calculate the colls_out; remove duplicates ---"
debug += "<br />colls_out : %s" % colls_out
debug += "<br />"
# remove the hosted collections from the collections to be searched
if hosted_colls_out:
for coll in hosted_colls_out:
try:
colls_out.remove(coll)
except ValueError:
# in case coll was not found in colls_out
pass
if verbose:
debug += "<br />9) --- remove the hosted_colls from the colls_out ---"
debug += "<br />colls_out : %s" % colls_out
return (cc, colls_out_for_display, colls_out, hosted_colls_out, debug)
def wash_index_term(term, max_char_length=50, lower_term=True):
    """
    Return washed form of the index term TERM suitable for storing
    into the idxWORD* tables: lower-case it when LOWER_TERM is true,
    and truncate it safely to MAX_CHAR_LENGTH UTF-8 characters.

    Truncation is performed on the Unicode representation of TERM so
    that a multi-byte UTF-8 character is never cut in half, which
    could happen if the raw UTF-8 byte string were sliced directly.

    Note that MAX_CHAR_LENGTH corresponds to the width of the term
    column in the idxINDEX* tables.
    """
    term_unicode = unicode(term, 'utf-8')
    if lower_term:
        term_unicode = term_unicode.lower()
    # slice on characters, not bytes, then encode back to UTF-8:
    if len(term_unicode) > max_char_length:
        term_unicode = term_unicode[:max_char_length]
    return term_unicode.encode('utf-8')
def lower_index_term(term):
    """
    Return safely lowered index term TERM.

    The standard Python lower() is not UTF-8 safe on byte strings, so
    the term is decoded to Unicode first, lowered, and re-encoded.
    Called by both the search engine and the indexer when appropriate
    (e.g. before stemming).

    Raises UnicodeDecodeError when TERM is not valid UTF-8; client
    code may want to catch it.
    """
    term_unicode = unicode(term, 'utf-8')
    return term_unicode.lower().encode('utf-8')
def get_synonym_terms(term, kbr_name, match_type):
    """
    Return list of synonyms for TERM by looking in KBR_NAME in
    MATCH_TYPE style.

    @param term: search-time term or index-time term
    @type term: str
    @param kbr_name: knowledge base name
    @type kbr_name: str
    @param match_type: specifies how the term matches against the KBR
           before doing the lookup.  Could be `exact' (default),
           'leading_to_comma', `leading_to_number'.
    @type match_type: str
    @return: list of term synonyms
    @rtype: list of strings
    """
    dterms = {}
    ## exact match is the default: look up the whole term, keep no remainder:
    term_for_lookup = term
    term_remainder = ''
    ## otherwise look up only the leading part of the term:
    if match_type == 'leading_to_comma':
        matched = re.match(r'^(.*?)(\s*,.*)$', term)
        if matched:
            term_for_lookup = matched.group(1)
            term_remainder = matched.group(2)
    elif match_type == 'leading_to_number':
        matched = re.match(r'^(.*?)(\s*\d.*)$', term)
        if matched:
            term_for_lookup = matched.group(1)
            term_remainder = matched.group(2)
    ## FIXME: workaround: escape SQL wild-card signs, since the KBR
    ## exact search is doing a LIKE query and would match everything:
    term_for_lookup = term_for_lookup.replace('%', '\%')
    ## collect synonyms, re-attaching any remainder stripped above:
    for kbr_values in get_kbr_values(kbr_name,
                                     searchkey=term_for_lookup,
                                     searchtype='e'):
        for kbr_value in kbr_values:
            dterms[kbr_value + term_remainder] = 1
    ## return list of term synonyms:
    return dterms.keys()
def wash_output_format(format):
    """Wash output format FORMAT.

    Prevents legacy input like 'of=9' (print certain MARC fields
    only; 'of=tm' is the preferred way to do that) unless it is a
    full six-digit MARC tag specification; anything else is passed
    through unchanged."""
    looks_like_marc_tags = str(format[0:3]).isdigit()
    if looks_like_marc_tags and len(format) != 6:
        # asked to print MARC tags, but not enough digits,
        # so fall back to the HTML brief default:
        return 'hb'
    return format
def wash_pattern(p):
    """Wash pattern passed by URL. Check for sanity of the wildcard by
    removing wildcards if they are appended to extremely short words
    (1-3 letters). TODO: instead of this approximative treatment, it
    will be much better to introduce a temporal limit, e.g. to kill a
    query if it does not finish in 10 seconds.

    Quoted phrases ('...', "...", /.../) are protected during the
    wildcard washing by temporarily replacing their inner spaces with
    a __SPACE__ sentinel, restored afterwards."""
    # strip accents:
    # p = strip_accents(p) # FIXME: when available, strip accents all the time
    # add leading/trailing whitespace for the two following wildcard-sanity checking regexps:
    p = " " + p + " "
    # replace spaces within quotes by __SPACE__ temporarily:
    p = re_pattern_single_quotes.sub(lambda x: "'"+string.replace(x.group(1), ' ', '__SPACE__')+"'", p)
    p = re_pattern_double_quotes.sub(lambda x: "\""+string.replace(x.group(1), ' ', '__SPACE__')+"\"", p)
    p = re_pattern_regexp_quotes.sub(lambda x: "/"+string.replace(x.group(1), ' ', '__SPACE__')+"/", p)
    # get rid of unquoted wildcards after spaces:
    p = re_pattern_wildcards_after_spaces.sub("\\1", p)
    # get rid of extremely short words (1-3 letters with wildcards):
    #p = re_pattern_short_words.sub("\\1", p)
    # replace back __SPACE__ by spaces:
    p = re_pattern_space.sub(" ", p)
    # replace special terms (e.g. "today") by their concrete value:
    p = re_pattern_today.sub(time.strftime("%Y-%m-%d", time.localtime()), p)
    # remove unnecessary whitespace:
    p = string.strip(p)
    # remove potentially wrong UTF-8 characters:
    p = wash_for_utf8(p)
    return p
def wash_field(f):
    """Wash field passed by URL: strip whitespace, lowercase, and map
    legacy field names to their current equivalents."""
    if not f:
        return f
    # get rid of unnecessary whitespace and make it lowercase
    # (e.g. Author -> author) to better suit iPhone etc input mode:
    washed = f.strip().lower()
    # wash legacy 'f' field names, e.g. replace 'wau' or `au' by
    # 'author', if applicable:
    if CFG_WEBSEARCH_FIELDS_CONVERT:
        washed = CFG_WEBSEARCH_FIELDS_CONVERT.get(washed, washed)
    return washed
def wash_dates(d1="", d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0):
    """
    Take user-submitted date arguments D1 (full datetime string) or
    the (D1Y, D1M, D1D) year, month, day tuple, and similarly D2 or
    (D2Y, D2M, D2D), and return a pair of datetime strings in the
    "YYYY-MM-DD HH:MM:SS" format suitable for time-restricted
    searching.

    When both D1 and (D1Y, D1M, D1D) parameters are present, D1 takes
    precedence; ditto for D2*.

    Missing tuple components are completed towards the start of the
    range for the first date (0000, 01, 01) and towards the end for
    the second (9999, 12, 31).
    """
    datetext1, datetext2 = "", ""
    # sanity checking: nothing selected at all means no restriction:
    if d1 == "" and d1y == 0 and d1m == 0 and d1d == 0 and d2 == "" and d2y == 0 and d2m == 0 and d2d == 0:
        return ("", "")
    # wash first (starting) date:
    if d1:
        # full datetime string takes precedence:
        datetext1 = d1
    else:
        # assemble from (year, month, day), defaulting to the range start:
        pieces = []
        if d1y:
            pieces.append("%04d" % d1y)
        else:
            pieces.append("0000")
        if d1m:
            pieces.append("-%02d" % d1m)
        else:
            pieces.append("-01")
        if d1d:
            pieces.append("-%02d" % d1d)
        else:
            pieces.append("-01")
        pieces.append(" 00:00:00")
        datetext1 = "".join(pieces)
    # wash second (ending) date:
    if d2:
        # full datetime string takes precedence:
        datetext2 = d2
    else:
        # assemble from (year, month, day), defaulting to the range end:
        pieces = []
        if d2y:
            pieces.append("%04d" % d2y)
        else:
            pieces.append("9999")
        if d2m:
            pieces.append("-%02d" % d2m)
        else:
            pieces.append("-12")
        if d2d:
            pieces.append("-%02d" % d2d)
        else:
            # day 31 may overshoot short months, but that is harmless
            # for the range queries this feeds:
            pieces.append("-31")
        pieces.append(" 00:00:00")
        datetext2 = "".join(pieces)
    # okay, return constructed YYYY-MM-DD HH:MM:SS datetexts:
    return (datetext1, datetext2)
def is_hosted_collection(coll):
    """Check if the given collection is a hosted one; i.e. its dbquery
    starts with hostedcollection:

    @param coll: collection name
    @return: True if COLL is hosted; False if it is not, if it is
        unknown, or if it has no dbquery defined.
    """
    res = run_sql("SELECT dbquery FROM collection WHERE name=%s", (coll, ))
    # handle the documented "no match / no dbquery" cases explicitly
    # instead of the previous bare except, which also silently masked
    # real database errors:
    if not res or res[0][0] is None:
        return False
    return res[0][0].startswith("hostedcollection:")
def get_colID(c):
    "Return collection ID for collection name C. Return None if no match found."
    res = run_sql("SELECT id FROM collection WHERE name=%s", (c,), 1)
    if not res:
        return None
    return res[0][0]
def get_coll_normalised_name(c):
    """Return the normalised collection name (case sensitive) for
    collection name C (case insensitive).
    Return None if no match is found."""
    try:
        res = run_sql("SELECT name FROM collection WHERE name=%s", (c,))
        return res[0][0]
    except:
        # no such collection (or query failure): no normalised name
        return None
def get_coll_ancestors(coll):
    "Returns a list of ancestors for collection 'coll'."
    ancestors = []
    current = coll
    # walk up the tree one regular parent at a time until the root:
    while True:
        res = run_sql("""SELECT c.name FROM collection AS c
                          LEFT JOIN collection_collection AS cc ON c.id=cc.id_dad
                          LEFT JOIN collection AS ccc ON ccc.id=cc.id_son
                          WHERE ccc.name=%s ORDER BY cc.id_dad ASC LIMIT 1""",
                      (current,))
        if not res:
            break
        parent_name = res[0][0]
        ancestors.append(parent_name)
        current = parent_name
    # ancestors were collected child-to-root; return them root-first:
    ancestors.reverse()
    return ancestors
def get_coll_sons(coll, type='r', public_only=1):
    """Return a list of sons (first-level descendants) of type 'type' for collection 'coll'.
    If public_only, then return only non-restricted son collections.
    """
    query = "SELECT c.name FROM collection AS c "\
            "LEFT JOIN collection_collection AS cc ON c.id=cc.id_son "\
            "LEFT JOIN collection AS ccc ON ccc.id=cc.id_dad "\
            "WHERE cc.type=%s AND ccc.name=%s"
    query += " ORDER BY cc.score DESC"
    sons = []
    for row in run_sql(query, (type, coll)):
        son_name = row[0]
        # skip restricted sons when only public ones were requested:
        if public_only and collection_restricted_p(son_name):
            continue
        sons.append(son_name)
    return sons
def get_coll_real_descendants(coll, type='_', get_hosted_colls=True):
    """Return a list of all descendants of collection 'coll' that are defined by a 'dbquery'.
    IOW, we need to decompose compound collections like "A & B" into "A" and "B" provided
    that "A & B" has no associated database query defined.

    @param type: collection_collection relationship type to follow at
        the first level (SQL LIKE pattern; default '_' matches any).
    @param get_hosted_colls: if false, leave hosted collections
        (dbquery starting with 'hostedcollection:') out of the result.
    """
    coll_sons = []
    res = run_sql("""SELECT c.name,c.dbquery FROM collection AS c
                     LEFT JOIN collection_collection AS cc ON c.id=cc.id_son
                     LEFT JOIN collection AS ccc ON ccc.id=cc.id_dad
                     WHERE ccc.name=%s AND cc.type LIKE %s ORDER BY cc.score DESC""",
                  (coll, type,))
    for name, dbquery in res:
        if dbquery: # this is 'real' collection, so return it:
            if get_hosted_colls:
                coll_sons.append(name)
            else:
                if not dbquery.startswith("hostedcollection:"):
                    coll_sons.append(name)
        else: # this is 'composed' collection, so recurse:
            # propagate get_hosted_colls so hosted collections are
            # filtered at every level (the previous recursive call
            # silently reverted to the default True):
            coll_sons.extend(get_coll_real_descendants(name,
                                                       get_hosted_colls=get_hosted_colls))
    return coll_sons
def browse_pattern(req, colls, p, f, rg, ln=CFG_SITE_LANG):
    """Browse either bibliographic phrases or words indexes, and display it.

    @param req: request object, used to write the rendered output
    @param colls: list of collection names used to restrict hit counts
    @param p: browse pattern; surrounding double quotes are stripped
    @param f: field/index to browse; when empty and P contains ':',
        the field is taken from the P prefix; when still empty,
        browsing falls back to the word indexes
    @param rg: number of nearby terms to display
    @param ln: interface language
    """
    # load the right message language
    _ = gettext_set_language(ln)
    ## is p enclosed in quotes? (coming from exact search)
    if p.startswith('"') and p.endswith('"'):
        p = p[1:-1]
    p_orig = p
    ## okay, "real browse" follows:
    ## FIXME: the maths in the get_nearest_terms_in_bibxxx is just a test
    if not f and string.find(p, ":") > 0: # does 'p' contain ':'?
        f, p = string.split(p, ":", 1)
    ## do we search in words indexes?
    if not f:
        return browse_in_bibwords(req, p, f)
    ## initialise browsed_phrases up front: the fallback checks below
    ## consult it even when the phrase-index branch is taken, which
    ## previously raised NameError when that branch found no terms:
    browsed_phrases = []
    index_id = get_index_id_from_field(f)
    if index_id != 0:
        # a dedicated phrase index exists for F: browse it, counting
        # hits only inside the union of the requested collections:
        coll = intbitset()
        for coll_name in colls:
            coll |= get_collection_reclist(coll_name)
        browsed_phrases_in_colls = get_nearest_terms_in_idxphrase_with_collection(p, index_id, rg/2, rg/2, coll)
    else:
        # no phrase index: browse the bibxxx tables directly
        browsed_phrases = get_nearest_terms_in_bibxxx(p, f, (rg+1)/2+1, (rg-1)/2+1)
        while not browsed_phrases:
            # try again and again with shorter and shorter pattern:
            try:
                p = p[:-1]
                browsed_phrases = get_nearest_terms_in_bibxxx(p, f, (rg+1)/2+1, (rg-1)/2+1)
            except:
                # probably there are no hits at all:
                req.write(_("No values found."))
                return
        ## try to check hits in these particular collection selection:
        browsed_phrases_in_colls = []
        if 0:
            # NOTE: dead code kept intentionally disabled; counting
            # hits per collection this way was too expensive.
            for phrase in browsed_phrases:
                phrase_hitset = intbitset()
                phrase_hitsets = search_pattern("", phrase, f, 'e')
                for coll in colls:
                    phrase_hitset.union_update(phrase_hitsets[coll])
                if len(phrase_hitset) > 0:
                    # okay, this phrase has some hits in colls, so add it:
                    browsed_phrases_in_colls.append([phrase, len(phrase_hitset)])
    ## were there hits in collections?
    if browsed_phrases_in_colls == []:
        if browsed_phrases != []:
            #print_warning(req, """<p>No match close to <em>%s</em> found in given collections.
            #Please try different term.<p>Displaying matches in any collection...""" % p_orig)
            ## try to get nbhits for these phrases in any collection:
            for phrase in browsed_phrases:
                browsed_phrases_in_colls.append([phrase, get_nbhits_in_bibxxx(phrase, f)])
    ## display results now:
    out = websearch_templates.tmpl_browse_pattern(
            f=f,
            fn=get_field_i18nname(get_field_name(f) or f, ln, False),
            ln=ln,
            browsed_phrases_in_colls=browsed_phrases_in_colls,
            colls=colls,
            rg=rg,
          )
    req.write(out)
    return
def browse_in_bibwords(req, p, f, ln=CFG_SITE_LANG):
    """Browse inside words indexes."""
    if not p:
        return
    _ = gettext_set_language(ln)
    # build the argument set for the nearest-terms box: same query
    # arguments as the request, with the action forced to 'search':
    urlargd = {}
    urlargd.update(req.argd)
    urlargd['action'] = 'search'
    nearest_box = create_nearest_terms_box(urlargd, p, f, 'w', ln=ln, intro_text_p=0)
    out = websearch_templates.tmpl_search_in_bibwords(p=p,
                                                      f=f,
                                                      ln=ln,
                                                      nearest_box=nearest_box)
    req.write(out)
    return
def search_pattern(req=None, p=None, f=None, m=None, ap=0, of="id", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True, wl=0):
    """Search for complex pattern 'p' within field 'f' according to
       matching type 'm'.  Return hitset of recIDs.

       The function uses multi-stage searching algorithm in case of no
       exact match found.  See the Search Internals document for
       detailed description.

       The 'ap' argument governs whether an alternative patterns are to
       be used in case there is no direct hit for (p,f,m).  For
       example, whether to replace non-alphanumeric characters by
       spaces if it would give some hits.  See the Search Internals
       document for detailed description.  (ap=0 forbids the
       alternative pattern usage, ap=1 permits it.)

       The 'of' argument governs whether to print or not some
       information to the user in case of no match found.  (Usually it
       prints the information in case of HTML formats, otherwise it's
       silent).

       The 'verbose' argument controls the level of debugging information
       to be printed (0=least, 9=most).

       All the parameters are assumed to have been previously washed.

       This function is suitable as a mid-level API.
    """
    _ = gettext_set_language(ln)
    hitset_empty = intbitset()
    # sanity check:
    if not p:
        hitset_full = intbitset(trailing_bits=1)
        hitset_full.discard(0)
        # no pattern, so return all universe
        return hitset_full
    # search stage 1: break up arguments into basic search units:
    if verbose and of.startswith("h"):
        t1 = os.times()[4]
    basic_search_units = create_basic_search_units(req, p, f, m, of)
    if verbose and of.startswith("h"):
        t2 = os.times()[4]
        print_warning(req, "Search stage 1: basic search units are: %s" % cgi.escape(repr(basic_search_units)))
        print_warning(req, "Search stage 1: execution took %.2f seconds." % (t2 - t1))
    # search stage 2: do search for each search unit and verify hit presence:
    if verbose and of.startswith("h"):
        t1 = os.times()[4]
    basic_search_units_hitsets = []
    # prepare hiddenfield-related data: users without the 'runbibedit'
    # right must not be able to probe values of hidden MARC tags
    myhiddens = CFG_BIBFORMAT_HIDDEN_TAGS
    can_see_hidden = False
    if req:
        user_info = collect_user_info(req)
        can_see_hidden = (acc_authorize_action(user_info, 'runbibedit')[0] == 0)
    if can_see_hidden:
        myhiddens = []
    if CFG_INSPIRE_SITE and of.startswith('h'):
        # fulltext/caption search warnings for INSPIRE:
        fields_to_be_searched = [f for o,p,f,m in basic_search_units]
        if 'fulltext' in fields_to_be_searched:
            print_warning(req, _("Warning: full-text search is only available for a subset of papers mostly from %(x_range_from_year)s-%(x_range_to_year)s.") % \
                          {'x_range_from_year': '2006',
                           'x_range_to_year': '2012'})
        elif 'caption' in fields_to_be_searched:
            print_warning(req, _("Warning: figure caption search is only available for a subset of papers mostly from %(x_range_from_year)s-%(x_range_to_year)s.") % \
                          {'x_range_from_year': '2008',
                           'x_range_to_year': '2012'})
    for idx_unit in xrange(len(basic_search_units)):
        bsu_o, bsu_p, bsu_f, bsu_m = basic_search_units[idx_unit]
        # a one-letter field cannot be a real index: search all fields instead
        if bsu_f and len(bsu_f) < 2:
            if of.startswith("h"):
                print_warning(req, _("There is no index %s. Searching for %s in all fields." % (bsu_f, bsu_p)))
            bsu_f = ''
            bsu_m = 'w'
            if of.startswith("h") and verbose:
                print_warning(req, _('Instead searching %s.' % str([bsu_o, bsu_p, bsu_f, bsu_m])))
        try:
            basic_search_unit_hitset = search_unit(bsu_p, bsu_f, bsu_m, wl)
        except InvenioWebSearchWildcardLimitError, excp:
            # wildcard limit hit: keep the partial result collected so far
            basic_search_unit_hitset = excp.res
            if of.startswith("h"):
                print_warning(req, _("Search term too generic, displaying only partial results..."))
        # FIXME: print warning if we use native full-text indexing
        if bsu_f == 'fulltext' and bsu_m != 'w' and of.startswith('h') and not CFG_SOLR_URL:
            print_warning(req, _("No phrase index available for fulltext yet, looking for word combination..."))
        #check that the user is allowed to search with this tag
        #if he/she tries it
        if bsu_f and len(bsu_f) > 1 and bsu_f[0].isdigit() and bsu_f[1].isdigit():
            for htag in myhiddens:
                ltag = len(htag)
                samelenfield = bsu_f[0:ltag]
                if samelenfield == htag: #user searches by a hidden tag
                    #we won't show you anything..
                    basic_search_unit_hitset = intbitset()
                    if verbose >= 9 and of.startswith("h"):
                        print_warning(req, "Pattern %s hitlist omitted since \
                                            it queries in a hidden tag %s" %
                                      (repr(bsu_p), repr(myhiddens)))
                    display_nearest_terms_box=False #..and stop spying, too.
        if verbose >= 9 and of.startswith("h"):
            print_warning(req, "Search stage 1: pattern %s gave hitlist %s" % (cgi.escape(bsu_p), basic_search_unit_hitset))
        if len(basic_search_unit_hitset) > 0 or \
           ap==0 or \
           bsu_o=="|" or \
           ((idx_unit+1)<len(basic_search_units) and basic_search_units[idx_unit+1][0]=="|"):
            # stage 2-1: this basic search unit is retained, since
            # either the hitset is non-empty, or the approximate
            # pattern treatment is switched off, or the search unit
            # was joined by an OR operator to preceding/following
            # units so we do not require that it exists
            basic_search_units_hitsets.append(basic_search_unit_hitset)
        else:
            # stage 2-2: no hits found for this search unit, try to replace non-alphanumeric chars inside pattern:
            if re.search(r'[^a-zA-Z0-9\s\:]', bsu_p) and bsu_f != 'refersto' and bsu_f != 'citedby':
                if bsu_p.startswith('"') and bsu_p.endswith('"'): # is it ACC query?
                    bsu_pn = re.sub(r'[^a-zA-Z0-9\s\:]+', "*", bsu_p)
                else: # it is WRD query
                    bsu_pn = re.sub(r'[^a-zA-Z0-9\s\:]+', " ", bsu_p)
                if verbose and of.startswith('h') and req:
                    print_warning(req, "Trying (%s,%s,%s)" % (cgi.escape(bsu_pn), cgi.escape(bsu_f), cgi.escape(bsu_m)))
                basic_search_unit_hitset = search_pattern(req=None, p=bsu_pn, f=bsu_f, m=bsu_m, of="id", ln=ln, wl=wl)
                if len(basic_search_unit_hitset) > 0:
                    # we retain the new unit instead
                    if of.startswith('h'):
                        print_warning(req, _("No exact match found for %(x_query1)s, using %(x_query2)s instead...") % \
                                      {'x_query1': "<em>" + cgi.escape(bsu_p) + "</em>",
                                       'x_query2': "<em>" + cgi.escape(bsu_pn) + "</em>"})
                    basic_search_units[idx_unit][1] = bsu_pn
                    basic_search_units_hitsets.append(basic_search_unit_hitset)
                else:
                    # stage 2-3: no hits found either, propose nearest indexed terms:
                    if of.startswith('h') and display_nearest_terms_box:
                        if req:
                            if bsu_f == "recid":
                                print_warning(req, _("Requested record does not seem to exist."))
                            else:
                                print_warning(req, create_nearest_terms_box(req.argd, bsu_p, bsu_f, bsu_m, ln=ln))
                    return hitset_empty
            else:
                # stage 2-3: no hits found either, propose nearest indexed terms:
                if of.startswith('h') and display_nearest_terms_box:
                    if req:
                        if bsu_f == "recid":
                            print_warning(req, _("Requested record does not seem to exist."))
                        else:
                            print_warning(req, create_nearest_terms_box(req.argd, bsu_p, bsu_f, bsu_m, ln=ln))
                return hitset_empty
    if verbose and of.startswith("h"):
        t2 = os.times()[4]
        for idx_unit in range(0, len(basic_search_units)):
            print_warning(req, "Search stage 2: basic search unit %s gave %d hits." %
                          (basic_search_units[idx_unit][1:], len(basic_search_units_hitsets[idx_unit])))
        print_warning(req, "Search stage 2: execution took %.2f seconds." % (t2 - t1))
    # search stage 3: apply boolean query for each search unit:
    if verbose and of.startswith("h"):
        t1 = os.times()[4]
    # let the initial set be the complete universe:
    hitset_in_any_collection = intbitset(trailing_bits=1)
    hitset_in_any_collection.discard(0)
    for idx_unit in xrange(len(basic_search_units)):
        this_unit_operation = basic_search_units[idx_unit][0]
        this_unit_hitset = basic_search_units_hitsets[idx_unit]
        if this_unit_operation == '+':
            hitset_in_any_collection.intersection_update(this_unit_hitset)
        elif this_unit_operation == '-':
            hitset_in_any_collection.difference_update(this_unit_hitset)
        elif this_unit_operation == '|':
            hitset_in_any_collection.union_update(this_unit_hitset)
        else:
            if of.startswith("h"):
                print_warning(req, "Invalid set operation %s." % cgi.escape(this_unit_operation), "Error")
    if len(hitset_in_any_collection) == 0:
        # no hits found, propose alternative boolean query:
        if of.startswith('h') and display_nearest_terms_box:
            nearestterms = []
            for idx_unit in range(0, len(basic_search_units)):
                bsu_o, bsu_p, bsu_f, bsu_m = basic_search_units[idx_unit]
                if bsu_p.startswith("%") and bsu_p.endswith("%"):
                    bsu_p = "'" + bsu_p[1:-1] + "'"
                bsu_nbhits = len(basic_search_units_hitsets[idx_unit])
                # create a similar query, but with the basic search unit only
                argd = {}
                argd.update(req.argd)
                argd['p'] = bsu_p
                argd['f'] = bsu_f
                nearestterms.append((bsu_p, bsu_nbhits, argd))
            text = websearch_templates.tmpl_search_no_boolean_hits(
                     ln=ln, nearestterms=nearestterms)
            print_warning(req, text)
    if verbose and of.startswith("h"):
        t2 = os.times()[4]
        print_warning(req, "Search stage 3: boolean query gave %d hits." % len(hitset_in_any_collection))
        print_warning(req, "Search stage 3: execution took %.2f seconds." % (t2 - t1))
    return hitset_in_any_collection
def search_pattern_parenthesised(req=None, p=None, f=None, m=None, ap=0, of="id", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True, wl=0):
    """Search for complex pattern 'p' containing parenthesis within field 'f' according to
       matching type 'm'.  Return hitset of recIDs.

       For more details on the parameters see 'search_pattern'.
    """
    _ = gettext_set_language(ln)
    spires_syntax_converter = SpiresToInvenioSyntaxConverter()
    spires_syntax_query = False
    # if the pattern uses SPIRES search syntax, convert it to Invenio syntax
    if spires_syntax_converter.is_applicable(p):
        spires_syntax_query = True
        p = spires_syntax_converter.convert_query(p)
    # sanity check: do not call parenthesised parser for search terms
    # like U(1):
    if not re_pattern_parens.search(p):
        return search_pattern(req, p, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
    # Try searching with parentheses
    try:
        parser = SearchQueryParenthesisedParser()
        # get a hitset with all recids
        result_hitset = intbitset(trailing_bits=1)
        # parse the query. The result is list of [op1, expr1, op2, expr2, ..., opN, exprN]
        parsing_result = parser.parse_query(p)
        if verbose and of.startswith("h"):
            print_warning(req, "Search stage 1: search_pattern_parenthesised() searched %s." % repr(p))
            print_warning(req, "Search stage 1: search_pattern_parenthesised() returned %s." % repr(parsing_result))
        # go through every pattern:
        # calculate hitset for it, then
        # combine pattern's hitset with the result using the corresponding operator
        for index in xrange(0, len(parsing_result)-1, 2 ):
            current_operator = parsing_result[index]
            current_pattern = parsing_result[index+1]
            if CFG_INSPIRE_SITE and spires_syntax_query:
                # setting ap=0 to turn off approximate matching for 0 results.
                # Doesn't work well in combinations.
                # FIXME: The right fix involves collecting statuses for each
                # hitset, then showing a nearest terms box exactly once,
                # outside this loop.
                ap = 0
                display_nearest_terms_box=False
            # obtain a hitset for the current pattern
            current_hitset = search_pattern(req, current_pattern, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
            # combine the current hitset with resulting hitset using the current operator
            if current_operator == '+':
                result_hitset = result_hitset & current_hitset
            elif current_operator == '-':
                result_hitset = result_hitset - current_hitset
            elif current_operator == '|':
                result_hitset = result_hitset | current_hitset
            else:
                assert False, "Unknown operator in search_pattern_parenthesised()"
        return result_hitset
    # If searching with parentheses fails, perform search ignoring parentheses
    except SyntaxError:
        print_warning(req, _("Search syntax misunderstood. Ignoring all parentheses in the query. If this doesn't help, please check your search and try again."))
        # remove the parentheses in the query. Current implementation removes all the parentheses,
        # but it could be improved to remove only those that are not inside quotes
        p = p.replace('(', ' ')
        p = p.replace(')', ' ')
        return search_pattern(req, p, f, m, ap, of, verbose, ln, display_nearest_terms_box=display_nearest_terms_box, wl=wl)
def search_unit(p, f=None, m=None, wl=0):
    """Search for basic search unit defined by pattern 'p' and field
       'f' and matching type 'm'.  Return hitset of recIDs.

       All the parameters are assumed to have been previously washed.
       'p' is assumed to be already a ``basic search unit'' so that it
       is searched as such and is not broken up in any way.  Only
       wildcard and span queries are being detected inside 'p'.

       If CFG_WEBSEARCH_SYNONYM_KBRS is set and we are searching in
       one of the indexes that has defined runtime synonym knowledge
       base, then look up there and automatically enrich search
       results with results for synonyms.

       In case the wildcard limit (wl) is greater than 0 and this limit
       is reached an InvenioWebSearchWildcardLimitError will be raised.
       In case you want to call this function with no limit for the
       wildcard queries, wl should be 0.

       This function is suitable as a low-level API.
    """
    ## create empty output results set:
    hitset = intbitset()
    if not p: # sanity checking
        return hitset
    ## eventually look up runtime synonyms:
    hitset_synonyms = intbitset()
    if CFG_WEBSEARCH_SYNONYM_KBRS.has_key(f):
        for p_synonym in get_synonym_terms(p,
                             CFG_WEBSEARCH_SYNONYM_KBRS[f][0],
                             CFG_WEBSEARCH_SYNONYM_KBRS[f][1]):
            if p_synonym != p:
                # recurse for each synonym; results merged at the end
                hitset_synonyms |= search_unit(p_synonym, f, m, wl)
    ## look up hits:
    if CFG_SOLR_URL and f == 'fulltext':
        # redirect to Solr/Lucene
        try:
            return search_unit_in_solr(p, f, m)
        except:
            # There were troubles with getting full-text search
            # results from Solr.  Let us alert the admin of these
            # problems and let us simply return empty results to the
            # end user.
            register_exception(alert_admin=True)
            return hitset
    if f == 'datecreated':
        hitset = search_unit_in_bibrec(p, p, 'c')
    elif f == 'datemodified':
        hitset = search_unit_in_bibrec(p, p, 'm')
    elif f == 'refersto':
        # we are doing search by the citation count
        hitset = search_unit_refersto(p)
    elif f == 'citedby':
        # we are doing search by the citation count
        hitset = search_unit_citedby(p)
    elif m == 'a' or m == 'r':
        # we are doing either phrase search or regexp search
        if f == 'fulltext':
            # FIXME: workaround for not having phrase index yet
            return search_pattern(None, p, f, 'w')
        index_id = get_index_id_from_field(f)
        if index_id != 0:
            hitset = search_unit_in_idxphrases(p, f, m, wl)
        else:
            hitset = search_unit_in_bibxxx(p, f, m, wl)
    elif p.startswith("cited:"):
        # we are doing search by the citation count
        hitset = search_unit_by_times_cited(p[6:])
    else:
        # we are doing bibwords search by default
        hitset = search_unit_in_bibwords(p, f, m, wl=wl)
    ## merge synonym results and return total:
    hitset |= hitset_synonyms
    return hitset
def search_unit_in_bibwords(word, f, m=None, decompress=zlib.decompress, wl=0):
    """Searches for 'word' inside bibwordsX table for field 'f' and returns hitset of recIDs.

    'word' may contain '*' (user-level truncation character, rewritten to
    SQL '%') and '->' (span query, rewritten to BETWEEN).  'wl' is the
    wildcard limit (0 = unlimited); when a wildcard query hits the limit,
    an InvenioWebSearchWildcardLimitError carrying the partial hitset is
    raised.  NOTE(review): 'm' and 'decompress' are not used in this body;
    presumably kept for signature symmetry with sibling search units --
    confirm before removing.
    """
    set = intbitset() # will hold output result set
    set_used = 0 # not-yet-used flag, to be able to circumvent set operations
    limit_reached = 0 # flag for knowing if the query limit has been reached
    # if no field is specified, search in the global index.
    f = f or 'anyfield'
    index_id = get_index_id_from_field(f)
    if index_id:
        bibwordsX = "idxWORD%02dF" % index_id
        stemming_language = get_index_stemming_language(index_id)
    else:
        return intbitset() # word index f does not exist
    # wash 'word' argument and run query:
    if f == 'authorcount' and word.endswith('+'):
        # field count query of the form N+ so transform N+ to N->99999:
        word = word[:-1] + '->99999'
    word = string.replace(word, '*', '%') # we now use '*' as the truncation character
    words = string.split(word, "->", 1) # check for span query
    if len(words) == 2:
        # span query: wash, optionally stem, both endpoints independently
        word0 = re_word.sub('', words[0])
        word1 = re_word.sub('', words[1])
        if stemming_language:
            word0 = lower_index_term(word0)
            word1 = lower_index_term(word1)
            word0 = stem(word0, stemming_language)
            word1 = stem(word1, stemming_language)
        word0_washed = wash_index_term(word0)
        word1_washed = wash_index_term(word1)
        if f == 'authorcount':
            # field count query; convert to integers in order
            # to have numerical behaviour for 'BETWEEN n1 AND n2' query
            try:
                word0_washed = int(word0_washed)
                word1_washed = int(word1_washed)
            except ValueError:
                pass
        try:
            res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term BETWEEN %%s AND %%s" % bibwordsX,
                                     (word0_washed, word1_washed), wildcard_limit = wl)
        except InvenioDbQueryWildcardLimitError, excp:
            # keep the partial results; the exception is re-raised at the end
            res = excp.res
            limit_reached = 1 # set the limit reached flag to true
    else:
        if f == 'journal':
            pass # FIXME: quick hack for the journal index
        else:
            word = re_word.sub('', word)
            if stemming_language:
                word = lower_index_term(word)
                word = stem(word, stemming_language)
        if string.find(word, '%') >= 0: # do we have wildcard in the word?
            if f == 'journal':
                # FIXME: quick hack for the journal index
                # FIXME: we can run a sanity check here for all indexes
                res = ()
            else:
                try:
                    res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term LIKE %%s" % bibwordsX,
                                             (wash_index_term(word),), wildcard_limit = wl)
                except InvenioDbQueryWildcardLimitError, excp:
                    res = excp.res
                    limit_reached = 1 # set the limit reached flag to true
        else:
            # exact term match, no limit needed:
            res = run_sql("SELECT term,hitlist FROM %s WHERE term=%%s" % bibwordsX,
                          (wash_index_term(word),))
    # fill the result set:
    for word, hitlist in res:
        hitset_bibwrd = intbitset(hitlist)
        # add the results:
        if set_used:
            set.union_update(hitset_bibwrd)
        else:
            # first hit: adopt the hitset directly instead of unioning into an empty one
            set = hitset_bibwrd
            set_used = 1
    #check to see if the query limit was reached
    if limit_reached:
        #raise an exception, so we can print a nice message to the user
        raise InvenioWebSearchWildcardLimitError(set)
    # okay, return result set:
    return set
def search_unit_in_idxphrases(p, f, type, wl=0):
    """Searches for phrase 'p' inside idxPHRASE*F table for field 'f' and returns hitset of recIDs found.
    The search type is defined by 'type' (e.g. equals to 'r' for a regexp search).

    'p' may contain '*' (truncation, rewritten to SQL '%') and '->'
    (span query, rewritten to BETWEEN).  'wl' is the wildcard limit
    (0 = unlimited); an InvenioWebSearchWildcardLimitError carrying the
    partial hitset is raised when the limit is hit.
    """
    # call word search method in some cases:
    if f == 'authorcount':
        return search_unit_in_bibwords(p, f, wl=wl)
    set = intbitset() # will hold output result set
    set_used = 0 # not-yet-used flag, to be able to circumvent set operations
    limit_reached = 0 # flag for knowing if the query limit has been reached
    use_query_limit = False # flag for knowing if to limit the query results or not
    # deduce in which idxPHRASE table we will search:
    idxphraseX = "idxPHRASE%02dF" % get_index_id_from_field("anyfield")
    if f:
        index_id = get_index_id_from_field(f)
        if index_id:
            idxphraseX = "idxPHRASE%02dF" % index_id
        else:
            return intbitset() # phrase index f does not exist
    # detect query type (exact phrase, partial phrase, regexp):
    if type == 'r':
        query_addons = "REGEXP %s"
        query_params = (p,)
        use_query_limit = True
    else:
        p = string.replace(p, '*', '%') # we now use '*' as the truncation character
        ps = string.split(p, "->", 1) # check for span query:
        # a '->' surrounded by spaces is treated as literal text, not a span:
        if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
            query_addons = "BETWEEN %s AND %s"
            query_params = (ps[0], ps[1])
            use_query_limit = True
        else:
            if string.find(p, '%') > -1:
                query_addons = "LIKE %s"
                query_params = (p,)
                use_query_limit = True
            else:
                query_addons = "= %s"
                query_params = (p,)
    # special washing for fuzzy author index:
    if f in ('author', 'firstauthor', 'exactauthor', 'exactfirstauthor'):
        query_params_washed = ()
        for query_param in query_params:
            query_params_washed += (wash_author_name(query_param),)
        query_params = query_params_washed
    # perform search:
    if use_query_limit:
        try:
            res = run_sql_with_limit("SELECT term,hitlist FROM %s WHERE term %s" % (idxphraseX, query_addons),
                                     query_params, wildcard_limit=wl)
        except InvenioDbQueryWildcardLimitError, excp:
            # keep the partial results; the exception is re-raised at the end
            res = excp.res
            limit_reached = 1 # set the limit reached flag to true
    else:
        res = run_sql("SELECT term,hitlist FROM %s WHERE term %s" % (idxphraseX, query_addons), query_params)
    # fill the result set:
    for word, hitlist in res:
        hitset_bibphrase = intbitset(hitlist)
        # add the results:
        if set_used:
            set.union_update(hitset_bibphrase)
        else:
            # first hit: adopt the hitset directly instead of unioning into an empty one
            set = hitset_bibphrase
            set_used = 1
    #check to see if the query limit was reached
    if limit_reached:
        #raise an exception, so we can print a nice message to the user
        raise InvenioWebSearchWildcardLimitError(set)
    # okay, return result set:
    return set
def search_unit_in_bibxxx(p, f, type, wl=0):
    """Searches for pattern 'p' inside bibxxx tables for field 'f' and returns hitset of recIDs found.
    The search type is defined by 'type' (e.g. equals to 'r' for a regexp search).

    'f' may be a logical field code (mapped to MARC tags via
    get_field_tags) or a literal tag starting with two digits.  'wl' is
    the wildcard limit (0 = unlimited); an
    InvenioWebSearchWildcardLimitError carrying the partial hitset is
    raised when the limit is hit.
    """
    # call word search method in some cases:
    if f == 'journal' or f == 'authorcount':
        return search_unit_in_bibwords(p, f, wl=wl)
    p_orig = p # saving for eventual future 'no match' reporting
    limit_reached = 0 # flag for knowing if the query limit has been reached
    use_query_limit = False # flag for knowing if to limit the query results or not
    query_addons = "" # will hold additional SQL code for the query
    query_params = () # will hold parameters for the query (their number may vary depending on TYPE argument)
    # wash arguments:
    f = string.replace(f, '*', '%') # replace truncation char '*' in field definition
    if type == 'r':
        query_addons = "REGEXP %s"
        query_params = (p,)
        use_query_limit = True
    else:
        p = string.replace(p, '*', '%') # we now use '*' as the truncation character
        ps = string.split(p, "->", 1) # check for span query:
        # a '->' surrounded by spaces is treated as literal text, not a span:
        if len(ps) == 2 and not (ps[0].endswith(' ') or ps[1].startswith(' ')):
            query_addons = "BETWEEN %s AND %s"
            query_params = (ps[0], ps[1])
            use_query_limit = True
        else:
            if string.find(p, '%') > -1:
                query_addons = "LIKE %s"
                query_params = (p,)
                use_query_limit = True
            else:
                query_addons = "= %s"
                query_params = (p,)
    # construct 'tl' which defines the tag list (MARC tags) to search in:
    tl = []
    if len(f) >= 2 and str(f[0]).isdigit() and str(f[1]).isdigit():
        tl.append(f) # 'f' seems to be okay as it starts by two digits
    else:
        # deduce desired MARC tags on the basis of chosen 'f'
        tl = get_field_tags(f)
        if not tl:
            # f index does not exist, nevermind
            pass
    # okay, start search:
    l = [] # will hold list of recID that matched
    for t in tl:
        # deduce into which bibxxx table we will search:
        digit1, digit2 = int(t[0]), int(t[1])
        bx = "bib%d%dx" % (digit1, digit2)
        bibx = "bibrec_bib%d%dx" % (digit1, digit2)
        # construct and run query:
        if t == "001":
            # tag 001 is the record ID itself, stored in bibrec, not bibxxx:
            if query_addons.find('BETWEEN') > -1 or query_addons.find('=') > -1:
                # verify that the params are integers (to avoid returning record 123 when searching for 123foo)
                try:
                    query_params = tuple(int(param) for param in query_params)
                except ValueError:
                    return intbitset()
            if use_query_limit:
                try:
                    res = run_sql_with_limit("SELECT id FROM bibrec WHERE id %s" % query_addons,
                                             query_params, wildcard_limit=wl)
                except InvenioDbQueryWildcardLimitError, excp:
                    res = excp.res
                    limit_reached = 1 # set the limit reached flag to true
            else:
                res = run_sql("SELECT id FROM bibrec WHERE id %s" % query_addons,
                              query_params)
        else:
            query = "SELECT bibx.id_bibrec FROM %s AS bx LEFT JOIN %s AS bibx ON bx.id=bibx.id_bibxxx WHERE bx.value %s" % \
                    (bx, bibx, query_addons)
            if len(t) != 6 or t[-1:]=='%':
                # wildcard query, or only the beginning of field 't'
                # is defined, so add wildcard character:
                query += " AND bx.tag LIKE %s"
                query_params_and_tag = query_params + (t + '%',)
            else:
                # exact query for 't':
                query += " AND bx.tag=%s"
                query_params_and_tag = query_params + (t,)
            if use_query_limit:
                try:
                    res = run_sql_with_limit(query, query_params_and_tag, wildcard_limit=wl)
                except InvenioDbQueryWildcardLimitError, excp:
                    res = excp.res
                    limit_reached = 1 # set the limit reached flag to true
            else:
                res = run_sql(query, query_params_and_tag)
        # fill the result set:
        for id_bibrec in res:
            if id_bibrec[0]:
                l.append(id_bibrec[0])
    # check no of hits found:
    nb_hits = len(l)
    # okay, return result set:
    set = intbitset(l)
    #check to see if the query limit was reached
    if limit_reached:
        #raise an exception, so we can print a nice message to the user
        raise InvenioWebSearchWildcardLimitError(set)
    return set
def search_unit_in_solr(p, f=None, m=None):
    """
    Query the Solr full-text index and return an intbitset corresponding
    to the result. Parameters (p,f,m) are usual search unit ones.
    """
    query = p
    if m in ('a', 'r'):  # phrase or regexp matching requested
        if query.startswith('%') and query.endswith('%'):
            query = query[1:-1]  # strip partial-phrase markers
        query = '"%s"' % query  # quote for Solr phrase query
    return solr_get_bitset(query, CFG_SOLR_URL)
def search_unit_in_bibrec(datetext1, datetext2, type='c'):
    """
    Return hitset of recIDs that were either created or modified
    (according to 'type' being 'c' or 'm') from datetext1 until
    datetext2, inclusive.  Ignores pattern/collection entirely; the
    result is meant to be intersected with the 'real' query later on.
    """
    hitset = intbitset()
    if type.startswith("m"):
        column = "modification_date"
    else:
        column = "creation_date" # by default we are searching for creation dates
    # a single "d1->d2" span may arrive duplicated in both arguments:
    pieces = datetext1.split('->')
    if len(pieces) > 1 and datetext1 == datetext2:
        datetext1 = pieces[0]
        datetext2 = pieces[1]
    if datetext1 == datetext2:
        # point query: prefix match so that partial dates (e.g. "2004") work
        rows = run_sql("SELECT id FROM bibrec WHERE %s LIKE %%s" % (column,),
                       (datetext1 + '%',))
    else:
        rows = run_sql("SELECT id FROM bibrec WHERE %s>=%%s AND %s<=%%s" % (column, column),
                       (datetext1, datetext2))
    for row in rows:
        hitset += row[0]
    return hitset
def search_unit_by_times_cited(p):
    """
    Return hitset of recIDs found that are cited P times.
    Usually P looks like '10->23'.
    """
    numstr = '"%s"' % p
    # When a zero citation count is requested we must also know the IDs of
    # all records, because uncited records have no citation entries at all.
    allrecs = []
    wants_zero = (p == 0 or p == "0" or
                  p.startswith("0->") or p.endswith("->0"))
    if wants_zero:
        allrecs = intbitset(run_sql("SELECT id FROM bibrec"))
    return get_records_with_num_cites(numstr, allrecs)
def search_unit_refersto(query):
    """
    Search for records satisfying the query (e.g. author:ellis) and
    return list of records referred to by these records.
    """
    if query:
        matched = search_pattern(p=query)
        if matched:
            return get_refersto_hitset(matched)
    # empty query or no matches: nothing refers to nothing
    return intbitset([])
def search_unit_citedby(query):
    """
    Search for records satisfying the query (e.g. author:ellis) and
    return list of records cited by these records.
    """
    if query:
        matched = search_pattern(p=query)
        if matched:
            return get_citedby_hitset(matched)
    # empty query or no matches: nothing is cited by nothing
    return intbitset([])
def intersect_results_with_collrecs(req, hitset_in_any_collection, colls, ap=0, of="hb", verbose=0, ln=CFG_SITE_LANG, display_nearest_terms_box=True):
    """Return dict of hitsets given by intersection of hitset with the collection universes.

    Keys of the returned dict are the collection names from 'colls'.  If
    no hits remain in any requested collection, an empty dict is returned
    and -- for HTML-ish output formats ('of' starting with 'h') -- a
    warning is printed suggesting either a search in the Home collection
    or different search terms.  NOTE(review): the 'ap' parameter is not
    used in this body -- presumably kept for signature symmetry.
    """
    _ = gettext_set_language(ln)
    # search stage 4: intersect with the collection universe:
    if verbose and of.startswith("h"):
        t1 = os.times()[4]
    results = {}
    results_nbhits = 0
    for coll in colls:
        results[coll] = hitset_in_any_collection & get_collection_reclist(coll)
        results_nbhits += len(results[coll])
    if results_nbhits == 0:
        # no hits found, try to search in Home:
        results_in_Home = hitset_in_any_collection & get_collection_reclist(CFG_SITE_NAME)
        if len(results_in_Home) > 0:
            # some hits found in Home, so propose this search:
            if of.startswith("h") and display_nearest_terms_box:
                url = websearch_templates.build_search_url(req.argd, cc=CFG_SITE_NAME, c=[])
                print_warning(req, _("No match found in collection %(x_collection)s. Other public collections gave %(x_url_open)s%(x_nb_hits)d hits%(x_url_close)s.") %\
                              {'x_collection': '<em>' + string.join([get_coll_i18nname(coll, ln, False) for coll in colls], ', ') + '</em>',
                               'x_url_open': '<a class="nearestterms" href="%s">' % (url),
                               'x_nb_hits': len(results_in_Home),
                               'x_url_close': '</a>'})
            results = {}
        else:
            # no hits found in Home, recommend different search terms:
            if of.startswith("h") and display_nearest_terms_box:
                print_warning(req, _("No public collection matched your query. "
                                     "If you were looking for a non-public document, please choose "
                                     "the desired restricted collection first."))
            results = {}
    if verbose and of.startswith("h"):
        # t1 is guaranteed to exist here: same (verbose, of) guard as above
        t2 = os.times()[4]
        print_warning(req, "Search stage 4: intersecting with collection universe gave %d hits." % results_nbhits)
        print_warning(req, "Search stage 4: execution took %.2f seconds." % (t2 - t1))
    return results
def intersect_results_with_hitset(req, results, hitset, ap=0, aptext="", of="hb"):
    """Intersect, in place, each per-collection hitset in 'results'
    (a dict keyed by collection name) with 'hitset'.

    If the intersection comes out completely empty and 'ap' (approximate
    pattern) is true, print `aptext' (for HTML-ish output formats) and
    return the original 'results' unchanged; with 'ap' false an empty
    dict is returned instead.
    """
    if ap:
        results_backup = copy.deepcopy(results)
    else:
        results_backup = {}  # empty dict signals "no hits" to the caller
    total_hits = 0
    for coll in results.keys():
        results[coll].intersection_update(hitset)
        total_hits += len(results[coll])
    if total_hits:
        return results
    # nothing left after intersecting:
    if of.startswith("h"):
        print_warning(req, aptext)
    return results_backup
def create_similarly_named_authors_link_box(author_name, ln=CFG_SITE_LANG):
    """Return a box similar to ``Not satisfied...'' one by proposing
    author searches for similar names. Namely, take AUTHOR_NAME
    and the first initial of the firstame (after comma) and look
    into author index whether authors with e.g. middle names exist.
    Useful mainly for CERN Library that sometimes contains name
    forms like Ellis-N, Ellis-Nick, Ellis-Nicolas all denoting the
    same person. The box isn't proposed if no similarly named
    authors are found to exist.

    Returns an empty string when the feature is disabled, when
    AUTHOR_NAME has no "Surname, Initial" shape, or when no similar
    names were found.
    """
    # return nothing if not configured:
    if CFG_WEBSEARCH_CREATE_SIMILARLY_NAMED_AUTHORS_LINK_BOX == 0:
        return ""
    # return empty box if there is no initial:
    if re.match(r'[^ ,]+, [^ ]', author_name) is None:
        return ""
    # firstly find name comma initial:
    author_name_to_search = re.sub(r'^([^ ,]+, +[^ ,]).*$', '\\1', author_name)
    # secondly search for similar name forms:
    similar_author_names = {}
    # try both the name as given and its accent-stripped form:
    for name in author_name_to_search, strip_accents(author_name_to_search):
        for tag in get_field_tags("author"):
            # deduce into which bibxxx table we will search:
            digit1, digit2 = int(tag[0]), int(tag[1])
            bx = "bib%d%dx" % (digit1, digit2)
            bibx = "bibrec_bib%d%dx" % (digit1, digit2)
            if len(tag) != 6 or tag[-1:]=='%':
                # only the beginning of field 't' is defined, so add wildcard character:
                res = run_sql("""SELECT bx.value FROM %s AS bx
                                  WHERE bx.value LIKE %%s AND bx.tag LIKE %%s""" % bx,
                              (name + "%", tag + "%"))
            else:
                res = run_sql("""SELECT bx.value FROM %s AS bx
                                  WHERE bx.value LIKE %%s AND bx.tag=%%s""" % bx,
                              (name + "%", tag))
            for row in res:
                similar_author_names[row[0]] = 1
    # remove the original name and sort the list:
    try:
        del similar_author_names[author_name]
    except KeyError:
        pass
    # thirdly print the box:
    out = ""
    if similar_author_names:
        out_authors = similar_author_names.keys()
        out_authors.sort()
        tmp_authors = []
        for out_author in out_authors:
            # only propose names that actually have hits:
            nbhits = get_nbhits_in_bibxxx(out_author, "author")
            if nbhits:
                tmp_authors.append((out_author, nbhits))
        out += websearch_templates.tmpl_similar_author_names(
                 authors=tmp_authors, ln=ln)
    return out
def create_nearest_terms_box(urlargd, p, f, t='w', n=5, ln=CFG_SITE_LANG, intro_text_p=True):
    """Return text box containing list of 'n' nearest terms above/below 'p'
    for the field 'f' for matching type 't' (words/phrases) in
    language 'ln'.
    Propose new searches according to `urlargs' with the new words.
    If `intro_text_p' is true, then display the introductory message,
    otherwise print only the nearest terms in the box content.

    Dispatches to the word index, phrase index, bibrec date columns or
    the raw bibxxx tables depending on 't' and 'f'; returns a plain
    message string when no usable index exists or the feature is off.
    """
    # load the right message language
    _ = gettext_set_language(ln)
    if not CFG_WEBSEARCH_DISPLAY_NEAREST_TERMS:
        return _("Your search did not match any records.  Please try again.")
    nearest_terms = []
    if not p: # sanity check
        p = "."
    if p.startswith('%') and p.endswith('%'):
        p = p[1:-1] # fix for partial phrase
    index_id = get_index_id_from_field(f)
    if f == 'fulltext':
        if CFG_SOLR_URL:
            return _("No match found, please enter different search terms.")
        else:
            # FIXME: workaround for not having native phrase index yet
            t = 'w'
    # special indexes:
    if f == 'refersto':
        return _("There are no records referring to %s.") % cgi.escape(p)
    if f == 'citedby':
        return _("There are no records cited by %s.") % cgi.escape(p)
    # look for nearest terms:
    if t == 'w':
        nearest_terms = get_nearest_terms_in_bibwords(p, f, n, n)
        if not nearest_terms:
            return _("No word index is available for %s.") % \
                   ('<em>' + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + '</em>')
    else:
        nearest_terms = []
        if index_id:
            nearest_terms = get_nearest_terms_in_idxphrase(p, index_id, n, n)
        if f == 'datecreated' or f == 'datemodified':
            nearest_terms = get_nearest_terms_in_bibrec(p, f, n, n)
        if not nearest_terms:
            # last resort: browse the raw bibxxx tables
            nearest_terms = get_nearest_terms_in_bibxxx(p, f, n, n)
        if not nearest_terms:
            return _("No phrase index is available for %s.") % \
                   ('<em>' + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + '</em>')
    terminfo = []
    for term in nearest_terms:
        # hit count for each proposed term, from the matching index type:
        if t == 'w':
            hits = get_nbhits_in_bibwords(term, f)
        else:
            if index_id:
                hits = get_nbhits_in_idxphrases(term, f)
            elif f == 'datecreated' or f == 'datemodified':
                hits = get_nbhits_in_bibrec(term, f)
            else:
                hits = get_nbhits_in_bibxxx(term, f)
        argd = {}
        argd.update(urlargd)
        # check which fields contained the requested parameter, and replace it.
        for (px, fx) in ('p', 'f'), ('p1', 'f1'), ('p2', 'f2'), ('p3', 'f3'):
            if px in argd:
                argd_px = argd[px]
                if t == 'w':
                    # p was stripped of accents, to do the same:
                    argd_px = strip_accents(argd_px)
                #argd[px] = string.replace(argd_px, p, term, 1)
                #we need something similar, but case insensitive
                pattern_index = string.find(argd_px.lower(), p.lower())
                if pattern_index > -1:
                    argd[px] = argd_px[:pattern_index] + term + argd_px[pattern_index+len(p):]
                    break
                #this is doing exactly the same as:
                #argd[px] = re.sub('(?i)' + re.escape(p), term, argd_px, 1)
                #but is ~4x faster (2us vs. 8.25us)
        terminfo.append((term, hits, argd))
    intro = ""
    if intro_text_p: # add full leading introductory text
        if f:
            intro = _("Search term %(x_term)s inside index %(x_index)s did not match any record. Nearest terms in any collection are:") % \
                     {'x_term': "<em>" + cgi.escape(p.startswith("%") and p.endswith("%") and p[1:-1] or p) + "</em>",
                      'x_index': "<em>" + cgi.escape(get_field_i18nname(get_field_name(f) or f, ln, False)) + "</em>"}
        else:
            intro = _("Search term %s did not match any record. Nearest terms in any collection are:") % \
                     ("<em>" + cgi.escape(p.startswith("%") and p.endswith("%") and p[1:-1] or p) + "</em>")
    return websearch_templates.tmpl_nearest_term_box(p=p, ln=ln, f=f, terminfo=terminfo,
                                                     intro=intro)
def get_nearest_terms_in_bibwords(p, f, n_below, n_above):
    """Return list of +n -n nearest terms to word `p' in index for field `f'."""
    # deduce into which bibwordsX table we will search:
    bibwordsX = "idxWORD%02dF" % get_index_id_from_field("anyfield")
    if f:
        index_id = get_index_id_from_field(f)
        if not index_id:
            return []  # no word index exists for field f
        bibwordsX = "idxWORD%02dF" % index_id
    # fetch the n_above terms just before `p' (reversed into ascending order):
    above = [row[0] for row in
             run_sql("SELECT term FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % bibwordsX,
                     (p, n_above))]
    above.reverse()
    # fetch the n_below terms just after `p':
    below = [row[0] for row in
             run_sql("SELECT term FROM %s WHERE term>%%s ORDER BY term ASC LIMIT %%s" % bibwordsX,
                     (p, n_below))]
    # sandwich `p' itself between its neighbours:
    return above + [p] + below
def get_nearest_terms_in_idxphrase(p, index_id, n_below, n_above):
    """Browse (-n_above, +n_below) closest bibliographic phrases
    for the given pattern p in the given field idxPHRASE table,
    regardless of collection.
    Return list of [phrase1, phrase2, ... , phrase_n]."""
    if CFG_INSPIRE_SITE and index_id in (3, 15): # FIXME: workaround due to new fuzzy index
        return [p,]
    idxphraseX = "idxPHRASE%02dF" % index_id
    # phrases strictly below `p', reversed into ascending order:
    above = [row[0] for row in
             run_sql("SELECT term FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % idxphraseX, (p, n_above))]
    above.reverse()
    # phrases at or after `p':
    below = [row[0] for row in
             run_sql("SELECT term FROM %s WHERE term>=%%s ORDER BY term ASC LIMIT %%s" % idxphraseX, (p, n_below))]
    return above + below
def get_nearest_terms_in_idxphrase_with_collection(p, index_id, n_below, n_above, collection):
    """Browse (-n_above, +n_below) closest bibliographic phrases
    for the given pattern p in the given field idxPHRASE table,
    considering the collection (intbitset).
    Return list of [(phrase1, hitset), (phrase2, hitset), ... , (phrase_n, hitset)]."""
    idxphraseX = "idxPHRASE%02dF" % index_id
    # over-fetch (x3) because phrases with no hits in `collection' are dropped:
    above = []
    for term, hitlist in run_sql("SELECT term,hitlist FROM %s WHERE term<%%s ORDER BY term DESC LIMIT %%s" % idxphraseX, (p, n_above * 3)):
        hits = intbitset(hitlist) & collection
        if hits:
            above.append((term, len(hits)))
    below = []
    for term, hitlist in run_sql("SELECT term,hitlist FROM %s WHERE term>=%%s ORDER BY term ASC LIMIT %%s" % idxphraseX, (p, n_below * 3)):
        hits = intbitset(hitlist) & collection
        if hits:
            below.append((term, len(hits)))
    # closest-above terms end up last after reversing, so slicing keeps them:
    above.reverse()
    return above[-n_above:] + below[:n_below]
def get_nearest_terms_in_bibxxx(p, f, n_below, n_above):
    """Browse (-n_above, +n_below) closest bibliographic phrases
    for the given pattern p in the given field f, regardless
    of collection.
    Return list of [phrase1, phrase2, ... , phrase_n].

    Falls back to the word index for the journal field, and to the
    idxPHRASE tables when a phrase index exists for f; otherwise browses
    the raw bibxxx tables for every MARC tag mapped to f.
    """
    ## determine browse field:
    if not f and string.find(p, ":") > 0: # does 'p' contain ':'?
        f, p = string.split(p, ":", 1)
    # FIXME: quick hack for the journal index
    if f == 'journal':
        return get_nearest_terms_in_bibwords(p, f, n_below, n_above)
    ## We are going to take max(n_below, n_above) as the number of
    ## values to ferch from bibXXx.  This is needed to work around
    ## MySQL UTF-8 sorting troubles in 4.0.x.  Proper solution is to
    ## use MySQL 4.1.x or our own idxPHRASE in the future.
    index_id = get_index_id_from_field(f)
    if index_id:
        return get_nearest_terms_in_idxphrase(p, index_id, n_below, n_above)
    n_fetch = 2*max(n_below, n_above)
    ## construct 'tl' which defines the tag list (MARC tags) to search in:
    tl = []
    if str(f[0]).isdigit() and str(f[1]).isdigit():
        tl.append(f) # 'f' seems to be okay as it starts by two digits
    else:
        # deduce desired MARC tags on the basis of chosen 'f'
        tl = get_field_tags(f)
    ## start browsing to fetch list of hits:
    browsed_phrases = {} # will hold {phrase1: 1, phrase2: 1, ..., phraseN: 1} dict of browsed phrases (to make them unique)
    # always add self to the results set:
    browsed_phrases[p.startswith("%") and p.endswith("%") and p[1:-1] or p] = 1
    for t in tl:
        # deduce into which bibxxx table we will search:
        digit1, digit2 = int(t[0]), int(t[1])
        bx = "bib%d%dx" % (digit1, digit2)
        bibx = "bibrec_bib%d%dx" % (digit1, digit2)
        # firstly try to get `n' closest phrases above `p':
        if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character:
            res = run_sql("""SELECT bx.value FROM %s AS bx
                              WHERE bx.value<%%s AND bx.tag LIKE %%s
                              ORDER BY bx.value DESC LIMIT %%s""" % bx,
                          (p, t + "%", n_fetch))
        else:
            res = run_sql("""SELECT bx.value FROM %s AS bx
                              WHERE bx.value<%%s AND bx.tag=%%s
                              ORDER BY bx.value DESC LIMIT %%s""" % bx,
                          (p, t, n_fetch))
        for row in res:
            browsed_phrases[row[0]] = 1
        # secondly try to get `n' closest phrases equal to or below `p':
        if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character:
            res = run_sql("""SELECT bx.value FROM %s AS bx
                              WHERE bx.value>=%%s AND bx.tag LIKE %%s
                              ORDER BY bx.value ASC LIMIT %%s""" % bx,
                          (p, t + "%", n_fetch))
        else:
            res = run_sql("""SELECT bx.value FROM %s AS bx
                              WHERE bx.value>=%%s AND bx.tag=%%s
                              ORDER BY bx.value ASC LIMIT %%s""" % bx,
                          (p, t, n_fetch))
        for row in res:
            browsed_phrases[row[0]] = 1
    # select first n words only: (this is needed as we were searching
    # in many different tables and so aren't sure we have more than n
    # words right; this of course won't be needed when we shall have
    # one ACC table only for given field):
    phrases_out = browsed_phrases.keys()
    phrases_out.sort(lambda x, y: cmp(string.lower(strip_accents(x)),
                                      string.lower(strip_accents(y))))
    # find position of self:
    try:
        idx_p = phrases_out.index(p)
    except:
        # p itself was not browsed (e.g. wildcard form); assume the middle
        idx_p = len(phrases_out)/2
    # return n_above and n_below:
    return phrases_out[max(0, idx_p-n_above):idx_p+n_below]
def get_nearest_terms_in_bibrec(p, f, n_below, n_above):
    """Return sorted list of nearest terms and counts from bibrec table.

    p is usually a date, and f either 'datecreated' or 'datemodified'
    (anything else falls back to the creation date column).
    Note: below/above count is very approximative, not really respected.
    """
    col = 'creation_date'
    if f == 'datemodified':
        col = 'modification_date'
    res_above = run_sql("""SELECT DATE_FORMAT(%s,'%%%%Y-%%%%m-%%%%d %%%%H:%%%%i:%%%%s')
                             FROM bibrec WHERE %s < %%s
                            ORDER BY %s DESC LIMIT %%s""" % (col, col, col),
                        (p, n_above))
    res_below = run_sql("""SELECT DATE_FORMAT(%s,'%%%%Y-%%%%m-%%%%d %%%%H:%%%%i:%%%%s')
                             FROM bibrec WHERE %s > %%s
                            ORDER BY %s ASC LIMIT %%s""" % (col, col, col),
                        (p, n_below))
    # merge and deduplicate both sides, then return them date-sorted
    # (fixes the previous redundant list(list) conversion and manual sort):
    out = set([])
    for row in res_above:
        out.add(row[0])
    for row in res_below:
        out.add(row[0])
    return sorted(out)
def get_nbhits_in_bibrec(term, f):
    """Return number of hits in bibrec table. term is usually a date,
    and f is either 'datecreated' or 'datemodified'."""
    if f == 'datemodified':
        col = 'modification_date'
    else:
        col = 'creation_date'
    # prefix match so partial dates (e.g. "2004-12") count all matching records
    rows = run_sql("SELECT COUNT(*) FROM bibrec WHERE %s LIKE %%s" % (col,),
                   (term + '%',))
    return rows[0][0]
def get_nbhits_in_bibwords(word, f):
    """Return number of hits for word 'word' inside words index for field 'f'."""
    total = 0
    # deduce into which bibwordsX table we will search:
    bibwordsX = "idxWORD%02dF" % get_index_id_from_field("anyfield")
    if f:
        index_id = get_index_id_from_field(f)
        if not index_id:
            return 0  # no word index exists for field f
        bibwordsX = "idxWORD%02dF" % index_id
    if word:
        for hitlist in run_sql("SELECT hitlist FROM %s WHERE term=%%s" % bibwordsX,
                               (word,)):
            total += len(intbitset(hitlist[0]))
    return total
def get_nbhits_in_idxphrases(word, f):
    """Return number of hits for word 'word' inside phrase index for field 'f'."""
    total = 0
    # deduce into which idxPHRASEX table we will search:
    idxphraseX = "idxPHRASE%02dF" % get_index_id_from_field("anyfield")
    if f:
        index_id = get_index_id_from_field(f)
        if not index_id:
            return 0  # no phrase index exists for field f
        idxphraseX = "idxPHRASE%02dF" % index_id
    if word:
        for hitlist in run_sql("SELECT hitlist FROM %s WHERE term=%%s" % idxphraseX,
                               (word,)):
            total += len(intbitset(hitlist[0]))
    return total
def get_nbhits_in_bibxxx(p, f):
    """Return number of hits for word 'word' inside words index for field 'f'.

    Counts distinct recIDs whose bibxxx value equals p exactly, for every
    MARC tag mapped to f.  The journal field is delegated to the word
    index.
    """
    ## determine browse field:
    if not f and string.find(p, ":") > 0: # does 'p' contain ':'?
        f, p = string.split(p, ":", 1)
    # FIXME: quick hack for the journal index
    if f == 'journal':
        return get_nbhits_in_bibwords(p, f)
    ## construct 'tl' which defines the tag list (MARC tags) to search in:
    tl = []
    if str(f[0]).isdigit() and str(f[1]).isdigit():
        tl.append(f) # 'f' seems to be okay as it starts by two digits
    else:
        # deduce desired MARC tags on the basis of chosen 'f'
        tl = get_field_tags(f)
    # start searching:
    recIDs = {} # will hold dict of {recID1: 1, recID2: 1, ..., } (unique recIDs, therefore)
    for t in tl:
        # deduce into which bibxxx table we will search:
        digit1, digit2 = int(t[0]), int(t[1])
        bx = "bib%d%dx" % (digit1, digit2)
        bibx = "bibrec_bib%d%dx" % (digit1, digit2)
        if len(t) != 6 or t[-1:]=='%': # only the beginning of field 't' is defined, so add wildcard character:
            res = run_sql("""SELECT bibx.id_bibrec FROM %s AS bibx, %s AS bx
                              WHERE bx.value=%%s AND bx.tag LIKE %%s
                                AND bibx.id_bibxxx=bx.id""" % (bibx, bx),
                          (p, t + "%"))
        else:
            res = run_sql("""SELECT bibx.id_bibrec FROM %s AS bibx, %s AS bx
                              WHERE bx.value=%%s AND bx.tag=%%s
                                AND bibx.id_bibxxx=bx.id""" % (bibx, bx),
                          (p, t))
        for row in res:
            recIDs[row[0]] = 1
    return len(recIDs)
def get_mysql_recid_from_aleph_sysno(sysno):
    """Returns DB's recID for ALEPH sysno passed in the argument (e.g. "002379334CER").
    Returns None in case of failure."""
    rows = run_sql("""SELECT bb.id_bibrec FROM bibrec_bib97x AS bb, bib97x AS b
                      WHERE b.value=%s AND b.tag='970__a' AND bb.id_bibxxx=b.id""",
                   (sysno,))
    if rows:
        return rows[0][0]
    return None
def guess_primary_collection_of_a_record(recID):
    """Return primary collection name a record recid belongs to, by
    testing 980 identifier.
    May lead to bad guesses when a collection is defined dynamically
    via dbquery.
    In that case, return 'CFG_SITE_NAME'."""
    out = CFG_SITE_NAME
    dbcollids = get_fieldvalues(recID, "980__a")
    for dbcollid in dbcollids:
        # all the dbquery spellings a collection may have been defined with:
        variants = ("collection:" + dbcollid,
                    'collection:"' + dbcollid + '"',
                    "980__a:" + dbcollid,
                    '980__a:"' + dbcollid + '"',
                    '980:' + dbcollid ,
                    '980:"' + dbcollid + '"')
        res = run_sql("SELECT name FROM collection WHERE dbquery IN (%s,%s,%s,%s,%s,%s)", variants)
        if res:
            # first matching 980__a value wins:
            out = res[0][0]
            break
    if CFG_CERN_SITE:
        # dirty hack for ATLAS collections at CERN:
        if out in ('ATLAS Communications', 'ATLAS Internal Notes'):
            for alternative_collection in ('ATLAS Communications Physics',
                                           'ATLAS Communications General',
                                           'ATLAS Internal Notes Physics',
                                           'ATLAS Internal Notes General',):
                if recID in get_collection_reclist(alternative_collection):
                    return alternative_collection
        # dirty hack for FP
        FP_collections = {'DO': 'Current Price Enquiries',
                          'IT': 'Current Invitation for Tenders',
                          'MS': 'Current Market Surveys'}
        fp_coll_ids = [coll for coll in dbcollids if coll in FP_collections]
        for coll in fp_coll_ids:
            if recID in get_collection_reclist(FP_collections[coll]):
                return FP_collections[coll]
    return out
# extracts the collection name out of /collection/<name> referer paths
# (used by guess_collection_of_a_record below)
_re_collection_url = re.compile('/collection/(.+)')
def guess_collection_of_a_record(recID, referer=None, recreate_cache_if_needed=True):
    """Return collection name a record recid belongs to, by first testing
    the referer URL if provided and otherwise returning the
    primary collection.

    Recognizes /collection/<name> referer paths as well as /search URLs
    carrying 'cc'/'c' query arguments; any candidate collection is only
    accepted if the record is actually in its reclist.
    """
    if referer:
        dummy, hostname, path, dummy, query, dummy = urlparse.urlparse(referer)
        #requests can come from different invenio installations, with different collections
        if CFG_SITE_URL.find(hostname) < 0:
            return guess_primary_collection_of_a_record(recID)
        g = _re_collection_url.match(path)
        if g:
            name = urllib.unquote_plus(g.group(1))
            #check if this collection actually exist (also normalize the name if case-insensitive)
            name = get_coll_normalised_name(name)
            if name and recID in get_collection_reclist(name):
                return name
        elif path.startswith('/search'):
            if recreate_cache_if_needed:
                collection_reclist_cache.recreate_cache_if_needed()
            query = cgi.parse_qs(query)
            # try the current ('cc') then the chosen ('c') collections:
            for name in query.get('cc', []) + query.get('c', []):
                name = get_coll_normalised_name(name)
                if name and recID in get_collection_reclist(name, recreate_cache_if_needed=False):
                    return name
    # referer missing or inconclusive: fall back to the 980-based guess
    return guess_primary_collection_of_a_record(recID)
def is_record_in_any_collection(recID, recreate_cache_if_needed=True):
    """Tell whether the record belongs to at least one collection.

    A good, although not perfect, indicator to guess whether webcoll has
    already run after this record was entered into the system."""
    if recreate_cache_if_needed:
        collection_reclist_cache.recreate_cache_if_needed()
    return any(recID in get_collection_reclist(coll, recreate_cache_if_needed=False)
               for coll in collection_reclist_cache.cache.keys())
def get_all_collections_of_a_record(recID, recreate_cache_if_needed=True):
    """Return the names of every collection the record belongs to.

    Note this scans all known collections, i.e. O(n_collections)."""
    if recreate_cache_if_needed:
        collection_reclist_cache.recreate_cache_if_needed()
    return [coll for coll in collection_reclist_cache.cache.keys()
            if recID in get_collection_reclist(coll, recreate_cache_if_needed=False)]
def get_tag_name(tag_value, prolog="", epilog=""):
    """Return the human-readable name of a MARC tag, looked up in the 'tag' table.

    The name is wrapped in prolog/epilog; "" is returned when the tag is unknown.
    Example: input='100__%', output='first author'."""
    rows = run_sql("SELECT name FROM tag WHERE value=%s", (tag_value,))
    if not rows:
        return ""
    return prolog + rows[0][0] + epilog
def get_fieldcodes():
    """Return the list of field codes that may have been passed as 'search options' in URL.

    Example: output=['subject', 'division']."""
    return [row[0] for row in run_sql("SELECT DISTINCT(code) FROM field")]
def get_field_name(code):
    """Return the display name for a field code, e.g. reportnumber -> report number.

    An empty string is returned when the code is unknown."""
    rows = run_sql("SELECT name FROM field WHERE code=%s", (code, ))
    if not rows:
        return ""
    return rows[0][0]
def get_field_tags(field):
    """Return the list of MARC tags mapped to the field code 'field'.

    Tags come back ordered by descending score; empty list on error.
    Example: field='author', output=['100__%','700__%']."""
    query = """SELECT t.value FROM tag AS t, field_tag AS ft, field AS f
               WHERE f.code=%s AND ft.id_field=f.id AND t.id=ft.id_tag
               ORDER BY ft.score DESC"""
    return [row[0] for row in run_sql(query, (field, ))]
def get_fieldvalues_alephseq_like(recID, tags_in, can_see_hidden=False):
    """Return buffer of ALEPH sequential-like textual format with fields found
    in the list TAGS_IN for record RECID.
    If can_see_hidden is True, just print everything. Otherwise hide fields
    from CFG_BIBFORMAT_HIDDEN_TAGS.
    """
    out = ""
    if type(tags_in) is not list:
        tags_in = [tags_in,]
    if len(tags_in) == 1 and len(tags_in[0]) == 6:
        ## case A: one concrete subfield asked, so print its value if found
        ## (use with care: can mislead if field has multiple occurrences)
        out += string.join(get_fieldvalues(recID, tags_in[0]),"\n")
    else:
        ## case B: print our "text MARC" format; works safely all the time
        # find out which tags to output:
        dict_of_tags_out = {}
        if not tags_in:
            # no tags given: output everything (00% .. 99%)
            for i in range(0, 10):
                for j in range(0, 10):
                    dict_of_tags_out["%d%d%%" % (i, j)] = 1
        else:
            for tag in tags_in:
                if len(tag) == 0:
                    for i in range(0, 10):
                        for j in range(0, 10):
                            dict_of_tags_out["%d%d%%" % (i, j)] = 1
                elif len(tag) == 1:
                    for j in range(0, 10):
                        dict_of_tags_out["%s%d%%" % (tag, j)] = 1
                elif len(tag) < 5:
                    dict_of_tags_out["%s%%" % tag] = 1
                else:
                    # tags of length >= 5 are truncated to five characters.
                    # BUGFIX: this branch used to read `elif tag >= 6:` -- a
                    # string-to-int comparison, always true under Python 2 and
                    # a TypeError under Python 3; `len(tag)` was intended.
                    dict_of_tags_out[tag[0:5]] = 1
        tags_out = dict_of_tags_out.keys()
        tags_out.sort()
        # search all bibXXx tables as needed:
        for tag in tags_out:
            digits = tag[0:2]
            try:
                intdigits = int(digits)
                if intdigits < 0 or intdigits > 99:
                    raise ValueError
            except ValueError:
                # invalid tag value asked for
                continue
            if tag.startswith("001") or tag.startswith("00%"):
                # the record ID pseudo-field is emitted directly
                if out:
                    out += "\n"
                out += "%09d %s %d" % (recID, "001__", recID)
            bx = "bib%sx" % digits
            bibx = "bibrec_bib%sx" % digits
            query = "SELECT b.tag,b.value,bb.field_number FROM %s AS b, %s AS bb "\
                    "WHERE bb.id_bibrec=%%s AND b.id=bb.id_bibxxx AND b.tag LIKE %%s"\
                    "ORDER BY bb.field_number, b.tag ASC" % (bx, bibx)
            res = run_sql(query, (recID, str(tag)+'%'))
            # go through fields:
            field_number_old = -999
            field_old = ""
            for row in res:
                field, value, field_number = row[0], row[1], row[2]
                ind1, ind2 = field[3], field[4]
                printme = True
                # check the stuff in hiddenfields
                if not can_see_hidden:
                    for htag in CFG_BIBFORMAT_HIDDEN_TAGS:
                        ltag = len(htag)
                        samelenfield = field[0:ltag]
                        if samelenfield == htag:
                            printme = False
                if ind1 == "_":
                    ind1 = ""
                if ind2 == "_":
                    ind2 = ""
                # print field tag
                if printme:
                    if field_number != field_number_old or field[:-1] != field_old[:-1]:
                        if out:
                            out += "\n"
                        out += "%09d %s " % (recID, field[:5])
                        field_number_old = field_number
                        field_old = field
                    # print subfield value
                    if field[0:2] == "00" and field[-1:] == "_":
                        out += value
                    else:
                        out += "$$%s%s" % (field[-1:], value)
    return out
def get_merged_recid(recID):
    """ Return the record ID of the record with
    which the given record has been merged.
    @param recID: deleted record recID
    @type recID: int
    @return: merged record recID
    @rtype: int or None
    """
    # 970__d holds the recid of the record this one was merged into;
    # return the first value that parses as an integer.
    for value in get_fieldvalues(recID, "970__d"):
        try:
            return int(value)
        except ValueError:
            continue
    return None
def record_exists(recID):
    """Return 1 if record RECID exists.
    Return 0 if it doesn't exist.
    Return -1 if it exists but is marked as deleted.
    """
    res = run_sql("SELECT id FROM bibrec WHERE id=%s", (recID,), 1)
    if not res:
        return 0
    try: # if recid is '123foo', mysql will return id=123, and we don't want that
        recID = int(recID)
    except ValueError:
        return 0
    # record exists; now check whether it is marked as deleted:
    dbcollids = get_fieldvalues(recID, "980__%")
    if "DELETED" in dbcollids or (CFG_CERN_SITE and "DUMMY" in dbcollids):
        return -1 # exists, but marked as deleted
    return 1 # exists fine
def record_empty(recID):
    """
    Is this record empty, e.g. has only 001, waiting for integration?
    @param recID: the record identifier.
    @type recID: int
    @return: 1 if the record is empty, 0 otherwise.
    @rtype: int
    """
    record = get_record(recID)
    # "empty" means missing, or holding fewer than two fields.
    return int(record is None or len(record) < 2)
def record_public_p(recID, recreate_cache_if_needed=True):
    """Tell whether the record is public, i.e. whether it can be found in the
    Home collection; truthy if so, falsy otherwise."""
    public_recids = get_collection_reclist(CFG_SITE_NAME,
                                           recreate_cache_if_needed=recreate_cache_if_needed)
    return recID in public_recids
def get_creation_date(recID, fmt="%Y-%m-%d"):
    "Returns the creation date of the record 'recID', formatted per fmt ('' when unknown)."
    res = run_sql("SELECT DATE_FORMAT(creation_date,%s) FROM bibrec WHERE id=%s", (fmt, recID), 1)
    if res:
        return res[0][0]
    return ""
def get_modification_date(recID, fmt="%Y-%m-%d"):
    "Returns the date of last modification for the record 'recID' ('' when unknown)."
    res = run_sql("SELECT DATE_FORMAT(modification_date,%s) FROM bibrec WHERE id=%s", (fmt, recID), 1)
    if res:
        return res[0][0]
    return ""
def print_warning(req, msg, msg_type='', prologue='<br />', epilogue='<br />'):
    "Writes a formatted warning message to req; a no-op when req or msg is empty."
    if not (req and msg):
        return
    req.write(websearch_templates.tmpl_print_warning(msg=msg,
                                                     type=msg_type,
                                                     prologue=prologue,
                                                     epilogue=epilogue))
def print_search_info(p, f, sf, so, sp, rm, of, ot, collection=CFG_SITE_NAME, nb_found=-1, jrec=1, rg=10,
                      aas=0, ln=CFG_SITE_LANG, p1="", p2="", p3="", f1="", f2="", f3="", m1="", m2="", m3="", op1="", op2="",
                      sc=1, pl_in_url="",
                      d1y=0, d1m=0, d1d=0, d2y=0, d2m=0, d2d=0, dt="",
                      cpu_time=-1, middle_only=0):
    """Prints stripe with the information on 'collection' and 'nb_found' results and CPU time.
    Also, prints navigation links (beg/next/prev/end) inside the results set.
    If middle_only is set to 1, it will only print the middle box information (beg/netx/prev/end/etc) links.
    This is suitable for displaying navigation links at the bottom of the search results page.

    This is a thin wrapper: after clamping jrec to the valid range it passes
    every search parameter straight through to the template."""
    # sanity check: keep the start-record index inside [1, nb_found]
    if jrec < 1:
        jrec = 1
    if jrec > nb_found:
        jrec = max(nb_found-rg+1, 1)
    # delegate all rendering to the template layer
    return websearch_templates.tmpl_print_search_info(
             ln = ln,
             collection = collection,
             aas = aas,
             collection_name = get_coll_i18nname(collection, ln, False),
             collection_id = get_colID(collection),
             middle_only = middle_only,
             rg = rg,
             nb_found = nb_found,
             sf = sf,
             so = so,
             rm = rm,
             of = of,
             ot = ot,
             p = p,
             f = f,
             p1 = p1,
             p2 = p2,
             p3 = p3,
             f1 = f1,
             f2 = f2,
             f3 = f3,
             m1 = m1,
             m2 = m2,
             m3 = m3,
             op1 = op1,
             op2 = op2,
             pl_in_url = pl_in_url,
             d1y = d1y,
             d1m = d1m,
             d1d = d1d,
             d2y = d2y,
             d2m = d2m,
             d2d = d2d,
             dt = dt,
             jrec = jrec,
             sc = sc,
             sp = sp,
             all_fieldcodes = get_fieldcodes(),
             cpu_time = cpu_time,
           )
def print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, collection=CFG_SITE_NAME, nb_found=-1, jrec=1, rg=10,
                      aas=0, ln=CFG_SITE_LANG, p1="", p2="", p3="", f1="", f2="", f3="", m1="", m2="", m3="", op1="", op2="",
                      sc=1, pl_in_url="",
                      d1y=0, d1m=0, d1d=0, d2y=0, d2m=0, d2d=0, dt="",
                      cpu_time=-1, middle_only=0):
    """Prints stripe with the information on 'collection' and 'nb_found' results and CPU time.
    Also, prints navigation links (beg/next/prev/end) inside the results set.
    If middle_only is set to 1, it will only print the middle box information (beg/netx/prev/end/etc) links.
    This is suitable for displaying navigation links at the bottom of the search results page.

    Thin wrapper: after clamping jrec it passes every parameter through to the
    hosted-search template.  (An unused local `out = ""` was removed.)"""
    # sanity check: keep the start-record index inside [1, nb_found]
    if jrec < 1:
        jrec = 1
    if jrec > nb_found:
        jrec = max(nb_found-rg+1, 1)
    return websearch_templates.tmpl_print_hosted_search_info(
             ln = ln,
             collection = collection,
             aas = aas,
             collection_name = get_coll_i18nname(collection, ln, False),
             collection_id = get_colID(collection),
             middle_only = middle_only,
             rg = rg,
             nb_found = nb_found,
             sf = sf,
             so = so,
             rm = rm,
             of = of,
             ot = ot,
             p = p,
             f = f,
             p1 = p1,
             p2 = p2,
             p3 = p3,
             f1 = f1,
             f2 = f2,
             f3 = f3,
             m1 = m1,
             m2 = m2,
             m3 = m3,
             op1 = op1,
             op2 = op2,
             pl_in_url = pl_in_url,
             d1y = d1y,
             d1m = d1m,
             d1d = d1d,
             d2y = d2y,
             d2m = d2m,
             d2d = d2d,
             dt = dt,
             jrec = jrec,
             sc = sc,
             sp = sp,
             all_fieldcodes = get_fieldcodes(),
             cpu_time = cpu_time,
           )
def print_results_overview(colls, results_final_nb_total, results_final_nb, cpu_time, ln=CFG_SITE_LANG, ec=[], hosted_colls_potential_results_p=False):
    """Prints results overview box with links to particular collections below.

    Builds a small dict (id/code/i18n name) per collection and delegates the
    rendering to the template.  (An unused local `out = ""` was removed.)"""
    new_colls = []
    for coll in colls:
        new_colls.append({
                          'id': get_colID(coll),
                          'code': coll,
                          'name': get_coll_i18nname(coll, ln, False),
                          })
    return websearch_templates.tmpl_print_results_overview(
                     ln = ln,
                     results_final_nb_total = results_final_nb_total,
                     results_final_nb = results_final_nb,
                     cpu_time = cpu_time,
                     colls = new_colls,
                     ec = ec,
                     hosted_colls_potential_results_p = hosted_colls_potential_results_p,
                   )
def print_hosted_results(url_and_engine, ln=CFG_SITE_LANG, of=None, req=None, no_records_found=False, search_timed_out=False, limit=CFG_EXTERNAL_COLLECTION_MAXRESULTS):
    """Prints the full results of a hosted collection.

    For HTML output formats ('h*') short-circuits with a plain message when
    the external engine returned nothing or timed out; otherwise delegates
    to the template."""
    # BUGFIX: 'of' defaults to None, so guard before calling startswith()
    # to avoid an AttributeError.
    if of and of.startswith("h"):
        if no_records_found:
            return "<br />No results found."
        if search_timed_out:
            return "<br />The search engine did not respond in time."
    return websearch_templates.tmpl_print_hosted_results(
        url_and_engine=url_and_engine,
        ln=ln,
        of=of,
        req=req,
        limit=limit
        )
class BibSortDataCacher(DataCacher):
    """
    Cache holding all structures created by bibsort
    ( _data, data_dict).
    """
    def __init__(self, method_name):
        """Set up the cacher for the bibsort method called method_name.

        Looks up the method id in bsrMETHOD; on any database problem the id
        stays 0 and the cache will simply remain empty."""
        self.method_name = method_name
        self.method_id = 0
        # BUGFIX: `res` must exist even when run_sql raises, otherwise the
        # `if res and res[0]:` below raised NameError on database errors.
        res = None
        try:
            res = run_sql("""SELECT id from bsrMETHOD where name = %s""", (self.method_name,))
        except Exception:
            self.method_id = 0
        if res and res[0]:
            self.method_id = res[0][0]
        else:
            self.method_id = 0
        def cache_filler():
            """Load data_dict_ordered and the per-bucket record sets from the DB."""
            method_id = self.method_id
            alldicts = {}
            if self.method_id == 0:
                # no such method: nothing to cache
                return {}
            try:
                res_data = run_sql("""SELECT data_dict_ordered from bsrMETHODDATA \
                    where id_bsrMETHOD = %s""", (method_id,))
                res_buckets = run_sql("""SELECT bucket_no, bucket_data from bsrMETHODDATABUCKET\
                    where id_bsrMETHOD = %s""", (method_id,))
            except Exception:
                # database problems, return empty cache
                return {}
            try:
                data_dict_ordered = deserialize_via_marshal(res_data[0][0])
            except Exception:
                # missing or corrupt blob: fall back to an empty mapping
                data_dict_ordered = {}
            alldicts['data_dict_ordered'] = data_dict_ordered # recid: weight
            if not res_buckets:
                alldicts['bucket_data'] = {}
                return alldicts
            for row in res_buckets:
                bucket_no = row[0]
                try:
                    bucket_data = intbitset(row[1])
                except Exception:
                    # corrupt bucket blob: treat it as empty
                    bucket_data = intbitset([])
                alldicts.setdefault('bucket_data', {})[bucket_no] = bucket_data
            return alldicts
        def timestamp_verifier():
            """Return the newest last_updated stamp of method data and buckets."""
            method_id = self.method_id
            res = run_sql("""SELECT last_updated from bsrMETHODDATA where id_bsrMETHOD = %s""", (method_id,))
            try:
                update_time_methoddata = str(res[0][0])
            except IndexError:
                update_time_methoddata = '1970-01-01 00:00:00'
            res = run_sql("""SELECT max(last_updated) from bsrMETHODDATABUCKET where id_bsrMETHOD = %s""", (method_id,))
            try:
                update_time_buckets = str(res[0][0])
            except IndexError:
                update_time_buckets = '1970-01-01 00:00:00'
            return max(update_time_methoddata, update_time_buckets)
        DataCacher.__init__(self, cache_filler, timestamp_verifier)
def get_sorting_methods():
    """Return {method_name: definition} for every bibsort method that has
    data in bsrMETHODDATA; empty dict when buckets are disabled or the
    tables are unavailable."""
    if not CFG_BIBSORT_BUCKETS: # we do not want to use buckets
        return {}
    try: # make sure the method has some data
        res = run_sql("""SELECT m.name, m.definition FROM bsrMETHOD m, bsrMETHODDATA md WHERE m.id = md.id_bsrMETHOD""")
    except:
        return {}
    return dict(res)
# Module-level bootstrap: read the available bibsort methods once at import
# time and create one BibSortDataCacher per method.
sorting_methods = get_sorting_methods()
cache_sorted_data = {}
for sorting_method in sorting_methods:
    try:
        # touch the existing cacher to check it is present and usable
        cache_sorted_data[sorting_method].is_ok_p
    except Exception:
        cache_sorted_data[sorting_method] = BibSortDataCacher(sorting_method)
def get_tags_form_sort_fields(sort_fields):
    """Given a list of sort_fields, return the tags associated with it and
    also the name of the field that has no tags associated, to be able to
    display a message to the user."""
    if not sort_fields:
        return [], ''
    tags = []
    for sort_field in sort_fields:
        if sort_field and str(sort_field[0:2]).isdigit():
            # two leading digits: most likely already a MARC tag
            tags.append(sort_field)
            continue
        # otherwise consult the 'field' table
        field_tags = get_field_tags(sort_field)
        if not field_tags:
            # no tags known for this field: report it back to the caller
            return [], sort_field
        tags.extend(field_tags)
    return tags, ''
def rank_records(req, rank_method_code, rank_limit_relevance, hitset_global, pattern=None, verbose=0, sort_order='d', of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None):
    """Initial entry point for ranking records, acts like a dispatcher.
    (i) rank_method_code is in bsrMETHOD, bibsort buckets can be used;
    (ii)rank_method_code is not in bsrMETHOD, use bibrank;

    Returns a 5-tuple (recids, scores, score_prologue, score_epilogue, comment).
    """
    if CFG_BIBSORT_BUCKETS and sorting_methods:
        # look for a bibsort method whose definition is "RNK:<rank_method_code>"
        for sort_method in sorting_methods:
            definition = sorting_methods[sort_method]
            if definition.startswith('RNK') and \
                   definition.replace('RNK:','').strip().lower() == string.lower(rank_method_code):
                # delegate to the bucket-based sorter in ranking mode ('r')
                (solution_recs, solution_scores) = sort_records_bibsort(req, hitset_global, sort_method, '', sort_order, verbose, of, ln, rg, jrec, 'r')
                #return (solution_recs, solution_scores, '', '', '')
                comment = ''
                if verbose > 0:
                    # debug dump of the (recid, score) pairs
                    comment = 'find_citations retlist %s' %[[solution_recs[i], solution_scores[i]] for i in range(len(solution_recs))]
                return (solution_recs, solution_scores, '(', ')', comment)
    # fallback: classic bibrank ranking
    return rank_records_bibrank(rank_method_code, rank_limit_relevance, hitset_global, pattern, verbose)
def sort_records(req, recIDs, sort_field='', sort_order='d', sort_pattern='', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None):
    """Initial entry point for sorting records, acts like a dispatcher.
    (i) sort_field is in the bsrMETHOD, and thus, the BibSort has sorted the data for this field, so we can use the cache;
    (ii)sort_field is not in bsrMETHOD, and thus, the cache does not contain any information regarding this sorting method"""
    _ = gettext_set_language(ln)
    #we should return sorted records up to irec_max(exclusive)
    dummy, irec_max = get_interval_for_records_to_sort(len(recIDs), jrec, rg)
    #calculate the min index on the reverted list
    index_min = max(len(recIDs) - irec_max, 0) #just to be sure that the min index is not negative
    #bibsort does not handle sort_pattern for now, use bibxxx
    if sort_pattern:
        return sort_records_bibxxx(req, recIDs, None, sort_field, sort_order, sort_pattern, verbose, of, ln, rg, jrec)
    use_sorting_buckets = True
    if not CFG_BIBSORT_BUCKETS or not sorting_methods: #ignore the use of buckets, use old fashion sorting
        use_sorting_buckets = False
    # no sort field: sort by latest-first via buckets, or just slice the input
    if not sort_field:
        if use_sorting_buckets:
            return sort_records_bibsort(req, recIDs, 'latest first', sort_field, sort_order, verbose, of, ln, rg, jrec)
        else:
            return recIDs[index_min:]
    sort_fields = string.split(sort_field, ",")
    if len(sort_fields) == 1:
        # we have only one sorting_field, check if it is treated by BibSort
        for sort_method in sorting_methods:
            definition = sorting_methods[sort_method]
            if use_sorting_buckets and \
                   ((definition.startswith('FIELD') and \
                     definition.replace('FIELD:','').strip().lower() == string.lower(sort_fields[0])) or \
                    sort_method == sort_fields[0]):
                #use BibSort
                return sort_records_bibsort(req, recIDs, sort_method, sort_field, sort_order, verbose, of, ln, rg, jrec)
    #deduce sorting MARC tag out of the 'sort_field' argument:
    tags, error_field = get_tags_form_sort_fields(sort_fields)
    if error_field:
        # unknown sort field: fall back to latest-first or return unsorted
        if use_sorting_buckets:
            return sort_records_bibsort(req, recIDs, 'latest first', sort_field, sort_order, verbose, of, ln, rg, jrec)
        else:
            if of.startswith('h'):
                print_warning(req, _("Sorry, %s does not seem to be a valid sort option. The records will not be sorted.") % cgi.escape(error_field), "Error")
            return recIDs[index_min:]
    if tags:
        # check whether this exact tag list has a designated bibsort method
        for sort_method in sorting_methods:
            definition = sorting_methods[sort_method]
            if definition.startswith('MARC') \
                    and definition.replace('MARC:','').strip().split(',') == tags \
                    and use_sorting_buckets:
                #this list of tags have a designated method in BibSort, so use it
                return sort_records_bibsort(req, recIDs, sort_method, sort_field, sort_order, verbose, of, ln, rg, jrec)
        #we do not have this sort_field in BibSort tables -> do the old fashion sorting
        return sort_records_bibxxx(req, recIDs, tags, sort_field, sort_order, sort_pattern, verbose, of, ln, rg, jrec)
    return recIDs[index_min:]
def sort_records_bibsort(req, recIDs, sort_method, sort_field='', sort_order='d', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None, sort_or_rank = 's'):
    """This function orders the recIDs list, based on a sorting method(sort_field) using the BibSortDataCacher for speed.

    With sort_or_rank == 'r' it returns (recids, scores); otherwise a plain
    list of recids (tail of the reversed ordering, as print_records expects)."""
    _ = gettext_set_language(ln)
    #sanity check: unknown method -> fall back to bibrank / bibxxx sorting
    if sort_method not in sorting_methods:
        if sort_or_rank == 'r':
            return rank_records_bibrank(sort_method, 0, recIDs, None, verbose)
        else:
            return sort_records_bibxxx(req, recIDs, None, sort_field, sort_order, '', verbose, of, ln, rg, jrec)
    if verbose >= 3 and of.startswith('h'):
        print_warning(req, "Sorting (using BibSort cache) by method %s (definition %s)." \
                      % (cgi.escape(repr(sort_method)), cgi.escape(repr(sorting_methods[sort_method]))))
    #we should return sorted records up to irec_max(exclusive)
    dummy, irec_max = get_interval_for_records_to_sort(len(recIDs), jrec, rg)
    solution = intbitset([])
    input_recids = intbitset(recIDs)
    cache_sorted_data[sort_method].recreate_cache_if_needed()
    sort_cache = cache_sorted_data[sort_method].cache
    bucket_numbers = sort_cache['bucket_data'].keys()
    #check if all buckets have been constructed
    if len(bucket_numbers) != CFG_BIBSORT_BUCKETS:
        if verbose > 3 and of.startswith('h'):
            print_warning(req, "Not all buckets have been constructed.. switching to old fashion sorting.")
        if sort_or_rank == 'r':
            return rank_records_bibrank(sort_method, 0, recIDs, None, verbose)
        else:
            return sort_records_bibxxx(req, recIDs, None, sort_field, sort_order, '', verbose, of, ln, rg, jrec)
    if sort_order == 'd':
        bucket_numbers.reverse()
    # gather matching records bucket by bucket until we have enough
    for bucket_no in bucket_numbers:
        solution.union_update(input_recids & sort_cache['bucket_data'][bucket_no])
        if len(solution) >= irec_max:
            break
    dict_solution = {}
    missing_records = []
    for recid in solution:
        try:
            dict_solution[recid] = sort_cache['data_dict_ordered'][recid]
        except KeyError:
            #recid is in buckets, but not in the bsrMETHODDATA,
            #maybe because the value has been deleted, but the change has not yet been propagated to the buckets
            missing_records.append(recid)
    #check if there are recids that are not in any bucket -> to be added at the end/top, ordered by insertion date
    if len(solution) < irec_max:
        #some records have not been yet inserted in the bibsort structures
        #or, some records have no value for the sort_method
        missing_records = sorted(missing_records + list(input_recids.difference(solution)))
    #the records need to be sorted in reverse order for the print record function
    #the return statement should be equivalent with the following statements
    #(these are clearer, but less efficient, since they revert the same list twice)
    #sorted_solution = (missing_records + sorted(dict_solution, key=dict_solution.__getitem__, reverse=sort_order=='d'))[:irec_max]
    #sorted_solution.reverse()
    #return sorted_solution
    if sort_method.strip().lower().startswith('latest') and sort_order == 'd':
        # if we want to sort the records on their insertion date, add the missing records at the top
        solution = sorted(dict_solution, key=dict_solution.__getitem__, reverse=sort_order=='a') + missing_records
    else:
        solution = missing_records + sorted(dict_solution, key=dict_solution.__getitem__, reverse=sort_order=='a')
    #calculate the min index on the reverted list
    index_min = max(len(solution) - irec_max, 0) #just to be sure that the min index is not negative
    #return all the records up to irec_max, but on the reverted list
    if sort_or_rank == 'r':
        # we need the recids, with values
        return (solution[index_min:], [dict_solution.get(record, 0) for record in solution[index_min:]])
    else:
        return solution[index_min:]
def sort_records_bibxxx(req, recIDs, tags, sort_field='', sort_order='d', sort_pattern='', verbose=0, of='hb', ln=CFG_SITE_LANG, rg=None, jrec=None):
    """OLD FASHION SORTING WITH NO CACHE, for sort fields that are not run in BibSort
    Sort records in 'recIDs' list according sort field 'sort_field' in order 'sort_order'.
    If more than one instance of 'sort_field' is found for a given record, try to choose that that is given by
    'sort pattern', for example "sort by report number that starts by CERN-PS".
    Note that 'sort_field' can be field code like 'author' or MARC tag like '100__a' directly."""
    _ = gettext_set_language(ln)
    #we should return sorted records up to irec_max(exclusive)
    dummy, irec_max = get_interval_for_records_to_sort(len(recIDs), jrec, rg)
    #calculate the min index on the reverted list
    index_min = max(len(recIDs) - irec_max, 0) #just to be sure that the min index is not negative
    ## check arguments:
    if not sort_field:
        return recIDs[index_min:]
    if len(recIDs) > CFG_WEBSEARCH_NB_RECORDS_TO_SORT:
        # too many records to sort: warn and return the unsorted slice
        if of.startswith('h'):
            print_warning(req, _("Sorry, sorting is allowed on sets of up to %d records only. Using default sort order.") % CFG_WEBSEARCH_NB_RECORDS_TO_SORT, "Warning")
        return recIDs[index_min:]
    recIDs_dict = {}
    recIDs_out = []
    if not tags:
        # tags have not been computed yet
        sort_fields = string.split(sort_field, ",")
        tags, error_field = get_tags_form_sort_fields(sort_fields)
        if error_field:
            if of.startswith('h'):
                print_warning(req, _("Sorry, %s does not seem to be a valid sort option. The records will not be sorted.") % cgi.escape(error_field), "Error")
            return recIDs[index_min:]
    if verbose >= 3 and of.startswith('h'):
        print_warning(req, "Sorting by tags %s." % cgi.escape(repr(tags)))
        if sort_pattern:
            print_warning(req, "Sorting preferentially by %s." % cgi.escape(sort_pattern))
    ## check if we have sorting tag defined:
    if tags:
        # fetch the necessary field values:
        for recID in recIDs:
            val = "" # will hold value for recID according to which sort
            vals = [] # will hold all values found in sorting tag for recID
            for tag in tags:
                if CFG_CERN_SITE and tag == '773__c':
                    # CERN hack: journal sorting
                    # 773__c contains page numbers, e.g. 3-13, and we want to sort by 3, and numerically:
                    vals.extend(["%050s" % x.split("-",1)[0] for x in get_fieldvalues(recID, tag)])
                else:
                    vals.extend(get_fieldvalues(recID, tag))
            if sort_pattern:
                # try to pick that tag value that corresponds to sort pattern
                bingo = 0
                for v in vals:
                    if v.lower().startswith(sort_pattern.lower()): # bingo!
                        bingo = 1
                        val = v
                        break
                if not bingo: # sort_pattern not present, so add other vals after spaces
                    val = sort_pattern + " " + string.join(vals)
            else:
                # no sort pattern defined, so join them all together
                val = string.join(vals)
            val = strip_accents(val.lower()) # sort values regardless of accents and case
            if recIDs_dict.has_key(val):
                recIDs_dict[val].append(recID)
            else:
                recIDs_dict[val] = [recID]
        # sort them:
        recIDs_dict_keys = recIDs_dict.keys()
        recIDs_dict_keys.sort()
        # now that keys are sorted, create output array:
        for k in recIDs_dict_keys:
            for s in recIDs_dict[k]:
                recIDs_out.append(s)
        # ascending or descending?
        if sort_order == 'a':
            recIDs_out.reverse()
        # okay, we are done
        # return only up to the maximum that we need to sort
        if len(recIDs_out) != len(recIDs):
            dummy, irec_max = get_interval_for_records_to_sort(len(recIDs_out), jrec, rg)
            index_min = max(len(recIDs_out) - irec_max, 0) #just to be sure that the min index is not negative
        return recIDs_out[index_min:]
    else:
        # good, no sort needed
        return recIDs[index_min:]
def get_interval_for_records_to_sort(nb_found, jrec=None, rg=None):
    """calculates in which interval should the sorted records be
    a value of 'rg=-9999' means to print all records: to be used with care.

    Returns (irec_min, irec_max): records are sorted from irec_min up to
    irec_max excluded."""
    if not jrec:
        jrec = 1
    if not rg:
        # no range group given: return everything from jrec on
        return jrec - 1, nb_found
    if rg == -9999: # print all records
        rg = nb_found
    else:
        rg = abs(rg)
    # sanity checks on the start record index
    jrec = max(jrec, 1)
    if jrec > nb_found:
        jrec = max(nb_found - rg + 1, 1)
    # clamp the interval to [0, nb_found)
    irec_min = max(jrec - 1, 0)
    irec_max = min(irec_min + rg, nb_found)
    return irec_min, irec_max
def print_records(req, recIDs, jrec=1, rg=10, format='hb', ot='', ln=CFG_SITE_LANG, relevances=[], relevances_prologue="(", relevances_epilogue="%%)", decompress=zlib.decompress, search_pattern='', print_records_prologue_p=True, print_records_epilogue_p=True, verbose=0, tab='', sf='', so='d', sp='', rm=''):
"""
Prints list of records 'recIDs' formatted according to 'format' in
groups of 'rg' starting from 'jrec'.
Assumes that the input list 'recIDs' is sorted in reverse order,
so it counts records from tail to head.
A value of 'rg=-9999' means to print all records: to be used with care.
Print also list of RELEVANCES for each record (if defined), in
between RELEVANCE_PROLOGUE and RELEVANCE_EPILOGUE.
Print prologue and/or epilogue specific to 'format' if
'print_records_prologue_p' and/or print_records_epilogue_p' are
True.
'sf' is sort field and 'rm' is ranking method that are passed here
only for proper linking purposes: e.g. when a certain ranking
method or a certain sort field was selected, keep it selected in
any dynamic search links that may be printed.
"""
# load the right message language
_ = gettext_set_language(ln)
# sanity checking:
if req is None:
return
# get user_info (for formatting based on user)
if isinstance(req, cStringIO.OutputType):
user_info = {}
else:
user_info = collect_user_info(req)
if len(recIDs):
nb_found = len(recIDs)
if rg == -9999: # print all records
rg = nb_found
else:
rg = abs(rg)
if jrec < 1: # sanity checks
jrec = 1
if jrec > nb_found:
jrec = max(nb_found-rg+1, 1)
# will print records from irec_max to irec_min excluded:
irec_max = nb_found - jrec
irec_min = nb_found - jrec - rg
if irec_min < 0:
irec_min = -1
if irec_max >= nb_found:
irec_max = nb_found - 1
#req.write("%s:%d-%d" % (recIDs, irec_min, irec_max))
if format.startswith('x'):
# print header if needed
if print_records_prologue_p:
print_records_prologue(req, format)
# print records
recIDs_to_print = [recIDs[x] for x in range(irec_max, irec_min, -1)]
format_records(recIDs_to_print,
format,
ln=ln,
search_pattern=search_pattern,
record_separator="\n",
user_info=user_info,
req=req)
# print footer if needed
if print_records_epilogue_p:
print_records_epilogue(req, format)
elif format.startswith('t') or str(format[0:3]).isdigit():
# we are doing plain text output:
for irec in range(irec_max, irec_min, -1):
x = print_record(recIDs[irec], format, ot, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm)
req.write(x)
if x:
req.write('\n')
elif format == 'excel':
recIDs_to_print = [recIDs[x] for x in range(irec_max, irec_min, -1)]
create_excel(recIDs=recIDs_to_print, req=req, ln=ln, ot=ot)
else:
# we are doing HTML output:
if format == 'hp' or format.startswith("hb_") or format.startswith("hd_"):
# portfolio and on-the-fly formats:
for irec in range(irec_max, irec_min, -1):
req.write(print_record(recIDs[irec], format, ot, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm))
elif format.startswith("hb"):
# HTML brief format:
display_add_to_basket = True
if user_info:
if user_info['email'] == 'guest':
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS > 4:
display_add_to_basket = False
else:
if not user_info['precached_usebaskets']:
display_add_to_basket = False
req.write(websearch_templates.tmpl_record_format_htmlbrief_header(
ln = ln))
for irec in range(irec_max, irec_min, -1):
row_number = jrec+irec_max-irec
recid = recIDs[irec]
if relevances and relevances[irec]:
relevance = relevances[irec]
else:
relevance = ''
record = print_record(recIDs[irec], format, ot, ln, search_pattern=search_pattern,
user_info=user_info, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm)
req.write(websearch_templates.tmpl_record_format_htmlbrief_body(
ln = ln,
recid = recid,
row_number = row_number,
relevance = relevance,
record = record,
relevances_prologue = relevances_prologue,
relevances_epilogue = relevances_epilogue,
display_add_to_basket = display_add_to_basket
))
req.write(websearch_templates.tmpl_record_format_htmlbrief_footer(
ln = ln,
display_add_to_basket = display_add_to_basket))
elif format.startswith("hd"):
# HTML detailed format:
for irec in range(irec_max, irec_min, -1):
if record_exists(recIDs[irec]) == -1:
print_warning(req, _("The record has been deleted."))
merged_recid = get_merged_recid(recIDs[irec])
if merged_recid:
print_warning(req, _("The record %d replaces it." % merged_recid))
continue
unordered_tabs = get_detailed_page_tabs(get_colID(guess_primary_collection_of_a_record(recIDs[irec])),
recIDs[irec], ln=ln)
ordered_tabs_id = [(tab_id, values['order']) for (tab_id, values) in unordered_tabs.iteritems()]
ordered_tabs_id.sort(lambda x,y: cmp(x[1],y[1]))
link_ln = ''
if ln != CFG_SITE_LANG:
link_ln = '?ln=%s' % ln
recid = recIDs[irec]
recid_to_display = recid # Record ID used to build the URL.
if CFG_WEBSEARCH_USE_ALEPH_SYSNOS:
try:
recid_to_display = get_fieldvalues(recid,
CFG_BIBUPLOAD_EXTERNAL_SYSNO_TAG)[0]
except IndexError:
# No external sysno is available, keep using
# internal recid.
pass
tabs = [(unordered_tabs[tab_id]['label'], \
'%s/%s/%s/%s%s' % (CFG_SITE_URL, CFG_SITE_RECORD, recid_to_display, tab_id, link_ln), \
tab_id == tab,
unordered_tabs[tab_id]['enabled']) \
for (tab_id, order) in ordered_tabs_id
if unordered_tabs[tab_id]['visible'] == True]
tabs_counts = get_detailed_page_tabs_counts(recid)
citedbynum = tabs_counts['Citations']
references = tabs_counts['References']
discussions = tabs_counts['Discussions']
# load content
if tab == 'usage':
req.write(webstyle_templates.detailed_record_container_top(recIDs[irec],
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
r = calculate_reading_similarity_list(recIDs[irec], "downloads")
downloadsimilarity = None
downloadhistory = None
#if r:
# downloadsimilarity = r
if CFG_BIBRANK_SHOW_DOWNLOAD_GRAPHS:
downloadhistory = create_download_history_graph_and_box(recIDs[irec], ln)
r = calculate_reading_similarity_list(recIDs[irec], "pageviews")
viewsimilarity = None
if r: viewsimilarity = r
content = websearch_templates.tmpl_detailed_record_statistics(recIDs[irec],
ln,
downloadsimilarity=downloadsimilarity,
downloadhistory=downloadhistory,
viewsimilarity=viewsimilarity)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recIDs[irec],
tabs,
ln))
elif tab == 'citations':
recid = recIDs[irec]
req.write(webstyle_templates.detailed_record_container_top(recid,
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
req.write(websearch_templates.tmpl_detailed_record_citations_prologue(recid, ln))
# Citing
citinglist = calculate_cited_by_list(recid)
req.write(websearch_templates.tmpl_detailed_record_citations_citing_list(recid,
ln,
citinglist,
sf=sf,
so=so,
sp=sp,
rm=rm))
# Self-cited
selfcited = get_self_cited_by(recid)
req.write(websearch_templates.tmpl_detailed_record_citations_self_cited(recid,
ln, selfcited=selfcited, citinglist=citinglist))
# Co-cited
s = calculate_co_cited_with_list(recid)
cociting = None
if s:
cociting = s
req.write(websearch_templates.tmpl_detailed_record_citations_co_citing(recid,
ln,
cociting=cociting))
# Citation history, if needed
citationhistory = None
if citinglist:
citationhistory = create_citation_history_graph_and_box(recid, ln)
#debug
if verbose > 3:
print_warning(req, "Citation graph debug: " + \
str(len(citationhistory)))
req.write(websearch_templates.tmpl_detailed_record_citations_citation_history(recid, ln, citationhistory))
req.write(websearch_templates.tmpl_detailed_record_citations_epilogue(recid, ln))
req.write(webstyle_templates.detailed_record_container_bottom(recid,
tabs,
ln))
elif tab == 'references':
req.write(webstyle_templates.detailed_record_container_top(recIDs[irec],
tabs,
ln,
citationnum=citedbynum,
referencenum=references,
discussionnum=discussions))
req.write(format_record(recIDs[irec], 'HDREF', ln=ln, user_info=user_info, verbose=verbose))
req.write(webstyle_templates.detailed_record_container_bottom(recIDs[irec],
tabs,
ln))
elif tab == 'keywords':
import bibclassify_webinterface
recid = recIDs[irec]
bibclassify_webinterface.main_page(req, recid, tabs, ln, webstyle_templates)
elif tab == 'plots':
req.write(webstyle_templates.detailed_record_container_top(recIDs[irec],
tabs,
ln))
content = websearch_templates.tmpl_record_plots(recID=recIDs[irec],
ln=ln)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recIDs[irec],
tabs,
ln))
else:
# Metadata tab
req.write(webstyle_templates.detailed_record_container_top(recIDs[irec],
tabs,
ln,
show_short_rec_p=False,
citationnum=citedbynum, referencenum=references,
discussionnum=discussions))
creationdate = None
modificationdate = None
if record_exists(recIDs[irec]) == 1:
creationdate = get_creation_date(recIDs[irec])
modificationdate = get_modification_date(recIDs[irec])
content = print_record(recIDs[irec], format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm)
content = websearch_templates.tmpl_detailed_record_metadata(
recID = recIDs[irec],
ln = ln,
format = format,
creationdate = creationdate,
modificationdate = modificationdate,
content = content)
# display of the next-hit/previous-hit/back-to-search links
# on the detailed record pages
content += websearch_templates.tmpl_display_back_to_search(req,
recIDs[irec],
ln)
req.write(content)
req.write(webstyle_templates.detailed_record_container_bottom(recIDs[irec],
tabs,
ln,
creationdate=creationdate,
modificationdate=modificationdate,
show_short_rec_p=False))
if len(tabs) > 0:
# Add the mini box at bottom of the page
if CFG_WEBCOMMENT_ALLOW_REVIEWS:
from invenio.webcomment import get_mini_reviews
reviews = get_mini_reviews(recid = recIDs[irec], ln=ln)
else:
reviews = ''
actions = format_record(recIDs[irec], 'HDACT', ln=ln, user_info=user_info, verbose=verbose)
files = format_record(recIDs[irec], 'HDFILE', ln=ln, user_info=user_info, verbose=verbose)
req.write(webstyle_templates.detailed_record_mini_panel(recIDs[irec],
ln,
format,
files=files,
reviews=reviews,
actions=actions))
else:
# Other formats
for irec in range(irec_max, irec_min, -1):
req.write(print_record(recIDs[irec], format, ot, ln,
search_pattern=search_pattern,
user_info=user_info, verbose=verbose,
sf=sf, so=so, sp=sp, rm=rm))
else:
print_warning(req, _("Use different search terms."))
def print_records_prologue(req, format, cc=None):
    """
    Write the opening markup for a list of records in the given output
    format onto the request.  HTML and plain-text formats need no
    prologue, so nothing is written for them.
    """
    # Ordered prefix table: the specific two-letter prefixes must be
    # tried before the generic 'x' fallback.
    dispatch = (
        ('xm', websearch_templates.tmpl_xml_marc_prologue),
        ('xn', websearch_templates.tmpl_xml_nlm_prologue),
        ('xw', websearch_templates.tmpl_xml_refworks_prologue),
        ('xr', lambda: websearch_templates.tmpl_xml_rss_prologue(cc=cc)),
        ('xe', websearch_templates.tmpl_xml_endnote_prologue),
        ('xo', websearch_templates.tmpl_xml_mods_prologue),
        ('xp', lambda: websearch_templates.tmpl_xml_podcast_prologue(cc=cc)),
        ('x', websearch_templates.tmpl_xml_default_prologue),
    )
    prologue = "" # no prologue needed for HTML or Text formats
    for prefix, render in dispatch:
        if format.startswith(prefix):
            prologue = render()
            break
    req.write(prologue)
def print_records_epilogue(req, format):
    """
    Write the closing markup for a list of records in the given output
    format onto the request.  HTML and plain-text formats need no
    epilogue, so nothing is written for them.
    """
    # Ordered prefix table: the specific two-letter prefixes must be
    # tried before the generic 'x' fallback.
    dispatch = (
        ('xm', websearch_templates.tmpl_xml_marc_epilogue),
        ('xn', websearch_templates.tmpl_xml_nlm_epilogue),
        ('xw', websearch_templates.tmpl_xml_refworks_epilogue),
        ('xr', websearch_templates.tmpl_xml_rss_epilogue),
        ('xe', websearch_templates.tmpl_xml_endnote_epilogue),
        ('xo', websearch_templates.tmpl_xml_mods_epilogue),
        ('xp', websearch_templates.tmpl_xml_podcast_epilogue),
        ('x', websearch_templates.tmpl_xml_default_epilogue),
    )
    epilogue = "" # no epilogue needed for HTML or Text formats
    for prefix, render in dispatch:
        if format.startswith(prefix):
            epilogue = render()
            break
    req.write(epilogue)
def get_record(recid):
    """Return the record structure corresponding to the given recid.

    When serialized record structures are enabled, first try the
    pre-built 'recstruct' blob cached in the bibfmt table; if it is
    missing or corrupted, rebuild the structure from the MARCXML
    produced by print_record().
    """
    if CFG_BIBUPLOAD_SERIALIZE_RECORD_STRUCTURE:
        value = run_sql("SELECT value FROM bibfmt WHERE id_bibrec=%s AND FORMAT='recstruct'", (recid, ))
        if value:
            try:
                return deserialize_via_marshal(value[0][0])
            except Exception:
                # Narrowed from bare `except:` so KeyboardInterrupt /
                # SystemExit are not swallowed.
                ### In case of corruption, let's rebuild it!
                pass
    return create_record(print_record(recid, 'xm'))[0]
def print_record(recID, format='hb', ot='', ln=CFG_SITE_LANG, decompress=zlib.decompress,
                 search_pattern=None, user_info=None, verbose=0, sf='', so='d', sp='', rm=''):
    """
    Prints record 'recID' formatted according to 'format'.

    Parameters:
      recID    - ID of the record to format.
      format   - output format code, e.g. 'hb' (HTML brief, the default),
                 'hd' (HTML detailed), 'hm' (HTML MARC), 'xm' (MARCXML),
                 'xd'/'oai_dc' (Dublin Core), 'recstruct', 't...' (tag
                 display), a 6-character MARC tag spec, etc.
      ot       - output only these MARC tags (used by the tag-display
                 branches below).
      ln       - interface language for translated messages.
      decompress - function used to decompress cached bibfmt blobs.
      search_pattern - user query, forwarded to BibFormat (e.g. for
                 keyword highlighting).
      user_info - user-info dict; used for authorization checks (hidden
                 tags) and for the "claim this paper" link.
      verbose  - verbosity level forwarded to BibFormat.

    'sf' is sort field and 'rm' is ranking method that are passed here
    only for proper linking purposes: e.g. when a certain ranking
    method or a certain sort field was selected, keep it selected in
    any dynamic search links that may be printed.

    Returns the formatted record as a string; the empty string when the
    record does not exist.
    """
    if format == 'recstruct':
        return get_record(recID)
    _ = gettext_set_language(ln)
    # the "claim this paper" link is only shown if precached in user_info
    display_claim_this_paper = False
    try:
        display_claim_this_paper = user_info["precached_viewclaimlink"]
    except (KeyError, TypeError):
        display_claim_this_paper = False
    #check from user information if the user has the right to see hidden fields/tags in the
    #records as well
    can_see_hidden = (acc_authorize_action(user_info, 'runbibedit')[0] == 0)
    out = ""
    # sanity check:
    record_exist_p = record_exists(recID)
    if record_exist_p == 0: # doesn't exist
        return out
    # New Python BibFormat procedure for formatting
    # Old procedure follows further below
    # We must still check some special formats, but these
    # should disappear when BibFormat improves.
    if not (CFG_BIBFORMAT_USE_OLD_BIBFORMAT \
        or format.lower().startswith('t') \
        or format.lower().startswith('hm') \
        or str(format[0:3]).isdigit() \
        or ot):
        # Unspecified format is hd
        if format == '':
            format = 'hd'
        if record_exist_p == -1 and get_output_format_content_type(format) == 'text/html':
            # HTML output displays a default value for deleted records.
            # Other format have to deal with it.
            out += _("The record has been deleted.")
            # was record deleted-but-merged ?
            merged_recid = get_merged_recid(recID)
            if merged_recid:
                out += ' ' + _("The record %d replaces it." % merged_recid)
        else:
            out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
                                  user_info=user_info, verbose=verbose)
            # at the end of HTML brief mode, print the "Detailed record" functionality:
            if format.lower().startswith('hb') and \
                   format.lower() != 'hb_p':
                out += websearch_templates.tmpl_print_record_brief_links(ln=ln,
                                                                         recID=recID,
                                                                         sf=sf,
                                                                         so=so,
                                                                         sp=sp,
                                                                         rm=rm,
                                                                         display_claim_link=display_claim_this_paper)
        return out
    # Old PHP BibFormat procedure for formatting
    # print record opening tags, if needed:
    if format == "marcxml" or format == "oai_dc":
        out += " <record>\n"
        out += " <header>\n"
        for oai_id in get_fieldvalues(recID, CFG_OAI_ID_FIELD):
            out += " <identifier>%s</identifier>\n" % oai_id
        out += " <datestamp>%s</datestamp>\n" % get_modification_date(recID)
        out += " </header>\n"
        out += " <metadata>\n"
    if format.startswith("xm") or format == "marcxml":
        # look for detailed format existence:
        query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
        res = run_sql(query, (recID, format), 1)
        if res and record_exist_p == 1:
            # record 'recID' is formatted in 'format', so print it
            out += "%s" % decompress(res[0][0])
        else:
            # record 'recID' is not formatted in 'format' -- they are not in "bibfmt" table; so fetch all the data from "bibXXx" tables:
            if format == "marcxml":
                out += """ <record xmlns="http://www.loc.gov/MARC21/slim">\n"""
                out += " <controlfield tag=\"001\">%d</controlfield>\n" % int(recID)
            elif format.startswith("xm"):
                out += """ <record>\n"""
                out += " <controlfield tag=\"001\">%d</controlfield>\n" % int(recID)
            if record_exist_p == -1:
                # deleted record, so display only OAI ID and 980:
                oai_ids = get_fieldvalues(recID, CFG_OAI_ID_FIELD)
                if oai_ids:
                    out += "<datafield tag=\"%s\" ind1=\"%s\" ind2=\"%s\"><subfield code=\"%s\">%s</subfield></datafield>\n" % \
                           (CFG_OAI_ID_FIELD[0:3], CFG_OAI_ID_FIELD[3:4], CFG_OAI_ID_FIELD[4:5], CFG_OAI_ID_FIELD[5:6], oai_ids[0])
                out += "<datafield tag=\"980\" ind1=\"\" ind2=\"\"><subfield code=\"c\">DELETED</subfield></datafield>\n"
            else:
                # controlfields
                query = "SELECT b.tag,b.value,bb.field_number FROM bib00x AS b, bibrec_bib00x AS bb "\
                        "WHERE bb.id_bibrec=%s AND b.id=bb.id_bibxxx AND b.tag LIKE '00%%' "\
                        "ORDER BY bb.field_number, b.tag ASC"
                res = run_sql(query, (recID, ))
                for row in res:
                    field, value = row[0], row[1]
                    value = encode_for_xml(value)
                    out += """ <controlfield tag="%s" >%s</controlfield>\n""" % \
                           (encode_for_xml(field[0:3]), value)
                # datafields
                i = 1 # Do not process bib00x and bibrec_bib00x, as
                      # they are controlfields. So start at bib01x and
                      # bibrec_bib00x (and set i = 0 at the end of
                      # first loop)
                for digit1 in range(0, 10):
                    for digit2 in range(i, 10):
                        bx = "bib%d%dx" % (digit1, digit2)
                        bibx = "bibrec_bib%d%dx" % (digit1, digit2)
                        query = "SELECT b.tag,b.value,bb.field_number FROM %s AS b, %s AS bb "\
                                "WHERE bb.id_bibrec=%%s AND b.id=bb.id_bibxxx AND b.tag LIKE %%s"\
                                "ORDER BY bb.field_number, b.tag ASC" % (bx, bibx)
                        res = run_sql(query, (recID, str(digit1)+str(digit2)+'%'))
                        field_number_old = -999
                        field_old = ""
                        for row in res:
                            field, value, field_number = row[0], row[1], row[2]
                            ind1, ind2 = field[3], field[4]
                            if ind1 == "_" or ind1 == "":
                                ind1 = " "
                            if ind2 == "_" or ind2 == "":
                                ind2 = " "
                            # print field tag, unless hidden
                            printme = True
                            if not can_see_hidden:
                                for htag in CFG_BIBFORMAT_HIDDEN_TAGS:
                                    ltag = len(htag)
                                    samelenfield = field[0:ltag]
                                    if samelenfield == htag:
                                        printme = False
                            if printme:
                                if field_number != field_number_old or field[:-1] != field_old[:-1]:
                                    if field_number_old != -999:
                                        out += """ </datafield>\n"""
                                    out += """ <datafield tag="%s" ind1="%s" ind2="%s">\n""" % \
                                           (encode_for_xml(field[0:3]), encode_for_xml(ind1), encode_for_xml(ind2))
                                    field_number_old = field_number
                                    field_old = field
                                # print subfield value
                                value = encode_for_xml(value)
                                out += """ <subfield code="%s">%s</subfield>\n""" % \
                                       (encode_for_xml(field[-1:]), value)
                        # all fields/subfields printed in this run, so close the tag:
                        if field_number_old != -999:
                            out += """ </datafield>\n"""
                    i = 0 # Next loop should start looking at bib%0 and bibrec_bib00x
            # we are at the end of printing the record:
            out += " </record>\n"
    elif format == "xd" or format == "oai_dc":
        # XML Dublin Core format, possibly OAI -- select only some bibXXx fields:
        out += """ <dc xmlns="http://purl.org/dc/elements/1.1/"
        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
        xsi:schemaLocation="http://purl.org/dc/elements/1.1/
        http://www.openarchives.org/OAI/1.1/dc.xsd">\n"""
        if record_exist_p == -1:
            out += ""
        else:
            for f in get_fieldvalues(recID, "041__a"):
                out += " <language>%s</language>\n" % f
            for f in get_fieldvalues(recID, "100__a"):
                out += " <creator>%s</creator>\n" % encode_for_xml(f)
            for f in get_fieldvalues(recID, "700__a"):
                out += " <creator>%s</creator>\n" % encode_for_xml(f)
            for f in get_fieldvalues(recID, "245__a"):
                out += " <title>%s</title>\n" % encode_for_xml(f)
            for f in get_fieldvalues(recID, "65017a"):
                out += " <subject>%s</subject>\n" % encode_for_xml(f)
            for f in get_fieldvalues(recID, "8564_u"):
                # NOTE(review): f.split('.') yields a list, so comparing it
                # to the string 'png' is always False and this `continue`
                # never fires -- probably f.split('.')[-1] == 'png' was
                # intended; confirm before changing behavior.
                if f.split('.') == 'png':
                    continue
                out += " <identifier>%s</identifier>\n" % encode_for_xml(f)
            for f in get_fieldvalues(recID, "520__a"):
                out += " <description>%s</description>\n" % encode_for_xml(f)
            out += " <date>%s</date>\n" % get_creation_date(recID)
        out += " </dc>\n"
    elif len(format) == 6 and str(format[0:3]).isdigit():
        # user has asked to print some fields only
        # NOTE(review): format == "001" is unreachable here, since this
        # branch requires len(format) == 6 -- dead code to double-check.
        if format == "001":
            out += "<!--%s-begin-->%s<!--%s-end-->\n" % (format, recID, format)
        else:
            vals = get_fieldvalues(recID, format)
            for val in vals:
                out += "<!--%s-begin-->%s<!--%s-end-->\n" % (format, val, format)
    elif format.startswith('t'):
        ## user directly asked for some tags to be displayed only
        if record_exist_p == -1:
            out += get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden)
        else:
            out += get_fieldvalues_alephseq_like(recID, ot, can_see_hidden)
    elif format == "hm":
        # HTML MARC display (ALEPH-sequential-like, HTML-escaped)
        if record_exist_p == -1:
            out += "\n<pre>" + cgi.escape(get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden)) + "</pre>"
        else:
            out += "\n<pre>" + cgi.escape(get_fieldvalues_alephseq_like(recID, ot, can_see_hidden)) + "</pre>"
    elif format.startswith("h") and ot:
        ## user directly asked for some tags to be displayed only
        if record_exist_p == -1:
            out += "\n<pre>" + get_fieldvalues_alephseq_like(recID, ["001", CFG_OAI_ID_FIELD, "980"], can_see_hidden) + "</pre>"
        else:
            out += "\n<pre>" + get_fieldvalues_alephseq_like(recID, ot, can_see_hidden) + "</pre>"
    elif format == "hd":
        # HTML detailed format
        if record_exist_p == -1:
            out += _("The record has been deleted.")
        else:
            # look for detailed format existence:
            query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
            res = run_sql(query, (recID, format), 1)
            if res:
                # record 'recID' is formatted in 'format', so print it
                out += "%s" % decompress(res[0][0])
            else:
                # record 'recID' is not formatted in 'format', so try to call BibFormat on the fly or use default format:
                out_record_in_format = call_bibformat(recID, format, ln, search_pattern=search_pattern,
                                                      user_info=user_info, verbose=verbose)
                if out_record_in_format:
                    out += out_record_in_format
                else:
                    out += websearch_templates.tmpl_print_record_detailed(
                             ln = ln,
                             recID = recID,
                           )
    elif format.startswith("hb_") or format.startswith("hd_"):
        # underscore means that HTML brief/detailed formats should be called on-the-fly; suitable for testing formats
        if record_exist_p == -1:
            out += _("The record has been deleted.")
        else:
            out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
                                  user_info=user_info, verbose=verbose)
    elif format.startswith("hx"):
        # BibTeX format, called on the fly:
        if record_exist_p == -1:
            out += _("The record has been deleted.")
        else:
            out += call_bibformat(recID, format, ln, search_pattern=search_pattern,
                                  user_info=user_info, verbose=verbose)
    elif format.startswith("hs"):
        # for citation/download similarity navigation links:
        if record_exist_p == -1:
            out += _("The record has been deleted.")
        else:
            out += '<a href="%s">' % websearch_templates.build_search_url(recid=recID, ln=ln)
            # firstly, title:
            titles = get_fieldvalues(recID, "245__a")
            if titles:
                for title in titles:
                    out += "<strong>%s</strong>" % title
            else:
                # usual title not found, try conference title:
                titles = get_fieldvalues(recID, "111__a")
                if titles:
                    for title in titles:
                        out += "<strong>%s</strong>" % title
                else:
                    # just print record ID:
                    out += "<strong>%s %d</strong>" % (get_field_i18nname("record ID", ln, False), recID)
            out += "</a>"
            # secondly, authors:
            authors = get_fieldvalues(recID, "100__a") + get_fieldvalues(recID, "700__a")
            if authors:
                out += " - %s" % authors[0]
                if len(authors) > 1:
                    out += " <em>et al</em>"
            # thirdly publication info:
            publinfos = get_fieldvalues(recID, "773__s")
            if not publinfos:
                publinfos = get_fieldvalues(recID, "909C4s")
                if not publinfos:
                    publinfos = get_fieldvalues(recID, "037__a")
                    if not publinfos:
                        publinfos = get_fieldvalues(recID, "088__a")
            if publinfos:
                out += " - %s" % publinfos[0]
            else:
                # fourthly publication year (if not publication info):
                years = get_fieldvalues(recID, "773__y")
                if not years:
                    years = get_fieldvalues(recID, "909C4y")
                    if not years:
                        years = get_fieldvalues(recID, "260__c")
                if years:
                    out += " (%s)" % years[0]
    else:
        # HTML brief format by default
        if record_exist_p == -1:
            out += _("The record has been deleted.")
        else:
            query = "SELECT value FROM bibfmt WHERE id_bibrec=%s AND format=%s"
            res = run_sql(query, (recID, format))
            if res:
                # record 'recID' is formatted in 'format', so print it
                out += "%s" % decompress(res[0][0])
            else:
                # record 'recID' is not formatted in 'format', so try to call BibFormat on the fly: or use default format:
                if CFG_WEBSEARCH_CALL_BIBFORMAT:
                    out_record_in_format = call_bibformat(recID, format, ln, search_pattern=search_pattern,
                                                          user_info=user_info, verbose=verbose)
                    if out_record_in_format:
                        out += out_record_in_format
                    else:
                        out += websearch_templates.tmpl_print_record_brief(
                                 ln = ln,
                                 recID = recID,
                               )
                else:
                    out += websearch_templates.tmpl_print_record_brief(
                             ln = ln,
                             recID = recID,
                           )
            # at the end of HTML brief mode, print the "Detailed record" functionality:
            if format == 'hp' or format.startswith("hb_") or format.startswith("hd_"):
                pass # do nothing for portfolio and on-the-fly formats
            else:
                out += websearch_templates.tmpl_print_record_brief_links(ln=ln,
                                                                         recID=recID,
                                                                         sf=sf,
                                                                         so=so,
                                                                         sp=sp,
                                                                         rm=rm,
                                                                         display_claim_link=display_claim_this_paper)
    # print record closing tags, if needed:
    if format == "marcxml" or format == "oai_dc":
        out += " </metadata>\n"
        out += " </record>\n"
    return out
def call_bibformat(recID, format="HD", ln=CFG_SITE_LANG, search_pattern=None, user_info=None, verbose=0):
    """
    Calls BibFormat and returns formatted record.
    BibFormat will decide by itself if old or new BibFormat must be used.
    """
    from invenio.bibformat_utils import get_pdf_snippets
    # Collect the positive fulltext search terms from the query pattern.
    search_terms = []
    if search_pattern is not None:
        for unit in create_basic_search_units(None, str(search_pattern), None):
            if unit[0] != '-' and unit[2] in [None, 'fulltext']:
                search_terms.append(unit[1])
    formatted = format_record(recID,
                              of=format,
                              ln=ln,
                              search_pattern=search_terms,
                              user_info=user_info,
                              verbose=verbose)
    # check snippets only if URL contains fulltext
    # FIXME: make it work for CLI too, via new function arg
    if CFG_WEBSEARCH_FULLTEXT_SNIPPETS and user_info and \
           'fulltext' in user_info['uri']:
        if search_terms:
            extra = get_pdf_snippets(recID, search_terms)
            if extra:
                formatted += extra
    return formatted
def log_query(hostname, query_args, uid=-1):
    """
    Log query into the query and user_query tables.

    Parameters:
      hostname   - client host name recorded alongside the query.
      query_args - URL argument string identifying the query.
      uid        - user ID; the query is logged only when uid >= 0.

    Return id_query or None in case of problems.
    """
    id_query = None
    if uid >= 0:
        # log the query only if uid is reasonable
        res = run_sql("SELECT id FROM query WHERE urlargs=%s", (query_args,), 1)
        try:
            id_query = res[0][0]
        except (IndexError, TypeError):
            # query not seen before (empty/None result set): insert it now.
            # Narrowed from bare `except:` so unrelated errors propagate.
            id_query = run_sql("INSERT INTO query (type, urlargs) VALUES ('r', %s)", (query_args,))
        if id_query:
            run_sql("INSERT INTO user_query (id_user, id_query, hostname, date) VALUES (%s, %s, %s, %s)",
                    (uid, id_query, hostname,
                     time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())))
    return id_query
def log_query_info(action, p, f, colls, nb_records_found_total=-1):
    """Write some info to the log file for later analysis.

    The line layout is:
    timestamp#action#pattern#field#coll1,coll2,...#nb_hits
    Logging is best-effort and must never break a search, so all errors
    are silently ignored.
    """
    try:
        # 'with' guarantees the file is closed even if a write fails,
        # and ",".join handles an empty collection list without raising
        # (the old colls[-1] access crashed mid-line on empty input).
        with open(CFG_LOGDIR + "/search.log", "a") as log:
            log.write(time.strftime("%Y%m%d%H%M%S#", time.localtime()))
            log.write(action + "#")
            log.write(p + "#")
            log.write(f + "#")
            log.write(",".join(colls) + "#")
            log.write("%d" % nb_records_found_total)
            log.write("\n")
    except Exception:
        # best-effort: e.g. unwritable log directory; narrowed from bare
        # `except:` so KeyboardInterrupt/SystemExit still propagate.
        pass
    return
### CALLABLES
def perform_request_search(req=None, cc=CFG_SITE_NAME, c=None, p="", f="", rg=CFG_WEBSEARCH_DEF_RECORDS_IN_GROUPS, sf="", so="d", sp="", rm="", of="id", ot="", aas=0,
p1="", f1="", m1="", op1="", p2="", f2="", m2="", op2="", p3="", f3="", m3="", sc=0, jrec=0,
recid=-1, recidb=-1, sysno="", id=-1, idb=-1, sysnb="", action="", d1="",
d1y=0, d1m=0, d1d=0, d2="", d2y=0, d2m=0, d2d=0, dt="", verbose=0, ap=0, ln=CFG_SITE_LANG, ec=None, tab="", wl=0):
"""Perform search or browse request, without checking for
authentication. Return list of recIDs found, if of=id.
Otherwise create web page.
The arguments are as follows:
req - mod_python Request class instance.
cc - current collection (e.g. "ATLAS"). The collection the
user started to search/browse from.
c - collection list (e.g. ["Theses", "Books"]). The
collections user may have selected/deselected when
starting to search from 'cc'.
p - pattern to search for (e.g. "ellis and muon or kaon").
f - field to search within (e.g. "author").
rg - records in groups of (e.g. "10"). Defines how many hits
per collection in the search results page are
displayed.
sf - sort field (e.g. "title").
so - sort order ("a"=ascending, "d"=descending).
sp - sort pattern (e.g. "CERN-") -- in case there are more
values in a sort field, this argument tells which one
to prefer
rm - ranking method (e.g. "jif"). Defines whether results
should be ranked by some known ranking method.
of - output format (e.g. "hb"). Usually starting "h" means
HTML output (and "hb" for HTML brief, "hd" for HTML
detailed), "x" means XML output, "t" means plain text
output, "id" means no output at all but to return list
of recIDs found. (Suitable for high-level API.)
ot - output only these MARC tags (e.g. "100,700,909C0b").
Useful if only some fields are to be shown in the
output, e.g. for library to control some fields.
aas - advanced search ("0" means no, "1" means yes). Whether
search was called from within the advanced search
interface.
p1 - first pattern to search for in the advanced search
interface. Much like 'p'.
f1 - first field to search within in the advanced search
interface. Much like 'f'.
m1 - first matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
op1 - first operator, to join the first and the second unit
in the advanced search interface. ("a" add, "o" or,
"n" not).
p2 - second pattern to search for in the advanced search
interface. Much like 'p'.
f2 - second field to search within in the advanced search
interface. Much like 'f'.
m2 - second matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
op2 - second operator, to join the second and the third unit
in the advanced search interface. ("a" add, "o" or,
"n" not).
p3 - third pattern to search for in the advanced search
interface. Much like 'p'.
f3 - third field to search within in the advanced search
interface. Much like 'f'.
m3 - third matching type in the advanced search interface.
("a" all of the words, "o" any of the words, "e" exact
phrase, "p" partial phrase, "r" regular expression).
sc - split by collection ("0" no, "1" yes). Governs whether
we want to present the results in a single huge list,
or splitted by collection.
jrec - jump to record (e.g. "234"). Used for navigation
inside the search results.
recid - display record ID (e.g. "20000"). Do not
search/browse but go straight away to the Detailed
record page for the given recID.
recidb - display record ID bis (e.g. "20010"). If greater than
'recid', then display records from recid to recidb.
Useful for example for dumping records from the
database for reformatting.
sysno - display old system SYS number (e.g. ""). If you
migrate to Invenio from another system, and store your
old SYS call numbers, you can use them instead of recid
if you wish so.
id - the same as recid, in case recid is not set. For
backwards compatibility.
idb - the same as recid, in case recidb is not set. For
backwards compatibility.
sysnb - the same as sysno, in case sysno is not set. For
backwards compatibility.
action - action to do. "SEARCH" for searching, "Browse" for
browsing. Default is to search.
d1 - first datetime in full YYYY-mm-dd HH:MM:DD format
(e.g. "1998-08-23 12:34:56"). Useful for search limits
on creation/modification date (see 'dt' argument
below). Note that 'd1' takes precedence over d1y, d1m,
d1d if these are defined.
d1y - first date's year (e.g. "1998"). Useful for search
limits on creation/modification date.
d1m - first date's month (e.g. "08"). Useful for search
limits on creation/modification date.
d1d - first date's day (e.g. "23"). Useful for search
limits on creation/modification date.
d2 - second datetime in full YYYY-mm-dd HH:MM:DD format
(e.g. "1998-09-02 12:34:56"). Useful for search limits
on creation/modification date (see 'dt' argument
below). Note that 'd2' takes precedence over d2y, d2m,
d2d if these are defined.
d2y - second date's year (e.g. "1998"). Useful for search
limits on creation/modification date.
d2m - second date's month (e.g. "09"). Useful for search
limits on creation/modification date.
d2d - second date's day (e.g. "02"). Useful for search
limits on creation/modification date.
dt - first and second date's type (e.g. "c"). Specifies
whether to search in creation dates ("c") or in
modification dates ("m"). When dt is not set and d1*
and d2* are set, the default is "c".
verbose - verbose level (0=min, 9=max). Useful to print some
internal information on the searching process in case
something goes wrong.
ap - alternative patterns (0=no, 1=yes). In case no exact
match is found, the search engine can try alternative
patterns e.g. to replace non-alphanumeric characters by
a boolean query. ap defines if this is wanted.
ln - language of the search interface (e.g. "en"). Useful
for internationalization.
ec - list of external search engines to search as well
(e.g. "SPIRES HEP").
wl - wildcard limit (ex: 100) the wildcard queries will be
limited at 100 results
"""
selected_external_collections_infos = None
# wash output format:
of = wash_output_format(of)
# raise an exception when trying to print out html from the cli
if of.startswith("h"):
assert req
# for every search engine request asking for an HTML output, we
# first regenerate cache of collection and field I18N names if
# needed; so that later we won't bother checking timestamps for
# I18N names at all:
if of.startswith("h"):
collection_i18nname_cache.recreate_cache_if_needed()
field_i18nname_cache.recreate_cache_if_needed()
# wash all arguments requiring special care
try:
(cc, colls_to_display, colls_to_search, hosted_colls, wash_colls_debug) = wash_colls(cc, c, sc, verbose) # which colls to search and to display?
except InvenioWebSearchUnknownCollectionError, exc:
colname = exc.colname
if of.startswith("h"):
page_start(req, of, cc, aas, ln, getUid(req),
websearch_templates.tmpl_collection_not_found_page_title(colname, ln))
req.write(websearch_templates.tmpl_collection_not_found_page_body(colname, ln))
return page_end(req, of, ln)
elif of == "id":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln)
else:
return page_end(req, of, ln)
p = wash_pattern(p)
f = wash_field(f)
p1 = wash_pattern(p1)
f1 = wash_field(f1)
p2 = wash_pattern(p2)
f2 = wash_field(f2)
p3 = wash_pattern(p3)
f3 = wash_field(f3)
datetext1, datetext2 = wash_dates(d1, d1y, d1m, d1d, d2, d2y, d2m, d2d)
# wash ranking method:
if not is_method_valid(None, rm):
rm = ""
_ = gettext_set_language(ln)
# backwards compatibility: id, idb, sysnb -> recid, recidb, sysno (if applicable)
if sysnb != "" and sysno == "":
sysno = sysnb
if id > 0 and recid == -1:
recid = id
if idb > 0 and recidb == -1:
recidb = idb
# TODO deduce passed search limiting criterias (if applicable)
pl, pl_in_url = "", "" # no limits by default
if action != "browse" and req and not isinstance(req, cStringIO.OutputType) \
and req.args: # we do not want to add options while browsing or while calling via command-line
fieldargs = cgi.parse_qs(req.args)
for fieldcode in get_fieldcodes():
if fieldargs.has_key(fieldcode):
for val in fieldargs[fieldcode]:
pl += "+%s:\"%s\" " % (fieldcode, val)
pl_in_url += "&%s=%s" % (urllib.quote(fieldcode), urllib.quote(val))
# deduce recid from sysno argument (if applicable):
if sysno: # ALEPH SYS number was passed, so deduce DB recID for the record:
recid = get_mysql_recid_from_aleph_sysno(sysno)
if recid is None:
recid = 0 # use recid 0 to indicate that this sysno does not exist
# deduce collection we are in (if applicable):
if recid > 0:
referer = None
if req:
referer = req.headers_in.get('Referer')
cc = guess_collection_of_a_record(recid, referer)
# deduce user id (if applicable):
try:
uid = getUid(req)
except:
uid = 0
## 0 - start output
if recid >= 0: # recid can be 0 if deduced from sysno and if such sysno does not exist
## 1 - detailed record display
title, description, keywords = \
websearch_templates.tmpl_record_page_header_content(req, recid, ln)
if req is not None and not req.header_only:
page_start(req, of, cc, aas, ln, uid, title, description, keywords, recid, tab)
# Default format is hb but we are in detailed -> change 'of'
if of == "hb":
of = "hd"
if record_exists(recid):
if recidb <= recid: # sanity check
recidb = recid + 1
if of == "id":
return [recidx for recidx in range(recid, recidb) if record_exists(recidx)]
else:
print_records(req, range(recid, recidb), -1, -9999, of, ot, ln, search_pattern=p, verbose=verbose, tab=tab, sf=sf, so=so, sp=sp, rm=rm)
if req and of.startswith("h"): # register detailed record page view event
client_ip_address = str(req.remote_ip)
register_page_view_event(recid, uid, client_ip_address)
else: # record does not exist
if of == "id":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
elif of.startswith("h"):
if req.header_only:
raise apache.SERVER_RETURN, apache.HTTP_NOT_FOUND
else:
print_warning(req, _("Requested record does not seem to exist."))
elif action == "browse":
## 2 - browse needed
of = 'hb'
page_start(req, of, cc, aas, ln, uid, _("Browse"), p=create_page_title_search_pattern_info(p, p1, p2, p3))
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action))
try:
if aas == 1 or (p1 or p2 or p3):
browse_pattern(req, colls_to_search, p1, f1, rg, ln)
browse_pattern(req, colls_to_search, p2, f2, rg, ln)
browse_pattern(req, colls_to_search, p3, f3, rg, ln)
else:
browse_pattern(req, colls_to_search, p, f, rg, ln)
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln)
elif rm and p.startswith("recid:"):
## 3-ter - similarity search (or old-style citation search) needed
if req and not req.header_only:
page_start(req, of, cc, aas, ln, uid, _("Search Results"), p=create_page_title_search_pattern_info(p, p1, p2, p3))
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action))
if record_exists(p[6:]) != 1:
# record does not exist
if of.startswith("h"):
if req.header_only:
raise apache.SERVER_RETURN, apache.HTTP_NOT_FOUND
else:
print_warning(req, _("Requested record does not seem to exist."))
if of == "id":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
# record well exists, so find similar ones to it
t1 = os.times()[4]
results_similar_recIDs, results_similar_relevances, results_similar_relevances_prologue, results_similar_relevances_epilogue, results_similar_comments = \
rank_records_bibrank(rm, 0, get_collection_reclist(cc), string.split(p), verbose)
if results_similar_recIDs:
t2 = os.times()[4]
cpu_time = t2 - t1
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, cc, len(results_similar_recIDs),
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
print_warning(req, results_similar_comments)
print_records(req, results_similar_recIDs, jrec, rg, of, ot, ln,
results_similar_relevances, results_similar_relevances_prologue, results_similar_relevances_epilogue, search_pattern=p, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm)
elif of=="id":
return results_similar_recIDs
elif of.startswith("x"):
print_records(req, results_similar_recIDs, jrec, rg, of, ot, ln,
results_similar_relevances, results_similar_relevances_prologue, results_similar_relevances_epilogue, search_pattern=p, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm)
else:
# rank_records failed and returned some error message to display:
if of.startswith("h"):
print_warning(req, results_similar_relevances_prologue)
print_warning(req, results_similar_relevances_epilogue)
print_warning(req, results_similar_comments)
if of == "id":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
elif p.startswith("cocitedwith:"): #WAS EXPERIMENTAL
## 3-terter - cited by search needed
page_start(req, of, cc, aas, ln, uid, _("Search Results"), p=create_page_title_search_pattern_info(p, p1, p2, p3))
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action))
recID = p[12:]
if record_exists(recID) != 1:
# record does not exist
if of.startswith("h"):
print_warning(req, _("Requested record does not seem to exist."))
if of == "id":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
# record well exists, so find co-cited ones:
t1 = os.times()[4]
results_cocited_recIDs = map(lambda x: x[0], calculate_co_cited_with_list(int(recID)))
if results_cocited_recIDs:
t2 = os.times()[4]
cpu_time = t2 - t1
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, CFG_SITE_NAME, len(results_cocited_recIDs),
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
print_records(req, results_cocited_recIDs, jrec, rg, of, ot, ln, search_pattern=p, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm)
elif of=="id":
return results_cocited_recIDs
elif of.startswith("x"):
print_records(req, results_cocited_recIDs, jrec, rg, of, ot, ln, search_pattern=p, verbose=verbose, sf=sf, so=so, sp=sp, rm=rm)
else:
# cited rank_records failed and returned some error message to display:
if of.startswith("h"):
print_warning(req, "nothing found")
if of == "id":
return []
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
## 3 - common search needed
query_in_cache = False
query_representation_in_cache = repr((p,f,colls_to_search, wl))
page_start(req, of, cc, aas, ln, uid, p=create_page_title_search_pattern_info(p, p1, p2, p3))
if of.startswith("h") and verbose and wash_colls_debug:
print_warning(req, "wash_colls debugging info : %s" % wash_colls_debug)
# search into the hosted collections only if the output format is html or xml
if hosted_colls and (of.startswith("h") or of.startswith("x")) and not p.startswith("recid:"):
# hosted_colls_results : the hosted collections' searches that did not timeout
# hosted_colls_timeouts : the hosted collections' searches that timed out and will be searched later on again
(hosted_colls_results, hosted_colls_timeouts) = calculate_hosted_collections_results(req, [p, p1, p2, p3], f, hosted_colls, verbose, ln, CFG_HOSTED_COLLECTION_TIMEOUT_ANTE_SEARCH)
# successful searches
if hosted_colls_results:
hosted_colls_true_results = []
for result in hosted_colls_results:
# if the number of results is None or 0 (or False) then just do nothing
if result[1] == None or result[1] == False:
# these are the searches the returned no or zero results
if verbose:
print_warning(req, "Hosted collections (perform_search_request): %s returned no results" % result[0][1].name)
else:
# these are the searches that actually returned results on time
hosted_colls_true_results.append(result)
if verbose:
print_warning(req, "Hosted collections (perform_search_request): %s returned %s results in %s seconds" % (result[0][1].name, result[1], result[2]))
else:
if verbose:
print_warning(req, "Hosted collections (perform_search_request): there were no hosted collections results to be printed at this time")
if hosted_colls_timeouts:
if verbose:
for timeout in hosted_colls_timeouts:
print_warning(req, "Hosted collections (perform_search_request): %s timed out and will be searched again later" % timeout[0][1].name)
# we need to know for later use if there were any hosted collections to be searched even if they weren't in the end
elif hosted_colls and ((not (of.startswith("h") or of.startswith("x"))) or p.startswith("recid:")):
(hosted_colls_results, hosted_colls_timeouts) = (None, None)
else:
if verbose:
print_warning(req, "Hosted collections (perform_search_request): there were no hosted collections to be searched")
## let's define some useful boolean variables:
# True means there are actual or potential hosted collections results to be printed
hosted_colls_actual_or_potential_results_p = not (not hosted_colls or not ((hosted_colls_results and hosted_colls_true_results) or hosted_colls_timeouts))
# True means there are hosted collections timeouts to take care of later
# (useful for more accurate printing of results later)
hosted_colls_potential_results_p = not (not hosted_colls or not hosted_colls_timeouts)
# True means we only have hosted collections to deal with
only_hosted_colls_actual_or_potential_results_p = not colls_to_search and hosted_colls_actual_or_potential_results_p
if of.startswith("h"):
req.write(create_search_box(cc, colls_to_display, p, f, rg, sf, so, sp, rm, of, ot, aas, ln, p1, f1, m1, op1,
p2, f2, m2, op2, p3, f3, m3, sc, pl, d1y, d1m, d1d, d2y, d2m, d2d, dt, jrec, ec, action))
t1 = os.times()[4]
results_in_any_collection = intbitset()
if aas == 1 or (p1 or p2 or p3):
## 3A - advanced search
try:
results_in_any_collection = search_pattern_parenthesised(req, p1, f1, m1, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl)
if len(results_in_any_collection) == 0:
if of.startswith("h"):
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln)
if p2:
results_tmp = search_pattern_parenthesised(req, p2, f2, m2, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl)
if op1 == "a": # add
results_in_any_collection.intersection_update(results_tmp)
elif op1 == "o": # or
results_in_any_collection.union_update(results_tmp)
elif op1 == "n": # not
results_in_any_collection.difference_update(results_tmp)
else:
if of.startswith("h"):
print_warning(req, "Invalid set operation %s." % cgi.escape(op1), "Error")
if len(results_in_any_collection) == 0:
if of.startswith("h"):
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln)
if p3:
results_tmp = search_pattern_parenthesised(req, p3, f3, m3, ap=ap, of=of, verbose=verbose, ln=ln, wl=wl)
if op2 == "a": # add
results_in_any_collection.intersection_update(results_tmp)
elif op2 == "o": # or
results_in_any_collection.union_update(results_tmp)
elif op2 == "n": # not
results_in_any_collection.difference_update(results_tmp)
else:
if of.startswith("h"):
print_warning(req, "Invalid set operation %s." % cgi.escape(op2), "Error")
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln)
else:
## 3B - simple search
if search_results_cache.cache.has_key(query_representation_in_cache):
# query is not in the cache already, so reuse it:
query_in_cache = True
results_in_any_collection = search_results_cache.cache[query_representation_in_cache]
if verbose and of.startswith("h"):
print_warning(req, "Search stage 0: query found in cache, reusing cached results.")
else:
try:
# added the display_nearest_terms_box parameter to avoid printing out the "Nearest terms in any collection"
# recommendations when there are results only in the hosted collections. Also added the if clause to avoid
# searching in case we know we only have actual or potential hosted collections results
if not only_hosted_colls_actual_or_potential_results_p:
results_in_any_collection = search_pattern_parenthesised(req, p, f, ap=ap, of=of, verbose=verbose, ln=ln, display_nearest_terms_box=not hosted_colls_actual_or_potential_results_p, wl=wl)
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
return page_end(req, of, ln)
if len(results_in_any_collection) == 0 and not hosted_colls_actual_or_potential_results_p:
if of.startswith("h"):
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln)
# store this search query results into search results cache if needed:
if CFG_WEBSEARCH_SEARCH_CACHE_SIZE and not query_in_cache:
if len(search_results_cache.cache) > CFG_WEBSEARCH_SEARCH_CACHE_SIZE:
search_results_cache.clear()
search_results_cache.cache[query_representation_in_cache] = results_in_any_collection
if verbose and of.startswith("h"):
print_warning(req, "Search stage 3: storing query results in cache.")
# search stage 4: intersection with collection universe:
try:
# added the display_nearest_terms_box parameter to avoid printing out the "Nearest terms in any collection"
# recommendations when there results only in the hosted collections. Also added the if clause to avoid
# searching in case we know since the last stage that we have no results in any collection
if len(results_in_any_collection) != 0:
results_final = intersect_results_with_collrecs(req, results_in_any_collection, colls_to_search, ap, of, verbose, ln, display_nearest_terms_box=not hosted_colls_actual_or_potential_results_p)
else:
results_final = {}
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
return page_end(req, of, ln)
if results_final == {} and not hosted_colls_actual_or_potential_results_p:
if of.startswith("h"):
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
if of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln)
# search stage 5: apply search option limits and restrictions:
if datetext1 != "" and results_final != {}:
if verbose and of.startswith("h"):
print_warning(req, "Search stage 5: applying time etc limits, from %s until %s..." % (datetext1, datetext2))
try:
results_final = intersect_results_with_hitset(req,
results_final,
search_unit_in_bibrec(datetext1, datetext2, dt),
ap,
aptext= _("No match within your time limits, "
"discarding this condition..."),
of=of)
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
return page_end(req, of, ln)
if results_final == {} and not hosted_colls_actual_or_potential_results_p:
if of.startswith("h"):
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
#if of.startswith("x"):
# # Print empty, but valid XML
# print_records_prologue(req, of)
# print_records_epilogue(req, of)
return page_end(req, of, ln)
if pl and results_final != {}:
pl = wash_pattern(pl)
if verbose and of.startswith("h"):
print_warning(req, "Search stage 5: applying search pattern limit %s..." % cgi.escape(pl))
try:
results_final = intersect_results_with_hitset(req,
results_final,
search_pattern_parenthesised(req, pl, ap=0, ln=ln, wl=wl),
ap,
aptext=_("No match within your search limits, "
"discarding this condition..."),
of=of)
except:
register_exception(req=req, alert_admin=True)
if of.startswith("h"):
req.write(create_error_box(req, verbose=verbose, ln=ln))
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
return page_end(req, of, ln)
if results_final == {} and not hosted_colls_actual_or_potential_results_p:
if of.startswith("h"):
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
if of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
return page_end(req, of, ln)
t2 = os.times()[4]
cpu_time = t2 - t1
## search stage 6: display results:
results_final_nb_total = 0
results_final_nb = {} # will hold number of records found in each collection
# (in simple dict to display overview more easily)
for coll in results_final.keys():
results_final_nb[coll] = len(results_final[coll])
#results_final_nb_total += results_final_nb[coll]
# Now let us calculate results_final_nb_total more precisely,
# in order to get the total number of "distinct" hits across
# searched collections; this is useful because a record might
# have been attributed to more than one primary collection; so
# we have to avoid counting it multiple times. The price to
# pay for this accuracy of results_final_nb_total is somewhat
# increased CPU time.
if results_final.keys() == 1:
# only one collection; no need to union them
results_final_for_all_selected_colls = results_final.values()[0]
results_final_nb_total = results_final_nb.values()[0]
else:
# okay, some work ahead to union hits across collections:
results_final_for_all_selected_colls = intbitset()
for coll in results_final.keys():
results_final_for_all_selected_colls.union_update(results_final[coll])
results_final_nb_total = len(results_final_for_all_selected_colls)
#if hosted_colls and (of.startswith("h") or of.startswith("x")):
if hosted_colls_actual_or_potential_results_p:
if hosted_colls_results:
for result in hosted_colls_true_results:
colls_to_search.append(result[0][1].name)
results_final_nb[result[0][1].name] = result[1]
results_final_nb_total += result[1]
cpu_time += result[2]
if hosted_colls_timeouts:
for timeout in hosted_colls_timeouts:
colls_to_search.append(timeout[1].name)
# use -963 as a special number to identify the collections that timed out
results_final_nb[timeout[1].name] = -963
# we continue past this point only if there is a hosted collection that has timed out and might offer potential results
if results_final_nb_total ==0 and not hosted_colls_potential_results_p:
if of.startswith("h"):
print_warning(req, "No match found, please enter different search terms.")
elif of.startswith("x"):
# Print empty, but valid XML
print_records_prologue(req, of)
print_records_epilogue(req, of)
else:
# yes, some hits found: good!
# collection list may have changed due to not-exact-match-found policy so check it out:
for coll in results_final.keys():
if coll not in colls_to_search:
colls_to_search.append(coll)
# print results overview:
if of == "id":
# we have been asked to return list of recIDs
recIDs = list(results_final_for_all_selected_colls)
if rm: # do we have to rank?
results_final_for_all_colls_rank_records_output = rank_records(req, rm, 0, results_final_for_all_selected_colls,
string.split(p) + string.split(p1) +
string.split(p2) + string.split(p3), verbose, so, of, ln, rg, jrec)
if results_final_for_all_colls_rank_records_output[0]:
recIDs = results_final_for_all_colls_rank_records_output[0]
elif sf or (CFG_BIBSORT_BUCKETS and sorting_methods): # do we have to sort?
recIDs = sort_records(req, recIDs, sf, so, sp, verbose, of, ln, rg, jrec)
return recIDs
elif of.startswith("h"):
if of not in ['hcs']:
# added the hosted_colls_potential_results_p parameter to help print out the overview more accurately
req.write(print_results_overview(colls_to_search, results_final_nb_total, results_final_nb, cpu_time, ln, ec, hosted_colls_potential_results_p=hosted_colls_potential_results_p))
selected_external_collections_infos = print_external_results_overview(req, cc, [p, p1, p2, p3], f, ec, verbose, ln)
# print number of hits found for XML outputs:
if of.startswith("x"):
req.write("<!-- Search-Engine-Total-Number-Of-Results: %s -->\n" % results_final_nb_total)
# print records:
if of in ['hcs']:
# feed the current search to be summarized:
from invenio.search_engine_summarizer import summarize_records
search_p = p
search_f = f
if not p and (aas == 1 or p1 or p2 or p3):
op_d = {'n': ' and not ', 'a': ' and ', 'o': ' or ', '': ''}
triples = ziplist([f1, f2, f3], [p1, p2, p3], [op1, op2, ''])
triples_len = len(triples)
for i in range(triples_len):
fi, pi, oi = triples[i] # e.g.:
if i < triples_len-1 and not triples[i+1][1]: # if p2 empty
triples[i+1][0] = '' # f2 must be too
oi = '' # and o1
if ' ' in pi:
pi = '"'+pi+'"'
if fi:
fi = fi + ':'
search_p += fi + pi + op_d[oi]
search_f = ''
summarize_records(results_final_for_all_selected_colls, 'hcs', ln, search_p, search_f, req)
else:
if len(colls_to_search)>1:
cpu_time = -1 # we do not want to have search time printed on each collection
print_records_prologue(req, of, cc=cc)
results_final_colls = []
wlqh_results_overlimit = 0
for coll in colls_to_search:
if results_final.has_key(coll) and len(results_final[coll]):
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, coll, results_final_nb[coll],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
results_final_recIDs = list(results_final[coll])
results_final_relevances = []
results_final_relevances_prologue = ""
results_final_relevances_epilogue = ""
if rm: # do we have to rank?
results_final_recIDs_ranked, results_final_relevances, results_final_relevances_prologue, results_final_relevances_epilogue, results_final_comments = \
rank_records(req, rm, 0, results_final[coll],
string.split(p) + string.split(p1) +
string.split(p2) + string.split(p3), verbose, so, of, ln, rg, jrec)
if of.startswith("h"):
print_warning(req, results_final_comments)
if results_final_recIDs_ranked:
results_final_recIDs = results_final_recIDs_ranked
else:
# rank_records failed and returned some error message to display:
print_warning(req, results_final_relevances_prologue)
print_warning(req, results_final_relevances_epilogue)
elif sf or (CFG_BIBSORT_BUCKETS and sorting_methods): # do we have to sort?
results_final_recIDs = sort_records(req, results_final_recIDs, sf, so, sp, verbose, of, ln, rg, jrec)
if len(results_final_recIDs) < CFG_WEBSEARCH_PREV_NEXT_HIT_LIMIT:
results_final_colls.append(results_final_recIDs)
else:
wlqh_results_overlimit = 1
print_records(req, results_final_recIDs, jrec, rg, of, ot, ln,
results_final_relevances,
results_final_relevances_prologue,
results_final_relevances_epilogue,
search_pattern=p,
print_records_prologue_p=False,
print_records_epilogue_p=False,
verbose=verbose,
sf=sf,
so=so,
sp=sp,
rm=rm)
if of.startswith("h"):
req.write(print_search_info(p, f, sf, so, sp, rm, of, ot, coll, results_final_nb[coll],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
if req and not isinstance(req, cStringIO.OutputType):
# store the last search results page
session_param_set(req, 'websearch-last-query', req.unparsed_uri)
if wlqh_results_overlimit:
results_final_colls = None
# store list of results if user wants to display hits
# in a single list, or store list of collections of records
# if user displays hits split by collections:
session_param_set(req, 'websearch-last-query-hits', results_final_colls)
#if hosted_colls and (of.startswith("h") or of.startswith("x")):
if hosted_colls_actual_or_potential_results_p:
if hosted_colls_results:
# TODO: add a verbose message here
for result in hosted_colls_true_results:
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, results_final_nb[result[0][1].name],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, limit=rg))
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, results_final_nb[result[0][1].name],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
if hosted_colls_timeouts:
# TODO: add a verbose message here
# TODO: check if verbose messages still work when dealing with (re)calculations of timeouts
(hosted_colls_timeouts_results, hosted_colls_timeouts_timeouts) = do_calculate_hosted_collections_results(req, ln, None, verbose, None, hosted_colls_timeouts, CFG_HOSTED_COLLECTION_TIMEOUT_POST_SEARCH)
if hosted_colls_timeouts_results:
for result in hosted_colls_timeouts_results:
if result[1] == None or result[1] == False:
## these are the searches the returned no or zero results
## also print a nearest terms box, in case this is the only
## collection being searched and it returns no results?
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, no_records_found=True, limit=rg))
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
else:
# these are the searches that actually returned results on time
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, result[1],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=result[0], ln=ln, of=of, req=req, limit=rg))
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, result[0][1].name, result[1],
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
if hosted_colls_timeouts_timeouts:
for timeout in hosted_colls_timeouts_timeouts:
if of.startswith("h"):
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, timeout[1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time))
req.write(print_hosted_results(url_and_engine=timeout[0], ln=ln, of=of, req=req, search_timed_out=True, limit=rg))
req.write(print_hosted_search_info(p, f, sf, so, sp, rm, of, ot, timeout[1].name, -963,
jrec, rg, aas, ln, p1, p2, p3, f1, f2, f3, m1, m2, m3, op1, op2,
sc, pl_in_url,
d1y, d1m, d1d, d2y, d2m, d2d, dt, cpu_time, 1))
print_records_epilogue(req, of)
if f == "author" and of.startswith("h"):
req.write(create_similarly_named_authors_link_box(p, ln))
# log query:
try:
id_query = log_query(req.remote_host, req.args, uid)
if of.startswith("h") and id_query:
if not of in ['hcs']:
# display alert/RSS teaser for non-summary formats:
user_info = collect_user_info(req)
display_email_alert_part = True
if user_info:
if user_info['email'] == 'guest':
if CFG_ACCESS_CONTROL_LEVEL_ACCOUNTS > 4:
display_email_alert_part = False
else:
if not user_info['precached_usealerts']:
display_email_alert_part = False
req.write(websearch_templates.tmpl_alert_rss_teaser_box_for_query(id_query, \
ln=ln, display_email_alert_part=display_email_alert_part))
except:
# do not log query if req is None (used by CLI interface)
pass
log_query_info("ss", p, f, colls_to_search, results_final_nb_total)
# External searches
if of.startswith("h"):
if not of in ['hcs']:
perform_external_collection_search(req, cc, [p, p1, p2, p3], f, ec, verbose, ln, selected_external_collections_infos)
return page_end(req, of, ln)
def perform_request_cache(req, action="show"):
    """Display (and optionally clear) the search engine's in-memory caches.

    Writes an HTML status page to REQ covering four caches: the collection
    reclist cache, the search results cache, the field I18N name cache and
    the collection I18N name cache.

    @param req: mod_python-style request object used for the HTML response
    @param action: "show" to only display the caches; "clear" to empty the
        search results cache before displaying
    @return: a newline string (historical convention for these handlers)
    """
    req.content_type = "text/html"
    req.send_http_header()
    req.write("<html>")
    out = ""
    out += "<h1>Search Cache</h1>"
    # clear cache if requested:
    if action == "clear":
        search_results_cache.clear()
    req.write(out)
    # show collection reclist cache:
    out = "<h3>Collection reclist cache</h3>"
    out += "- collection table last updated: %s" % get_table_update_time('collection')
    out += "<br />- reclist cache timestamp: %s" % collection_reclist_cache.timestamp
    out += "<br />- reclist cache contents:"
    out += "<blockquote>"
    # only list collections whose cached reclist is non-empty:
    for coll in collection_reclist_cache.cache.keys():
        if collection_reclist_cache.cache[coll]:
            out += "%s (%d)<br />" % (coll, len(collection_reclist_cache.cache[coll]))
    out += "</blockquote>"
    req.write(out)
    # show search results cache:
    out = "<h3>Search Cache</h3>"
    out += "- search cache usage: %d queries cached (max. ~%d)" % \
           (len(search_results_cache.cache), CFG_WEBSEARCH_SEARCH_CACHE_SIZE)
    # NOTE: the "clear" link (and the contents listing) is only emitted
    # when the search results cache is non-empty.
    if len(search_results_cache.cache):
        out += "<br />- search cache contents:"
        out += "<blockquote>"
        for query, hitset in search_results_cache.cache.items():
            out += "<br />%s ... %s" % (query, hitset)
        out += """<p><a href="%s/search/cache?action=clear">clear search results cache</a>""" % CFG_SITE_URL
        out += "</blockquote>"
    req.write(out)
    # show field i18nname cache:
    out = "<h3>Field I18N names cache</h3>"
    out += "- fieldname table last updated: %s" % get_table_update_time('fieldname')
    out += "<br />- i18nname cache timestamp: %s" % field_i18nname_cache.timestamp
    out += "<br />- i18nname cache contents:"
    out += "<blockquote>"
    for field in field_i18nname_cache.cache.keys():
        for ln in field_i18nname_cache.cache[field].keys():
            out += "%s, %s = %s<br />" % (field, ln, field_i18nname_cache.cache[field][ln])
    out += "</blockquote>"
    req.write(out)
    # show collection i18nname cache:
    out = "<h3>Collection I18N names cache</h3>"
    out += "- collectionname table last updated: %s" % get_table_update_time('collectionname')
    out += "<br />- i18nname cache timestamp: %s" % collection_i18nname_cache.timestamp
    out += "<br />- i18nname cache contents:"
    out += "<blockquote>"
    for coll in collection_i18nname_cache.cache.keys():
        for ln in collection_i18nname_cache.cache[coll].keys():
            out += "%s, %s = %s<br />" % (coll, ln, collection_i18nname_cache.cache[coll][ln])
    out += "</blockquote>"
    req.write(out)
    req.write("</html>")
    return "\n"
def perform_request_log(req, date=""):
    """Display search log information for given date.

    When DATE is given as a YYYYMMDD string, lists every query logged on
    that day; otherwise shows a per-day query-count summary for the
    current month.  Reads CFG_LOGDIR/search.log via external ``grep``
    subprocesses.

    @param req: mod_python-style request object used for the HTML response
    @param date: day to display, as a YYYYMMDD string; empty for the
        monthly summary (non-numeric input will raise from string.atoi)
    @return: a newline string (historical convention for these handlers)
    """
    req.content_type = "text/html"
    req.send_http_header()
    req.write("<html>")
    req.write("<h1>Search Log</h1>")
    if date: # case A: display stats for a day
        yyyymmdd = string.atoi(date)
        req.write("<p><big><strong>Date: %d</strong></big><p>" % yyyymmdd)
        req.write("""<table border="1">""")
        req.write("<tr><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td><td><strong>%s</strong></td></tr>" % ("No.", "Time", "Pattern", "Field", "Collection", "Number of Hits"))
        # read file:
        # (yyyymmdd is an int here, so the shell command cannot be injected via DATE)
        p = os.popen("grep ^%d %s/search.log" % (yyyymmdd, CFG_LOGDIR), 'r')
        lines = p.readlines()
        p.close()
        # process lines:
        # each log line is '#'-separated: datetime#aas#pattern#field#collection#nbhits
        # NOTE(review): pattern/field/collection are written into the HTML
        # unescaped — presumably the log is trusted content; confirm.
        i = 0
        for line in lines:
            try:
                datetime, dummy_aas, p, f, c, nbhits = string.split(line,"#")
                i += 1
                req.write("<tr><td align=\"right\">#%d</td><td>%s:%s:%s</td><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>" \
                          % (i, datetime[8:10], datetime[10:12], datetime[12:], p, f, c, nbhits))
            except:
                pass # ignore eventual wrong log lines
        req.write("</table>")
    else: # case B: display summary stats per day
        # iterate from the first day of the current month to today:
        yyyymm01 = int(time.strftime("%Y%m01", time.localtime()))
        yyyymmdd = int(time.strftime("%Y%m%d", time.localtime()))
        req.write("""<table border="1">""")
        req.write("<tr><td><strong>%s</strong></td><td><strong>%s</strong></tr>" % ("Day", "Number of Queries"))
        for day in range(yyyymm01, yyyymmdd + 1):
            # one grep -c per candidate day; each count links to the per-day view
            p = os.popen("grep -c ^%d %s/search.log" % (day, CFG_LOGDIR), 'r')
            for line in p.readlines():
                req.write("""<tr><td>%s</td><td align="right"><a href="%s/search/log?date=%d">%s</a></td></tr>""" % \
                          (day, CFG_SITE_URL, day, line))
            p.close()
        req.write("</table>")
    req.write("</html>")
    return "\n"
def get_all_field_values(tag):
    """
    Return all existing values stored for a given tag.
    @param tag: the full tag, e.g. 909C0b
    @type tag: string
    @return: the list of values
    @rtype: list of strings
    """
    # The bibXXx table names are zero-padded (bib00x .. bib99x), so the
    # numeric prefix must be formatted with %02d: plain %2d space-pads
    # prefixes below 10 and yields an invalid table name such as 'bib 3x'.
    table = 'bib%02dx' % int(tag[:2])
    return [row[0] for row in run_sql("SELECT DISTINCT(value) FROM %s WHERE tag=%%s" % table, (tag, ))]
def get_most_popular_field_values(recids, tags, exclude_values=None, count_repetitive_values=True):
    """
    Analyze RECIDS and look for TAGS and return most popular values
    and the frequency with which they occur sorted according to
    descending frequency.

    If a value is found in EXCLUDE_VALUES, then do not count it.

    If COUNT_REPETITIVE_VALUES is True, then we count every occurrence
    of value in the tags.  If False, then we count the value only once
    regardless of the number of times it may appear in a record.
    (But, if the same value occurs in another record, we count it, of
    course.)

    @return: a tuple of (value, frequency) pairs, sorted by descending
        frequency and then case-insensitively by value.

    Example:
     >>> get_most_popular_field_values(range(11,20), '980__a')
     (('PREPRINT', 10), ('THESIS', 7), ...)
     >>> get_most_popular_field_values(range(11,20), ('100__a', '700__a'))
     (('Ellis, J', 10), ('Ellis, N', 7), ...)
     >>> get_most_popular_field_values(range(11,20), ('100__a', '700__a'), ('Ellis, J'))
     (('Ellis, N', 7), ...)
    """
    def _get_most_popular_field_values_helper_sorter(val1, val2):
        "Compare VAL1 and VAL2 according to, firstly, frequency, then secondly, alphabetically."
        # Python 2 cmp-style comparator: higher frequency first,
        # ties broken case-insensitively by value.
        compared_via_frequencies = cmp(valuefreqdict[val2], valuefreqdict[val1])
        if compared_via_frequencies == 0:
            return cmp(val1.lower(), val2.lower())
        else:
            return compared_via_frequencies

    valuefreqdict = {}  # value -> number of occurrences counted
    ## sanity check:
    if not exclude_values:
        exclude_values = []
    if isinstance(tags, str):
        # accept a single tag as a bare string
        tags = (tags,)
    ## find values to count:
    vals_to_count = []
    displaytmp = {}  # lowercased value -> one original-case spelling (technique B only)
    if count_repetitive_values:
        # counting technique A: can look up many records at once: (very fast)
        for tag in tags:
            vals_to_count.extend(get_fieldvalues(recids, tag, sort=False))
    else:
        # counting technique B: must count record-by-record: (slow)
        for recid in recids:
            vals_in_rec = []
            for tag in tags:
                for val in get_fieldvalues(recid, tag, False):
                    vals_in_rec.append(val)
            # do not count repetitive values within this record
            # (even across various tags, so need to unify again):
            # values are deduplicated case-insensitively; displaytmp keeps
            # an original-case spelling for the final output.
            dtmp = {}
            for val in vals_in_rec:
                dtmp[val.lower()] = 1
                displaytmp[val.lower()] = val
            vals_in_rec = dtmp.keys()
            vals_to_count.extend(vals_in_rec)
    ## are we to exclude some of found values?
    # NOTE(review): in technique B the values being compared here are
    # lower-cased, so EXCLUDE_VALUES entries match only in that form,
    # while in technique A the comparison is against the original case —
    # confirm this asymmetry is intended.
    for val in vals_to_count:
        if val not in exclude_values:
            if valuefreqdict.has_key(val):
                valuefreqdict[val] += 1
            else:
                valuefreqdict[val] = 1
    ## sort by descending frequency of values:
    out = ()
    vals = valuefreqdict.keys()
    vals.sort(_get_most_popular_field_values_helper_sorter)
    for val in vals:
        # restore the original-case spelling where one was recorded:
        tmpdisplv = ''
        if displaytmp.has_key(val):
            tmpdisplv = displaytmp[val]
        else:
            tmpdisplv = val
        # append a (value, frequency) pair to the result tuple:
        out += (tmpdisplv, valuefreqdict[val]),
    return out
def profile(p="", f="", c=CFG_SITE_NAME):
    """Profile search time.

    Runs perform_request_search(p, f, c) under the stdlib 'profile'
    profiler, dumps the stats to the file
    'perform_request_search_profile' and prints cumulative timings.

    @param p: search pattern
    @param f: search field
    @param c: collection name (defaults to CFG_SITE_NAME)
    @return: 0
    """
    # Local imports: the 'profile' module deliberately shadows this
    # function's own name inside the body.
    import profile
    import pstats
    # NOTE(review): p/f/c are interpolated verbatim into the profiled
    # statement; values containing quotes break the generated code.
    # Debug helper only - do not expose to untrusted input.
    profile.run("perform_request_search(p='%s',f='%s', c='%s')" % (p, f, c), "perform_request_search_profile")
    # 'p' is rebound here from the search pattern to the Stats object.
    p = pstats.Stats("perform_request_search_profile")
    p.strip_dirs().sort_stats("cumulative").print_stats()
    return 0
| jrbl/invenio | modules/websearch/lib/search_engine.py | Python | gpl-2.0 | 270,395 |
//@ sourceMappingURL=payment_method_nonce.map
// Generated by CoffeeScript 1.6.1
// Model class for a Braintree payment method nonce.  It adds no behaviour
// of its own: it only subclasses AttributeSetter, which copies the
// attributes of a gateway response onto the instance.
var AttributeSetter, PaymentMethodNonce,
  __hasProp = {}.hasOwnProperty,
  // CoffeeScript's standard inheritance helper: copies static properties
  // from parent to child and wires up the prototype chain + __super__.
  __extends = function(child, parent) { for (var key in parent) { if (__hasProp.call(parent, key)) child[key] = parent[key]; } function ctor() { this.constructor = child; } ctor.prototype = parent.prototype; child.prototype = new ctor(); child.__super__ = parent.prototype; return child; };

AttributeSetter = require('./attribute_setter').AttributeSetter;

PaymentMethodNonce = (function(_super) {

  __extends(PaymentMethodNonce, _super);

  function PaymentMethodNonce() {
    // Delegate construction entirely to AttributeSetter.
    return PaymentMethodNonce.__super__.constructor.apply(this, arguments);
  }

  return PaymentMethodNonce;

})(AttributeSetter);

exports.PaymentMethodNonce = PaymentMethodNonce;
| jeffprestes/node-braintree-demo | node_modules/braintree/lib/braintree/payment_method_nonce.js | JavaScript | gpl-2.0 | 813 |
import hashlib
import matplotlib
# Force matplotlib to not use any Xwindows backend.
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.colors as colors
import matplotlib.cm as cm
from matplotlib.patches import Rectangle
import os
import shutil
import tempfile
from sar_parser import SarParser
# If the there are more than 50 plots in a graph we move the legend to the
# bottom
LEGEND_THRESHOLD = 50
def ascii_date(d):
    """Format a datetime as 'YYYY-MM-DD HH:MM' (the format fed to gnuplot).

    @param d: datetime-like object supporting strftime()
    @return: formatted string
    """
    # strftime() already returns a str; the old '"%s" % (...)' wrapper
    # was redundant.
    return d.strftime("%Y-%m-%d %H:%M")
class SarGrapher(object):
    """Renders sar data parsed by SarParser as image files, SVG files or
    ASCII gnuplot graphs.

    Images are written one at a time into a private temporary directory
    so that memory usage stays constant; call close() when finished to
    remove it.
    """

    def __init__(self, filenames, starttime=None, endtime=None):
        """Initializes the class, creates a SarParser class
        given a list of files and also parses the files"""
        # Temporary dir where images are stored (one per graph)
        # NB: This is done to keep the memory usage constant
        # in spite of being a bit slower (before this change
        # we could use > 12GB RAM for a simple sar file -
        # matplotlib is simply inefficient in this area)
        self._tempdir = tempfile.mkdtemp(prefix='sargrapher')

        self.sar_parser = SarParser(filenames, starttime, endtime)
        self.sar_parser.parse()
        duplicate_timestamps = self.sar_parser._duplicate_timestamps
        if duplicate_timestamps:
            # Fixed message: the literals used to concatenate to
            # "...First 10line numbers..." (space was missing).
            print("There are {0} lines with duplicate timestamps. First 10 "
                  "line numbers at {1}".format(
                      len(duplicate_timestamps.keys()),
                      sorted(list(duplicate_timestamps.keys()))[:10]))

    def _graph_filename(self, graph, extension='.png'):
        """Creates a unique constant file name given a graph or graph list.

        The name is the SHA1 of the (sanitized) graph name(s), so the same
        graph always maps to the same file inside the temp dir."""
        if isinstance(graph, list):
            temp = "_".join(graph)
        else:
            temp = graph
        # '%' and '/' appear in dataset names but are unsafe in filenames
        temp = temp.replace('%', '_')
        temp = temp.replace('/', '_')
        digest = hashlib.sha1()
        digest.update(temp.encode('utf-8'))
        fname = os.path.join(self._tempdir, digest.hexdigest() + extension)
        return fname

    def datasets(self):
        """Returns a list of all the available datasets"""
        return self.sar_parser.available_data_types()

    def timestamps(self):
        """Returns a sorted list of all the available timestamps"""
        return sorted(self.sar_parser.available_timestamps())

    def plot_datasets(self, data, fname, extra_labels, showreboots=False,
                      output='pdf'):
        """ Plot timeseries data (of type dataname). The data can be either
        simple (one or no datapoint at any point in time, or indexed (by
        indextype). dataname is assumed to be in the form of [title, [label1,
        label2, ...], [data1, data2, ...]] extra_labels is a list of tuples
        [(datetime, 'label'), ...]

        NOTE(review): the 'output' parameter is currently unused here -
        the output format is implied by fname's extension."""
        sar_parser = self.sar_parser
        title = data[0][0]
        unit = data[0][1]
        axis_labels = data[0][2]
        datanames = data[1]

        if not isinstance(datanames, list):
            raise Exception("plottimeseries expects a list of datanames: %s" %
                            data)

        fig = plt.figure(figsize=(10.5, 6.5))
        axes = fig.add_subplot(111)
        axes.set_title('{0} time series'.format(title), fontsize=12)
        axes.set_xlabel('Time')
        axes.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d %H:%M'))
        # Twenty minutes. Could probably make it a parameter
        axes.xaxis.set_minor_locator(mdates.MinuteLocator(interval=20))
        fig.autofmt_xdate()

        ylabel = title
        if unit:
            ylabel += " - " + unit
        axes.set_ylabel(ylabel)

        # Plain (non-scientific, no offset) y axis labels
        y_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
        axes.yaxis.set_major_formatter(y_formatter)
        axes.yaxis.get_major_formatter().set_scientific(False)

        # One distinguishable colour per plotted dataset
        color_norm = colors.Normalize(vmin=0, vmax=len(datanames) - 1)
        scalar_map = cm.ScalarMappable(norm=color_norm,
                                       cmap=plt.get_cmap('Set1'))

        timestamps = self.timestamps()
        counter = 0
        for i in datanames:
            try:
                dataset = [sar_parser._data[d][i] for d in timestamps]
            except:
                print("Key {0} does not exist in this graph".format(i))
                raise
            axes.plot(timestamps, dataset, 'o:', label=axis_labels[counter],
                      color=scalar_map.to_rgba(counter))
            counter += 1

        # Draw extra_labels
        if extra_labels:
            for extra in extra_labels:
                axes.annotate(extra[1], xy=(mdates.date2num(extra[0]),
                              sar_parser.find_max(extra[0], datanames)),
                              xycoords='data', xytext=(30, 30),
                              textcoords='offset points',
                              arrowprops=dict(arrowstyle="->",
                                              connectionstyle="arc3,rad=.2"))

        # If we have a sosreport draw the reboots
        if showreboots and sar_parser.sosreport is not None and \
           sar_parser.sosreport.reboots is not None:
            reboots = sar_parser.sosreport.reboots
            for reboot in reboots.keys():
                reboot_date = reboots[reboot]['date']
                rboot_x = mdates.date2num(reboot_date)
                (xmin, xmax) = plt.xlim()
                (ymin, ymax) = plt.ylim()
                # Skip reboots outside the plotted time window
                if rboot_x < xmin or rboot_x > xmax:
                    continue

                axes.annotate('', xy=(mdates.date2num(reboot_date), ymin),
                              xycoords='data', xytext=(-30, -30),
                              textcoords='offset points',
                              arrowprops=dict(arrowstyle="->", color='blue',
                                              connectionstyle="arc3,rad=-0.1"))

        # Show any data collection gaps in the graph as grey rectangles
        gaps = sar_parser.find_data_gaps()
        if len(gaps) > 0:
            for i in gaps:
                (g1, g2) = i
                x1 = mdates.date2num(g1)
                x2 = mdates.date2num(g2)
                (ymin, ymax) = plt.ylim()
                axes.add_patch(Rectangle((x1, ymin), x2 - x1,
                                         ymax - ymin, facecolor="lightgrey"))

        # Add a grid to the graph to ease visualization
        axes.grid(True)

        lgd = None
        # Draw the legend only when needed
        if len(datanames) > 1 or \
           (len(datanames) == 1 and len(datanames[0].split('#')) > 1):
            # We want the legends box roughly square shaped
            # and not take up too much room
            props = matplotlib.font_manager.FontProperties(size='xx-small')
            if len(datanames) < LEGEND_THRESHOLD:
                cols = int((len(datanames) ** 0.5))
                lgd = axes.legend(loc=1, ncol=cols, shadow=True, prop=props)
            else:
                # Too many plots: move the legend below the graph
                cols = int(len(datanames) ** 0.6)
                lgd = axes.legend(loc=9, ncol=cols,
                                  bbox_to_anchor=(0.5, -0.29),
                                  shadow=True, prop=props)

        if len(datanames) == 0:
            return None

        try:
            if lgd:
                plt.savefig(fname, bbox_extra_artists=(lgd,),
                            bbox_inches='tight')
            else:
                plt.savefig(fname, bbox_inches='tight')
        except:
            # Historical behaviour: abort the whole process when an image
            # cannot be saved (disk full, bad backend, ...)
            import traceback
            print(traceback.format_exc())
            import sys
            sys.exit(-1)

        # Free matplotlib state so memory stays constant across graphs
        plt.cla()
        plt.clf()
        plt.close('all')

    def plot_svg(self, graphs, output, labels):
        """Given a list of graphs, output an svg file per graph.
        Input is a list of strings. A graph with multiple datasets
        is a string with datasets separated by comma"""
        if output == 'out.pdf':
            # Default output name makes no sense for svg; use 'graphN.svg'
            output = 'graph'
        counter = 1
        fnames = []
        for i in graphs:
            subgraphs = i.split(',')
            fname = self._graph_filename(subgraphs, '.svg')
            fnames.append(fname)
            self.plot_datasets((['', None, subgraphs], subgraphs), fname,
                               labels)
            dest = os.path.join(os.getcwd(), "{0}{1}.svg".format(
                output, counter))
            shutil.move(fname, dest)
            print("Created: {0}".format(dest))
            counter += 1

        # removes all temporary files and directories
        self.close()

    def plot_ascii(self, graphs, def_columns=80, def_rows=25):
        """Displays a single graph in ASCII form on the terminal"""
        import subprocess
        sar_parser = self.sar_parser
        timestamps = self.timestamps()
        try:
            # stty prints e.g. "25 80"; convert to int so the size
            # comparison below works (comparing the str to an int was
            # always True on Python 2, disabling terminal-size detection).
            rows, columns = os.popen('stty size', 'r').read().split()
            rows = int(rows)
            columns = int(columns)
        except:
            columns = def_columns
            rows = def_rows
        if columns > def_columns:
            columns = def_columns

        for graph in graphs:
            try:
                # universal_newlines=True: stdin is opened in text mode so
                # the str writes below work on Python 3 as well.
                gnuplot = subprocess.Popen(["/usr/bin/gnuplot"],
                                           stdin=subprocess.PIPE,
                                           universal_newlines=True)
            except Exception as e:
                # Was: raise("...") - calling a string, which itself
                # raises a TypeError instead of a meaningful error.
                raise RuntimeError("Error launching gnuplot: {0}".format(e))

            gnuplot.stdin.write("set term dumb {0} {1}\n".format(
                columns, rows))
            gnuplot.stdin.write("set xdata time\n")
            gnuplot.stdin.write('set xlabel "Time"\n')
            gnuplot.stdin.write('set timefmt \"%Y-%m-%d %H:%M\"\n')
            gnuplot.stdin.write('set xrange [\"%s\":\"%s\"]\n' %
                                (ascii_date(timestamps[0]),
                                 ascii_date(timestamps[-1])))
            gnuplot.stdin.write('set ylabel "%s"\n' % (graph))
            gnuplot.stdin.write('set datafile separator ","\n')
            gnuplot.stdin.write('set autoscale y\n')
            gnuplot.stdin.write('set title "%s - %s"\n' %
                                (graph, " ".join(sar_parser._files)))
            # FIXME: do it through a method
            try:
                dataset = [sar_parser._data[d][graph] for d in timestamps]
            except KeyError:
                # Was missing the .format() call, printing the literal
                # "Key '{0}' could not be found".
                print("Key '{0}' could not be found".format(graph))
                return

            txt = "plot '-' using 1:2 title '{0}' with linespoints \n".format(
                graph)
            gnuplot.stdin.write(txt)
            for i, j in zip(timestamps, dataset):
                s = '\"%s\",%f\n' % (ascii_date(i), j)
                gnuplot.stdin.write(s)
            gnuplot.stdin.write("e\n")
            gnuplot.stdin.write("exit\n")
            gnuplot.stdin.flush()

    def export_csv(self):
        # Not implemented yet.
        return

    def close(self):
        """Removes temporary directory and files"""
        if os.path.isdir(self._tempdir):
            shutil.rmtree(self._tempdir)
| mbaldessari/sarstats | sar_grapher.py | Python | gpl-2.0 | 10,858 |
// { dg-do compile { target c++11 } }
// 2008-09-16 Chris Fairles <chris.fairles@gmail.com>
// Copyright (C) 2008-2016 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library. This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
// You should have received a copy of the GNU General Public License along
// with this library; see the file COPYING3. If not see
// <http://www.gnu.org/licenses/>.
#include <algorithm>
#include <functional>
#include <testsuite_character.h>
// Explicit instantiations of the initializer_list overloads of std::max
// for a POD type; this is a compile-only link test (no runtime checks).
namespace std
{
  using __gnu_test::pod_int;
  typedef pod_int 		value_type;
  typedef value_type* 		iterator_type;
  typedef std::less<value_type> compare_type;

  // Instantiate both the default-comparison and custom-comparison forms.
  template value_type max(initializer_list<value_type>);
  template value_type max(initializer_list<value_type>, compare_type);
}
| paranoiacblack/gcc | libstdc++-v3/testsuite/25_algorithms/max/requirements/explicit_instantiation/pod2.cc | C++ | gpl-2.0 | 1,233 |
import { Component, OnInit } from '@angular/core';
// Shell component for the reports landing page.  Purely presentational:
// all content comes from the associated template; no inputs, outputs or
// injected services yet.
@Component({
  selector: 'app-report-home',
  templateUrl: './report-home.component.html',
  styleUrls: ['./report-home.component.css']
})
export class ReportHomeComponent implements OnInit {

  constructor() { }

  // Angular lifecycle hook; intentionally empty.
  ngOnInit() {
  }

}
| ijgomez/workspace-test | template-dashboard/src/app/views/reports/report-home/report-home.component.ts | TypeScript | gpl-2.0 | 288 |
/**
* Copyright (c) 2000-2012 Liferay, Inc. All rights reserved.
*
* This library is free software; you can redistribute it and/or modify it under
* the terms of the GNU Lesser General Public License as published by the Free
* Software Foundation; either version 2.1 of the License, or (at your option)
* any later version.
*
* This library is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
* FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
* details.
*/
package com.iucn.whp.dbservice.model.impl;
import com.iucn.whp.dbservice.model.assessing_threats_potential;
import com.liferay.portal.kernel.util.StringBundler;
import com.liferay.portal.kernel.util.StringPool;
import com.liferay.portal.model.CacheModel;
import java.io.Serializable;
/**
* The cache model class for representing assessing_threats_potential in entity cache.
*
* @author alok.sen
* @see assessing_threats_potential
* @generated
*/
public class assessing_threats_potentialCacheModel implements CacheModel<assessing_threats_potential>,
	Serializable {
	/**
	 * Returns a debug representation of all cached fields.
	 */
	@Override
	public String toString() {
		// 17 is a capacity hint matching the number of append() calls below.
		StringBundler sb = new StringBundler(17);

		sb.append("{potential_threat_id=");
		sb.append(potential_threat_id);
		sb.append(", assessment_version_id=");
		sb.append(assessment_version_id);
		sb.append(", potential_threat=");
		sb.append(potential_threat);
		sb.append(", justification=");
		sb.append(justification);
		sb.append(", threadExtentValue=");
		sb.append(threadExtentValue);
		sb.append(", threat_rating=");
		sb.append(threat_rating);
		sb.append(", inside_site=");
		sb.append(inside_site);
		sb.append(", outside_site=");
		sb.append(outside_site);
		sb.append("}");

		return sb.toString();
	}

	/**
	 * Rebuilds an entity instance from this cache model.  String fields
	 * cached as null are normalized to the empty string so the entity
	 * never exposes null strings.
	 */
	public assessing_threats_potential toEntityModel() {
		assessing_threats_potentialImpl assessing_threats_potentialImpl = new assessing_threats_potentialImpl();

		assessing_threats_potentialImpl.setPotential_threat_id(potential_threat_id);
		assessing_threats_potentialImpl.setAssessment_version_id(assessment_version_id);

		if (potential_threat == null) {
			assessing_threats_potentialImpl.setPotential_threat(StringPool.BLANK);
		}
		else {
			assessing_threats_potentialImpl.setPotential_threat(potential_threat);
		}

		if (justification == null) {
			assessing_threats_potentialImpl.setJustification(StringPool.BLANK);
		}
		else {
			assessing_threats_potentialImpl.setJustification(justification);
		}

		if (threadExtentValue == null) {
			assessing_threats_potentialImpl.setThreadExtentValue(StringPool.BLANK);
		}
		else {
			assessing_threats_potentialImpl.setThreadExtentValue(threadExtentValue);
		}

		assessing_threats_potentialImpl.setThreat_rating(threat_rating);
		assessing_threats_potentialImpl.setInside_site(inside_site);
		assessing_threats_potentialImpl.setOutside_site(outside_site);

		// Clears the "dirty original values" bookkeeping on the fresh entity.
		assessing_threats_potentialImpl.resetOriginalValues();

		return assessing_threats_potentialImpl;
	}

	// Cached column values; public by Liferay's generated-cache-model
	// convention (this class is @generated - regenerate rather than edit).
	public long potential_threat_id;
	public long assessment_version_id;
	public String potential_threat;
	public String justification;
	public String threadExtentValue;
	public long threat_rating;
	public boolean inside_site;
	public boolean outside_site;
}
<?php
// Search page of the translation editor plugin: shows the translation
// strings of the requested (installed) language matching the query.
gatekeeper();

// get inputs
$q = get_input("translation_editor_search");
$language = get_input("language", "en");

$found = translation_editor_search_translation($q, $language);

// Only languages that are actually installed can be browsed.
$trans = get_installed_translations();
if(!array_key_exists($language, $trans)){
	forward("translation_editor");
}

// build page elements
$title_text = elgg_echo("translation_editor:search");
$title = elgg_view_title($title_text);

elgg_push_breadcrumb(elgg_echo("translation_editor:menu:title"), "translation_editor");
elgg_push_breadcrumb(elgg_echo($language), "translation_editor/" . $language);
elgg_push_breadcrumb($title_text);

// Bug fix: $body was appended to (.=) without being initialized first,
// raising an undefined-variable notice on the first concatenation.
$body = "";
$body .= elgg_view("translation_editor/search", array("current_language" => $language, "query" => $q));
$body .= elgg_view("translation_editor/search_results", array("results" => $found, "current_language" => $language));

// Build page
$page_data = elgg_view_layout('one_column', array(
	'content' => "<div class='elgg-head'>" . $title . "</div>" . $body
));

echo elgg_view_page($title_text, $page_data);
| ferpic87/nexu | mod/translation_editor/pages/search.php | PHP | gpl-2.0 | 1,091 |
<?php
if ( !defined ( 'DIR_CORE' )) {
header ( 'Location: static_pages/' );
}
class ModelExtensionDefaultStripe extends Model {

	/**
	 * Builds the payment-method entry offered at checkout.
	 *
	 * The method is available when the extension is enabled and either no
	 * geo-location restriction is configured or the customer's payment
	 * address falls inside the configured location.
	 *
	 * @param array $address payment address ('country_id' and 'zone_id' keys are used)
	 * @return array payment method definition, or an empty array when not applicable
	 */
	public function getMethod($address) {
		$this->load->language('default_stripe/default_stripe');

		if ($this->config->get('default_stripe_status')) {
			// All interpolated values are cast to int, so the query is injection-safe.
			$query = $this->db->query("SELECT * FROM `" . $this->db->table("zones_to_locations") . "` WHERE location_id = '" . (int)$this->config->get('default_stripe_location_id') . "' AND country_id = '" . (int)$address['country_id'] . "' AND (zone_id = '" . (int)$address['zone_id'] . "' OR zone_id = '0')");

			if (!$this->config->get('default_stripe_location_id')) {
				$status = TRUE;
			} elseif ($query->num_rows) {
				$status = TRUE;
			} else {
				$status = FALSE;
			}
		} else {
			$status = FALSE;
		}

		$payment_data = array();

		if ($status) {
			$payment_data = array(
				'id'         => 'default_stripe',
				'title'      => $this->language->get('text_title'),
				'sort_order' => $this->config->get('default_stripe_sort_order')
			);
		}

		return $payment_data;
	}

	/**
	 * Charges the supplied card via Stripe and finalizes the order.
	 *
	 * @param array  $pd                 payment details: order_id, amount, currency,
	 *                                   cc_number, cc_expire_month, cc_expire_year,
	 *                                   cc_cvv2, cc_owner
	 * @param string $customer_stripe_id optional Stripe customer id (used in error logs)
	 * @return array|Stripe_Charge charge response; on failure an array with
	 *         'error' (and optionally 'code') keys
	 */
	public function processPayment($pd, $customer_stripe_id = '') {
		// Bug fix: $response used to be initialized to '' (a string); the
		// error paths below assign $response['error'], which PHP 7.1+
		// rejects with a fatal "Cannot use a scalar value as an array".
		$response = array();
		$this->load->model('checkout/order');
		$this->load->language('default_stripe/default_stripe');
		$order_info = $this->model_checkout_order->getOrder($pd['order_id']);

		try {
			require_once(DIR_EXT . 'default_stripe/core/stripe_modules.php');
			grantStripeAccess($this->config);

			//build charge data array
			$charge_data = array();
			$charge_data['amount'] = $pd['amount'];
			$charge_data['currency'] = $pd['currency'];
			$charge_data['description'] = $this->config->get('store_name').' Order #'.$pd['order_id'];
			$charge_data['statement_descriptor'] = 'Order #'.$pd['order_id'];
			$charge_data['receipt_email'] = $order_info['email'];
			// Delayed settlement = authorize only; funds captured later.
			if ($this->config->get('default_stripe_settlement') == 'delayed') {
				$charge_data['capture'] = false;
			} else {
				$charge_data['capture'] = true;
			}

			//build cc details
			$cc_details = array(
				'number' => $pd['cc_number'],
				'exp_month' => $pd['cc_expire_month'],
				'exp_year' => $pd['cc_expire_year'],
				'cvc' => $pd['cc_cvv2'],
				'name' => $pd['cc_owner']
			);
			$cc_details = array_merge($cc_details, array(
				'address_line1' => $order_info['payment_address_1'],
				'address_line2' => $order_info['payment_address_2'],
				'address_city' => $order_info['payment_city'],
				'address_zip' => $order_info['payment_postcode'],
				'address_state' => $order_info['payment_zone'],
				'address_country' => $order_info['payment_iso_code_2'],
			));

			//we need get the token for the card first
			$token = array();
			$token = Stripe_Token::create( array( 'card' => $cc_details ) );
			if(!$token || !$token['id']){
				$msg = new AMessage();
				$msg->saveError(
					'Stripe failed to get card token for order_id ' . $pd['order_id'],
					'Unable to use card for customer' . $customer_stripe_id
				);
				$response['error'] = $this->language->get('error_system');
				return $response;
			}
			$charge_data['card'] = $token['id'];

			if ($order_info['shipping_method']) {
				$charge_data['shipping'] = array(
					'name' => $order_info['firstname'] . ' ' . $order_info['lastname'],
					'phone' => $order_info['telephone'],
					'address' => array(
						'line1' => $order_info['shipping_address_1'],
						'line2' => $order_info['shipping_address_2'],
						'city' => $order_info['shipping_city'],
						'postal_code' => $order_info['shipping_postcode'],
						'state' => $order_info['shipping_zone'],
						'country' => $order_info['shipping_iso_code_2'],
					)
				);
			}

			$charge_data['metadata'] = array();
			$charge_data['metadata']['order_id'] = $pd['order_id'];
			if ($this->customer->getId() > 0) {
				$charge_data['metadata']['customer_id'] = (int)$this->customer->getId();
			}

			ADebug::variable('Processing stripe payment request: ', $charge_data);
			$response = Stripe_Charge::create( $charge_data );

		} catch(Stripe_CardError $e) {
			// card errors (declined, wrong CVC, ...) - shown to the customer
			$body = $e->getJsonBody();
			$response['error'] = $body['error']['message'];
			$response['code'] = $body['error']['code'];
			return $response;
		} catch (Stripe_InvalidRequestError $e) {
			// Invalid parameters were supplied to Stripe's API
			$body = $e->getJsonBody();
			$msg = new AMessage();
			$msg->saveError(
				'Stripe payment failed with invalid parameters!',
				'Stripe payment failed. ' . $body['error']['message']
			);
			$response['error'] = $this->language->get('error_system');
			return $response;
		} catch (Stripe_AuthenticationError $e) {
			// Authentication with Stripe's API failed
			$body = $e->getJsonBody();
			$msg = new AMessage();
			$msg->saveError(
				'Stripe payment failed to authenticate!',
				'Stripe payment failed to authenticate to the server. ' . $body['error']['message']
			);
			$response['error'] = $this->language->get('error_system');
			return $response;
		} catch (Stripe_ApiConnectionError $e) {
			// Network communication with Stripe failed
			$body = $e->getJsonBody();
			$msg = new AMessage();
			$msg->saveError(
				'Stripe payment connection has failed!',
				'Stripe payment failed connecting to the server. ' . $body['error']['message']
			);
			$response['error'] = $this->language->get('error_system');
			return $response;
		} catch (Stripe_Error $e) {
			// Display a very generic error to the user, and maybe send
			$body = $e->getJsonBody();
			$msg = new AMessage();
			$msg->saveError(
				'Stripe payment has failed!',
				'Stripe processing failed. ' .$body['error']['message']
			);
			$response['error'] = $this->language->get('error_system');
			return $response;
		} catch (Exception $e) {
			// Something else happened, completely unrelated to Stripe
			$msg = new AMessage();
			$msg->saveError(
				'Unexpected error in stripe payment!',
				'Stripe processing failed. ' . $e->getMessage() . "(".$e->getCode().")"
			);
			$response['error'] = $this->language->get('error_system');
			//log in AException
			$ae = new AException($e->getCode(), $e->getMessage(), $e->getFile(), $e->getLine());
			ac_exception_handler($ae);
			return $response;
		}

		//we still have no result. something unexpected happend
		if (empty($response)) {
			$response['error'] = $this->language->get('error_system');
			return $response;
		}

		ADebug::variable('Processing stripe payment response: ', $response);

		//Do we have an error? exit with no records
		if ($response['failure_message'] || $response['failure_code']) {
			$response['error'] = $response['failure_message'];
			$response['code'] = $response['failure_code'];
			return $response;
		}

		// Bug fix: $message was appended to (.=) without being initialized.
		$message = 'Order id: ' . (string)$pd['order_id'] . "\n";
		$message .= 'Charge id: ' . (string)$response['id'] . "\n";
		$message .= 'Transaction Timestamp: ' . (string)date('m/d/Y H:i:s', $response['created']);

		if ($response['paid']) {
			//finalize order only if payment is a success
			$this->model_checkout_order->addHistory(
				$pd['order_id'],
				$this->config->get('config_order_status_id'),
				$message
			);
			if ($this->config->get('default_stripe_settlement') == 'auto') {
				//auto complete the order in sattled mode
				$this->model_checkout_order->confirm(
					$pd['order_id'],
					$this->config->get('default_stripe_status_success_settled')
				);
			} else {
				//complete the order in unsattled mode
				$this->model_checkout_order->confirm(
					$pd['order_id'],
					$this->config->get('default_stripe_status_success_unsettled')
				);
			}
		} else {
			// Some other error, assume payment declined
			$this->model_checkout_order->addHistory(
				$pd['order_id'],
				$this->config->get('default_stripe_status_decline'),
				$message
			);
			$response['error'] = "Payment has failed! ".$response['failure_message'];
			$response['code'] = $response['failure_code'];
		}

		return $response;
	}

	//tested, but not used for now//
	/**
	 * Creates a Stripe card token for the given card details.
	 *
	 * @param array  $cc                 cc_number / cc_expire_month / cc_expire_year /
	 *                                   cc_cvv2 / cc_owner
	 * @param string $customer_stripe_id Stripe customer id (required)
	 * @return string|null token id, or null on failure / missing input
	 */
	public function createToken($cc, $customer_stripe_id) {
		if (!has_value($customer_stripe_id) || !has_value($cc)) {
			return null;
		}

		$card_data = array(
			'number' => $cc['cc_number'],
			'exp_month' => $cc['cc_expire_month'],
			'exp_year' => $cc['cc_expire_year'],
			'cvc' => $cc['cc_cvv2'],
			'name' => $cc['cc_owner'],
		);

		try {
			require_once(DIR_EXT . 'default_stripe/core/stripe_modules.php');
			grantStripeAccess($this->config);
			$token = Stripe_Token::create(array(
				"card" => $card_data,
			));
			return $token['id'];
		} catch (Exception $e) {
			//log in AException
			$ae = new AException($e->getCode(), $e->getMessage(), $e->getFile(), $e->getLine());
			ac_exception_handler($ae);
			return null;
		}
	}
}
<?xml version="1.0" encoding="ascii"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<title>gui</title>
<link rel="stylesheet" href="epydoc.css" type="text/css" />
<script type="text/javascript" src="epydoc.js"></script>
</head>
<body bgcolor="white" text="black" link="blue" vlink="#204080"
alink="#204080">
<h1 class="toc">Module gui</h1>
<hr />
<h2 class="toc">Classes</h2>
<a target="mainFrame" href="MiGBox.gui.ui.AppUi-class.html"
>AppUi</a><br /> <h2 class="toc">Variables</h2>
<div class="private">
<a target="mainFrame" href="MiGBox.gui-module.html#__package__"
>__package__</a><br /> </div>
<hr />
<span class="options">[<a href="javascript:void(0);" class="privatelink"
onclick="toggle_private();">hide private</a>]</span>
<script type="text/javascript">
<!--
// Private objects are initially displayed (because if
// javascript is turned off then we want them to be
// visible); but by default, we want to hide them. So hide
// them unless we have a cookie that says to show them.
checkCookie();
// -->
</script>
</body>
</html>
| bertl4398/MiGBox | doc/toc-MiGBox.gui-module.html | HTML | gpl-2.0 | 1,236 |
/***************************************************************************
qgsmeshmemorydataprovider.cpp
-----------------------------
begin : April 2018
copyright : (C) 2018 by Peter Petrik
email : zilolv at gmail dot com
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
///@cond PRIVATE
#include "qgsmeshmemorydataprovider.h"
#include "qgsmeshdataprovidertemporalcapabilities.h"
#include "qgsmeshlayerutils.h"
#include "qgstriangularmesh.h"
#include <cstring>
#define TEXT_PROVIDER_KEY QStringLiteral( "mesh_memory" )
#define TEXT_PROVIDER_DESCRIPTION QStringLiteral( "Mesh memory provider" )
// NOTE(review): mIsValid is computed in the constructor but never consulted
// here - this always reports the provider as valid even when the uri failed
// to parse.  Confirm whether this should return mIsValid instead.
bool QgsMeshMemoryDataProvider::isValid() const
{
  return true;
}

// Provider key used to register/look up this provider.
QString QgsMeshMemoryDataProvider::name() const
{
  return TEXT_PROVIDER_KEY;
}

// Human-readable provider description.
QString QgsMeshMemoryDataProvider::description() const
{
  return TEXT_PROVIDER_DESCRIPTION;
}

// Memory meshes carry no CRS of their own; an invalid CRS is returned.
QgsCoordinateReferenceSystem QgsMeshMemoryDataProvider::crs() const
{
  return QgsCoordinateReferenceSystem();
}
// Constructs the provider by parsing the whole mesh definition that is
// embedded directly in the uri (vertices section, "---", faces/edges
// section).  The parse result is stored in mIsValid.
QgsMeshMemoryDataProvider::QgsMeshMemoryDataProvider( const QString &uri, const ProviderOptions &options )
  : QgsMeshDataProvider( uri, options )
{
  QString data( uri );
  // see QgsMeshLayer::setDataProvider how mDataSource is created for memory layers
  // (a "&uid=..." suffix may be appended to make the source unique; it is
  // not part of the mesh definition and is stripped before parsing)
  if ( uri.contains( "&uid=" ) )
  {
    data = uri.split( "&uid=" )[0];
  }
  mIsValid = splitMeshSections( data );
  // Dataset times in memory providers are expressed in hours.
  temporalCapabilities()->setTemporalUnit( QgsUnitTypes::TemporalHours );
}
// Static variant of name(): key under which the provider registers.
QString QgsMeshMemoryDataProvider::providerKey()
{
  return TEXT_PROVIDER_KEY;
}

// Static variant of description().
QString QgsMeshMemoryDataProvider::providerDescription()
{
  return TEXT_PROVIDER_DESCRIPTION;
}

// Factory entry point used by the provider registry; caller owns the result.
QgsMeshMemoryDataProvider *QgsMeshMemoryDataProvider::createProvider( const QString &uri, const ProviderOptions &options )
{
  return new QgsMeshMemoryDataProvider( uri, options );
}
// Splits the in-memory mesh definition into its two "---"-separated
// sections (vertices, then faces/edges) and parses each in turn.
bool QgsMeshMemoryDataProvider::splitMeshSections( const QString &uri )
{
  const QStringList sections = uri.split( QStringLiteral( "---" ), QString::SkipEmptyParts );
  if ( sections.size() != 2 )
  {
    setError( QgsError( tr( "Invalid mesh definition, does not contain 2 sections" ),
                        QStringLiteral( "Mesh Memory Provider" ) ) );
    return false;
  }

  // Short-circuit: faces/edges are only parsed when the vertex section
  // was read successfully.
  return addMeshVertices( sections[0] ) && addMeshFacesOrEdges( sections[1] );
}
bool QgsMeshMemoryDataProvider::addMeshVertices( const QString &def )
{
QVector<QgsMeshVertex> vertices;
const QStringList verticesCoords = def.split( '\n', QString::SkipEmptyParts );
for ( int i = 0; i < verticesCoords.size(); ++i )
{
const QStringList coords = verticesCoords[i].split( ',', QString::SkipEmptyParts );
if ( coords.size() != 2 )
{
setError( QgsError( tr( "Invalid mesh definition, vertex definition does not contain x, y" ),
QStringLiteral( "Mesh Memory Provider" ) ) );
return false;
}
double x = coords.at( 0 ).toDouble();
double y = coords.at( 1 ).toDouble();
QgsMeshVertex vertex( x, y );
vertices.push_back( vertex );
}
mVertices = vertices;
return true;
}
// Parses the second section of a memory mesh uri: one element per line,
// vertex indices separated by commas.  Exactly two indices form an edge,
// three or more form a face.  A mesh may contain edges or faces, not both.
bool QgsMeshMemoryDataProvider::addMeshFacesOrEdges( const QString &def )
{
  QVector<QgsMeshFace> faces;
  QVector<QgsMeshEdge> edges;

  const QStringList elements = def.split( '\n', QString::SkipEmptyParts );
  for ( int i = 0; i < elements.size(); ++i )
  {
    const QStringList vertices = elements[i].split( ',', QString::SkipEmptyParts );
    if ( vertices.size() < 2 )
    {
      setError( QgsError( tr( "Invalid mesh definition, edge must contain at least 2 vertices" ),
                          QStringLiteral( "Mesh Memory Provider" ) ) );
      return false;
    }
    else if ( vertices.size() == 2 )
    {
      // Two indices: an edge.  checkVertexId() validates that each index
      // refers to a vertex parsed earlier by addMeshVertices().
      QgsMeshEdge edge;
      edge.first = vertices[0].toInt();
      edge.second = vertices[1].toInt();
      if ( !checkVertexId( edge.first ) ) return false;
      if ( !checkVertexId( edge.second ) ) return false;
      edges.push_back( edge );
    }
    else
    {
      // Three or more indices: a polygonal face.
      QgsMeshFace face;
      for ( int j = 0; j < vertices.size(); ++j )
      {
        int vertex_id = vertices[j].toInt();
        if ( !checkVertexId( vertex_id ) ) return false;
        face.push_back( vertex_id );
      }
      faces.push_back( face );
    }
  }

  // Members are assigned before the edges-and-faces consistency check;
  // the check below then rejects mixed meshes.
  mFaces = faces;
  mEdges = edges;

  if ( mFaces.size() > 0 && mEdges.size() > 0 )
  {
    setError( QgsError( tr( "Invalid mesh definition, unable to read mesh with both edges and faces" ),
                        QStringLiteral( "Mesh Memory Provider" ) ) );
    return false;
  }
  return true;
}
// Splits a dataset-group definition into its "---"-separated sections:
// [0] type line, [1] metadata, [2..] one section per timestep dataset.
// Parsing stops at the first failing section; returns overall success.
bool QgsMeshMemoryDataProvider::splitDatasetSections( const QString &uri, QgsMeshMemoryDatasetGroup &datasetGroup )
{
  const QStringList sections = uri.split( QStringLiteral( "---" ), QString::SkipEmptyParts );

  bool success = sections.size() > 2;
  if ( !success )
  {
    setError( QgsError( tr( "Invalid dataset definition, does not contain 3+ sections" ),
                        QStringLiteral( "Mesh Memory Provider" ) ) );
  }

  if ( success )
    success = setDatasetGroupType( sections[0], datasetGroup );

  if ( success )
    success = addDatasetGroupMetadata( sections[1], datasetGroup );

  for ( int i = 2; i < sections.size(); ++i )
  {
    if ( !success )
      break;

    std::shared_ptr<QgsMeshMemoryDataset> dataset = std::make_shared<QgsMeshMemoryDataset>();

    success = addDatasetValues( sections[i], dataset, datasetGroup.isScalar() );
    // Only datasets that parse AND pass validity checks are added.
    if ( success )
      success = checkDatasetValidity( dataset, datasetGroup.dataType() );
    if ( success )
      datasetGroup.memoryDatasets.push_back( dataset );
  }

  return success;
}
bool QgsMeshMemoryDataProvider::setDatasetGroupType( const QString &def, QgsMeshMemoryDatasetGroup &datasetGroup )
{
const QStringList types = def.split( ' ', QString::SkipEmptyParts );
if ( types.size() != 3 )
{
setError( QgsError( tr( "Invalid type definition, must be Vertex/Edge/Face Vector/Scalar Name" ),
QStringLiteral( "Mesh Memory Provider" ) ) );
return false;
}
QgsMeshDatasetGroupMetadata::DataType type;
if ( 0 == QString::compare( types[0].trimmed(), QStringLiteral( "vertex" ), Qt::CaseInsensitive ) )
type = QgsMeshDatasetGroupMetadata::DataOnVertices;
else if ( 0 == QString::compare( types[0].trimmed(), QStringLiteral( "edge" ), Qt::CaseInsensitive ) )
type = QgsMeshDatasetGroupMetadata::DataOnEdges;
else
type = QgsMeshDatasetGroupMetadata::DataOnFaces;
datasetGroup.setDataType( type );
datasetGroup.setIsScalar( 0 == QString::compare( types[1].trimmed(), QStringLiteral( "scalar" ), Qt::CaseInsensitive ) );
datasetGroup.setName( types[2].trimmed() );
return true;
}
// Parses the metadata section: one "key: value" pair per line, stored as extra
// metadata on \a datasetGroup. Returns false with a provider error on the
// first malformed line.
// NOTE(review): splitting on ':' with SkipEmptyParts rejects values that
// themselves contain a colon (e.g. URLs) — confirm whether such values are
// meant to be supported.
bool QgsMeshMemoryDataProvider::addDatasetGroupMetadata( const QString &def, QgsMeshMemoryDatasetGroup &datasetGroup )
{
  const QStringList metadataLines = def.split( '\n', QString::SkipEmptyParts );
  for ( int i = 0; i < metadataLines.size(); ++i )
  {
    const QStringList keyVal = metadataLines[i].split( ':', QString::SkipEmptyParts );
    if ( keyVal.size() != 2 )
    {
      setError( QgsError( tr( "Invalid dataset definition, dataset metadata does not contain key: value" ),
                          QStringLiteral( "Mesh Memory Provider" ) ) );
      return false;
    }
    datasetGroup.addExtraMetadata( keyVal.at( 0 ).trimmed(), keyVal.at( 1 ).trimmed() );
  }
  return true;
}
// Parses one dataset section into \a dataset: the first line is the dataset
// time, each following line one value — "x" for scalar groups, "x, y" for
// vector groups (extra components are ignored). Returns false with a provider
// error on malformed input.
// Bug fix: the size check requires at least two lines (time plus one value),
// but the error message claimed one line was enough; the message now matches
// the check.
bool QgsMeshMemoryDataProvider::addDatasetValues( const QString &def, std::shared_ptr<QgsMeshMemoryDataset> &dataset, bool isScalar )
{
  const QStringList valuesLines = def.split( '\n', QString::SkipEmptyParts );
  // first line is time, so a valid section needs at least one more value line
  if ( valuesLines.size() < 2 )
  {
    setError( QgsError( tr( "Invalid dataset definition, must contain at least 2 lines (time, value)" ),
                        QStringLiteral( "Mesh Memory Provider" ) ) );
    return false;
  }
  dataset->time = valuesLines[0].toDouble();
  for ( int i = 1; i < valuesLines.size(); ++i )
  {
    const QStringList values = valuesLines[i].split( ',', QString::SkipEmptyParts );
    QgsMeshDatasetValue point;
    if ( isScalar )
    {
      if ( values.size() != 1 )
      {
        setError( QgsError( tr( "Invalid dataset definition, dataset scalar values must be x" ),
                            QStringLiteral( "Mesh Memory Provider" ) ) );
        return false;
      }
      point.setX( values[0].toDouble() );
    }
    else
    {
      if ( values.size() < 2 )
      {
        setError( QgsError( tr( "Invalid dataset definition, dataset vector values must be x, y" ),
                            QStringLiteral( "Mesh Memory Provider" ) ) );
        return false;
      }
      point.setX( values[0].toDouble() );
      point.setY( values[1].toDouble() );
    }
    dataset->values.push_back( point );
  }
  return true;
}
// Verifies that \a dataset carries exactly one value per vertex, face or edge,
// depending on the group's \a dataType. Marks the dataset valid/invalid and
// records a provider error on mismatch.
// Bug fix: the error strings used "{}" placeholders, which QString::arg()
// never substitutes, and passed both numbers to one arg(int, int) call where
// the second int is interpreted as fieldWidth. Now uses %1/%2 with chained
// arg() calls so both counts appear in the message.
bool QgsMeshMemoryDataProvider::checkDatasetValidity( std::shared_ptr<QgsMeshMemoryDataset> &dataset, QgsMeshDatasetGroupMetadata::DataType dataType )
{
  bool valid = true;
  if ( dataType == QgsMeshDatasetGroupMetadata::DataOnVertices )
  {
    if ( dataset->values.count() != vertexCount() )
    {
      valid = false;
      setError( QgsError( tr( "Dataset defined on vertices has %1 values, but mesh %2" ).arg( dataset->values.count() ).arg( vertexCount() ),
                          QStringLiteral( "Mesh Memory Provider" ) ) );
    }
  }
  else if ( dataType == QgsMeshDatasetGroupMetadata::DataOnFaces )
  {
    // on faces
    if ( dataset->values.count() != faceCount() )
    {
      valid = false;
      setError( QgsError( tr( "Dataset defined on faces has %1 values, but mesh %2" ).arg( dataset->values.count() ).arg( faceCount() ),
                          QStringLiteral( "Mesh Memory Provider" ) ) );
    }
  }
  else if ( dataType == QgsMeshDatasetGroupMetadata::DataOnEdges )
  {
    // on edges
    if ( dataset->values.count() != edgeCount() )
    {
      valid = false;
      setError( QgsError( tr( "Dataset defined on edges has %1 values, but mesh %2" ).arg( dataset->values.count() ).arg( edgeCount() ),
                          QStringLiteral( "Mesh Memory Provider" ) ) );
    }
  }
  dataset->valid = valid;
  return valid;
}
// Validates a vertex index referenced from an edge or face definition:
// it must be non-negative and refer to an already-parsed vertex.
// NOTE(review): the first message says "positive" although index 0 is
// accepted; "non-negative" would be more accurate — confirm before changing
// the translated string.
bool QgsMeshMemoryDataProvider::checkVertexId( int vertexIndex )
{
  if ( vertexIndex < 0 )
  {
    setError( QgsError( tr( "Invalid mesh definition, vertex index must be positive value" ),
                        QStringLiteral( "Mesh Memory Provider" ) ) );
    return false;
  }
  if ( mVertices.size() <= vertexIndex )
  {
    setError( QgsError( tr( "Invalid mesh definition, missing vertex id defined in face" ),
                        QStringLiteral( "Mesh Memory Provider" ) ) );
    return false;
  }
  return true;
}
// Registers the dataset times of \a group under \a groupIndex in the
// provider's temporal capabilities. No-op when the provider reports no
// temporal capabilities or the group holds a single (non-temporal) dataset.
void QgsMeshMemoryDataProvider::addGroupToTemporalCapabilities( int groupIndex, const QgsMeshMemoryDatasetGroup &group )
{
  QgsMeshDataProviderTemporalCapabilities *tempCap = temporalCapabilities();
  if ( !tempCap )
    return;
  if ( group.datasetCount() > 1 ) //non temporal dataset groups (count=1) have no time in the capabilities
  {
    for ( int i = 0; i < group.memoryDatasets.count(); ++i )
      if ( group.memoryDatasets.at( i ) )
        tempCap->addDatasetTime( groupIndex, group.memoryDatasets.at( i )->time );
  }
}
// Number of vertices in the in-memory mesh.
int QgsMeshMemoryDataProvider::vertexCount() const
{
  return mVertices.size();
}
// Number of faces in the in-memory mesh.
int QgsMeshMemoryDataProvider::faceCount() const
{
  return mFaces.size();
}
// Number of edges in the in-memory mesh.
int QgsMeshMemoryDataProvider::edgeCount() const
{
  return mEdges.size();
}
// Copies the provider's vertices, faces and edges into \a mesh.
// A null destination is silently ignored.
void QgsMeshMemoryDataProvider::populateMesh( QgsMesh *mesh ) const
{
  if ( !mesh )
    return;

  mesh->vertices = mVertices;
  mesh->faces = mFaces;
  mesh->edges = mEdges;
}
// Bounding rectangle of all mesh vertices (recomputed on each call).
QgsRectangle QgsMeshMemoryDataProvider::extent() const
{
  return calculateExtent( );
}
// Parses \a uri as an extra dataset-group definition, registers the group on
// the provider and emits datasetGroupsAdded()/dataChanged() on success.
// Returns false when the mesh itself is invalid or parsing fails.
bool QgsMeshMemoryDataProvider::addDataset( const QString &uri )
{
  QgsMeshMemoryDatasetGroup group;
  bool valid = false;
  if ( mIsValid )
  {
    valid = splitDatasetSections( uri, group );
  }
  else
  {
    setError( QgsError( tr( "Unable to add dataset group to invalid mesh" ),
                        QStringLiteral( "Mesh Memory Provider" ) ) );
  }
  // NOTE(review): the group is appended and registered with the temporal
  // capabilities even when parsing failed, leaving a possibly empty group in
  // mDatasetGroups — confirm whether this is intentional.
  group.calculateStatistic();
  mDatasetGroups.push_back( group );
  addGroupToTemporalCapabilities( mDatasetGroups.count() - 1, group );
  if ( valid )
  {
    if ( !mExtraDatasetUris.contains( uri ) )
      mExtraDatasetUris << uri;
    // NOTE(review): temporalCapabilities() is null-checked inside
    // addGroupToTemporalCapabilities() but dereferenced unchecked here.
    temporalCapabilities()->setHasTemporalCapabilities( true );
    emit datasetGroupsAdded( 1 );
    emit dataChanged();
  }
  return valid;
}
// URIs of dataset groups added through addDataset(), in insertion order.
QStringList QgsMeshMemoryDataProvider::extraDatasets() const
{
  return mExtraDatasetUris;
}
// Number of dataset groups held by the provider.
int QgsMeshMemoryDataProvider::datasetGroupCount() const
{
  return mDatasetGroups.count();
}
// Number of datasets in group \a groupIndex; 0 for out-of-range indices.
int QgsMeshMemoryDataProvider::datasetCount( int groupIndex ) const
{
  if ( ( groupIndex >= 0 ) && ( groupIndex < datasetGroupCount() ) )
    return mDatasetGroups[groupIndex].memoryDatasets.count();
  else
    return 0;
}
// Metadata of group \a groupIndex; a default-constructed metadata object is
// returned for out-of-range indices.
QgsMeshDatasetGroupMetadata QgsMeshMemoryDataProvider::datasetGroupMetadata( int groupIndex ) const
{
  const bool indexOk = groupIndex >= 0 && groupIndex < datasetGroupCount();
  if ( !indexOk )
    return QgsMeshDatasetGroupMetadata();

  return mDatasetGroups[groupIndex].groupMetadata();
}
// Per-dataset metadata (time, validity, min/max) for \a index; a
// default-constructed metadata object is returned for out-of-range group or
// dataset indices.
QgsMeshDatasetMetadata QgsMeshMemoryDataProvider::datasetMetadata( QgsMeshDatasetIndex index ) const
{
  if ( ( index.group() >= 0 ) && ( index.group() < datasetGroupCount() ) &&
       ( index.dataset() >= 0 ) && ( index.dataset() < datasetCount( index.group() ) )
     )
  {
    const QgsMeshMemoryDatasetGroup &grp = mDatasetGroups.at( index.group() );
    QgsMeshDatasetMetadata metadata(
      grp.memoryDatasets[index.dataset()]->time,
      grp.memoryDatasets[index.dataset()]->valid,
      grp.memoryDatasets[index.dataset()]->minimum,
      grp.memoryDatasets[index.dataset()]->maximum,
      0
    );
    return metadata;
  }
  else
  {
    return QgsMeshDatasetMetadata();
  }
}
// Single value \a valueIndex of the dataset at \a index; a default-constructed
// value is returned when any of the three indices is out of range.
QgsMeshDatasetValue QgsMeshMemoryDataProvider::datasetValue( QgsMeshDatasetIndex index, int valueIndex ) const
{
  if ( ( index.group() >= 0 ) && ( index.group() < datasetGroupCount() ) &&
       ( index.dataset() >= 0 ) && ( index.dataset() < datasetCount( index.group() ) ) &&
       ( valueIndex >= 0 ) && ( valueIndex < mDatasetGroups[index.group()].memoryDatasets[index.dataset()]->values.count() ) )
  {
    return mDatasetGroups[index.group()].memoryDatasets[index.dataset()]->values[valueIndex];
  }
  else
  {
    return QgsMeshDatasetValue();
  }
}
// Returns a block of \a count values of the dataset at \a index starting at
// \a valueIndex; an invalid (default) block is returned for out-of-range
// group or dataset indices.
// Performance fix: the dataset group is now taken by const reference — the
// previous code copied the whole group (all datasets included) just to read
// two members.
QgsMeshDataBlock QgsMeshMemoryDataProvider::datasetValues( QgsMeshDatasetIndex index, int valueIndex, int count ) const
{
  if ( index.group() < 0 || index.group() >= datasetGroupCount() )
    return QgsMeshDataBlock();

  const QgsMeshMemoryDatasetGroup &group = mDatasetGroups[index.group()];
  if ( index.dataset() < 0 || index.dataset() >= group.memoryDatasets.size() )
    return QgsMeshDataBlock();

  return group.memoryDatasets[index.dataset()]->datasetValues( group.isScalar(), valueIndex, count );
}
// Always returns an invalid block: 3D stacked meshes are not supported by the
// memory provider.
QgsMesh3dDataBlock QgsMeshMemoryDataProvider::dataset3dValues( QgsMeshDatasetIndex, int, int ) const
{
  // 3d stacked meshes are not supported by memory provider
  return QgsMesh3dDataBlock();
}
// Whether face \a faceIndex is active for the dataset at \a index. An empty
// "active" list means all faces are active.
// NOTE(review): unlike the sibling accessors, neither the dataset index nor
// faceIndex is range-checked here — out-of-range input would misbehave.
bool QgsMeshMemoryDataProvider::isFaceActive( QgsMeshDatasetIndex index, int faceIndex ) const
{
  if ( mDatasetGroups[index.group()].memoryDatasets[index.dataset()]->active.isEmpty() )
    return true;
  else
    return mDatasetGroups[index.group()].memoryDatasets[index.dataset()]->active[faceIndex];
}
// Returns the active flags of \a count faces starting at \a faceIndex for the
// dataset at \a index; an invalid (default) block is returned for
// out-of-range group or dataset indices.
// Performance fix: the dataset group is now taken by const reference instead
// of being copied by value on every call.
QgsMeshDataBlock QgsMeshMemoryDataProvider::areFacesActive( QgsMeshDatasetIndex index, int faceIndex, int count ) const
{
  if ( index.group() < 0 || index.group() >= datasetGroupCount() )
    return QgsMeshDataBlock();

  const QgsMeshMemoryDatasetGroup &group = mDatasetGroups[index.group()];
  if ( index.dataset() < 0 || index.dataset() >= group.memoryDatasets.size() )
    return QgsMeshDataBlock();

  return group.memoryDatasets[index.dataset()]->areFacesActive( faceIndex, count );
}
// Persisting dataset groups to disk is not supported by the memory provider;
// all arguments are ignored.
// NOTE(review): returning true for an unimplemented operation may mislead
// callers that check the result — confirm the intended contract.
bool QgsMeshMemoryDataProvider::persistDatasetGroup( const QString &outputFilePath,
    const QString &outputDriver,
    const QgsMeshDatasetGroupMetadata &meta,
    const QVector<QgsMeshDataBlock> &datasetValues,
    const QVector<QgsMeshDataBlock> &datasetActive,
    const QVector<double> &times )
{
  Q_UNUSED( outputFilePath )
  Q_UNUSED( outputDriver )
  Q_UNUSED( meta )
  Q_UNUSED( datasetValues )
  Q_UNUSED( datasetActive )
  Q_UNUSED( times )
  return true; // not implemented/supported
}
// Source-based overload: also unsupported by the memory provider; all
// arguments are ignored (see the note on the sibling overload's return value).
bool QgsMeshMemoryDataProvider::persistDatasetGroup( const QString &outputFilePath,
    const QString &outputDriver,
    QgsMeshDatasetSourceInterface *source,
    int datasetGroupIndex )
{
  Q_UNUSED( outputFilePath )
  Q_UNUSED( outputDriver )
  Q_UNUSED( source )
  Q_UNUSED( datasetGroupIndex )
  return true; // not implemented/supported
}
// Computes the axis-aligned bounding box of all mesh vertices. Starts from a
// "minimal" (empty) rectangle so a mesh without vertices yields an empty
// extent.
QgsRectangle QgsMeshMemoryDataProvider::calculateExtent() const
{
  QgsRectangle bbox;
  bbox.setMinimal();
  for ( const QgsMeshVertex &vertex : mVertices )
  {
    const double x = vertex.x();
    const double y = vertex.y();
    bbox.setXMinimum( std::min( bbox.xMinimum(), x ) );
    bbox.setXMaximum( std::max( bbox.xMaximum(), x ) );
    bbox.setYMinimum( std::min( bbox.yMinimum(), y ) );
    bbox.setYMaximum( std::max( bbox.yMaximum(), y ) );
  }
  return bbox;
}
///@endcond
| havatv/QGIS | src/core/providers/meshmemory/qgsmeshmemorydataprovider.cpp | C++ | gpl-2.0 | 18,123 |
<?php
/*
* This file is part of PHPExifTool.
*
* (c) 2012 Romain Neutron <imprec@gmail.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
namespace PHPExiftool\Driver\Tag\DICOM;
use PHPExiftool\Driver\AbstractTag;
/**
 * Generated descriptor for the DICOM element 300A,02EA
 * ("Ion Range Compensator Sequence"), grouped under DICOM/Image.
 * The tag is read-only ($Writable = false).
 */
class IonRangeCompensatorSequence extends AbstractTag
{
    protected $Id = '300A,02EA';
    protected $Name = 'IonRangeCompensatorSequence';
    protected $FullName = 'DICOM::Main';
    protected $GroupName = 'DICOM';
    protected $g0 = 'DICOM';
    protected $g1 = 'DICOM';
    protected $g2 = 'Image';
    protected $Type = '?';
    protected $Writable = false;
    protected $Description = 'Ion Range Compensator Sequence';
}
| Droces/casabio | vendor/phpexiftool/phpexiftool/lib/PHPExiftool/Driver/Tag/DICOM/IonRangeCompensatorSequence.php | PHP | gpl-2.0 | 751 |
#!/usr/bin/env python
##################################################
## DEPENDENCIES
import sys
import os
import os.path
try:
import builtins as builtin
except ImportError:
import __builtin__ as builtin
from os.path import getmtime, exists
import time
import types
from Cheetah.Version import MinCompatibleVersion as RequiredCheetahVersion
from Cheetah.Version import MinCompatibleVersionTuple as RequiredCheetahVersionTuple
from Cheetah.Template import Template
from Cheetah.DummyTransaction import *
from Cheetah.NameMapper import NotFound, valueForName, valueFromSearchList, valueFromFrameOrSearchList
from Cheetah.CacheRegion import CacheRegion
import Cheetah.Filters as Filters
import Cheetah.ErrorCatchers as ErrorCatchers
##################################################
## MODULE CONSTANTS
# Short aliases used by the generated respond() body below.
VFFSL=valueFromFrameOrSearchList
VFSL=valueFromSearchList
VFN=valueForName
currentTime=time.time
# Generation metadata embedded by the Cheetah compiler.
__CHEETAH_version__ = '2.4.4'
__CHEETAH_versionTuple__ = (2, 4, 4, 'development', 0)
__CHEETAH_genTime__ = 1406885498.501688
__CHEETAH_genTimestamp__ = 'Fri Aug 1 18:31:38 2014'
__CHEETAH_src__ = '/home/wslee2/models/5-wo/force1plus/openpli3.0/build-force1plus/tmp/work/mips32el-oe-linux/enigma2-plugin-extensions-openwebif-1+git5+3c0c4fbdb28d7153bf2140459b553b3d5cdd4149-r0/git/plugin/controllers/views/web/gettags.tmpl'
__CHEETAH_srcLastModified__ = 'Fri Aug 1 18:30:05 2014'
__CHEETAH_docstring__ = 'Autogenerated by Cheetah: The Python-Powered Template Engine'
# Refuse to run against a Cheetah runtime older than the compiler used.
if __CHEETAH_versionTuple__ < RequiredCheetahVersionTuple:
    raise AssertionError(
      'This template was compiled with Cheetah version'
      ' %s. Templates compiled before version %s must be recompiled.'%(
         __CHEETAH_version__, RequiredCheetahVersion))
##################################################
## CLASSES
class gettags(Template):
    """Cheetah-generated template rendering the <e2tags> XML list.

    Fix: ``dict.has_key()`` (removed in Python 3, while the module already
    carries a py2/py3 ``builtins`` import fallback) is replaced with the
    ``in`` operator, using the ``filterName`` variable consistently.
    Behavior under Python 2 is unchanged.
    """

    ##################################################
    ## CHEETAH GENERATED METHODS

    def __init__(self, *args, **KWs):
        super(gettags, self).__init__(*args, **KWs)
        if not self._CHEETAH__instanceInitialized:
            cheetahKWArgs = {}
            allowedKWs = 'searchList namespaces filter filtersLib errorCatcher'.split()
            for k,v in KWs.items():
                if k in allowedKWs: cheetahKWArgs[k] = v
            self._initCheetahInstance(**cheetahKWArgs)

    def respond(self, trans=None):
        ## CHEETAH: main method generated for this template
        if (not trans and not self._CHEETAH__isBuffering and not callable(self.transaction)):
            trans = self.transaction # is None unless self.awake() was called
        if not trans:
            trans = DummyTransaction()
            _dummyTrans = True
        else: _dummyTrans = False
        write = trans.response().write
        SL = self._CHEETAH__searchList
        _filter = self._CHEETAH__currentFilter
        ########################################
        ## START - generated method body
        _orig_filter_91099948 = _filter
        filterName = u'WebSafe'
        # has_key() was removed in Python 3; membership test is equivalent.
        if filterName in self._CHEETAH__filters:
            _filter = self._CHEETAH__currentFilter = self._CHEETAH__filters[filterName]
        else:
            _filter = self._CHEETAH__currentFilter = \
                      self._CHEETAH__filters[filterName] = getattr(self._CHEETAH__filtersLib, filterName)(self).filter
        write(u'''<?xml version="1.0" encoding="UTF-8"?>
<e2tags>
''')
        for tag in VFFSL(SL,"tags",True): # generated from line 4, col 2
            write(u'''\t\t<e2tag>''')
            _v = VFFSL(SL,"tag",True) # u'$tag' on line 5, col 10
            if _v is not None: write(_filter(_v, rawExpr=u'$tag')) # from line 5, col 10.
            write(u'''</e2tag>
''')
        write(u'''</e2tags>
''')
        _filter = self._CHEETAH__currentFilter = _orig_filter_91099948
        ########################################
        ## END - generated method body
        return _dummyTrans and trans.response().getvalue() or ""

    ##################################################
    ## CHEETAH GENERATED ATTRIBUTES

    _CHEETAH__instanceInitialized = False
    _CHEETAH_version = __CHEETAH_version__
    _CHEETAH_versionTuple = __CHEETAH_versionTuple__
    _CHEETAH_genTime = __CHEETAH_genTime__
    _CHEETAH_genTimestamp = __CHEETAH_genTimestamp__
    _CHEETAH_src = __CHEETAH_src__
    _CHEETAH_srcLastModified = __CHEETAH_srcLastModified__
    _mainCheetahMethod_for_gettags= 'respond'
## END CLASS DEFINITION
# Attach Cheetah's runtime plumbing methods to the generated class exactly once.
if not hasattr(gettags, '_initCheetahAttributes'):
    templateAPIClass = getattr(gettags, '_CHEETAH_templateClass', Template)
    templateAPIClass._addCheetahPlumbingCodeToClass(gettags)
# CHEETAH was developed by Tavis Rudd and Mike Orr
# with code, advice and input from many other volunteers.
# For more information visit http://www.CheetahTemplate.org/
##################################################
## if run from command line:
# Allow rendering the template directly from the command line.
if __name__ == '__main__':
    from Cheetah.TemplateCmdLineIface import CmdLineIface
    CmdLineIface(templateObj=gettags()).run()
| MOA-2011/enigma2-plugin-extensions-openwebif | plugin/controllers/views/web/gettags.py | Python | gpl-2.0 | 5,093 |
/**
* ${project-name} - a GNU/Linux console-based file manager
*
* Implementation of different actions related to iface
*
* Copyright 2008 Sergey I. Sharybin <g.ulairi@gmail.com>
* Copyright 2008 Alex A. Smirnov <sceptic13@gmail.com>
*
* This program can be distributed under the terms of the GNU GPL.
* See the file COPYING.
*/
#include "iface.h"
#include "messages.h"
#include "i18n.h"
#include "util.h"
/********
* User's backend
*/
/**
* Prompted exiting from program
*/
/**
 * Ask the user to confirm quitting; terminate the program only when the
 * "Yes" button is chosen.
 */
void
iface_act_exit (void)
{
  int answer;

  answer = message_box (L"fm", _(L"Are you sure you want to quit?"),
                        MB_YESNO | MB_DEFBUTTON_1);

  if (answer != MR_YES)
    return;

  do_exit ();
}
| Nazg-Gul/fm | src/iface-act.c | C | gpl-2.0 | 674 |
package edu.stanford.nlp.hcoref;
import java.io.File;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Locale;
import java.util.Properties;
import java.util.Set;
import edu.stanford.nlp.hcoref.data.Dictionaries.MentionType;
import edu.stanford.nlp.hcoref.sieve.Sieve.ClassifierType;
import edu.stanford.nlp.util.Generics;
import edu.stanford.nlp.util.PropertiesUtils;
public class CorefProperties {
public enum CorefInputType { RAW, CONLL, ACE, MUC };
public enum MentionDetectionType { RULE, HYBRID, DEPENDENCY };
// general
public static final String LANG_PROP = "coref.language";
public static final String SIEVES_PROP = "coref.sieves";
public static final String ALLOW_REPARSING_PROP = "coref.allowReparsing";
public static final String SCORE_PROP = "coref.doScore";
public static final String PARSER_PROP = "coref.useConstituencyTree";
public static final String THREADS_PROP = "coref.threadCount";
public static final String INPUT_TYPE_PROP = "coref.input.type";
public static final String POSTPROCESSING_PROP = "coref.postprocessing";
public static final String MD_TYPE_PROP = "coref.md.type";
public static final String USE_SINGLETON_PREDICTOR_PROP = "coref.useSingletonPredictor";
public static final String SEED_PROP = "coref.seed";
public static final String CONLL_AUTO_PROP = "coref.conll.auto";
public static final String MD_TRAIN_PROP = "coref.mdTrain"; // train MD classifier
public static final String USE_SEMANTICS_PROP = "coref.useSemantics"; // load semantics if true
public static final String CURRENT_SIEVE_FOR_TRAIN_PROP = "coref.currentSieveForTrain";
public static final String STORE_TRAINDATA_PROP = "coref.storeTrainData";
public static final String USE_GOLD_NE_PROP = "coref.useGoldNE";
public static final String USE_GOLD_PARSES_PROP = "coref.useGoldParse";
public static final String USE_GOLD_POS_PROP = "coref.useGoldPOS";
private static final String REMOVE_NESTED = "removeNested";
private static final String ADD_MISSING_ANNOTATIONS = "coref.addMissingAnnotations";
// logging & system check & analysis
public static final String DEBUG_PROP = "coref.debug";
public static final String LOG_PROP = "coref.logFile";
public static final String TIMER_PROP = "coref.checkTime";
public static final String MEMORY_PROP = "coref.checkMemory";
public static final String PRINT_MDLOG_PROP = "coref.print.md.log";
public static final String CALCULATE_IMPORTANCE_PROP = "coref.calculateFeatureImportance";
public static final String DO_ANALYSIS_PROP = "coref.analysis.doAnalysis";
public static final String ANALYSIS_SKIP_MTYPE_PROP = "coref.analysis.skip.mType";
public static final String ANALYSIS_SKIP_ATYPE_PROP = "coref.analysis.skip.aType";
// data & io
public static final String STATES_PROP = "coref.states";
public static final String DEMONYM_PROP = "coref.demonym";
public static final String ANIMATE_PROP = "coref.animate";
public static final String INANIMATE_PROP = "coref.inanimate";
public static final String MALE_PROP = "coref.male";
public static final String NEUTRAL_PROP = "coref.neutral";
public static final String FEMALE_PROP = "coref.female";
public static final String PLURAL_PROP = "coref.plural";
public static final String SINGULAR_PROP = "coref.singular";
public static final String GENDER_NUMBER_PROP = "coref.big.gender.number";
public static final String COUNTRIES_PROP = "coref.countries";
public static final String STATES_PROVINCES_PROP = "coref.states.provinces";
public static final String DICT_LIST_PROP = "coref.dictlist";
public static final String DICT_PMI_PROP = "coref.dictpmi";
public static final String SIGNATURES_PROP = "coref.signatures";
public static final String LOAD_WORD_EMBEDDING_PROP = "coref.loadWordEmbedding";
public static final String WORD2VEC_PROP = "coref.path.word2vec";
public static final String WORD2VEC_SERIALIZED_PROP = "coref.path.word2vecSerialized";
public static final String PATH_SCORER_PROP = "coref.path.scorer.conll";
public static final String PATH_INPUT_PROP = "coref.path.input";
public static final String PATH_OUTPUT_PROP = "coref.path.output";
public static final String PATH_TRAIN_PROP = "coref.path.traindata";
public static final String PATH_EVAL_PROP = "coref.path.evaldata";
public static final String PATH_SERIALIZED_PROP = "coref.path.serialized";
// models
public static final String PATH_SINGLETON_PREDICTOR_PROP = "coref.path.singletonPredictor";
public static final String PATH_MODEL_PROP = "coref.SIEVENAME.model";
public static final String MENTION_DETECTION_MODEL_PROP = "coref.mentionDetectionModel";
// sieve option
public static final String CLASSIFIER_TYPE_PROP = "coref.SIEVENAME.classifierType";
public static final String NUM_TREE_PROP = "coref.SIEVENAME.numTrees";
public static final String NUM_FEATURES_PROP = "coref.SIEVENAME.numFeatures";
public static final String TREE_DEPTH_PROP = "coref.SIEVENAME.treeDepth";
public static final String MAX_SENT_DIST_PROP = "coref.SIEVENAME.maxSentDist";
public static final String MTYPE_PROP = "coref.SIEVENAME.mType";
public static final String ATYPE_PROP = "coref.SIEVENAME.aType";
public static final String DOWNSAMPLE_RATE_PROP = "coref.SIEVENAME.downsamplingRate";
public static final String THRES_FEATURECOUNT_PROP = "coref.SIEVENAME.thresFeatureCount";
public static final String FEATURE_SELECTION_PROP = "coref.SIEVENAME.featureSelection";
public static final String THRES_MERGE_PROP = "coref.SIEVENAME.merge.thres";
public static final String THRES_FEATURE_SELECTION_PROP = "coref.SIEVENAME.pmi.thres";
public static final String DEFAULT_PRONOUN_AGREEMENT_PROP = "coref.defaultPronounAgreement";
// features
public static final String USE_BASIC_FEATURES_PROP = "coref.SIEVENAME.useBasicFeatures";
public static final String COMBINE_OBJECTROLE_PROP = "coref.SIEVENAME.combineObjectRole";
public static final String USE_MD_FEATURES_PROP = "coref.SIEVENAME.useMentionDetectionFeatures";
public static final String USE_DCOREFRULE_FEATURES_PROP = "coref.SIEVENAME.useDcorefRuleFeatures";
public static final String USE_POS_FEATURES_PROP = "coref.SIEVENAME.usePOSFeatures";
public static final String USE_LEXICAL_FEATURES_PROP = "coref.SIEVENAME.useLexicalFeatures";
public static final String USE_WORD_EMBEDDING_FEATURES_PROP = "coref.SIEVENAME.useWordEmbeddingFeatures";
public static final Locale LANGUAGE_DEFAULT = Locale.ENGLISH;
public static final int MONITOR_DIST_CMD_FINISHED_WAIT_MILLIS = 60000;
/** if true, use truecase annotator */
public static final boolean USE_TRUECASE = false;
/** if true, remove appositives, predicate nominatives in post processing */
public static final boolean REMOVE_APPOSITION_PREDICATENOMINATIVES = true;
/** if true, remove singletons in post processing */
public static final boolean REMOVE_SINGLETONS = true;
// current list of dcoref sieves
private static final Set<String> dcorefSieveNames = new HashSet<>(Arrays.asList("MarkRole", "DiscourseMatch",
"ExactStringMatch", "RelaxedExactStringMatch", "PreciseConstructs", "StrictHeadMatch1",
"StrictHeadMatch2", "StrictHeadMatch3", "StrictHeadMatch4", "RelaxedHeadMatch", "PronounMatch", "SpeakerMatch",
"ChineseHeadMatch"));
  /** Whether to run the CoNLL scorer after resolution ({@code coref.doScore}, default false). */
  public static boolean doScore(Properties props) {
    return PropertiesUtils.getBool(props, SCORE_PROP, false);
  }
  /** Whether to log timing diagnostics ({@code coref.checkTime}, default false). */
  public static boolean checkTime(Properties props) {
    return PropertiesUtils.getBool(props, TIMER_PROP, false);
  }
  /** Whether to log memory diagnostics ({@code coref.checkMemory}, default false). */
  public static boolean checkMemory(Properties props) {
    return PropertiesUtils.getBool(props, MEMORY_PROP, false);
  }
  /** Whether to use constituency parses ({@code coref.useConstituencyTree}, default false). */
  public static boolean useConstituencyTree(Properties props) {
    return PropertiesUtils.getBool(props, PARSER_PROP, false);
  }
  /**
   * Input data path for CorefDocMaker: traindata for training, testdata for evaluation
   * ({@code coref.path.input}). Returns null when unset.
   */
  public static String getPathInput(Properties props) {
    return PropertiesUtils.getString(props, PATH_INPUT_PROP, null);
  }
  /** Output path ({@code coref.path.output}); note the default is a developer-specific directory. */
  public static String getPathOutput(Properties props) {
    return PropertiesUtils.getString(props, PATH_OUTPUT_PROP, "/home/heeyoung/log-hcoref/conlloutput/");
  }
  /** Training-data path ({@code coref.path.traindata}); default points at a Stanford cluster path. */
  public static String getPathTrainData(Properties props) {
    return PropertiesUtils.getString(props, PATH_TRAIN_PROP, "/scr/nlp/data/conll-2012/v4/data/train/data/english/annotations/");
  }
  /** Evaluation-data path ({@code coref.path.evaldata}); default points at a Stanford cluster path. */
  public static String getPathEvalData(Properties props) {
    return PropertiesUtils.getString(props, PATH_EVAL_PROP, "/scr/nlp/data/conll-2012/v9/data/test/data/english/annotations");
  }
  /** Worker-thread count ({@code coref.threadCount}); defaults to the number of available processors. */
  public static int getThreadCounts(Properties props) {
    return PropertiesUtils.getInt(props, THREADS_PROP, Runtime.getRuntime().availableProcessors());
  }
  /** Path to the CoNLL scorer script ({@code coref.path.scorer.conll}). */
  public static String getPathScorer(Properties props) {
    return PropertiesUtils.getString(props, PATH_SCORER_PROP, "/scr/nlp/data/conll-2012/scorer/v8.01/scorer.pl");
  }
  /** Input format ({@code coref.input.type}, default "raw"), uppercased into {@link CorefInputType}. */
  public static CorefInputType getInputType(Properties props) {
    String inputType = PropertiesUtils.getString(props, INPUT_TYPE_PROP, "raw");
    return CorefInputType.valueOf(inputType.toUpperCase());
  }
public static Locale getLanguage(Properties props) {
String lang = PropertiesUtils.getString(props, LANG_PROP, "en");
if(lang.equalsIgnoreCase("en") || lang.equalsIgnoreCase("english")) return Locale.ENGLISH;
else if(lang.equalsIgnoreCase("zh") || lang.equalsIgnoreCase("chinese")) return Locale.CHINESE;
else throw new RuntimeException("unsupported language");
}
  /** Whether to print the mention-detection log ({@code coref.print.md.log}, default false). */
  public static boolean printMDLog(Properties props) {
    return PropertiesUtils.getBool(props, PRINT_MDLOG_PROP, false);
  }
  /** Whether to run post-processing on the output ({@code coref.postprocessing}, default false). */
  public static boolean doPostProcessing(Properties props) {
    return PropertiesUtils.getBool(props, POSTPROCESSING_PROP, false);
  }
  /** If true, use CoNLL auto (system-annotated) files; else gold files ({@code coref.conll.auto}, default true). */
  public static boolean useCoNLLAuto(Properties props) {
    return PropertiesUtils.getBool(props, CONLL_AUTO_PROP, true);
  }
public static MentionDetectionType getMDType(Properties props) {
String defaultMD;
if (getLanguage(props).equals(Locale.ENGLISH)) {
// defaultMD for English should be RULE since this is highest performing score for scoref
defaultMD = "RULE";
} else if (getLanguage(props).equals(Locale.CHINESE)) {
// defaultMD for Chinese should be RULE for now
defaultMD = "RULE";
} else {
// general default is "RULE" for now
defaultMD = "RULE";
}
String type = PropertiesUtils.getString(props, MD_TYPE_PROP, defaultMD);
if(type.equalsIgnoreCase("dep")) type = "DEPENDENCY";
return MentionDetectionType.valueOf(type.toUpperCase());
}
  /** Whether to use the singleton predictor ({@code coref.useSingletonPredictor}, default false). */
  public static boolean useSingletonPredictor(Properties props) {
    return PropertiesUtils.getBool(props, USE_SINGLETON_PREDICTOR_PROP, false);
  }
  /** Classpath location of the serialized singleton predictor ({@code coref.path.singletonPredictor}). */
  public static String getPathSingletonPredictor(Properties props) {
    return PropertiesUtils.getString(props, PATH_SINGLETON_PREDICTOR_PROP, "edu/stanford/nlp/models/dcoref/singleton.predictor.ser");
  }
  /**
   * Model path for a sieve: serialized-dir + per-sieve model property. When the
   * per-sieve property is missing, the placeholder "MISSING_MODEL_FOR_&lt;sieve&gt;"
   * is appended instead (failing later at load time rather than here).
   */
  public static String getPathModel(Properties props, String sievename) {
    return props.getProperty(PATH_SERIALIZED_PROP) + File.separator +
        props.getProperty(PATH_MODEL_PROP.replace("SIEVENAME", sievename), "MISSING_MODEL_FOR_"+sievename);
  }
  /** Whether debug logging is enabled ({@code coref.debug}, default false). */
  public static boolean debug(Properties props) {
    return PropertiesUtils.getBool(props, DEBUG_PROP, false);
  }
  /**
   * Classifier type for a sieve: dcoref sieves are RULE, "-rf"/"-oracle" name
   * suffixes select RF/ORACLE, otherwise the per-sieve property is consulted.
   * NOTE(review): when that property is missing, {@code valueOf(null)} throws a
   * bare NullPointerException — confirm whether a clearer error is wanted.
   */
  public static ClassifierType getClassifierType(Properties props, String sievename) {
    if(dcorefSieveNames.contains(sievename)) return ClassifierType.RULE;
    if(sievename.toLowerCase().endsWith("-rf")) return ClassifierType.RF;
    if(sievename.toLowerCase().endsWith("-oracle")) return ClassifierType.ORACLE;
    String classifierType = PropertiesUtils.getString(props, CLASSIFIER_TYPE_PROP.replace("SIEVENAME", sievename), null);
    return ClassifierType.valueOf(classifierType);
  }
  /** Per-sieve merge threshold ({@code coref.<sieve>.merge.thres}, default 0.3). */
  public static double getMergeThreshold(Properties props, String sievename) {
    String key = THRES_MERGE_PROP.replace("SIEVENAME", sievename);
    return PropertiesUtils.getDouble(props, key, 0.3);
  }
  /** Overwrites the per-sieve merge threshold in {@code props} (counterpart of {@link #getMergeThreshold}). */
  public static void setMergeThreshold(Properties props, String sievename, double value) {
    String key = THRES_MERGE_PROP.replace("SIEVENAME", sievename);
    props.setProperty(key, String.valueOf(value));
  }
  /** Random-forest tree count for a sieve ({@code coref.<sieve>.numTrees}, default 100). */
  public static int getNumTrees(Properties props, String sievename) {
    return PropertiesUtils.getInt(props, NUM_TREE_PROP.replace("SIEVENAME", sievename), 100);
  }
  /** Random seed ({@code coref.seed}, default 1). */
  public static int getSeed(Properties props) {
    return PropertiesUtils.getInt(props, SEED_PROP, 1);
  }
  /** Features sampled per random-forest split ({@code coref.<sieve>.numFeatures}, default 30). */
  public static int getNumFeatures(Properties props, String sievename) {
    return PropertiesUtils.getInt(props, NUM_FEATURES_PROP.replace("SIEVENAME", sievename), 30);
  }
  /** Maximum tree depth for a sieve's forest ({@code coref.<sieve>.treeDepth}, default 0 = unlimited per consumer). */
  public static int getTreeDepth(Properties props, String sievename) {
    return PropertiesUtils.getInt(props, TREE_DEPTH_PROP.replace("SIEVENAME", sievename), 0);
  }
  /** Whether to compute feature importance ({@code coref.calculateFeatureImportance}, default false). */
  public static boolean calculateFeatureImportance(Properties props) {
    return PropertiesUtils.getBool(props, CALCULATE_IMPORTANCE_PROP, false);
  }
  /** Maximum sentence distance considered by a sieve ({@code coref.<sieve>.maxSentDist}, default 1000). */
  public static int getMaxSentDistForSieve(Properties props, String sievename) {
    return PropertiesUtils.getInt(props, MAX_SENT_DIST_PROP.replace("SIEVENAME", sievename), 1000);
  }
  /** Classpath location of the mention-detection model ({@code coref.mentionDetectionModel}). */
  public static String getMentionDetectionModel(Properties props) {
    return PropertiesUtils.getString(props, MENTION_DETECTION_MODEL_PROP,
        "edu/stanford/nlp/models/hcoref/md-model.ser");
  }
  /** Mention types a sieve applies to ({@code coref.<sieve>.mType}); see {@link #getMentionTypes}. */
  public static Set<MentionType> getMentionType(Properties props, String sievename) {
    return getMentionTypes(props, MTYPE_PROP.replace("SIEVENAME", sievename));
  }
  /** Antecedent types a sieve applies to ({@code coref.<sieve>.aType}); see {@link #getMentionTypes}. */
  public static Set<MentionType> getAntecedentType(Properties props, String sievename) {
    return getMentionTypes(props, ATYPE_PROP.replace("SIEVENAME", sievename));
  }
  /**
   * Parses a comma-separated list of mention types from {@code propKey}. A
   * missing property or the value "all" yields every {@link MentionType}.
   * Personal-pronoun words (i/you/we/they/it/she/he) are mapped to PRONOMINAL
   * before the enum lookup; any other unknown token makes valueOf throw.
   */
  private static Set<MentionType> getMentionTypes(Properties props, String propKey) {
    if(!props.containsKey(propKey) || props.getProperty(propKey).equalsIgnoreCase("all")){
      return new HashSet<>(Arrays.asList(MentionType.values()));
    }
    Set<MentionType> types = new HashSet<>();
    for(String type : props.getProperty(propKey).trim().split(",\\s*")) {
      if(type.toLowerCase().matches("i|you|we|they|it|she|he")) type = "PRONOMINAL";
      types.add(MentionType.valueOf(type));
    }
    return types;
  }
  /** Training-data downsampling rate for a sieve ({@code coref.<sieve>.downsamplingRate}, default 1). */
  public static double getDownsamplingRate(Properties props, String sievename) {
    return PropertiesUtils.getDouble(props, DOWNSAMPLE_RATE_PROP.replace("SIEVENAME", sievename), 1);
  }
  /** Minimum feature count for a sieve ({@code coref.<sieve>.thresFeatureCount}, default 20). */
  public static int getFeatureCountThreshold(Properties props, String sievename) {
    return PropertiesUtils.getInt(props, THRES_FEATURECOUNT_PROP.replace("SIEVENAME", sievename), 20);
  }
  /** Whether a sieve uses the basic feature set ({@code coref.<sieve>.useBasicFeatures}, default true). */
  public static boolean useBasicFeatures(Properties props, String sievename) {
    return PropertiesUtils.getBool(props, USE_BASIC_FEATURES_PROP.replace("SIEVENAME", sievename), true);
  }
  /** Whether a sieve combines object roles ({@code coref.<sieve>.combineObjectRole}, default true). */
  public static boolean combineObjectRoles(Properties props, String sievename) {
    return PropertiesUtils.getBool(props, COMBINE_OBJECTROLE_PROP.replace("SIEVENAME", sievename), true);
  }
  /** Whether a sieve uses mention-detection features ({@code coref.<sieve>.useMentionDetectionFeatures}, default true). */
  public static boolean useMentionDetectionFeatures(Properties props, String sievename) {
    return PropertiesUtils.getBool(props, USE_MD_FEATURES_PROP.replace("SIEVENAME", sievename), true);
  }
  /** Whether a sieve uses dcoref rule features ({@code coref.<sieve>.useDcorefRuleFeatures}, default true). */
  public static boolean useDcorefRules(Properties props, String sievename) {
    return PropertiesUtils.getBool(props, USE_DCOREFRULE_FEATURES_PROP.replace("SIEVENAME", sievename), true);
  }
  /** Whether a sieve uses POS features ({@code coref.<sieve>.usePOSFeatures}, default true). */
  public static boolean usePOSFeatures(Properties props, String sievename) {
    return PropertiesUtils.getBool(props, USE_POS_FEATURES_PROP.replace("SIEVENAME", sievename), true);
  }
  /** Whether a sieve uses lexical features ({@code coref.<sieve>.useLexicalFeatures}, default true). */
  public static boolean useLexicalFeatures(Properties props, String sievename) {
    return PropertiesUtils.getBool(props, USE_LEXICAL_FEATURES_PROP.replace("SIEVENAME", sievename), true);
  }
public static boolean useWordEmbedding(Properties props, String sievename) {
return PropertiesUtils.getBool(props, USE_WORD_EMBEDDING_FEATURES_PROP.replace("SIEVENAME", sievename), true);
}
private static Set<String> getMentionTypeStr(Properties props, String sievename, String whichMention) {
Set<String> strs = Generics.newHashSet();
String propKey = whichMention;
if (!props.containsKey(propKey)) {
String prefix = "coref." + sievename + ".";
propKey = prefix + propKey;
}
if(props.containsKey(propKey)) strs.addAll(Arrays.asList(props.getProperty(propKey).split(",")));
return strs;
}
  /** Mention-type name strings configured for this sieve (property key "mType"). */
  public static Set<String> getMentionTypeStr(Properties props, String sievename) {
    return getMentionTypeStr(props, sievename, "mType");
  }
  /** Antecedent-type name strings configured for this sieve (property key "aType"). */
  public static Set<String> getAntecedentTypeStr(Properties props, String sievename) {
    return getMentionTypeStr(props, sievename, "aType");
  }
  /** Comma-separated, ordered list of sieves to run; defaults to the built-in chain. */
  public static String getSieves(Properties props) {
    return PropertiesUtils.getString(props, SIEVES_PROP, "SpeakerMatch,PreciseConstructs,pp-rf,cc-rf,pc-rf,ll-rf,pr-rf");
  }
  /** Path for serialized models, or null if unset. */
  public static String getPathSerialized(Properties props) {
    return props.getProperty(PATH_SERIALIZED_PROP);
  }
  /** True iff this sieve's feature-selection method is "pmi" (the default). */
  public static boolean doPMIFeatureSelection(Properties props, String sievename) {
    return PropertiesUtils.getString(props, FEATURE_SELECTION_PROP.replace("SIEVENAME", sievename), "pmi").equalsIgnoreCase("pmi");
  }
  /** Per-sieve PMI feature-selection threshold; default 0.0001. */
  public static double getPMIThres(Properties props, String sievename) {
    return PropertiesUtils.getDouble(props, THRES_FEATURE_SELECTION_PROP.replace("SIEVENAME", sievename), 0.0001);
  }
  /** Whether analysis output is enabled (default false). */
  public static boolean doAnalysis(Properties props) {
    return PropertiesUtils.getBool(props, DO_ANALYSIS_PROP, false);
  }
  /** Mention type(s) to skip during analysis, or null if unset. */
  public static String getSkipMentionType(Properties props) {
    return PropertiesUtils.getString(props, ANALYSIS_SKIP_MTYPE_PROP, null);
  }
  /** Antecedent type(s) to skip during analysis, or null if unset. */
  public static String getSkipAntecedentType(Properties props) {
    return PropertiesUtils.getString(props, ANALYSIS_SKIP_ATYPE_PROP, null);
  }
  /** Whether semantic information (e.g. word vectors) is used (default true). */
  public static boolean useSemantics(Properties props) {
    return PropertiesUtils.getBool(props, USE_SEMANTICS_PROP, true);
  }
  /** Path to serialized word vectors; default is a Stanford-cluster-local path. */
  public static String getPathSerializedWordVectors(Properties props) {
    return PropertiesUtils.getString(props, WORD2VEC_SERIALIZED_PROP, "/scr/nlp/data/coref/wordvectors/en/vector.ser.gz");
  }
  /** Name of the sieve currently being trained, or null if unset. */
  public static String getCurrentSieveForTrain(Properties props) {
    return PropertiesUtils.getString(props, CURRENT_SIEVE_FOR_TRAIN_PROP, null);
  }
// public static String getCurrentSieve(Properties props) {
// return PropertiesUtils.getString(props, CURRENT_SIEVE_PROP, null);
// }
  /** Whether word embeddings should be loaded (default true). */
  public static boolean loadWordEmbedding(Properties props) {
    return PropertiesUtils.getBool(props, LOAD_WORD_EMBEDDING_PROP, true);
  }
  /** Path to a word2vec file, or null if unset. */
  public static String getPathWord2Vec(Properties props) {
    return PropertiesUtils.getString(props, WORD2VEC_PROP, null);
  }
  /** Path to the gender/number data file; defaults to the bundled dcoref model. */
  public static String getGenderNumber(Properties props) {
    return PropertiesUtils.getString(props, GENDER_NUMBER_PROP, "edu/stanford/nlp/models/dcoref/gender.data.gz");
  }
  /** Whether generated training data is stored (default false). */
  public static boolean storeTrainData(Properties props) {
    return PropertiesUtils.getBool(props, STORE_TRAINDATA_PROP, false);
  }
  /** Whether reparsing is allowed (default true). */
  public static boolean allowReparsing(Properties props) {
    return PropertiesUtils.getBool(props, ALLOW_REPARSING_PROP, true);
  }
  /** Whether gold named-entity annotations are used (default true). */
  public static boolean useGoldNE(Properties props) {
    return PropertiesUtils.getBool(props, USE_GOLD_NE_PROP, true);
  }
  /** Whether gold parse trees are used (default true). */
  public static boolean useGoldParse(Properties props) {
    return PropertiesUtils.getBool(props, USE_GOLD_PARSES_PROP, true);
  }
  /** Whether gold part-of-speech tags are used (default true). */
  public static boolean useGoldPOS(Properties props) {
    return PropertiesUtils.getBool(props, USE_GOLD_POS_PROP, true);
  }
  /** Whether mention-detection training mode is enabled (default false). */
  public static boolean isMentionDetectionTraining(Properties props) {
    return PropertiesUtils.getBool(props, CorefProperties.MD_TRAIN_PROP, false);
  }
  /** Sets the remove-nested-mentions flag in {@code props}; read back by removeNested(). */
  public static void setRemoveNested(Properties props,boolean bool){
    props.setProperty(REMOVE_NESTED, String.valueOf(bool));
  }
  /** Whether nested mentions are removed (default true). */
  public static boolean removeNested(Properties props){
    return PropertiesUtils.getBool(props, CorefProperties.REMOVE_NESTED, true);
  }
  /** Whether default pronoun-agreement rules are applied (default false). */
  public static boolean useDefaultPronounAgreement(Properties props){
    return PropertiesUtils.getBool(props, CorefProperties.DEFAULT_PRONOUN_AGREEMENT_PROP,false);
  }
  /** Whether missing annotations should be added automatically (default false). */
  public static boolean addMissingAnnotations(Properties props) {
    return PropertiesUtils.getBool(props, ADD_MISSING_ANNOTATIONS, false);
  }
}
| hbbpb/stanford-corenlp-gv | src/edu/stanford/nlp/hcoref/CorefProperties.java | Java | gpl-2.0 | 20,475 |
/*
* QEMU Object Model
*
* Copyright IBM, Corp. 2011
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
*
*/
#ifndef QEMU_OBJECT_H
#define QEMU_OBJECT_H
#include <glib.h>
#include <stdint.h>
#include <stdbool.h>
#include "qemu/queue.h"
#include "qemu/typedefs.h"
#include "qapi/error.h"
struct TypeImpl;
typedef struct TypeImpl *Type;
typedef struct ObjectClass ObjectClass;
typedef struct Object Object;
typedef struct TypeInfo TypeInfo;
typedef struct InterfaceClass InterfaceClass;
typedef struct InterfaceInfo InterfaceInfo;
#define TYPE_OBJECT "object"
/**
* SECTION:object.h
* @title:Base Object Type System
* @short_description: interfaces for creating new types and objects
*
* The QEMU Object Model provides a framework for registering user creatable
* types and instantiating objects from those types. QOM provides the following
* features:
*
* - System for dynamically registering types
* - Support for single-inheritance of types
* - Multiple inheritance of stateless interfaces
*
* <example>
* <title>Creating a minimal type</title>
* <programlisting>
* #include "qdev.h"
*
* #define TYPE_MY_DEVICE "my-device"
*
* // No new virtual functions: we can reuse the typedef for the
* // superclass.
* typedef DeviceClass MyDeviceClass;
* typedef struct MyDevice
* {
* DeviceState parent;
*
* int reg0, reg1, reg2;
* } MyDevice;
*
* static const TypeInfo my_device_info = {
* .name = TYPE_MY_DEVICE,
* .parent = TYPE_DEVICE,
* .instance_size = sizeof(MyDevice),
* };
*
* static void my_device_register_types(void)
* {
* type_register_static(&my_device_info);
* }
*
* type_init(my_device_register_types)
* </programlisting>
* </example>
*
* In the above example, we create a simple type that is described by #TypeInfo.
* #TypeInfo describes information about the type including what it inherits
* from, the instance and class size, and constructor/destructor hooks.
*
* Every type has an #ObjectClass associated with it. #ObjectClass derivatives
* are instantiated dynamically but there is only ever one instance for any
* given type. The #ObjectClass typically holds a table of function pointers
* for the virtual methods implemented by this type.
*
* Using object_new(), a new #Object derivative will be instantiated. You can
* cast an #Object to a subclass (or base-class) type using
* object_dynamic_cast(). You typically want to define macro wrappers around
* OBJECT_CHECK() and OBJECT_CLASS_CHECK() to make it easier to convert to a
* specific type:
*
* <example>
* <title>Typecasting macros</title>
* <programlisting>
* #define MY_DEVICE_GET_CLASS(obj) \
* OBJECT_GET_CLASS(MyDeviceClass, obj, TYPE_MY_DEVICE)
* #define MY_DEVICE_CLASS(klass) \
* OBJECT_CLASS_CHECK(MyDeviceClass, klass, TYPE_MY_DEVICE)
* #define MY_DEVICE(obj) \
* OBJECT_CHECK(MyDevice, obj, TYPE_MY_DEVICE)
* </programlisting>
* </example>
*
* # Class Initialization #
*
* Before an object is initialized, the class for the object must be
* initialized. There is only one class object for all instance objects
* that is created lazily.
*
* Classes are initialized by first initializing any parent classes (if
* necessary). After the parent class object has initialized, it will be
* copied into the current class object and any additional storage in the
* class object is zero filled.
*
* The effect of this is that classes automatically inherit any virtual
* function pointers that the parent class has already initialized. All
* other fields will be zero filled.
*
* Once all of the parent classes have been initialized, #TypeInfo::class_init
 * is called to let the class being instantiated provide default initialization for
* its virtual functions. Here is how the above example might be modified
* to introduce an overridden virtual function:
*
* <example>
* <title>Overriding a virtual function</title>
* <programlisting>
* #include "qdev.h"
*
* void my_device_class_init(ObjectClass *klass, void *class_data)
* {
* DeviceClass *dc = DEVICE_CLASS(klass);
* dc->reset = my_device_reset;
* }
*
* static const TypeInfo my_device_info = {
* .name = TYPE_MY_DEVICE,
* .parent = TYPE_DEVICE,
* .instance_size = sizeof(MyDevice),
* .class_init = my_device_class_init,
* };
* </programlisting>
* </example>
*
* Introducing new virtual methods requires a class to define its own
* struct and to add a .class_size member to the #TypeInfo. Each method
* will also have a wrapper function to call it easily:
*
* <example>
* <title>Defining an abstract class</title>
* <programlisting>
* #include "qdev.h"
*
* typedef struct MyDeviceClass
* {
* DeviceClass parent;
*
* void (*frobnicate) (MyDevice *obj);
* } MyDeviceClass;
*
* static const TypeInfo my_device_info = {
* .name = TYPE_MY_DEVICE,
* .parent = TYPE_DEVICE,
* .instance_size = sizeof(MyDevice),
* .abstract = true, // or set a default in my_device_class_init
* .class_size = sizeof(MyDeviceClass),
* };
*
* void my_device_frobnicate(MyDevice *obj)
* {
* MyDeviceClass *klass = MY_DEVICE_GET_CLASS(obj);
*
* klass->frobnicate(obj);
* }
* </programlisting>
* </example>
*
* # Interfaces #
*
* Interfaces allow a limited form of multiple inheritance. Instances are
 * similar to normal types except for the fact that they are only defined by
* their classes and never carry any state. You can dynamically cast an object
* to one of its #Interface types and vice versa.
*
* # Methods #
*
* A <emphasis>method</emphasis> is a function within the namespace scope of
* a class. It usually operates on the object instance by passing it as a
* strongly-typed first argument.
* If it does not operate on an object instance, it is dubbed
* <emphasis>class method</emphasis>.
*
* Methods cannot be overloaded. That is, the #ObjectClass and method name
 * uniquely identify the function to be called; the signature does not vary
* except for trailing varargs.
*
* Methods are always <emphasis>virtual</emphasis>. Overriding a method in
* #TypeInfo.class_init of a subclass leads to any user of the class obtained
* via OBJECT_GET_CLASS() accessing the overridden function.
* The original function is not automatically invoked. It is the responsibility
* of the overriding class to determine whether and when to invoke the method
* being overridden.
*
* To invoke the method being overridden, the preferred solution is to store
* the original value in the overriding class before overriding the method.
* This corresponds to |[ {super,base}.method(...) ]| in Java and C#
* respectively; this frees the overriding class from hardcoding its parent
* class, which someone might choose to change at some point.
*
* <example>
* <title>Overriding a virtual method</title>
* <programlisting>
* typedef struct MyState MyState;
*
* typedef void (*MyDoSomething)(MyState *obj);
*
* typedef struct MyClass {
* ObjectClass parent_class;
*
* MyDoSomething do_something;
* } MyClass;
*
* static void my_do_something(MyState *obj)
* {
* // do something
* }
*
* static void my_class_init(ObjectClass *oc, void *data)
* {
* MyClass *mc = MY_CLASS(oc);
*
* mc->do_something = my_do_something;
* }
*
* static const TypeInfo my_type_info = {
* .name = TYPE_MY,
* .parent = TYPE_OBJECT,
* .instance_size = sizeof(MyState),
* .class_size = sizeof(MyClass),
* .class_init = my_class_init,
* };
*
* typedef struct DerivedClass {
* MyClass parent_class;
*
* MyDoSomething parent_do_something;
* } DerivedClass;
*
* static void derived_do_something(MyState *obj)
* {
* DerivedClass *dc = DERIVED_GET_CLASS(obj);
*
* // do something here
* dc->parent_do_something(obj);
* // do something else here
* }
*
* static void derived_class_init(ObjectClass *oc, void *data)
* {
* MyClass *mc = MY_CLASS(oc);
* DerivedClass *dc = DERIVED_CLASS(oc);
*
* dc->parent_do_something = mc->do_something;
* mc->do_something = derived_do_something;
* }
*
* static const TypeInfo derived_type_info = {
* .name = TYPE_DERIVED,
* .parent = TYPE_MY,
* .class_size = sizeof(DerivedClass),
* .class_init = derived_class_init,
* };
* </programlisting>
* </example>
*
* Alternatively, object_class_by_name() can be used to obtain the class and
* its non-overridden methods for a specific type. This would correspond to
* |[ MyClass::method(...) ]| in C++.
*
* The first example of such a QOM method was #CPUClass.reset,
* another example is #DeviceClass.realize.
*/
/**
* ObjectPropertyAccessor:
* @obj: the object that owns the property
* @v: the visitor that contains the property data
* @name: the name of the property
* @opaque: the object property opaque
* @errp: a pointer to an Error that is filled if getting/setting fails.
*
* Called when trying to get/set a property.
*/
typedef void (ObjectPropertyAccessor)(Object *obj,
Visitor *v,
const char *name,
void *opaque,
Error **errp);
/**
* ObjectPropertyResolve:
* @obj: the object that owns the property
* @opaque: the opaque registered with the property
* @part: the name of the property
*
* Resolves the #Object corresponding to property @part.
*
* The returned object can also be used as a starting point
* to resolve a relative path starting with "@part".
*
* Returns: If @path is the path that led to @obj, the function
* returns the #Object corresponding to "@path/@part".
* If "@path/@part" is not a valid object path, it returns #NULL.
*/
typedef Object *(ObjectPropertyResolve)(Object *obj,
void *opaque,
const char *part);
/**
* ObjectPropertyRelease:
* @obj: the object that owns the property
* @name: the name of the property
* @opaque: the opaque registered with the property
*
* Called when a property is removed from a object.
*/
typedef void (ObjectPropertyRelease)(Object *obj,
const char *name,
void *opaque);
/* A single property registered against an Object instance or an ObjectClass. */
typedef struct ObjectProperty
{
    gchar *name;        /* property name; forward slashes are not allowed (see object_property_add) */
    gchar *type;        /* property type string, e.g. "link<virtio-net-pci>" */
    gchar *description; /* human-readable description */
    ObjectPropertyAccessor *get;    /* NULL means the property cannot be read */
    ObjectPropertyAccessor *set;    /* NULL means the property cannot be written */
    ObjectPropertyResolve *resolve; /* path-resolution hook, used by child/link properties */
    ObjectPropertyRelease *release; /* cleanup hook called when the property is removed */
    void *opaque;                   /* opaque pointer passed back to the callbacks above */
} ObjectProperty;
/**
* ObjectUnparent:
* @obj: the object that is being removed from the composition tree
*
* Called when an object is being removed from the QOM composition tree.
* The function should remove any backlinks from children objects to @obj.
*/
typedef void (ObjectUnparent)(Object *obj);
/**
* ObjectFree:
* @obj: the object being freed
*
* Called when an object's last reference is removed.
*/
typedef void (ObjectFree)(void *obj);
#define OBJECT_CLASS_CAST_CACHE 4
/**
* ObjectClass:
*
* The base for all classes. The only thing that #ObjectClass contains is an
* integer type handle.
*/
struct ObjectClass
{
    /*< private >*/
    Type type;          /* handle of the type this class was instantiated for */
    GSList *interfaces; /* interfaces associated with this class */
    /* NOTE(review): the two arrays below look like small caches of recently
     * checked cast-target typenames (OBJECT_CLASS_CAST_CACHE entries each),
     * presumably to speed up repeated dynamic casts -- confirm in object.c. */
    const char *object_cast_cache[OBJECT_CLASS_CAST_CACHE];
    const char *class_cast_cache[OBJECT_CLASS_CAST_CACHE];
    ObjectUnparent *unparent; /* hook run when an instance is removed from the composition tree */
    GHashTable *properties;   /* class-level properties (see object_class_property_add) */
};
/**
* Object:
*
* The base for all objects. The first member of this object is a pointer to
* a #ObjectClass. Since C guarantees that the first member of a structure
* always begins at byte 0 of that structure, as long as any sub-object places
* its parent as the first member, we can cast directly to a #Object.
*
* As a result, #Object contains a reference to the objects type as its
* first member. This allows identification of the real type of the object at
* run time.
*/
struct Object
{
    /*< private >*/
    ObjectClass *class;     /* must remain the first member so direct casts to Object work (see struct docs above) */
    ObjectFree *free;       /* called when the object's last reference is removed */
    GHashTable *properties; /* per-instance properties (see object_property_add) */
    uint32_t ref;           /* reference count; see object_ref()/object_unref() */
    Object *parent;         /* parent in the QOM composition tree */
};
/**
* TypeInfo:
* @name: The name of the type.
* @parent: The name of the parent type.
* @instance_size: The size of the object (derivative of #Object). If
* @instance_size is 0, then the size of the object will be the size of the
* parent object.
* @instance_init: This function is called to initialize an object. The parent
* class will have already been initialized so the type is only responsible
* for initializing its own members.
* @instance_post_init: This function is called to finish initialization of
* an object, after all @instance_init functions were called.
* @instance_finalize: This function is called during object destruction. This
* is called before the parent @instance_finalize function has been called.
* An object should only free the members that are unique to its type in this
* function.
* @abstract: If this field is true, then the class is considered abstract and
* cannot be directly instantiated.
* @class_size: The size of the class object (derivative of #ObjectClass)
* for this object. If @class_size is 0, then the size of the class will be
* assumed to be the size of the parent class. This allows a type to avoid
* implementing an explicit class type if they are not adding additional
* virtual functions.
* @class_init: This function is called after all parent class initialization
* has occurred to allow a class to set its default virtual method pointers.
* This is also the function to use to override virtual methods from a parent
* class.
* @class_base_init: This function is called for all base classes after all
* parent class initialization has occurred, but before the class itself
* is initialized. This is the function to use to undo the effects of
 *   memcpy from the parent class to the descendants.
* @class_finalize: This function is called during class destruction and is
 *   meant to release any dynamic parameters allocated by @class_init.
* @class_data: Data to pass to the @class_init, @class_base_init and
* @class_finalize functions. This can be useful when building dynamic
* classes.
* @interfaces: The list of interfaces associated with this type. This
* should point to a static array that's terminated with a zero filled
* element.
*/
/* Field semantics are documented in full in the block comment above. */
struct TypeInfo
{
    const char *name;   /* QOM typename being registered */
    const char *parent; /* QOM typename of the parent type */
    size_t instance_size;                    /* 0 = inherit the parent's instance size */
    void (*instance_init)(Object *obj);      /* per-instance constructor hook */
    void (*instance_post_init)(Object *obj); /* runs after all instance_init hooks */
    void (*instance_finalize)(Object *obj);  /* per-instance destructor hook */
    bool abstract;      /* abstract types cannot be instantiated directly */
    size_t class_size;  /* 0 = inherit the parent's class size */
    void (*class_init)(ObjectClass *klass, void *data);      /* set/override virtual method pointers */
    void (*class_base_init)(ObjectClass *klass, void *data); /* runs for each base class before class_init */
    void (*class_finalize)(ObjectClass *klass, void *data);  /* class destructor hook */
    void *class_data;   /* passed to the three class hooks above */
    InterfaceInfo *interfaces; /* zero-terminated static array of implemented interfaces */
};
/**
* OBJECT:
* @obj: A derivative of #Object
*
* Converts an object to a #Object. Since all objects are #Objects,
* this function will always succeed.
*/
#define OBJECT(obj) \
((Object *)(obj))
/**
* OBJECT_CLASS:
* @class: A derivative of #ObjectClass.
*
 * Converts a class to an #ObjectClass.  Since all classes are #ObjectClass
 * derivatives, this cast will always succeed.
*/
#define OBJECT_CLASS(class) \
((ObjectClass *)(class))
/**
* OBJECT_CHECK:
* @type: The C type to use for the return value.
* @obj: A derivative of @type to cast.
* @name: The QOM typename of @type
*
* A type safe version of @object_dynamic_cast_assert. Typically each class
* will define a macro based on this type to perform type safe dynamic_casts to
* this object type.
*
* If an invalid object is passed to this function, a run time assert will be
* generated.
*/
#define OBJECT_CHECK(type, obj, name) \
((type *)object_dynamic_cast_assert(OBJECT(obj), (name), \
__FILE__, __LINE__, __func__))
/**
* OBJECT_CLASS_CHECK:
* @class_type: The C type to use for the return value.
* @class: A derivative class of @class_type to cast.
* @name: the QOM typename of @class_type.
*
* A type safe version of @object_class_dynamic_cast_assert. This macro is
* typically wrapped by each type to perform type safe casts of a class to a
* specific class type.
*/
#define OBJECT_CLASS_CHECK(class_type, class, name) \
((class_type *)object_class_dynamic_cast_assert(OBJECT_CLASS(class), (name), \
__FILE__, __LINE__, __func__))
/**
* OBJECT_GET_CLASS:
* @class: The C type to use for the return value.
* @obj: The object to obtain the class for.
* @name: The QOM typename of @obj.
*
* This function will return a specific class for a given object. Its generally
* used by each type to provide a type safe macro to get a specific class type
* from an object.
*/
#define OBJECT_GET_CLASS(class, obj, name) \
OBJECT_CLASS_CHECK(class, object_get_class(OBJECT(obj)), name)
/**
* InterfaceInfo:
* @type: The name of the interface.
*
* The information associated with an interface.
*/
struct InterfaceInfo {
const char *type;
};
/**
* InterfaceClass:
* @parent_class: the base class
*
* The class for all interfaces. Subclasses of this class should only add
* virtual methods.
*/
struct InterfaceClass
{
    ObjectClass parent_class; /* interfaces are classes and carry no instance state */
    /*< private >*/
    ObjectClass *concrete_class; /* presumably the concrete class implementing this interface -- confirm in object.c */
    Type interface_type;         /* presumably the type handle of the interface itself -- confirm in object.c */
};
#define TYPE_INTERFACE "interface"
/**
* INTERFACE_CLASS:
* @klass: class to cast from
* Returns: An #InterfaceClass or raise an error if cast is invalid
*/
#define INTERFACE_CLASS(klass) \
OBJECT_CLASS_CHECK(InterfaceClass, klass, TYPE_INTERFACE)
/**
* INTERFACE_CHECK:
* @interface: the type to return
* @obj: the object to convert to an interface
* @name: the interface type name
*
* Returns: @obj casted to @interface if cast is valid, otherwise raise error.
*/
#define INTERFACE_CHECK(interface, obj, name) \
((interface *)object_dynamic_cast_assert(OBJECT((obj)), (name), \
__FILE__, __LINE__, __func__))
/**
* object_new:
* @typename: The name of the type of the object to instantiate.
*
* This function will initialize a new object using heap allocated memory.
* The returned object has a reference count of 1, and will be freed when
* the last reference is dropped.
*
* Returns: The newly allocated and instantiated object.
*/
Object *object_new(const char *typename);
/**
* object_new_with_type:
* @type: The type of the object to instantiate.
*
* This function will initialize a new object using heap allocated memory.
* The returned object has a reference count of 1, and will be freed when
* the last reference is dropped.
*
* Returns: The newly allocated and instantiated object.
*/
Object *object_new_with_type(Type type);
/**
* object_new_with_props:
* @typename: The name of the type of the object to instantiate.
* @parent: the parent object
* @id: The unique ID of the object
* @errp: pointer to error object
* @...: list of property names and values
*
* This function will initialize a new object using heap allocated memory.
* The returned object has a reference count of 1, and will be freed when
* the last reference is dropped.
*
* The @id parameter will be used when registering the object as a
* child of @parent in the composition tree.
*
* The variadic parameters are a list of pairs of (propname, propvalue)
* strings. The propname of %NULL indicates the end of the property
* list. If the object implements the user creatable interface, the
* object will be marked complete once all the properties have been
* processed.
*
* <example>
* <title>Creating an object with properties</title>
* <programlisting>
* Error *err = NULL;
* Object *obj;
*
* obj = object_new_with_props(TYPE_MEMORY_BACKEND_FILE,
* object_get_objects_root(),
* "hostmem0",
* &err,
* "share", "yes",
* "mem-path", "/dev/shm/somefile",
* "prealloc", "yes",
* "size", "1048576",
* NULL);
*
* if (!obj) {
* g_printerr("Cannot create memory backend: %s\n",
* error_get_pretty(err));
* }
* </programlisting>
* </example>
*
* The returned object will have one stable reference maintained
* for as long as it is present in the object hierarchy.
*
* Returns: The newly allocated, instantiated & initialized object.
*/
Object *object_new_with_props(const char *typename,
Object *parent,
const char *id,
Error **errp,
...) QEMU_SENTINEL;
/**
* object_new_with_propv:
* @typename: The name of the type of the object to instantiate.
* @parent: the parent object
* @id: The unique ID of the object
* @errp: pointer to error object
* @vargs: list of property names and values
*
* See object_new_with_props() for documentation.
*/
Object *object_new_with_propv(const char *typename,
Object *parent,
const char *id,
Error **errp,
va_list vargs);
/**
* object_set_props:
* @obj: the object instance to set properties on
* @errp: pointer to error object
* @...: list of property names and values
*
* This function will set a list of properties on an existing object
* instance.
*
* The variadic parameters are a list of pairs of (propname, propvalue)
* strings. The propname of %NULL indicates the end of the property
* list.
*
* <example>
* <title>Update an object's properties</title>
* <programlisting>
* Error *err = NULL;
* Object *obj = ...get / create object...;
*
* obj = object_set_props(obj,
* &err,
* "share", "yes",
* "mem-path", "/dev/shm/somefile",
* "prealloc", "yes",
* "size", "1048576",
* NULL);
*
* if (!obj) {
* g_printerr("Cannot set properties: %s\n",
* error_get_pretty(err));
* }
* </programlisting>
* </example>
*
* The returned object will have one stable reference maintained
* for as long as it is present in the object hierarchy.
*
* Returns: -1 on error, 0 on success
*/
int object_set_props(Object *obj,
Error **errp,
...) QEMU_SENTINEL;
/**
* object_set_propv:
* @obj: the object instance to set properties on
* @errp: pointer to error object
* @vargs: list of property names and values
*
* See object_set_props() for documentation.
*
* Returns: -1 on error, 0 on success
*/
int object_set_propv(Object *obj,
Error **errp,
va_list vargs);
/**
* object_initialize_with_type:
* @data: A pointer to the memory to be used for the object.
* @size: The maximum size available at @data for the object.
* @type: The type of the object to instantiate.
*
* This function will initialize an object. The memory for the object should
* have already been allocated. The returned object has a reference count of 1,
* and will be finalized when the last reference is dropped.
*/
void object_initialize_with_type(void *data, size_t size, Type type);
/**
* object_initialize:
* @obj: A pointer to the memory to be used for the object.
* @size: The maximum size available at @obj for the object.
* @typename: The name of the type of the object to instantiate.
*
* This function will initialize an object. The memory for the object should
* have already been allocated. The returned object has a reference count of 1,
* and will be finalized when the last reference is dropped.
*/
void object_initialize(void *obj, size_t size, const char *typename);
/**
* object_dynamic_cast:
* @obj: The object to cast.
* @typename: The @typename to cast to.
*
* This function will determine if @obj is-a @typename. @obj can refer to an
* object or an interface associated with an object.
*
* Returns: This function returns @obj on success or #NULL on failure.
*/
Object *object_dynamic_cast(Object *obj, const char *typename);
/**
* object_dynamic_cast_assert:
*
* See object_dynamic_cast() for a description of the parameters of this
* function. The only difference in behavior is that this function asserts
* instead of returning #NULL on failure if QOM cast debugging is enabled.
* This function is not meant to be called directly, but only through
* the wrapper macro OBJECT_CHECK.
*/
Object *object_dynamic_cast_assert(Object *obj, const char *typename,
const char *file, int line, const char *func);
/**
* object_get_class:
* @obj: A derivative of #Object
*
* Returns: The #ObjectClass of the type associated with @obj.
*/
ObjectClass *object_get_class(Object *obj);
/**
* object_get_typename:
* @obj: A derivative of #Object.
*
* Returns: The QOM typename of @obj.
*/
const char *object_get_typename(Object *obj);
/**
* type_register_static:
* @info: The #TypeInfo of the new type.
*
* @info and all of the strings it points to should exist for the life time
* that the type is registered.
*
* Returns: 0 on failure, the new #Type on success.
*/
Type type_register_static(const TypeInfo *info);
/**
* type_register:
* @info: The #TypeInfo of the new type
*
* Unlike type_register_static(), this call does not require @info or its
* string members to continue to exist after the call returns.
*
* Returns: 0 on failure, the new #Type on success.
*/
Type type_register(const TypeInfo *info);
/**
* object_class_dynamic_cast_assert:
* @klass: The #ObjectClass to attempt to cast.
* @typename: The QOM typename of the class to cast to.
*
* See object_class_dynamic_cast() for a description of the parameters
* of this function. The only difference in behavior is that this function
* asserts instead of returning #NULL on failure if QOM cast debugging is
* enabled. This function is not meant to be called directly, but only through
* the wrapper macros OBJECT_CLASS_CHECK and INTERFACE_CHECK.
*/
ObjectClass *object_class_dynamic_cast_assert(ObjectClass *klass,
const char *typename,
const char *file, int line,
const char *func);
/**
* object_class_dynamic_cast:
* @klass: The #ObjectClass to attempt to cast.
* @typename: The QOM typename of the class to cast to.
*
* Returns: If @typename is a class, this function returns @klass if
* @typename is a subtype of @klass, else returns #NULL.
*
* If @typename is an interface, this function returns the interface
* definition for @klass if @klass implements it unambiguously; #NULL
* is returned if @klass does not implement the interface or if multiple
* classes or interfaces on the hierarchy leading to @klass implement
* it. (FIXME: perhaps this can be detected at type definition time?)
*/
ObjectClass *object_class_dynamic_cast(ObjectClass *klass,
const char *typename);
/**
* object_class_get_parent:
* @klass: The class to obtain the parent for.
*
* Returns: The parent for @klass or %NULL if none.
*/
ObjectClass *object_class_get_parent(ObjectClass *klass);
/**
* object_class_get_name:
* @klass: The class to obtain the QOM typename for.
*
* Returns: The QOM typename for @klass.
*/
const char *object_class_get_name(ObjectClass *klass);
/**
* object_class_is_abstract:
* @klass: The class to obtain the abstractness for.
*
* Returns: %true if @klass is abstract, %false otherwise.
*/
bool object_class_is_abstract(ObjectClass *klass);
/**
* object_class_by_name:
* @typename: The QOM typename to obtain the class for.
*
* Returns: The class for @typename or %NULL if not found.
*/
ObjectClass *object_class_by_name(const char *typename);
void object_class_foreach(void (*fn)(ObjectClass *klass, void *opaque),
const char *implements_type, bool include_abstract,
void *opaque);
/**
* object_class_get_list:
* @implements_type: The type to filter for, including its derivatives.
* @include_abstract: Whether to include abstract classes.
*
* Returns: A singly-linked list of the classes in reverse hashtable order.
*/
GSList *object_class_get_list(const char *implements_type,
bool include_abstract);
/**
* object_ref:
* @obj: the object
*
 * Increase the reference count of an object.  An object cannot be freed as long
* as its reference count is greater than zero.
*/
void object_ref(Object *obj);
/**
 * object_unref:
* @obj: the object
*
 * Decrease the reference count of an object.  An object cannot be freed as long
* as its reference count is greater than zero.
*/
void object_unref(Object *obj);
/**
* object_property_add:
* @obj: the object to add a property to
* @name: the name of the property. This can contain any character except for
* a forward slash. In general, you should use hyphens '-' instead of
* underscores '_' when naming properties.
* @type: the type name of the property. This namespace is pretty loosely
* defined. Sub namespaces are constructed by using a prefix and then
 *        a subtype name in angle brackets.  For instance, the type 'virtio-net-pci' in the
* 'link' namespace would be 'link<virtio-net-pci>'.
* @get: The getter to be called to read a property. If this is NULL, then
* the property cannot be read.
* @set: the setter to be called to write a property. If this is NULL,
* then the property cannot be written.
* @release: called when the property is removed from the object. This is
* meant to allow a property to free its opaque upon object
* destruction. This may be NULL.
* @opaque: an opaque pointer to pass to the callbacks for the property
* @errp: returns an error if this function fails
*
* Returns: The #ObjectProperty; this can be used to set the @resolve
* callback for child and link properties.
*/
ObjectProperty *object_property_add(Object *obj, const char *name,
const char *type,
ObjectPropertyAccessor *get,
ObjectPropertyAccessor *set,
ObjectPropertyRelease *release,
void *opaque, Error **errp);
void object_property_del(Object *obj, const char *name, Error **errp);
ObjectProperty *object_class_property_add(ObjectClass *klass, const char *name,
const char *type,
ObjectPropertyAccessor *get,
ObjectPropertyAccessor *set,
ObjectPropertyRelease *release,
void *opaque, Error **errp);
/**
* object_property_find:
* @obj: the object
* @name: the name of the property
* @errp: returns an error if this function fails
*
* Look up a property for an object and return its #ObjectProperty if found.
*/
ObjectProperty *object_property_find(Object *obj, const char *name,
Error **errp);
ObjectProperty *object_class_property_find(ObjectClass *klass, const char *name,
Error **errp);
/*
 * Cursor state for walking every property of an object, its class and all
 * parent classes; initialize with object_property_iter_init() and advance
 * with object_property_iter_next().
 */
typedef struct ObjectPropertyIterator {
    ObjectClass *nextclass; /* next class whose property table to visit (NULL when done) -- TODO confirm */
    GHashTableIter iter;    /* iterator over the current property hash table */
} ObjectPropertyIterator;
/**
* object_property_iter_init:
* @obj: the object
*
* Initializes an iterator for traversing all properties
* registered against an object instance, its class and all parent classes.
*
* It is forbidden to modify the property list while iterating,
* whether removing or adding properties.
*
* Typical usage pattern would be
*
* <example>
* <title>Using object property iterators</title>
* <programlisting>
* ObjectProperty *prop;
* ObjectPropertyIterator iter;
*
* object_property_iter_init(&iter, obj);
* while ((prop = object_property_iter_next(&iter))) {
* ... do something with prop ...
* }
* </programlisting>
* </example>
*/
void object_property_iter_init(ObjectPropertyIterator *iter,
Object *obj);
/**
* object_property_iter_next:
* @iter: the iterator instance
*
* Return the next available property. If no further properties
* are available, a %NULL value will be returned and the @iter
* pointer should not be used again after this point without
* re-initializing it.
*
* Returns: the next property, or %NULL when all properties
* have been traversed.
*/
ObjectProperty *object_property_iter_next(ObjectPropertyIterator *iter);
void object_unparent(Object *obj);
/**
* object_property_get:
* @obj: the object
* @v: the visitor that will receive the property value. This should be an
* Output visitor and the data will be written with @name as the name.
* @name: the name of the property
* @errp: returns an error if this function fails
*
* Reads a property from a object.
*/
void object_property_get(Object *obj, Visitor *v, const char *name,
Error **errp);
/**
* object_property_set_str:
* @value: the value to be written to the property
* @name: the name of the property
* @errp: returns an error if this function fails
*
* Writes a string value to a property.
*/
void object_property_set_str(Object *obj, const char *value,
const char *name, Error **errp);
/**
* object_property_get_str:
* @obj: the object
* @name: the name of the property
* @errp: returns an error if this function fails
*
* Returns: the value of the property, converted to a C string, or NULL if
* an error occurs (including when the property value is not a string).
* The caller should free the string.
*/
char *object_property_get_str(Object *obj, const char *name,
Error **errp);
/**
* object_property_set_link:
* @value: the value to be written to the property
* @name: the name of the property
* @errp: returns an error if this function fails
*
* Writes an object's canonical path to a property.
*/
void object_property_set_link(Object *obj, Object *value,
const char *name, Error **errp);
/**
* object_property_get_link:
* @obj: the object
* @name: the name of the property
* @errp: returns an error if this function fails
*
* Returns: the value of the property, resolved from a path to an Object,
* or NULL if an error occurs (including when the property value is not a
* string or not a valid object path).
*/
Object *object_property_get_link(Object *obj, const char *name,
Error **errp);
/**
* object_property_set_bool:
* @value: the value to be written to the property
* @name: the name of the property
* @errp: returns an error if this function fails
*
* Writes a bool value to a property.
*/
void object_property_set_bool(Object *obj, bool value,
const char *name, Error **errp);
/**
* object_property_get_bool:
* @obj: the object
* @name: the name of the property
* @errp: returns an error if this function fails
*
 * Returns: the value of the property, converted to a boolean, or false if
 * an error occurs (including when the property value is not a bool).
*/
bool object_property_get_bool(Object *obj, const char *name,
Error **errp);
/**
* object_property_set_int:
* @value: the value to be written to the property
* @name: the name of the property
* @errp: returns an error if this function fails
*
* Writes an integer value to a property.
*/
void object_property_set_int(Object *obj, int64_t value,
const char *name, Error **errp);
/**
* object_property_get_int:
* @obj: the object
* @name: the name of the property
* @errp: returns an error if this function fails
*
* Returns: the value of the property, converted to an integer, or negative if
* an error occurs (including when the property value is not an integer).
*/
int64_t object_property_get_int(Object *obj, const char *name,
Error **errp);
/**
* object_property_get_enum:
* @obj: the object
* @name: the name of the property
* @typename: the name of the enum data type
* @errp: returns an error if this function fails
*
* Returns: the value of the property, converted to an integer, or
* undefined if an error occurs (including when the property value is not
* an enum).
*/
int object_property_get_enum(Object *obj, const char *name,
const char *typename, Error **errp);
/**
* object_property_get_uint16List:
* @obj: the object
* @name: the name of the property
* @list: the returned int list
* @errp: returns an error if this function fails
*
* Returns: the value of the property, converted to integers, or
* undefined if an error occurs (including when the property value is not
 * a list of integers).
*/
void object_property_get_uint16List(Object *obj, const char *name,
uint16List **list, Error **errp);
/**
* object_property_set:
* @obj: the object
* @v: the visitor that will be used to write the property value. This should
* be an Input visitor and the data will be first read with @name as the
* name and then written as the property value.
* @name: the name of the property
* @errp: returns an error if this function fails
*
* Writes a property to a object.
*/
void object_property_set(Object *obj, Visitor *v, const char *name,
Error **errp);
/**
* object_property_parse:
* @obj: the object
* @string: the string that will be used to parse the property value.
* @name: the name of the property
* @errp: returns an error if this function fails
*
* Parses a string and writes the result into a property of an object.
*/
void object_property_parse(Object *obj, const char *string,
const char *name, Error **errp);
/**
* object_property_print:
* @obj: the object
* @name: the name of the property
* @human: if true, print for human consumption
* @errp: returns an error if this function fails
*
* Returns a string representation of the value of the property. The
* caller shall free the string.
*/
char *object_property_print(Object *obj, const char *name, bool human,
Error **errp);
/**
* object_property_get_type:
* @obj: the object
* @name: the name of the property
* @errp: returns an error if this function fails
*
* Returns: The type name of the property.
*/
const char *object_property_get_type(Object *obj, const char *name,
Error **errp);
/**
* object_get_root:
*
* Returns: the root object of the composition tree
*/
Object *object_get_root(void);
/**
* object_get_objects_root:
*
* Get the container object that holds user created
* object instances. This is the object at path
* "/objects"
*
* Returns: the user object container
*/
Object *object_get_objects_root(void);
/**
* object_get_canonical_path_component:
*
* Returns: The final component in the object's canonical path. The canonical
* path is the path within the composition tree starting from the root.
*/
gchar *object_get_canonical_path_component(Object *obj);
/**
* object_get_canonical_path:
*
* Returns: The canonical path for a object. This is the path within the
* composition tree starting from the root.
*/
gchar *object_get_canonical_path(Object *obj);
/**
* object_resolve_path:
* @path: the path to resolve
* @ambiguous: returns true if the path resolution failed because of an
* ambiguous match
*
* There are two types of supported paths--absolute paths and partial paths.
*
* Absolute paths are derived from the root object and can follow child<> or
* link<> properties. Since they can follow link<> properties, they can be
* arbitrarily long. Absolute paths look like absolute filenames and are
* prefixed with a leading slash.
*
* Partial paths look like relative filenames. They do not begin with a
* prefix. The matching rules for partial paths are subtle but designed to make
* specifying objects easy. At each level of the composition tree, the partial
* path is matched as an absolute path. The first match is not returned. At
* least two matches are searched for. A successful result is only returned if
* only one match is found. If more than one match is found, a flag is
* returned to indicate that the match was ambiguous.
*
* Returns: The matched object or NULL on path lookup failure.
*/
Object *object_resolve_path(const char *path, bool *ambiguous);
/**
* object_resolve_path_type:
* @path: the path to resolve
* @typename: the type to look for.
* @ambiguous: returns true if the path resolution failed because of an
* ambiguous match
*
* This is similar to object_resolve_path. However, when looking for a
* partial path only matches that implement the given type are considered.
* This restricts the search and avoids spuriously flagging matches as
* ambiguous.
*
* For both partial and absolute paths, the return value goes through
* a dynamic cast to @typename. This is important if either the link,
* or the typename itself are of interface types.
*
* Returns: The matched object or NULL on path lookup failure.
*/
Object *object_resolve_path_type(const char *path, const char *typename,
bool *ambiguous);
/**
* object_resolve_path_component:
* @parent: the object in which to resolve the path
* @part: the component to resolve.
*
* This is similar to object_resolve_path with an absolute path, but it
* only resolves one element (@part) and takes the others from @parent.
*
* Returns: The resolved object or NULL on path lookup failure.
*/
Object *object_resolve_path_component(Object *parent, const gchar *part);
/**
* object_property_add_child:
* @obj: the object to add a property to
* @name: the name of the property
* @child: the child object
 * @errp: if an error occurs, a pointer to an area to store the error
*
* Child properties form the composition tree. All objects need to be a child
* of another object. Objects can only be a child of one object.
*
* There is no way for a child to determine what its parent is. It is not
* a bidirectional relationship. This is by design.
*
* The value of a child property as a C string will be the child object's
* canonical path. It can be retrieved using object_property_get_str().
* The child object itself can be retrieved using object_property_get_link().
*/
void object_property_add_child(Object *obj, const char *name,
Object *child, Error **errp);
/* Behavior flags for link properties; see object_property_add_link(). */
typedef enum {
    /* Unref the link pointer when the property is deleted */
    OBJ_PROP_LINK_UNREF_ON_RELEASE = 0x1,
} ObjectPropertyLinkFlags;
/**
* object_property_allow_set_link:
*
* The default implementation of the object_property_add_link() check()
* callback function. It allows the link property to be set and never returns
* an error.
*/
void object_property_allow_set_link(Object *, const char *,
Object *, Error **);
/**
* object_property_add_link:
* @obj: the object to add a property to
* @name: the name of the property
* @type: the qobj type of the link
* @child: a pointer to where the link object reference is stored
* @check: callback to veto setting or NULL if the property is read-only
* @flags: additional options for the link
 * @errp: if an error occurs, a pointer to an area to store the error
*
* Links establish relationships between objects. Links are unidirectional
* although two links can be combined to form a bidirectional relationship
* between objects.
*
* Links form the graph in the object model.
*
* The <code>@check()</code> callback is invoked when
* object_property_set_link() is called and can raise an error to prevent the
* link being set. If <code>@check</code> is NULL, the property is read-only
* and cannot be set.
*
* Ownership of the pointer that @child points to is transferred to the
* link property. The reference count for <code>*@child</code> is
* managed by the property from after the function returns till the
* property is deleted with object_property_del(). If the
* <code>@flags</code> <code>OBJ_PROP_LINK_UNREF_ON_RELEASE</code> bit is set,
* the reference count is decremented when the property is deleted.
*/
void object_property_add_link(Object *obj, const char *name,
const char *type, Object **child,
void (*check)(Object *obj, const char *name,
Object *val, Error **errp),
ObjectPropertyLinkFlags flags,
Error **errp);
/**
* object_property_add_str:
* @obj: the object to add a property to
* @name: the name of the property
* @get: the getter or NULL if the property is write-only. This function must
* return a string to be freed by g_free().
* @set: the setter or NULL if the property is read-only
* @errp: if an error occurs, a pointer to an area to store the error
*
* Add a string property using getters/setters. This function will add a
* property of type 'string'.
*/
void object_property_add_str(Object *obj, const char *name,
char *(*get)(Object *, Error **),
void (*set)(Object *, const char *, Error **),
Error **errp);
void object_class_property_add_str(ObjectClass *klass, const char *name,
char *(*get)(Object *, Error **),
void (*set)(Object *, const char *,
Error **),
Error **errp);
/**
* object_property_add_bool:
* @obj: the object to add a property to
* @name: the name of the property
* @get: the getter or NULL if the property is write-only.
* @set: the setter or NULL if the property is read-only
* @errp: if an error occurs, a pointer to an area to store the error
*
* Add a bool property using getters/setters. This function will add a
* property of type 'bool'.
*/
void object_property_add_bool(Object *obj, const char *name,
bool (*get)(Object *, Error **),
void (*set)(Object *, bool, Error **),
Error **errp);
void object_class_property_add_bool(ObjectClass *klass, const char *name,
bool (*get)(Object *, Error **),
void (*set)(Object *, bool, Error **),
Error **errp);
/**
* object_property_add_enum:
* @obj: the object to add a property to
* @name: the name of the property
* @typename: the name of the enum data type
* @get: the getter or %NULL if the property is write-only.
* @set: the setter or %NULL if the property is read-only
* @errp: if an error occurs, a pointer to an area to store the error
*
* Add an enum property using getters/setters. This function will add a
* property of type '@typename'.
*/
void object_property_add_enum(Object *obj, const char *name,
const char *typename,
const char * const *strings,
int (*get)(Object *, Error **),
void (*set)(Object *, int, Error **),
Error **errp);
void object_class_property_add_enum(ObjectClass *klass, const char *name,
const char *typename,
const char * const *strings,
int (*get)(Object *, Error **),
void (*set)(Object *, int, Error **),
Error **errp);
/**
* object_property_add_tm:
* @obj: the object to add a property to
* @name: the name of the property
* @get: the getter or NULL if the property is write-only.
* @errp: if an error occurs, a pointer to an area to store the error
*
* Add a read-only struct tm valued property using a getter function.
* This function will add a property of type 'struct tm'.
*/
void object_property_add_tm(Object *obj, const char *name,
void (*get)(Object *, struct tm *, Error **),
Error **errp);
void object_class_property_add_tm(ObjectClass *klass, const char *name,
void (*get)(Object *, struct tm *, Error **),
Error **errp);
/**
* object_property_add_uint8_ptr:
* @obj: the object to add a property to
* @name: the name of the property
* @v: pointer to value
* @errp: if an error occurs, a pointer to an area to store the error
*
* Add an integer property in memory. This function will add a
* property of type 'uint8'.
*/
void object_property_add_uint8_ptr(Object *obj, const char *name,
const uint8_t *v, Error **errp);
void object_class_property_add_uint8_ptr(ObjectClass *klass, const char *name,
const uint8_t *v, Error **errp);
/**
* object_property_add_uint16_ptr:
* @obj: the object to add a property to
* @name: the name of the property
* @v: pointer to value
* @errp: if an error occurs, a pointer to an area to store the error
*
* Add an integer property in memory. This function will add a
* property of type 'uint16'.
*/
void object_property_add_uint16_ptr(Object *obj, const char *name,
const uint16_t *v, Error **errp);
void object_class_property_add_uint16_ptr(ObjectClass *klass, const char *name,
const uint16_t *v, Error **errp);
/**
* object_property_add_uint32_ptr:
* @obj: the object to add a property to
* @name: the name of the property
* @v: pointer to value
* @errp: if an error occurs, a pointer to an area to store the error
*
* Add an integer property in memory. This function will add a
* property of type 'uint32'.
*/
void object_property_add_uint32_ptr(Object *obj, const char *name,
const uint32_t *v, Error **errp);
void object_class_property_add_uint32_ptr(ObjectClass *klass, const char *name,
const uint32_t *v, Error **errp);
/**
* object_property_add_uint64_ptr:
* @obj: the object to add a property to
* @name: the name of the property
* @v: pointer to value
* @errp: if an error occurs, a pointer to an area to store the error
*
* Add an integer property in memory. This function will add a
* property of type 'uint64'.
*/
void object_property_add_uint64_ptr(Object *obj, const char *name,
const uint64_t *v, Error **Errp);
void object_class_property_add_uint64_ptr(ObjectClass *klass, const char *name,
const uint64_t *v, Error **Errp);
/**
* object_property_add_alias:
* @obj: the object to add a property to
* @name: the name of the property
* @target_obj: the object to forward property access to
* @target_name: the name of the property on the forwarded object
* @errp: if an error occurs, a pointer to an area to store the error
*
* Add an alias for a property on an object. This function will add a property
* of the same type as the forwarded property.
*
* The caller must ensure that <code>@target_obj</code> stays alive as long as
* this property exists. In the case of a child object or an alias on the same
* object this will be the case. For aliases to other objects the caller is
* responsible for taking a reference.
*/
void object_property_add_alias(Object *obj, const char *name,
Object *target_obj, const char *target_name,
Error **errp);
/**
* object_property_add_const_link:
* @obj: the object to add a property to
* @name: the name of the property
* @target: the object to be referred by the link
* @errp: if an error occurs, a pointer to an area to store the error
*
* Add an unmodifiable link for a property on an object. This function will
* add a property of type link<TYPE> where TYPE is the type of @target.
*
* The caller must ensure that @target stays alive as long as
* this property exists. In the case @target is a child of @obj,
* this will be the case. Otherwise, the caller is responsible for
* taking a reference.
*/
void object_property_add_const_link(Object *obj, const char *name,
Object *target, Error **errp);
/**
* object_property_set_description:
* @obj: the object owning the property
* @name: the name of the property
* @description: the description of the property on the object
* @errp: if an error occurs, a pointer to an area to store the error
*
* Set an object property's description.
*
*/
void object_property_set_description(Object *obj, const char *name,
const char *description, Error **errp);
void object_class_property_set_description(ObjectClass *klass, const char *name,
const char *description,
Error **errp);
/**
* object_child_foreach:
* @obj: the object whose children will be navigated
* @fn: the iterator function to be called
* @opaque: an opaque value that will be passed to the iterator
*
* Call @fn passing each child of @obj and @opaque to it, until @fn returns
* non-zero.
*
* It is forbidden to add or remove children from @obj from the @fn
* callback.
*
* Returns: The last value returned by @fn, or 0 if there is no child.
*/
int object_child_foreach(Object *obj, int (*fn)(Object *child, void *opaque),
void *opaque);
/**
* object_child_foreach_recursive:
* @obj: the object whose children will be navigated
* @fn: the iterator function to be called
* @opaque: an opaque value that will be passed to the iterator
*
* Call @fn passing each child of @obj and @opaque to it, until @fn returns
* non-zero. Calls recursively, all child nodes of @obj will also be passed
* all the way down to the leaf nodes of the tree. Depth first ordering.
*
* It is forbidden to add or remove children from @obj (or its
* child nodes) from the @fn callback.
*
* Returns: The last value returned by @fn, or 0 if there is no child.
*/
int object_child_foreach_recursive(Object *obj,
int (*fn)(Object *child, void *opaque),
void *opaque);
/**
* container_get:
* @root: root of the #path, e.g., object_get_root()
* @path: path to the container
*
* Return a container object whose path is @path. Create more containers
* along the path if necessary.
*
* Returns: the container object.
*/
Object *container_get(Object *root, const char *path);
#endif
| SPICE/qemu | include/qom/object.h | C | gpl-2.0 | 55,824 |
/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __APR_US_H__
#define __APR_US_H__
#include <mach/qdsp6v2/apr.h>
/* */
/* */
/* Start (run) a previously opened ultrasound (USM) stream session. */
#define USM_SESSION_CMD_RUN 0x00012306
struct usm_stream_cmd_run {
	struct apr_hdr hdr;	/* APR packet header (mach/qdsp6v2/apr.h) */
	u32 flags;		/* run flags; semantics defined by the DSP interface -- TODO confirm */
	u32 msw_ts;		/* most-significant word of the start timestamp */
	u32 lsw_ts;		/* least-significant word of the start timestamp */
} __packed;
/* */
/* Open an ultrasound stream in the read (capture) direction. */
#define USM_STREAM_CMD_OPEN_READ 0x00012309
struct usm_stream_cmd_open_read {
	struct apr_hdr hdr;	/* APR packet header */
	u32 uMode;		/* stream mode flags -- semantics defined by the DSP; verify */
	u32 src_endpoint;	/* source endpoint id */
	u32 pre_proc_top;	/* pre-processing topology id */
	u32 format;		/* media format id */
} __packed;
/* Open an ultrasound stream in the write (render) direction. */
#define USM_STREAM_CMD_OPEN_WRITE 0x00011271
struct usm_stream_cmd_open_write {
	struct apr_hdr hdr;	/* APR packet header */
	u32 format;		/* media format id */
} __packed;
#define USM_STREAM_CMD_CLOSE 0x0001230A
/* */
#define USM_STREAM_CMD_SET_ENC_PARAM 0x0001230B
/* */
#define USM_DATA_CMD_MEDIA_FORMAT_UPDATE 0x00011272
/* */
#define USM_PARAM_ID_ENCDEC_ENC_CFG_BLK 0x0001230D
/* */
/* */
/* Stream configuration shared by the encoder cfg and media-format payloads. */
struct usm_cfg_common {
	u16 ch_cfg;		/* channel configuration -- presumably channel count; verify */
	u16 bits_per_sample;	/* sample width in bits */
	u32 sample_rate;	/* sampling rate -- assumed Hz; confirm against DSP docs */
	u32 dev_id;		/* device id */
	u32 data_map;		/* channel/data mapping */
} __packed;
/* */
/* Upper bound (bytes) on the opaque format-specific payload below. */
#define USM_MAX_CFG_DATA_SIZE 100
struct usm_encode_cfg_blk {
	u32 frames_per_buf;	/* number of frames packed into each buffer */
	u32 format_id;		/* encoder media format id */
	/* Size in bytes of the configuration that follows -- presumably
	 * cfg_common plus the valid part of transp_data; verify. */
	u32 cfg_size;
	struct usm_cfg_common cfg_common;
	/* Format-specific (transparent) configuration, opaque to this driver. */
	u8 transp_data[USM_MAX_CFG_DATA_SIZE];
} __packed;
/* SET_ENC_PARAM command payload carrying an encoder configuration block. */
struct usm_stream_cmd_encdec_cfg_blk {
	struct apr_hdr hdr;	/* APR packet header */
	u32 param_id;		/* parameter id, e.g. USM_PARAM_ID_ENCDEC_ENC_CFG_BLK */
	u32 param_size;		/* size in bytes of the parameter payload */
	struct usm_encode_cfg_blk enc_blk;	/* the encoder configuration */
} __packed;
/* Kernel-side encoder/decoder configuration handed to the USM driver
 * (holds a pointer, so this is not an on-the-wire APR payload). */
struct us_encdec_cfg {
	u32 format_id;		/* media format id */
	struct usm_cfg_common cfg_common;	/* common stream parameters */
	u16 params_size;	/* size in bytes of the buffer at @params */
	u8 *params;		/* format-specific parameters */
} __packed;
/* MEDIA_FORMAT_UPDATE command payload describing the stream's media format. */
struct usm_stream_media_format_update {
	struct apr_hdr hdr;	/* APR packet header */
	u32 format_id;		/* media format id */
	/* Size in bytes of the configuration that follows. */
	u32 cfg_size;
	struct usm_cfg_common cfg_common;
	/* Format-specific (transparent) configuration data. */
	u8 transp_data[USM_MAX_CFG_DATA_SIZE];
} __packed;
/* */
/* Configure on-DSP ultrasound signal detection for a session. */
#define USM_SESSION_CMD_SIGNAL_DETECT_MODE 0x00012719
struct usm_session_cmd_detect_info {
	struct apr_hdr hdr;	/* APR packet header */
	u32 detect_mode;	/* detection mode selector */
	u32 skip_interval;	/* detection skip interval -- units unverified */
	u32 algorithm_cfg_size;	/* size in bytes of the algorithm config,
				 * presumably appended after this struct; verify */
} __packed;
/* */
#define USM_SESSION_EVENT_SIGNAL_DETECT_RESULT 0x00012720
#endif /* */
| TeamLGOG/android_kernel_lge_d800 | arch/arm/mach-msm/include/mach/qdsp6v2/apr_us.h | C | gpl-2.0 | 3,109 |
// Barrel module: re-exports every named export from the sibling modules
// below so consumers can import from this directory's index directly.
export * from './get_ldap';
export * from './jwt_verify';
export * from './auth';
export * from './post_snow';
/*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
* 02111-1307, USA.
*
* http://www.gnu.org/copyleft/gpl.html
*/
package net.sf.l2j.gameserver.script;
import net.sf.l2j.gameserver.Announcements;
import net.sf.l2j.gameserver.GameTimeController;
import net.sf.l2j.gameserver.RecipeController;
import net.sf.l2j.gameserver.datatables.CharNameTable;
import net.sf.l2j.gameserver.datatables.CharTemplateTable;
import net.sf.l2j.gameserver.datatables.ClanTable;
import net.sf.l2j.gameserver.datatables.ItemTable;
import net.sf.l2j.gameserver.datatables.LevelUpData;
import net.sf.l2j.gameserver.datatables.MapRegionTable;
import net.sf.l2j.gameserver.datatables.NpcTable;
import net.sf.l2j.gameserver.datatables.SkillTable;
import net.sf.l2j.gameserver.datatables.SkillTreeTable;
import net.sf.l2j.gameserver.datatables.SpawnTable;
import net.sf.l2j.gameserver.datatables.TeleportLocationTable;
import net.sf.l2j.gameserver.idfactory.IdFactory;
import net.sf.l2j.gameserver.model.L2World;
/**
 * Contract exposed to scripting engines, giving them access to the game
 * server's singleton tables and services via the constant fields below.
 * <p>
 * NOTE(review): the interface-constant pattern used here also serves to pin
 * the singleton references (see comment below); changing it would break all
 * implementors, so it is documented rather than restructured.
 *
 * @author Luis Arias
 */
public interface EngineInterface
{
	// Keep references to the singletons to prevent garbage collection.
	public CharNameTable charNametable = CharNameTable.getInstance();
	public IdFactory idFactory = IdFactory.getInstance();
	public ItemTable itemTable = ItemTable.getInstance();
	
	public SkillTable skillTable = SkillTable.getInstance();
	public RecipeController recipeController = RecipeController.getInstance();
	public SkillTreeTable skillTreeTable = SkillTreeTable.getInstance();
	public CharTemplateTable charTemplates = CharTemplateTable.getInstance();
	public ClanTable clanTable = ClanTable.getInstance();
	
	public NpcTable npcTable = NpcTable.getInstance();
	
	public TeleportLocationTable teleTable = TeleportLocationTable.getInstance();
	public LevelUpData levelUpData = LevelUpData.getInstance();
	public L2World world = L2World.getInstance();
	public SpawnTable spawnTable = SpawnTable.getInstance();
	public GameTimeController gameTimeController = GameTimeController.getInstance();
	public Announcements announcements = Announcements.getInstance();
	public MapRegionTable mapRegions = MapRegionTable.getInstance();
	
	/**
	 * Registers a quest-specific drop for an NPC.
	 *
	 * @param npcID the NPC template id the drop is attached to
	 * @param itemID the item id to drop
	 * @param min minimum drop count
	 * @param max maximum drop count
	 * @param chance drop chance -- scale/units defined by the implementor; verify
	 * @param questID identifier of the owning quest
	 * @param states quest states in which the drop applies
	 */
	public void addQuestDrop(int npcID, int itemID, int min, int max, int chance, String questID, String[] states);
	
	/**
	 * Registers an event drop that is active only within the given date range.
	 *
	 * @param items item ids that may drop
	 * @param count drop counts, parallel to {@code items} -- assumed; confirm with implementor
	 * @param chance drop chance
	 * @param range dates during which the drop is active
	 */
	public void addEventDrop(int[] items, int[] count, double chance, DateRange range);
	
	/**
	 * Hook invoked when a player logs in, e.g. to show event messages
	 * while {@code range} is active.
	 *
	 * @param message lines to deliver to the player
	 * @param range dates during which the message applies
	 */
	public void onPlayerLogin(String[] message, DateRange range);
}
| oonym/l2InterludeServer | L2J_Server/java/net/sf/l2j/gameserver/script/EngineInterface.java | Java | gpl-2.0 | 3,353 |
/* Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/devfreq.h>
#include <linux/math64.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/ftrace.h>
#include <linux/msm_adreno_devfreq.h>
#include <soc/qcom/scm.h>
#include "governor.h"
extern bool mdss_screen_on;
static DEFINE_SPINLOCK(tz_lock);
/*
* FLOOR is 5msec to capture up to 3 re-draws
* per frame for 60fps content.
*/
#define FLOOR 5000
/*
* MIN_BUSY is 1 msec for the sample to be sent
*/
#define MIN_BUSY 1000
#define LONG_FLOOR 50000
#define HIST 5
#define TARGET 80
#define CAP 75
#define BUSY_BIN 95
#define LONG_FRAME 25000
#define MAX_TZ_VERSION 0
/*
* CEILING is 50msec, larger than any standard
* frame length, but less than the idle timer.
*/
#define CEILING 50000
#define TZ_RESET_ID 0x3
#define TZ_UPDATE_ID 0x4
#define TZ_INIT_ID 0x6
#define TZ_RESET_ID_64 0x7
#define TZ_UPDATE_ID_64 0x8
#define TZ_INIT_ID_64 0x9
#define TAG "msm_adreno_tz: "
/* Trap into the TrustZone, and call funcs there. */
/*
 * Ask TrustZone to reset the governor's DCVS state.
 * Uses the 64-bit SCM interface when @is_64 is set, otherwise the legacy
 * atomic two-argument call serialized by tz_lock.
 */
static int __secure_tz_reset_entry2(unsigned int *scm_data, u32 size_scm_data,
			bool is_64)
{
	int status;

	/* Make all pending memory writes visible before entering TZ. */
	__iowmb();

	if (is_64)
		return scm_call(SCM_SVC_DCVS, TZ_RESET_ID_64, scm_data,
				size_scm_data, NULL, 0);

	spin_lock(&tz_lock);
	status = scm_call_atomic2(SCM_SVC_IO, TZ_RESET_ID, scm_data[0],
				  scm_data[1]);
	spin_unlock(&tz_lock);

	return status;
}
/*
 * Issue an atomic three-argument SCM call into TrustZone,
 * serialized by tz_lock.
 */
static int __secure_tz_entry3(u32 cmd, u32 val1, u32 val2, u32 val3)
{
	int status;

	spin_lock(&tz_lock);
	/* Make pending memory writes visible to TZ before the call. */
	__iowmb();
	status = scm_call_atomic3(SCM_SVC_IO, cmd, val1, val2, val3);
	spin_unlock(&tz_lock);

	return status;
}
/* Boolean to detect if pm has entered suspend mode */
static bool suspended = false;
/*
 * Send a DCVS update to TrustZone and fetch its decision into @val.
 * The 64-bit path lets the SCM call fill @val directly; the legacy path
 * uses the atomic call's return value, serialized by tz_lock.
 */
static int __secure_tz_update_entry3(unsigned int *scm_data, u32 size_scm_data,
			int *val, u32 size_val, bool is_64)
{
	int status;

	/* Make all pending memory writes visible before entering TZ. */
	__iowmb();

	if (is_64)
		return scm_call(SCM_SVC_DCVS, TZ_UPDATE_ID_64, scm_data,
				size_scm_data, val, size_val);

	spin_lock(&tz_lock);
	status = scm_call_atomic3(SCM_SVC_IO, TZ_UPDATE_ID,
			scm_data[0], scm_data[1], scm_data[2]);
	spin_unlock(&tz_lock);
	*val = status;

	return status;
}
/*
 * Initialize the TrustZone DCVS algorithm with the GPU power-level table.
 * Prefers the legacy 32-bit INIT id; falls back to the 64-bit interface
 * (and records that in priv->is_64) only when all three 64-bit command
 * ids are available.
 */
static int tz_init(struct devfreq_msm_adreno_tz_data *priv,
			unsigned int *tz_pwrlevels, u32 size_pwrlevels,
			unsigned int *version, u32 size_version)
{
	int ret;

	if (scm_is_call_available(SCM_SVC_DCVS, TZ_INIT_ID)) {
		/* Legacy interface: no version reported by TZ. */
		ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID, tz_pwrlevels,
				size_pwrlevels, NULL, 0);
		*version = 0;
		return ret;
	}

	if (scm_is_call_available(SCM_SVC_DCVS, TZ_INIT_ID_64) &&
			scm_is_call_available(SCM_SVC_DCVS, TZ_UPDATE_ID_64) &&
			scm_is_call_available(SCM_SVC_DCVS, TZ_RESET_ID_64)) {
		ret = scm_call(SCM_SVC_DCVS, TZ_INIT_ID_64, tz_pwrlevels,
				size_pwrlevels, version, size_version);
		if (!ret)
			priv->is_64 = true;
		return ret;
	}

	/* Neither interface is fully available. */
	return -EINVAL;
}
/*
 * Recompute the per-bus-level up/down cutoffs as percentages
 * (p_up/p_down) of the new normalized maximum @norm_max.
 */
static void _update_cutoff(struct devfreq_msm_adreno_tz_data *priv,
					unsigned int norm_max)
{
	int lvl;

	priv->bus.max = norm_max;

	for (lvl = 0; lvl < priv->bus.num; lvl++) {
		priv->bus.up[lvl] = priv->bus.p_up[lvl] * norm_max / 100;
		priv->bus.down[lvl] = priv->bus.p_down[lvl] * norm_max / 100;
	}
}
#ifdef CONFIG_SIMPLE_GPU_ALGORITHM
extern int simple_gpu_active;
extern int simple_gpu_algorithm(int level,
struct devfreq_msm_adreno_tz_data *priv);
#endif
#ifdef CONFIG_ADRENO_IDLER
extern int adreno_idler(struct devfreq_dev_status stats, struct devfreq *devfreq,
unsigned long *freq);
#endif
/*
 * Governor callback: choose the next GPU frequency.
 *
 * Accumulates busy/total time statistics, short-circuits to the minimum
 * frequency during suspend/screen-off, then asks TZ (or the optional
 * simple/idler algorithms) for a power-level delta. When the level is
 * unchanged, the bus statistics decide a FAST/SLOW bus hint instead.
 * Returns 0 on success with *freq and *flag filled in.
 */
static int tz_get_target_freq(struct devfreq *devfreq, unsigned long *freq,
				u32 *flag)
{
	int result = 0;
	struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
	struct devfreq_dev_status stats;
	struct xstats b;
	int val, level = 0;
	int act_level;
	int norm_cycles;
	int gpu_percent;
	/* Persists across invocations: tracks sustained-busy streaks. */
	static int busy_bin, frame_flag;
	unsigned int scm_data[3];

	if (priv->bus.num)
		stats.private_data = &b;
	else
		stats.private_data = NULL;
	result = devfreq->profile->get_dev_status(devfreq->dev.parent, &stats);
	if (result) {
		pr_err(TAG "get_status failed %d\n", result);
		return result;
	}
	/* Prevent overflow */
	if (stats.busy_time >= (1 << 24) || stats.total_time >= (1 << 24)) {
		stats.busy_time >>= 7;
		stats.total_time >>= 7;
	}
	*freq = stats.current_frequency;
	*flag = 0;
	/*
	 * Force to use & record as min freq when system has
	 * entered pm-suspend or screen-off state.
	 * (freq_table is ordered high-to-low, so the last entry is minimum.)
	 */
	if (suspended || !mdss_screen_on) {
		*freq = devfreq->profile->freq_table[devfreq->profile->max_state - 1];
		return 0;
	}
#ifdef CONFIG_ADRENO_IDLER
	if (adreno_idler(stats, devfreq, freq)) {
		/* adreno_idler has asked to bail out now */
		return 0;
	}
#endif
	priv->bin.total_time += stats.total_time;
	priv->bin.busy_time += stats.busy_time;
	if (priv->bus.num) {
		priv->bus.total_time += stats.total_time;
		priv->bus.gpu_time += stats.busy_time;
		priv->bus.ram_time += b.ram_time;
		priv->bus.ram_time += b.ram_wait;
	}
	/*
	 * Do not waste CPU cycles running this algorithm if
	 * the GPU just started, or if less than FLOOR time
	 * has passed since the last run or the gpu hasn't been
	 * busier than MIN_BUSY.
	 */
	if ((stats.total_time == 0) ||
		(priv->bin.total_time < FLOOR) ||
		(unsigned int) priv->bin.busy_time < MIN_BUSY) {
		return 0;
	}
	/* Track sustained high-utilization periods and long frames. */
	if ((stats.busy_time * 100 / stats.total_time) > BUSY_BIN) {
		busy_bin += stats.busy_time;
		if (stats.total_time > LONG_FRAME)
			frame_flag = 1;
	} else {
		busy_bin = 0;
		frame_flag = 0;
	}
	level = devfreq_get_freq_level(devfreq, stats.current_frequency);
	if (level < 0) {
		pr_err(TAG "bad freq %ld\n", stats.current_frequency);
		return level;
	}
	/*
	 * If there is an extended block of busy processing,
	 * increase frequency. Otherwise run the normal algorithm.
	 */
	if (priv->bin.busy_time > CEILING ||
		(busy_bin > CEILING && frame_flag)) {
		/* Negative delta of the whole level jumps straight to max. */
		val = -1 * level;
		busy_bin = 0;
		frame_flag = 0;
	} else {
#ifdef CONFIG_SIMPLE_GPU_ALGORITHM
		if (simple_gpu_active != 0)
			val = simple_gpu_algorithm(level, priv);
		else
			val = __secure_tz_entry3(TZ_UPDATE_ID,
					level,
					priv->bin.total_time,
					priv->bin.busy_time);
#else
		val = __secure_tz_entry3(TZ_UPDATE_ID,
				level,
				priv->bin.total_time,
				priv->bin.busy_time);
#endif
		/*
		 * NOTE(review): the val computed above is immediately
		 * overwritten by __secure_tz_update_entry3() below — this
		 * looks like a leftover from the legacy/64-bit SCM call
		 * migration (and it discards simple_gpu_algorithm's result
		 * too). Confirm which path is intended.
		 */
		scm_data[0] = level;
		scm_data[1] = priv->bin.total_time;
		scm_data[2] = priv->bin.busy_time;
		__secure_tz_update_entry3(scm_data, sizeof(scm_data),
					&val, sizeof(val), priv->is_64);
	}
	priv->bin.total_time = 0;
	priv->bin.busy_time = 0;
	/*
	 * If the decision is to move to a different level, make sure the GPU
	 * frequency changes.
	 */
	if (val) {
		level += val;
		level = max(level, 0);
		level = min_t(int, level, devfreq->profile->max_state - 1);
		goto clear;
	}
	if (priv->bus.total_time < LONG_FLOOR)
		goto end;
	norm_cycles = (unsigned int)priv->bus.ram_time /
			(unsigned int) priv->bus.total_time;
	gpu_percent = (100 * (unsigned int)priv->bus.gpu_time) /
			(unsigned int) priv->bus.total_time;
	/*
	 * If there's a new high watermark, update the cutoffs and send the
	 * FAST hint. Otherwise check the current value against the current
	 * cutoffs.
	 */
	if (norm_cycles > priv->bus.max) {
		_update_cutoff(priv, norm_cycles);
		*flag = DEVFREQ_FLAG_FAST_HINT;
	} else {
		/* GPU votes for IB not AB so don't under vote the system */
		norm_cycles = (100 * norm_cycles) / TARGET;
		act_level = priv->bus.index[level] + b.mod;
		act_level = (act_level < 0) ? 0 : act_level;
		act_level = (act_level >= priv->bus.num) ?
				(priv->bus.num - 1) : act_level;
		if (norm_cycles > priv->bus.up[act_level] &&
			gpu_percent > CAP)
			*flag = DEVFREQ_FLAG_FAST_HINT;
		else if (norm_cycles < priv->bus.down[act_level] && level)
			*flag = DEVFREQ_FLAG_SLOW_HINT;
	}
clear:
	priv->bus.total_time = 0;
	priv->bus.gpu_time = 0;
	priv->bus.ram_time = 0;
end:
	*freq = devfreq->profile->freq_table[level];
	return 0;
}
/*
 * KGSL notifier callback: on GPU idle/retire events re-run the devfreq
 * decision under the devfreq lock; every other event type is ignored.
 */
static int tz_notify(struct notifier_block *nb, unsigned long type, void *devp)
{
	struct devfreq *devfreq = devp;
	int result = 0;

	if (type == ADRENO_DEVFREQ_NOTIFY_IDLE ||
	    type == ADRENO_DEVFREQ_NOTIFY_RETIRE) {
		mutex_lock(&devfreq->lock);
		result = update_devfreq(devfreq);
		mutex_unlock(&devfreq->lock);
	}
	/* ADRENO_DEVFREQ_NOTIFY_SUBMIT and anything else: ignored */

	return notifier_from_errno(result);
}
/*
 * DEVFREQ_GOV_START handler: bind the governor private data, push the
 * frequency table into TZ, derive the bus cut-over percentages, and
 * register for GPU idle/retire notifications.
 */
static int tz_start(struct devfreq *devfreq)
{
	struct devfreq_msm_adreno_tz_data *priv;
	unsigned int tz_pwrlevels[MSM_ADRENO_MAX_PWRLEVELS + 1];
	unsigned int t1, t2 = 2 * HIST;
	int i, out, ret;
	unsigned int version;
	struct msm_adreno_extended_profile *ext_profile = container_of(
					(devfreq->profile),
					struct msm_adreno_extended_profile,
					profile);
	/*
	 * Assuming that we have only one instance of the adreno device
	 * connected to this governor,
	 * can safely restore the pointer to the governor private data
	 * from the container of the device profile
	 */
	devfreq->data = ext_profile->private_data;
	priv = devfreq->data;
	priv->nb.notifier_call = tz_notify;

	/* tz_pwrlevels[0] carries the count; entries follow from index 1. */
	out = 1;
	if (devfreq->profile->max_state < MSM_ADRENO_MAX_PWRLEVELS) {
		for (i = 0; i < devfreq->profile->max_state; i++)
			tz_pwrlevels[out++] = devfreq->profile->freq_table[i];
		tz_pwrlevels[0] = i;
	} else {
		pr_err(TAG "tz_pwrlevels[] is too short\n");
		return -EINVAL;
	}

	ret = tz_init(priv, tz_pwrlevels, sizeof(tz_pwrlevels), &version,
			sizeof(version));
	if (ret != 0 || version > MAX_TZ_VERSION) {
		pr_err(TAG "tz_init failed\n");
		return ret;
	}

	/* Set up the cut-over percentages for the bus calculation. */
	if (priv->bus.num) {
		for (i = 0; i < priv->bus.num; i++) {
			t1 = (u32)(100 * priv->bus.ib[i]) /
				(u32)priv->bus.ib[priv->bus.num - 1];
			priv->bus.p_up[i] = t1 - HIST;
			priv->bus.p_down[i] = t2 - 2 * HIST;
			t2 = t1;
		}
		/* Set the upper-most and lower-most bounds correctly. */
		priv->bus.p_down[0] = 0;
		/* NOTE(review): p_down[1] is written without checking
		 * priv->bus.num > 1 — confirm num >= 2 is guaranteed. */
		priv->bus.p_down[1] = (priv->bus.p_down[1] > (2 * HIST)) ?
					priv->bus.p_down[1] : (2 * HIST);
		if (priv->bus.num - 1 >= 0)
			priv->bus.p_up[priv->bus.num - 1] = 100;
		_update_cutoff(priv, priv->bus.max);
	}

	return kgsl_devfreq_add_notifier(devfreq->dev.parent, &priv->nb);
}
/*
 * DEVFREQ_GOV_STOP handler: drop the KGSL notifier registration and detach
 * the governor's private data pointer.
 */
static int tz_stop(struct devfreq *devfreq)
{
	struct devfreq_msm_adreno_tz_data *priv = devfreq->data;

	kgsl_devfreq_del_notifier(devfreq->dev.parent, &priv->nb);

	/* Leaving the governor: clear the private-data reference. */
	devfreq->data = NULL;

	return 0;
}
/*
 * DEVFREQ_GOV_RESUME handler: leave the suspended state and restore the
 * profile's initial frequency.
 */
static int tz_resume(struct devfreq *devfreq)
{
	struct devfreq_dev_profile *profile = devfreq->profile;
	unsigned long freq = profile->initial_freq;

	suspended = false;

	return profile->target(devfreq->dev.parent, &freq, 0);
}
/*
 * DEVFREQ_GOV_SUSPEND handler: reset the TZ DCVS state, mark the governor
 * suspended, drop all accumulated statistics and park the GPU at the lowest
 * frequency in the table.
 */
static int tz_suspend(struct devfreq *devfreq)
{
	struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
	struct devfreq_dev_profile *profile = devfreq->profile;
	unsigned int scm_data[2] = {0, 0};
	unsigned long freq;

	/* Tell the secure world to reset its DCVS bookkeeping. */
	__secure_tz_reset_entry2(scm_data, sizeof(scm_data), priv->is_64);

	suspended = true;

	/* Discard every accumulated busy/total statistic. */
	priv->bin.total_time = 0;
	priv->bin.busy_time = 0;
	priv->bus.total_time = 0;
	priv->bus.gpu_time = 0;
	priv->bus.ram_time = 0;

	/* freq_table is ordered high-to-low, so the last entry is minimum. */
	freq = profile->freq_table[profile->max_state - 1];
	return profile->target(devfreq->dev.parent, &freq, 0);
}
/*
 * devfreq governor event dispatcher. DEVFREQ_GOV_INTERVAL is a no-op since
 * this governor is notification-driven, not polling-driven.
 */
static int tz_handler(struct devfreq *devfreq, unsigned int event, void *data)
{
	int result = 0;

	BUG_ON(devfreq == NULL);

	if (event == DEVFREQ_GOV_START)
		result = tz_start(devfreq);
	else if (event == DEVFREQ_GOV_STOP)
		result = tz_stop(devfreq);
	else if (event == DEVFREQ_GOV_SUSPEND)
		result = tz_suspend(devfreq);
	else if (event == DEVFREQ_GOV_RESUME)
		result = tz_resume(devfreq);

	return result;
}
/* Governor descriptor registered with the devfreq core. */
static struct devfreq_governor msm_adreno_tz = {
	.name = "msm-adreno-tz",
	.get_target_freq = tz_get_target_freq,
	.event_handler = tz_handler,
};
/* Register the governor early in boot (subsys initcall level). */
static int __init msm_adreno_tz_init(void)
{
	return devfreq_add_governor(&msm_adreno_tz);
}
subsys_initcall(msm_adreno_tz_init);
/* Unregister the governor on module unload; log (but cannot propagate)
 * a removal failure. */
static void __exit msm_adreno_tz_exit(void)
{
	int ret;

	ret = devfreq_remove_governor(&msm_adreno_tz);
	if (ret)
		pr_err(TAG "failed to remove governor %d\n", ret);
}
module_exit(msm_adreno_tz_exit);

/*
 * BUG FIX: "GPLv2" is not one of the license strings the module loader
 * recognizes (see include/linux/module.h), so the module tainted the kernel
 * as if it were proprietary. The recognized spelling is "GPL v2".
 */
MODULE_LICENSE("GPL v2");
| XileForce/Vindicator | drivers/devfreq/governor_msm_adreno_tz.c | C | gpl-2.0 | 13,226 |
package com.wellheadstone.nemms.server.util;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Map;
import org.apache.commons.lang3.ArrayUtils;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.wellheadstone.nemms.server.domain.po.DeviceDataPo;
import com.wellheadstone.nemms.server.domain.po.DeviceParamPo;
import com.wellheadstone.nemms.server.domain.service.ServiceFacade;
import com.wellheadstone.nemms.server.message.CMCCFDSMessage;
import com.wellheadstone.nemms.server.message.SocketIOMessage;
/**
 * Static helpers for building, parsing and converting CMCC-FDS monitoring
 * protocol messages (TCP/UDP frames and their Socket.IO counterparts).
 *
 * <p>Multi-byte protocol fields are little-endian on the wire; the
 * {@link Converter} reverse-byte helpers perform the endianness swaps.
 */
public class MessageUtils {
	private static final Logger logger = LoggerFactory.getLogger(MessageUtils.class);

	/** Turns a received heartbeat into its reply by mutating {@code msg} in place. */
	public static CMCCFDSMessage getHeartResMessage(CMCCFDSMessage msg) {
		msg.setVpLayerFlag((byte) 0x00);
		msg.setRespFlag((byte) 0x00);
		return msg;
	}

	/** Marks {@code msg} as a parameter-list request (mutates in place). */
	public static CMCCFDSMessage getParamListReqMessage(CMCCFDSMessage msg) {
		msg.setVpLayerFlag((byte) 0x80);
		msg.setRespFlag((byte) 0xff);
		return msg;
	}

	/** Builds a parameter-list request frame (cmd 0x02) from a Socket.IO message. */
	public static CMCCFDSMessage getParamListReqMessage(SocketIOMessage data) {
		CMCCFDSMessage message = MessageUtils.SocketIOMessage2TcpUdpMessage(data);
		byte[] pdu = Converter.getReverseBytes(MessageUtils.getParamListPDU(message.getMcp()));
		message.setCmdId((byte) 0x02);
		message.setPDU(pdu);
		return message;
	}

	/** Builds a "query all parameters" request frame (cmd 0x02, empty PDU). */
	public static CMCCFDSMessage getQueryAllReqMessage(SocketIOMessage data) {
		CMCCFDSMessage message = MessageUtils.SocketIOMessage2TcpUdpMessage(data);
		message.setCmdId((byte) 0x02);
		message.setPDU(new byte[] { 0x00 });
		return message;
	}

	/**
	 * Builds a "query selected parameters" request frame.
	 * NOTE(review): currently byte-identical to {@link #getQueryAllReqMessage} —
	 * the selected-parameter PDU looks unimplemented; confirm intent.
	 */
	public static CMCCFDSMessage getQuerySelectedReqMessage(SocketIOMessage data) {
		CMCCFDSMessage message = MessageUtils.SocketIOMessage2TcpUdpMessage(data);
		message.setCmdId((byte) 0x02);
		message.setPDU(new byte[] { 0x00 });
		return message;
	}

	/** Builds a parameter set-up request frame (cmd 0x03). */
	public static CMCCFDSMessage getSetupReqMessage(SocketIOMessage data) {
		CMCCFDSMessage message = MessageUtils.SocketIOMessage2TcpUdpMessage(data);
		message.setCmdId((byte) 0x03);
		message.setPDU(new byte[] { 0x00 });
		return message;
	}

	/**
	 * Maps a Socket.IO message onto a protocol frame, filling in the fixed
	 * header fields (start/end flag 0x7e, VP layer 0x80, response flag 0xff).
	 */
	public static CMCCFDSMessage SocketIOMessage2TcpUdpMessage(SocketIOMessage msg) {
		CMCCFDSMessage message = new CMCCFDSMessage();
		message.setStartFlag((byte) 0x7e);
		message.setAp(Byte.valueOf(msg.getApProtocol()));
		message.setVp((byte) 0x01);
		message.setSiteId(getSiteId(msg.getUid()));
		message.setDeviceId(getDeviceId(msg.getUid()));
		message.setPacketId((short) 0x00);
		message.setVpLayerFlag((byte) 0x80);
		message.setMcp(Byte.valueOf(msg.getMcpProtocol()));
		message.setRespFlag((byte) 0xff);
		message.setEndFlag((byte) 0x7e);
		return message;
	}

	/**
	 * Extracts the numeric site id from a hex uid string such as "0X12345678[dd]".
	 *
	 * @return the parsed site id, or {@link Integer#MAX_VALUE} when the uid
	 *         length is outside the accepted 10..12 range
	 */
	public static int getSiteId(String siteId) {
		if (siteId.length() < 10 || siteId.length() > 12) {
			return Integer.MAX_VALUE;
		}
		String uid = siteId.substring(2, 10);
		return Integer.valueOf(uid, 16);
	}

	/**
	 * Extracts the trailing device id from a hex uid; 0 when the uid has no
	 * device suffix (length 10) or is malformed.
	 */
	public static byte getDeviceId(String siteId) {
		if (siteId.length() < 10 || siteId.length() > 12) {
			return 0;
		}
		if (siteId.length() == 10) {
			return Byte.valueOf("0");
		}
		String uid = siteId.substring(10);
		return Short.valueOf(uid, 16).byteValue();
	}

	/**
	 * Parses a PDU made of length-prefixed units (uid + value) and persists
	 * the decoded parameter values for the given site.
	 *
	 * @param siteUid site the data belongs to
	 * @param mcp     protocol variant: 1 uses 2-byte param ids, otherwise 4-byte
	 * @param pdu     raw payload; ignored when shorter than one minimal unit
	 */
	public static void parseDataUnit(String siteUid, byte mcp, byte[] pdu) {
		if (pdu == null || pdu.length < 4) {
			return;
		}
		int byteCount = mcp == 1 ? 2 : 4;
		int size = pdu.length / byteCount;
		List<DeviceDataPo> entities = new ArrayList<DeviceDataPo>(size);
		Map<String, DeviceParamPo> paramMap = ServiceFacade.getDeviceParamMap();
		for (int i = 0; i < pdu.length;) {
			int unitLength = pdu[i];
			// BUG FIX (robustness): a zero or negative unit-length byte in a
			// malformed PDU previously made this loop spin forever on the
			// same offset.
			if (unitLength <= 0) {
				break;
			}
			int startIndex = i + 1;
			int endIndex = startIndex + byteCount;
			String hexUid = Converter.getReverseHexString(pdu, startIndex, endIndex);
			String paramUid = MessageUtils.getParamUid(hexUid);
			DeviceParamPo paramPo = paramMap.get(MessageUtils.getDeviceParamKey(paramUid, mcp));
			DeviceDataPo po = new DeviceDataPo();
			po.setSiteUid(siteUid);
			po.setParamUid(paramUid);
			po.setMcpId((int) mcp);
			po.setValue(MessageUtils.getParamValue(pdu, endIndex, i + unitLength, paramPo));
			// Replace the value with an error marker when the uid carries an
			// error code in its leading nibble.
			po.setValue(MessageUtils.getParamValue(hexUid, po.getValue()));
			entities.add(po);
			i = i + unitLength;
		}
		ServiceFacade.updateParamListValue(entities);
	}

	/**
	 * Decodes a single parameter value from the PDU slice [startIndex, endIndex)
	 * according to the parameter's declared value type, scaling by its ratio.
	 * Returns "N/A" when the parameter is unknown or the type is unsupported.
	 */
	public static String getParamValue(byte[] pdu, int startIndex, int endIndex, DeviceParamPo po) {
		if (po == null) {
			return "N/A";
		}
		byte[] bytes = Arrays.copyOfRange(pdu, startIndex, endIndex);
		if (po.getValueType().equals("uint1")) {
			byte[] src = Converter.getBytes(Converter.getReverseBytes(bytes), 2);
			return getFormattedParamValue(String.valueOf(Converter.getShort(src) / po.getRatio()));
		}
		if (po.getValueType().equals("uint2")) {
			byte[] src = Converter.getBytes(Converter.getReverseBytes(bytes), 4);
			return getFormattedParamValue(String.valueOf(Converter.getInt(src) / po.getRatio()));
		}
		if (po.getValueType().equals("uint3")) {
			byte[] src = Converter.getBytes(Converter.getReverseBytes(bytes), 4);
			return getFormattedParamValue(String.valueOf(Converter.getInt(src) / po.getRatio()));
		}
		if (po.getValueType().equals("uint4")) {
			byte[] src = Converter.getBytes(Converter.getReverseBytes(bytes), 8);
			return getFormattedParamValue(String.valueOf(Converter.getLong(src) / po.getRatio()));
		}
		if (po.getValueType().equals("sint1")) {
			byte src = Byte.valueOf(bytes[0]);
			return getFormattedParamValue(String.valueOf(src / po.getRatio()));
		}
		if (po.getValueType().equals("sint2")) {
			byte[] src = Converter.getBytes(Converter.getReverseBytes(bytes), 2);
			return getFormattedParamValue(String.valueOf(Converter.getShort(src) / po.getRatio()));
		}
		if (po.getValueType().equals("bit")) {
			byte[] src = Converter.getBytes(Converter.getReverseBytes(bytes), 2);
			return String.valueOf(Converter.getShort(src));
		}
		if (po.getValueType().equals("str")) {
			if (bytes[0] == 0x30 || bytes[0] == 0x00) {
				return "0";
			}
			// Truncate at the first NUL terminator if present.
			int index = ArrayUtils.indexOf(bytes, (byte) 0x00);
			if (index > 0) {
				return new String(Arrays.copyOfRange(bytes, 0, index), Charset.forName("ISO-8859-1"));
			}
			return new String(bytes, Charset.forName("ISO-8859-1"));
		}
		if (po.getValueType().equals("dstr")) {
			return getDStringValue(po.getFormat(), bytes);
		}
		return "N/A";
	}

	/** Drops a trailing ".0" fractional part, e.g. "5.0" -> "5". */
	private static String getFormattedParamValue(String value) {
		String[] seg = StringUtils.split(value, '.');
		if (seg.length == 2) {
			return Integer.valueOf(seg[1]) > 0 ? value : seg[0];
		}
		if (seg.length == 1) {
			return seg[0];
		}
		return StringUtils.EMPTY;
	}

	/**
	 * Encodes a textual parameter value into its wire representation for the
	 * parameter's declared value type; {@code null} on unsupported types or
	 * any conversion failure (logged).
	 */
	public static byte[] getParamValueBytes(String value, DeviceParamPo po) {
		try {
			double ratio = po.getRatio();
			if (po.getValueType().equals("uint1")) {
				byte[] src = Converter.getShortBytes(Double.valueOf(value), ratio);
				return Converter.getReverseBytes(src, 1, 2);
			}
			if (po.getValueType().equals("uint2")) {
				byte[] src = Converter.getIntBytes(Double.valueOf(value), ratio);
				return Converter.getReverseBytes(src, 2, 4);
			}
			if (po.getValueType().equals("uint3")) {
				byte[] src = Converter.getIntBytes(Double.valueOf(value), ratio);
				return Converter.getReverseBytes(src, 1, 4);
			}
			if (po.getValueType().equals("uint4")) {
				byte[] src = Converter.getLongBytes(Double.valueOf(value), ratio);
				return Converter.getReverseBytes(src, 4, 8);
			}
			if (po.getValueType().equals("sint1")) {
				byte[] src = Converter.getShortBytes(Double.valueOf(value), ratio);
				return Converter.getReverseBytes(src, 1, 2);
			}
			if (po.getValueType().equals("sint2")) {
				byte[] src = Converter.getIntBytes(Double.valueOf(value), ratio);
				return Converter.getReverseBytes(src, 2, 4);
			}
			if (po.getValueType().equals("bit")) {
				return new byte[] { Byte.valueOf(value) };
			}
			if (po.getValueType().equals("str")) {
				return value.getBytes("ISO-8859-1");
			}
			if (po.getValueType().equals("dstr")) {
				return getDStringValueBytes(po.getFormat(), value);
			}
		} catch (Exception e) {
			logger.error("getParamValueBytes error", e);
		}
		return null;
	}

	/** Builds a query unit: [length][param id][zero-filled value placeholder]. */
	public static byte[] getUnitBytes(int mcp, DeviceParamPo po) {
		// mcp 1 uses 2-byte param ids (3 = len byte + 2-byte id), others 4-byte.
		int length = (mcp == 1) ? (3 + po.getValueLen()) : (5 + po.getValueLen());
		ByteBuffer buffer = ByteBuffer.allocate(length);
		buffer.put((byte) length);
		buffer.put(getParamIdBytes(mcp, po.getUid().trim()));
		buffer.put(Converter.getFixedLengthBytes(po.getValueLen()));
		buffer.flip();
		return buffer.array();
	}

	/**
	 * Builds a set-value unit: [length][param id][encoded value];
	 * {@code null} when the value cannot be encoded.
	 */
	public static byte[] getUnitBytes(int mcp, DeviceParamPo po, String value) {
		byte[] valueBytes = getParamValueBytes(value, po);
		if (valueBytes == null) {
			return null;
		}
		int length = (mcp == 1) ? (3 + po.getValueLen()) : (5 + po.getValueLen());
		ByteBuffer buffer = ByteBuffer.allocate(length);
		buffer.put((byte) length);
		buffer.put(getParamIdBytes(mcp, po.getUid().trim()));
		buffer.put(valueBytes);
		buffer.flip();
		return buffer.array();
	}

	/** Encodes a hex param id ("0X....") little-endian: 2 bytes for mcp 1, else 4. */
	public static byte[] getParamIdBytes(int mcp, String id) {
		if (mcp == 1) {
			return Converter.getReverseBytes(Short.valueOf(id.substring(2), 16).shortValue());
		}
		return Converter.getReverseBytes(Integer.valueOf(id.substring(2), 16).intValue());
	}

	/** Fixed PDU requesting the parameter-id range for the given protocol. */
	private static byte[] getParamListPDU(byte mcp) {
		// mcp:a — parameter ids are 2 bytes wide
		if (mcp == 1) {
			return new byte[] { 0x01, 0x01, 0x00, 0x09, 0x05 };
		}
		// mcp:c — parameter ids are 4 bytes wide
		if (mcp == 3) {
			return new byte[] { 0x01, 0x01, 0x00, 0x00, 0x00, 0x09, 0x07 };
		}
		return new byte[] { 0x00 };
	}

	/** Cache/lookup key combining parameter uid and protocol id. */
	public static String getDeviceParamKey(String uid, int mcpId) {
		return String.format("%s-%s", uid.trim().toUpperCase(), mcpId);
	}

	/**
	 * Decodes a display-string value: "ip" renders 4 octets as dotted quad,
	 * "dt" renders 7 BCD bytes as "yyyymmdd hh:mm:ss"; "" otherwise.
	 */
	public static String getDStringValue(String format, byte[] bytes) {
		if ("ip".equals(format.trim().toLowerCase()) && bytes != null && bytes.length > 3) {
			return String.format("%s.%s.%s.%s",
					Converter.byteToShort(bytes[0]),
					Converter.byteToShort(bytes[1]),
					Converter.byteToShort(bytes[2]),
					Converter.byteToShort(bytes[3]));
		}
		if ("dt".equals(format.trim().toLowerCase()) && bytes != null && bytes.length > 6) {
			return String.format("%02x%02x%02x%02x %02x:%02x:%02x",
					Converter.byteToShort(bytes[0]),
					Converter.byteToShort(bytes[1]),
					Converter.byteToShort(bytes[2]),
					Converter.byteToShort(bytes[3]),
					Converter.byteToShort(bytes[4]),
					Converter.byteToShort(bytes[5]),
					Converter.byteToShort(bytes[6]));
		}
		return "";
	}

	/**
	 * Encodes a display string back to bytes: dotted-quad IPv4 for "ip",
	 * "yyyymmdd hh:mm:ss" for "dt"; {@code null} on malformed input or an
	 * unknown format.
	 */
	public static byte[] getDStringValueBytes(String format, String value) {
		if ("ip".equals(format.trim().toLowerCase())) {
			String[] seg = StringUtils.split(value, '.');
			// BUG FIX: the old check (seg.length < 3) let 3-segment input
			// through and then crashed reading seg[3]; an IPv4 address needs
			// exactly four octets.
			if (seg == null || seg.length < 4) {
				return null;
			}
			return new byte[] {
					Short.valueOf(seg[0]).byteValue(),
					Short.valueOf(seg[1]).byteValue(),
					Short.valueOf(seg[2]).byteValue(),
					Short.valueOf(seg[3]).byteValue()
			};
		}
		if ("dt".equals(format.trim().toLowerCase())) {
			String[] seg = StringUtils.split(value, ' ');
			if (seg == null || seg.length < 2) {
				return null;
			}
			char[] date = seg[0].toCharArray();
			String[] times = StringUtils.split(seg[1], ':');
			if (date.length < 8 || times.length < 3) {
				return null;
			}
			return new byte[] {
					Byte.valueOf(String.copyValueOf(date, 0, 2), 16),
					Byte.valueOf(String.copyValueOf(date, 2, 2), 16),
					Byte.valueOf(String.copyValueOf(date, 4, 2), 16),
					Byte.valueOf(String.copyValueOf(date, 6, 2), 16),
					Byte.valueOf(times[0], 16),
					Byte.valueOf(times[1], 16),
					Byte.valueOf(times[2], 16)
			};
		}
		return null;
	}

	/** Reassembles the "0X<site>[<device>]" uid string from a decoded frame. */
	public static String getSiteUid(CMCCFDSMessage msg) {
		String devId = Converter.getHexString(msg.getDeviceId());
		String siteUid = Converter.getHexStringWith0X(Converter.getHexString(msg.getSiteId()));
		if (devId.equals("00")) {
			return siteUid;
		}
		return siteUid + devId;
	}

	/** Canonical parameter uid: zeroes the (possible error-code) leading nibble. */
	public static String getParamUid(String hexUid) {
		char[] chars = hexUid.toCharArray();
		if (chars[0] != '0') {
			chars[0] = '0';
			hexUid = new String(chars);
		}
		return Converter.getHexStringWith0X(hexUid);
	}

	/**
	 * Returns {@code value} unchanged when the uid carries no error code,
	 * otherwise the display marker for the embedded error nibble.
	 */
	public static String getParamValue(String hexUid, String value) {
		char[] chars = hexUid.toCharArray();
		if (chars[0] != '0') {
			return getErrorParamValue(chars[0]);
		}
		return value;
	}

	/**
	 * Maps a protocol error code (leading uid nibble) to its display marker.
	 *
	 * Per the protocol spec:
	 * 0 = OK; 1 = unknown data id; 2 = value out of settable range;
	 * 3 = id/value mismatch (e.g. non-ASCII where ASCII required);
	 * 4 = id/length mismatch; 5 = reading below working range (show "--");
	 * 6 = reading above working range (show "++");
	 * 7 = value unmeasurable (show "**"); 8 = reserved;
	 * 9 = other errors; 10-15 = vendor-defined (not handled centrally).
	 */
	public static String getErrorParamValue(char errorCode) {
		if (errorCode == '5') {
			return "--";
		}
		if (errorCode == '6') {
			return "++";
		}
		if (errorCode == '7') {
			return "**";
		}
		return String.valueOf(errorCode);
	}
}
| tomdeng/NEMMS | nemms-mina/src/main/java/com/wellheadstone/nemms/server/util/MessageUtils.java | Java | gpl-2.0 | 13,851 |
/*
* Lupus in Tabula
* ...un progetto di Edoardo Morassutto
* Contributors:
* - 2014 Edoardo Morassutto <edoardo.morassutto@gmail.com>
*/
// Base path of the app (empty string = server root) and derived API endpoint.
var path = "";
var APIdir = path + "/api";
// Paired counters for showError()/removeError(): each alert gets id
// "error-<errorCount>", and removal walks the same sequence via lastError.
var errorCount = 0;
var lastError = 0;
// Timestamp of the last successful notification poll (null = never polled).
var lastNotificationUpdate = null;
/*
 * Ends the session server-side, then always sends the user back to the
 * login page, whether or not the API call succeeded.
 */
function logout() {
    var gotoLogin = function () {
        window.location.href = path + "/login";
    };
    $.ajax({
        url: APIdir + "/logout",
        type: "GET",
        dataType: "json",
        // NOTE(review): synchronous XHR is deprecated by browsers — confirm
        // whether this can be made async given the redirect that follows.
        async: false,
        success: gotoLogin,
        error: gotoLogin
    });
}
/* A short name is 1-10 characters: a leading letter, alphanumerics after. */
function isShortName(name) {
    return /^[a-zA-Z][a-zA-Z0-9]{0,9}$/.test(name);
}
/*
 * A valid description is 2-45 characters: alphanumerics with inner spaces
 * allowed, but the first and last character must be alphanumeric.
 */
function isValidDescr(descr) {
    return /^[a-zA-Z0-9][a-zA-Z0-9 ]{0,43}[a-zA-Z0-9]$/.test(descr);
}
/*
 * Prepends a dismissable Bootstrap danger alert just under the navbar and
 * schedules its automatic removal after two seconds. `message.error` holds
 * the text to show.
 */
function showError(message) {
    var alertId = "error-" + (errorCount++);
    var $alert = $("<div>")
        .addClass("alert alert-danger alert-dismissable")
        .attr("id", alertId);
    $alert.append('<button type="button" class="close" data-dismiss="alert" aria-hidden="true">×</button>');
    $alert.append(message.error);
    $("nav.navbar").after($alert);
    setTimeout(removeError, 2000);
}
/*
 * Best-effort extraction of an error payload from a jqXHR object: parse the
 * response body as JSON, or fall back to a string dump of the whole object.
 */
function getErrorMessage(jqError) {
    try {
        return JSON.parse(jqError.responseText);
    } catch (ignored) {
        // Body was not JSON — serialize the jqXHR itself for display.
        return JSON.stringify(jqError);
    }
}
/* Fades out and removes the oldest still-visible error alert. */
function removeError() {
    var $oldest = $("#error-" + (lastError++));
    $oldest.fadeTo(500, 0).slideUp(500, function () {
        $(this).remove();
    });
}
/*
 * Click handler for a notification's dismiss button (`this` is the button).
 * Asks the server to dismiss the notification, then removes its element
 * from the dropdown; when the list becomes empty, restores the placeholder
 * text. Returns false to keep the click from following the enclosing link.
 */
function removeNotification() {
    var $this = $(this);
    var id_notification = $this.attr('data-id-notification');
    $.ajax({
        url: APIdir + "/notification/dismiss",
        data: { id_notification: id_notification },
        dataType: 'json',
        type: 'POST',
        success: function(data) {
            $this.closest('.notification-element').remove();
            // Last one gone: show the "no recent notifications" hint.
            if ($('.notification-element').length == 0)
                $('.dropdown-menu').append("<p id='notifications-empty'>Nessuna notifica recente...</p>");
        },
        error: function(error) {
            console.error(error);
            showError(getErrorMessage(error));
        }
    });
    return false;
}
/*
 * Polls the server for notifications newer than the last successful poll
 * (null on the first call = fetch everything) and appends them to the
 * dropdown. Returns false so it can double as a click handler.
 */
function refreshNotifications() {
    $.ajax({
        url: APIdir + "/notification/update",
        // NOTE(review): lastNotificationUpdate is a Date object; jQuery will
        // serialize it via toString — confirm the API accepts that format.
        data: { since: lastNotificationUpdate },
        dataType: 'json',
        type: 'GET',
        success: function (data) {
            lastNotificationUpdate = new Date();
            appendNotifications(data);
        },
        error: function (error) {
            console.error(error);
            showError(getErrorMessage(error));
        }
    });
    return false;
}
/*
 * Inserts the given notifications into the dropdown, right after the title
 * element, wiring each dismiss button to removeNotification().
 * NOTE(review): noti.link and noti.message are interpolated into HTML
 * unescaped — XSS risk if the server ever relays user-supplied text;
 * confirm the payload is trusted.
 */
function appendNotifications(list) {
    if (list.length == 0) return;
    var $notifications = $("#notifications .dropdown-menu");
    $("#notifications-empty").remove();
    while (list.length) {
        var noti = list.pop(); // pop() consumes the caller's array in place
        var $noti = $("<a href='"+noti.link+"' class='notification-element'>");
        $noti.append(
            $('<div class="btn-dismiss-notification" data-id-notification="'+noti.id_notification+'">')
                .append('<div class="glyphicon glyphicon-remove"></div>')
                .click(removeNotification)
        );
        $noti.append(noti.message);
        $noti.insertAfter("#notifications-title");
    }
}
/* On DOM ready: wire up the notification UI everywhere except on the login
 * page (which has no notification widgets). */
$(function() {
    if (location.pathname != path + "/login") {
        $(".btn-dismiss-notification").click(removeNotification);
        $("#notifications-refresh").click(refreshNotifications);
        $("#notifications-toggle").dropdown();
        // Refresh whenever the dropdown opens, and once right away on load.
        $("#notifications").on("show.bs.dropdown", function () {
            refreshNotifications();
        });
        refreshNotifications();
    }
});
| lupus-dev/lupus | js/default.js | JavaScript | gpl-2.0 | 3,279 |
/*
* OCaml Support For IntelliJ Platform.
* Copyright (C) 2010 Maxim Manuylov
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/gpl-2.0.html>.
*/
package manuylov.maxim.ocaml.lang.parser.psi.element;
/**
 * Marker PSI element for an OCaml class field introduced by an initializer.
 * Carries no members of its own; it only refines the field-definition type.
 *
 * @author Maxim.Manuylov
 *         Date: 21.03.2009
 */
public interface OCamlInitializerClassFieldDefinition extends OCamlClassFieldDefinition
{
}
| consulo/consulo-ocaml | src/main/java/manuylov/maxim/ocaml/lang/parser/psi/element/OCamlInitializerClassFieldDefinition.java | Java | gpl-2.0 | 954 |
package experiment.Neo4j.algorithms;
import testgdbb.*;
/**
 * Command-line entry point that runs the graph-summarization benchmark via
 * the shared test harness.
 */
public class graphsumm {
	/**
	 * @param args args[0] must be an integer size passed to the harness
	 */
	public static void main(String[] args) {
		// BUG FIX (robustness): a missing argument previously crashed with
		// ArrayIndexOutOfBoundsException; fail with a usage message instead.
		if (args.length < 1) {
			System.err.println("usage: graphsumm <size>");
			System.exit(1);
		}
		Test t = new TestGraphSummarization();
		// Second argument (3) selects the algorithm variant in the harness —
		// TODO confirm against Test.testAlgorithm's parameter documentation.
		t.testAlgorithm(Integer.parseInt(args[0]), 3);
	}
}
| gpalma/gdbb | experiment/Neo4j/algorithms/graphsumm.java | Java | gpl-2.0 | 218 |
using System.Collections.Generic;
namespace meridian.diagram
{
/// <summary>
/// Kinds of elements that can appear on a meridian diagram: relationship
/// kinds (Inherit..Association), schema objects (Entity, View, Set) and
/// database-level objects (Foreign, Primary, StoredProcedure).
/// </summary>
public enum ElementType
{
    None,
    Proto,
    Field,
    Inherit,
    Aggregation,
    Composition,
    InlineComposition,
    Association,
    Entity,
    View,
    Set,
    Foreign,
    Primary,
    StoredProcedure
}
/// <summary>
/// Case-insensitive mapping between the textual element-type tokens used in
/// diagram files and the <see cref="ElementType"/> enumeration.
/// </summary>
public static class ElementTypeMapper
{
    static ElementTypeMapper()
    {
        m_Mapper["NONE"] = ElementType.None;
        m_Mapper["PROTO"] = ElementType.Proto;
        m_Mapper["FIELD"] = ElementType.Field;
        m_Mapper["INHERIT"] = ElementType.Inherit;
        m_Mapper["AGGREGATION"] = ElementType.Aggregation;
        m_Mapper["COMPOSITION"] = ElementType.Composition;
        m_Mapper["INLINECOMPOSITION"] = ElementType.InlineComposition;
        m_Mapper["ASSOCIATION"] = ElementType.Association;
        m_Mapper["ENTITY"] = ElementType.Entity;
        m_Mapper["VIEW"] = ElementType.View;
        m_Mapper["SET"] = ElementType.Set;
        m_Mapper["FOREIGN"] = ElementType.Foreign;
        m_Mapper["PRIMARY"] = ElementType.Primary;
        // BUG FIX: StoredProcedure exists in ElementType but was never
        // registered here, so IsType("StoredProcedure") always returned
        // false and Map() threw KeyNotFoundException for it.
        m_Mapper["STOREDPROCEDURE"] = ElementType.StoredProcedure;
    }

    /// <summary>True when <paramref name="_type"/> names a known element type.</summary>
    public static bool IsType(string _type)
    {
        return m_Mapper.ContainsKey(Normalize(_type));
    }

    /// <summary>
    /// Maps a textual token to its <see cref="ElementType"/>. Throws
    /// <see cref="System.Collections.Generic.KeyNotFoundException"/> for
    /// unknown tokens — guard with <see cref="IsType"/> first.
    /// </summary>
    public static ElementType Map(string _type)
    {
        return m_Mapper[Normalize(_type)];
    }

    // Canonical lookup key: trimmed and upper-cased with the invariant
    // culture so lookups don't break under locale-specific casing rules
    // (e.g. the Turkish dotless i).
    private static string Normalize(string _type)
    {
        return _type.Trim().ToUpperInvariant();
    }

    private static SortedList<string, ElementType> m_Mapper = new SortedList<string, ElementType>();
}
} | seavan/meridian | branches/0.2/meridian.diagram/elements/ElementType.cs | C# | gpl-2.0 | 1,641 |
/***************************************************************************
* __________ __ ___.
* Open \______ \ ____ ____ | | _\_ |__ _______ ___
* Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ /
* Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < <
* Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \
* \/ \/ \/ \/ \/
* $Id$
*
* Copyright (C) 2007 by Michael Sevakis
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
* KIND, either express or implied.
*
****************************************************************************/
#include "config.h"
#include "system.h"
#include "avic-imx31.h"
#include "spi-imx31.h"
#include "mc13783.h"
#include "ccm-imx31.h"
#include "sdma-imx31.h"
#include "dvfs_dptc-imx31.h"
#include "kernel.h"
#include "thread.h"
/*
 * EPIT1 compare-interrupt handler: acknowledge the interrupt, then run the
 * registered kernel tick tasks — fires once per tick period (tick_start).
 */
static __attribute__((interrupt("IRQ"))) void EPIT1_HANDLER(void)
{
    EPITSR1 = EPITSR_OCIF; /* Clear the pending status */
    /* Run through the list of tick tasks */
    call_tick_tasks();
}
/*
 * Start the periodic kernel tick on EPIT1.
 *
 * interval_in_ms: tick period in milliseconds. The prescaler below makes
 * the counter run at 25 kHz, hence the interval_in_ms * 25 reload value.
 */
void INIT_ATTR tick_start(unsigned int interval_in_ms)
{
    /* Gate the EPIT1 module clock ON before touching any EPIT register. */
    ccm_module_clock_gating(CG_EPIT1, CGM_ON_RUN_WAIT);

    EPITCR1 &= ~(EPITCR_OCIEN | EPITCR_EN); /* Disable the counter */
    CCM_WIMR0 &= ~CCM_WIMR0_IPI_INT_EPIT1;  /* Clear wakeup mask */

    /* mcu_main_clk = 528MHz = 27MHz * 2 * ((9 + 7/9) / 1)
     * CLKSRC = ipg_clk = 528MHz / 4 / 2 = 66MHz,
     * EPIT Output Disconnected,
     * Enabled in wait mode
     * Prescale 1/2640 for 25KHz
     * Reload from modulus register,
     * Compare interrupt enabled,
     * Count from load value */
    EPITCR1 = EPITCR_CLKSRC_IPG_CLK | EPITCR_WAITEN | EPITCR_IOVW |
              ((2640-1) << EPITCR_PRESCALER_POS) | EPITCR_RLD |
              EPITCR_OCIEN | EPITCR_ENMOD;

    EPITLR1 = interval_in_ms*25; /* Count down from interval */
    EPITCMPR1 = 0;               /* Event when counter reaches 0 */
    EPITSR1 = EPITSR_OCIF;       /* Clear any pending interrupt */

    avic_enable_int(INT_EPIT1, INT_TYPE_IRQ, INT_PRIO_DEFAULT,
                    EPIT1_HANDLER);

    EPITCR1 |= EPITCR_EN; /* Enable the counter */
}
/*
 * One-time kernel device bring-up: SDMA and SPI first (the MC13783 PMIC is
 * reached over SPI), interrupts enabled, then the PMIC and the DVFS/DPTC
 * voltage-frequency machinery. Bootloader builds skip starting DVFS/DPTC.
 */
void INIT_ATTR kernel_device_init(void)
{
    sdma_init();
    spi_init();
    enable_interrupt(IRQ_FIQ_STATUS);
    mc13783_init();
    dvfs_dptc_init(); /* Init also sets default points */
#ifndef BOOTLOADER
    dvfs_wfi_monitor(true); /* Monitor the WFI signal */
    dvfs_start(); /* Should be ok to start even so early */
    dptc_start();
#endif
}
/*
 * Stop the kernel tick: mask the interrupt, halt the counter, clear any
 * pending event, then gate the EPIT1 module clock off last — the registers
 * become inaccessible once the clock is gated.
 */
void tick_stop(void)
{
    avic_disable_int(INT_EPIT1);                /* Disable interrupt */
    EPITCR1 &= ~(EPITCR_OCIEN | EPITCR_EN);     /* Disable counter */
    EPITSR1 = EPITSR_OCIF;                      /* Clear pending */
    ccm_module_clock_gating(CG_EPIT1, CGM_OFF); /* Turn off module clock */
}
| renolui/RenoStudio | Player/firmware/target/arm/imx31/gigabeat-s/kernel-gigabeat-s.c | C | gpl-2.0 | 3,298 |
/* ----------------------------------------------------------------------------
* This file was automatically generated by SWIG (http://www.swig.org).
* Version 2.0.7
*
* Do not make changes to this file unless you know what you are doing--modify
* the SWIG interface file instead.
* ----------------------------------------------------------------------------- */
namespace cAudio {
using System;
using System.Runtime.InteropServices;
public class DoubleVector : IDisposable, System.Collections.IEnumerable
#if !SWIG_DOTNET_1
, System.Collections.Generic.IList<double>
#endif
{
private HandleRef swigCPtr;
protected bool swigCMemOwn;
internal DoubleVector(IntPtr cPtr, bool cMemoryOwn) {
swigCMemOwn = cMemoryOwn;
swigCPtr = new HandleRef(this, cPtr);
}
internal static HandleRef getCPtr(DoubleVector obj) {
return (obj == null) ? new HandleRef(null, IntPtr.Zero) : obj.swigCPtr;
}
~DoubleVector() {
Dispose();
}
public virtual void Dispose() {
lock(this) {
if (swigCPtr.Handle != IntPtr.Zero) {
if (swigCMemOwn) {
swigCMemOwn = false;
cAudioCSharpWrapperPINVOKE.delete_DoubleVector(swigCPtr);
}
swigCPtr = new HandleRef(null, IntPtr.Zero);
}
GC.SuppressFinalize(this);
}
}
public DoubleVector(System.Collections.ICollection c) : this() {
if (c == null)
throw new ArgumentNullException("c");
foreach (double element in c) {
this.Add(element);
}
}
public bool IsFixedSize {
get {
return false;
}
}
public bool IsReadOnly {
get {
return false;
}
}
public double this[int index] {
get {
return getitem(index);
}
set {
setitem(index, value);
}
}
public int Capacity {
get {
return (int)capacity();
}
set {
if (value < size())
throw new ArgumentOutOfRangeException("Capacity");
reserve((uint)value);
}
}
public int Count {
get {
return (int)size();
}
}
public bool IsSynchronized {
get {
return false;
}
}
#if SWIG_DOTNET_1
public void CopyTo(System.Array array)
#else
public void CopyTo(double[] array)
#endif
{
CopyTo(0, array, 0, this.Count);
}
#if SWIG_DOTNET_1
public void CopyTo(System.Array array, int arrayIndex)
#else
public void CopyTo(double[] array, int arrayIndex)
#endif
{
CopyTo(0, array, arrayIndex, this.Count);
}
#if SWIG_DOTNET_1
public void CopyTo(int index, System.Array array, int arrayIndex, int count)
#else
public void CopyTo(int index, double[] array, int arrayIndex, int count)
#endif
{
if (array == null)
throw new ArgumentNullException("array");
if (index < 0)
throw new ArgumentOutOfRangeException("index", "Value is less than zero");
if (arrayIndex < 0)
throw new ArgumentOutOfRangeException("arrayIndex", "Value is less than zero");
if (count < 0)
throw new ArgumentOutOfRangeException("count", "Value is less than zero");
if (array.Rank > 1)
throw new ArgumentException("Multi dimensional array.", "array");
if (index+count > this.Count || arrayIndex+count > array.Length)
throw new ArgumentException("Number of elements to copy is too large.");
for (int i=0; i<count; i++)
array.SetValue(getitemcopy(index+i), arrayIndex+i);
}
#if !SWIG_DOTNET_1
System.Collections.Generic.IEnumerator<double> System.Collections.Generic.IEnumerable<double>.GetEnumerator() {
return new DoubleVectorEnumerator(this);
}
#endif
System.Collections.IEnumerator System.Collections.IEnumerable.GetEnumerator() {
return new DoubleVectorEnumerator(this);
}
public DoubleVectorEnumerator GetEnumerator() {
return new DoubleVectorEnumerator(this);
}
// Type-safe enumerator
/// Note that the IEnumerator documentation requires an InvalidOperationException to be thrown
/// whenever the collection is modified. This has been done for changes in the size of the
/// collection but not when one of the elements of the collection is modified as it is a bit
/// tricky to detect unmanaged code that modifies the collection under our feet.
public sealed class DoubleVectorEnumerator : System.Collections.IEnumerator
#if !SWIG_DOTNET_1
, System.Collections.Generic.IEnumerator<double>
#endif
{
private DoubleVector collectionRef;
private int currentIndex;
private object currentObject;
private int currentSize;
public DoubleVectorEnumerator(DoubleVector collection) {
collectionRef = collection;
currentIndex = -1;
currentObject = null;
currentSize = collectionRef.Count;
}
// Type-safe iterator Current
public double Current {
get {
if (currentIndex == -1)
throw new InvalidOperationException("Enumeration not started.");
if (currentIndex > currentSize - 1)
throw new InvalidOperationException("Enumeration finished.");
if (currentObject == null)
throw new InvalidOperationException("Collection modified.");
return (double)currentObject;
}
}
// Type-unsafe IEnumerator.Current
object System.Collections.IEnumerator.Current {
get {
return Current;
}
}
public bool MoveNext() {
int size = collectionRef.Count;
bool moveOkay = (currentIndex+1 < size) && (size == currentSize);
if (moveOkay) {
currentIndex++;
currentObject = collectionRef[currentIndex];
} else {
currentObject = null;
}
return moveOkay;
}
public void Reset() {
currentIndex = -1;
currentObject = null;
if (collectionRef.Count != currentSize) {
throw new InvalidOperationException("Collection modified.");
}
}
#if !SWIG_DOTNET_1
public void Dispose() {
currentIndex = -1;
currentObject = null;
}
#endif
}
public void Clear() {
cAudioCSharpWrapperPINVOKE.DoubleVector_Clear(swigCPtr);
}
public void Add(double x) {
cAudioCSharpWrapperPINVOKE.DoubleVector_Add(swigCPtr, x);
}
private uint size() {
uint ret = cAudioCSharpWrapperPINVOKE.DoubleVector_size(swigCPtr);
return ret;
}
private uint capacity() {
uint ret = cAudioCSharpWrapperPINVOKE.DoubleVector_capacity(swigCPtr);
return ret;
}
private void reserve(uint n) {
cAudioCSharpWrapperPINVOKE.DoubleVector_reserve(swigCPtr, n);
}
public DoubleVector() : this(cAudioCSharpWrapperPINVOKE.new_DoubleVector__SWIG_0(), true) {
}
public DoubleVector(DoubleVector other) : this(cAudioCSharpWrapperPINVOKE.new_DoubleVector__SWIG_1(DoubleVector.getCPtr(other)), true) {
if (cAudioCSharpWrapperPINVOKE.SWIGPendingException.Pending) throw cAudioCSharpWrapperPINVOKE.SWIGPendingException.Retrieve();
}
public DoubleVector(int capacity) : this(cAudioCSharpWrapperPINVOKE.new_DoubleVector__SWIG_2(capacity), true) {
if (cAudioCSharpWrapperPINVOKE.SWIGPendingException.Pending) throw cAudioCSharpWrapperPINVOKE.SWIGPendingException.Retrieve();
}
private double getitemcopy(int index) {
double ret = cAudioCSharpWrapperPINVOKE.DoubleVector_getitemcopy(swigCPtr, index);
if (cAudioCSharpWrapperPINVOKE.SWIGPendingException.Pending) throw cAudioCSharpWrapperPINVOKE.SWIGPendingException.Retrieve();
return ret;
}
private double getitem(int index) {
double ret = cAudioCSharpWrapperPINVOKE.DoubleVector_getitem(swigCPtr, index);
if (cAudioCSharpWrapperPINVOKE.SWIGPendingException.Pending) throw cAudioCSharpWrapperPINVOKE.SWIGPendingException.Retrieve();
return ret;
}
private void setitem(int index, double val) {
cAudioCSharpWrapperPINVOKE.DoubleVector_setitem(swigCPtr, index, val);
if (cAudioCSharpWrapperPINVOKE.SWIGPendingException.Pending) throw cAudioCSharpWrapperPINVOKE.SWIGPendingException.Retrieve();
}
public void AddRange(DoubleVector values) {
cAudioCSharpWrapperPINVOKE.DoubleVector_AddRange(swigCPtr, DoubleVector.getCPtr(values));
if (cAudioCSharpWrapperPINVOKE.SWIGPendingException.Pending) throw cAudioCSharpWrapperPINVOKE.SWIGPendingException.Retrieve();
}
public DoubleVector GetRange(int index, int count) {
IntPtr cPtr = cAudioCSharpWrapperPINVOKE.DoubleVector_GetRange(swigCPtr, index, count);
DoubleVector ret = (cPtr == IntPtr.Zero) ? null : new DoubleVector(cPtr, true);
if (cAudioCSharpWrapperPINVOKE.SWIGPendingException.Pending) throw cAudioCSharpWrapperPINVOKE.SWIGPendingException.Retrieve();
return ret;
}
public void Insert(int index, double x) {
cAudioCSharpWrapperPINVOKE.DoubleVector_Insert(swigCPtr, index, x);
if (cAudioCSharpWrapperPINVOKE.SWIGPendingException.Pending) throw cAudioCSharpWrapperPINVOKE.SWIGPendingException.Retrieve();
}
public void InsertRange(int index, DoubleVector values) {
cAudioCSharpWrapperPINVOKE.DoubleVector_InsertRange(swigCPtr, index, DoubleVector.getCPtr(values));
if (cAudioCSharpWrapperPINVOKE.SWIGPendingException.Pending) throw cAudioCSharpWrapperPINVOKE.SWIGPendingException.Retrieve();
}
public void RemoveAt(int index) {
cAudioCSharpWrapperPINVOKE.DoubleVector_RemoveAt(swigCPtr, index);
if (cAudioCSharpWrapperPINVOKE.SWIGPendingException.Pending) throw cAudioCSharpWrapperPINVOKE.SWIGPendingException.Retrieve();
}
public void RemoveRange(int index, int count) {
cAudioCSharpWrapperPINVOKE.DoubleVector_RemoveRange(swigCPtr, index, count);
if (cAudioCSharpWrapperPINVOKE.SWIGPendingException.Pending) throw cAudioCSharpWrapperPINVOKE.SWIGPendingException.Retrieve();
}
public static DoubleVector Repeat(double value, int count) {
IntPtr cPtr = cAudioCSharpWrapperPINVOKE.DoubleVector_Repeat(value, count);
DoubleVector ret = (cPtr == IntPtr.Zero) ? null : new DoubleVector(cPtr, true);
if (cAudioCSharpWrapperPINVOKE.SWIGPendingException.Pending) throw cAudioCSharpWrapperPINVOKE.SWIGPendingException.Retrieve();
return ret;
}
public void Reverse() {
cAudioCSharpWrapperPINVOKE.DoubleVector_Reverse__SWIG_0(swigCPtr);
}
public void Reverse(int index, int count) {
cAudioCSharpWrapperPINVOKE.DoubleVector_Reverse__SWIG_1(swigCPtr, index, count);
if (cAudioCSharpWrapperPINVOKE.SWIGPendingException.Pending) throw cAudioCSharpWrapperPINVOKE.SWIGPendingException.Retrieve();
}
public void SetRange(int index, DoubleVector values) {
cAudioCSharpWrapperPINVOKE.DoubleVector_SetRange(swigCPtr, index, DoubleVector.getCPtr(values));
if (cAudioCSharpWrapperPINVOKE.SWIGPendingException.Pending) throw cAudioCSharpWrapperPINVOKE.SWIGPendingException.Retrieve();
}
public bool Contains(double value) {
bool ret = cAudioCSharpWrapperPINVOKE.DoubleVector_Contains(swigCPtr, value);
return ret;
}
public int IndexOf(double value) {
int ret = cAudioCSharpWrapperPINVOKE.DoubleVector_IndexOf(swigCPtr, value);
return ret;
}
public int LastIndexOf(double value) {
int ret = cAudioCSharpWrapperPINVOKE.DoubleVector_LastIndexOf(swigCPtr, value);
return ret;
}
public bool Remove(double value) {
bool ret = cAudioCSharpWrapperPINVOKE.DoubleVector_Remove(swigCPtr, value);
return ret;
}
}
}
| LiXizhi/NPLRuntime | Client/trunk/cAudio_2.4.0/cAudioCSharp/DoubleVector.cs | C# | gpl-2.0 | 11,232 |
# Emb_Lin
for further information read "~/doc/bericht.rst" or "~/doc/bericht.html"
## BeagleBone Black
* Building your own costom Linux Distro using Yocto
* Working with GPIO
* building a Wifi accesspoint using hostapd, isc-dhcp-server and dnsmasq
## Geo-Module
A simple mobile hotspot for geo-caching or similar purposes. Using a BeagleBone Black to create a mobile Wifi accesspoint with independant Power Supply using a solar panel and a battery back-up.
| M-A-D/Emb_Lin | README.md | Markdown | gpl-2.0 | 471 |
<?php
/* @var $this LoginController */
/* @var $model Login */
$this->breadcrumbs=array(
'Logins'=>array('index'),
$model->Email=>array('view','id'=>$model->idLogin),
'Update',
);
$this->menu=array(
array('label'=>'Logins Home', 'url'=>array('index')),
array('label'=>'Create Logins', 'url'=>array('create')),
array('label'=>'View Login', 'url'=>array('view', 'id'=>$model->idLogin)),
array('label'=>'Manage Logins', 'url'=>array('admin')),
);
?>
<h1>Update Login: <?php echo $model->FirstName . ' ' . $model->LastName; ?></h1>
<?php echo $this->renderPartial('_form', array('model'=>$model)); ?> | pluginbot/server | PluginBotSrv/protected/views/login/update.php | PHP | gpl-2.0 | 607 |
/*
* Copyright (C) 2011 Hendrik Leppkes
* http://www.1f0.de
*
* This Program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This Program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to
* the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
* http://www.gnu.org/copyleft/gpl.html
*/
#pragma once
#include <Unknwn.h> // IUnknown and GUID Macros
// {3E114919-E6F7-41EA-91D6-929821EB0993}
DEFINE_GUID(IID_ILAVFSettings,
0x3e114919, 0xe6f7, 0x41ea, 0x91, 0xd6, 0x92, 0x98, 0x21, 0xeb, 0x9, 0x93);
[uuid("3E114919-E6F7-41EA-91D6-929821EB0993")]
interface ILAVFSettings : public IUnknown
{
// Switch to Runtime Config mode. This will reset all settings to default, and no changes to the settings will be saved
// You can use this to programmatically configure LAV Splitter without interfering with the users settings in the registry.
// Subsequent calls to this function will reset all settings back to defaults, even if the mode does not change.
//
// Note that calling this function during playback is not supported and may exhibit undocumented behaviour.
// For smooth operations, it must be called before LAV Splitter opens a file.
STDMETHOD(SetRuntimeConfig)(BOOL bRuntimeConfig) = 0;
// Retrieve the preferred languages as ISO 639-2 language codes, comma seperated
// If the result is NULL, no language has been set
// Memory for the string will be allocated, and has to be free'ed by the caller with CoTaskMemFree
STDMETHOD(GetPreferredLanguages)(WCHAR **ppLanguages) = 0;
// Set the preferred languages as ISO 639-2 language codes, comma seperated
// To reset to no preferred language, pass NULL or the empty string
STDMETHOD(SetPreferredLanguages)(WCHAR *pLanguages) = 0;
// Retrieve the preferred subtitle languages as ISO 639-2 language codes, comma seperated
// If the result is NULL, no language has been set
// If no subtitle language is set, the main language preference is used.
// Memory for the string will be allocated, and has to be free'ed by the caller with CoTaskMemFree
STDMETHOD(GetPreferredSubtitleLanguages)(WCHAR **ppLanguages) = 0;
// Set the preferred subtitle languages as ISO 639-2 language codes, comma seperated
// To reset to no preferred language, pass NULL or the empty string
// If no subtitle language is set, the main language preference is used.
STDMETHOD(SetPreferredSubtitleLanguages)(WCHAR *pLanguages) = 0;
// Get the current subtitle mode
// 0 = No Subs; 1 = Forced Subs; 2 = All subs
STDMETHOD_(DWORD,GetSubtitleMode)() = 0;
// Set the current subtitle mode
// 0 = No Subs; 1 = Forced Subs; 2 = All subs
STDMETHOD(SetSubtitleMode)(DWORD dwMode) = 0;
// Get the subtitle matching language flag
// TRUE = Only subtitles with a language in the preferred list will be used; FALSE = All subtitles will be used
STDMETHOD_(BOOL,GetSubtitleMatchingLanguage)() = 0;
// Set the subtitle matching language flag
// TRUE = Only subtitles with a language in the preferred list will be used; FALSE = All subtitles will be used
STDMETHOD(SetSubtitleMatchingLanguage)(BOOL dwMode) = 0;
// Control wether a special "Forced Subtitles" stream will be created for PGS subs
STDMETHOD_(BOOL,GetPGSForcedStream)() = 0;
// Control wether a special "Forced Subtitles" stream will be created for PGS subs
STDMETHOD(SetPGSForcedStream)(BOOL bFlag) = 0;
// Get the PGS forced subs config
// TRUE = only forced PGS frames will be shown, FALSE = all frames will be shown
STDMETHOD_(BOOL,GetPGSOnlyForced)() = 0;
// Set the PGS forced subs config
// TRUE = only forced PGS frames will be shown, FALSE = all frames will be shown
STDMETHOD(SetPGSOnlyForced)(BOOL bForced) = 0;
// Get the VC-1 Timestamp Processing mode
// 0 - No Timestamp Correction, 1 - Always Timestamp Correction, 2 - Auto (Correction for Decoders that need it)
STDMETHOD_(int,GetVC1TimestampMode)() = 0;
// Set the VC-1 Timestamp Processing mode
// 0 - No Timestamp Correction, 1 - Always Timestamp Correction, 2 - Auto (Correction for Decoders that need it)
STDMETHOD(SetVC1TimestampMode)(int iMode) = 0;
// Set whether substreams (AC3 in TrueHD, for example) should be shown as a seperate stream
STDMETHOD(SetSubstreamsEnabled)(BOOL bSubStreams) = 0;
// Check whether substreams (AC3 in TrueHD, for example) should be shown as a seperate stream
STDMETHOD_(BOOL,GetSubstreamsEnabled)() = 0;
// Set if the ffmpeg parsers should be used for video streams
STDMETHOD(SetVideoParsingEnabled)(BOOL bEnabled) = 0;
// Query if the ffmpeg parsers are being used for video streams
STDMETHOD_(BOOL,GetVideoParsingEnabled)() = 0;
// Set if LAV Splitter should try to fix broken HD-PVR streams
STDMETHOD(SetFixBrokenHDPVR)(BOOL bEnabled) = 0;
// Query if LAV Splitter should try to fix broken HD-PVR streams
STDMETHOD_(BOOL,GetFixBrokenHDPVR)() = 0;
// Control wether the givne format is enabled
STDMETHOD_(HRESULT,SetFormatEnabled)(const char *strFormat, BOOL bEnabled) = 0;
// Check if the given format is enabled
STDMETHOD_(BOOL,IsFormatEnabled)(const char *strFormat) = 0;
// Set if LAV Splitter should always completely remove the filter connected to its Audio Pin when the audio stream is changed
STDMETHOD(SetStreamSwitchRemoveAudio)(BOOL bEnabled) = 0;
// Query if LAV Splitter should always completely remove the filter connected to its Audio Pin when the audio stream is changed
STDMETHOD_(BOOL,GetStreamSwitchRemoveAudio)() = 0;
};
| pchapmanvc/LAVFSplitter | developer_info/LAVSplitterSettings.h | C | gpl-2.0 | 6,039 |
<?php
class NextendCss {
var $_css;
var $_cssFiles;
var $_cssFilesGroup;
var $_cacheenabled;
var $_cache;
var $_cacheGroup;
var $_lesscache;
var $_lesscacheGroup;
var $_echo;
function NextendCss() {
$this->_css = '';
$this->_cssFiles = array();
$this->_cssFilesGroup = array();
$this->_cacheGroup = array();
$this->_cacheenabled = 1;
$this->_lesscache = false;
$this->_lesscacheGroup = array();
$this->_echo = false;
if ($this->_cacheenabled) {
nextendimport('nextend.cache.css');
$this->_cache = new NextendCacheCss();
}
}
static function getInstance() {
static $instance;
if (!is_object($instance)) {
if (nextendIsJoomla()) {
nextendimport('nextend.css.joomla');
$instance = new NextendCssJoomla();
} elseif (nextendIsWordPress()) {
nextendimport('nextend.css.wordpress');
$instance = new NextendCssWordPress();
} elseif (nextendIsMagento()) {
nextendimport('nextend.css.magento');
$instance = new NextendCssMagento();
}
}
return $instance;
}
function createGroup($name) {
$this->_cssFilesGroup[$name] = array();
if ($this->_cacheenabled) {
nextendimport('nextend.cache.css');
$this->_cacheGroup[$name] = new NextendCacheCss();
}
}
function enableLess($group = null) {
nextendimport('nextend.cache.less');
if (!$group)
$this->_lesscache = new NextendCacheLess();
else
$this->_lesscacheGroup[$group] = new NextendCacheLess();
}
function addLessImportDir($dir, $group = null) {
if (!$group)
$this->_lesscache->_less->addImportDir($dir);
else
$this->_lesscacheGroup[$group]->_less->addImportDir($dir);
}
function addCss($css) {
$this->_css .= $css . PHP_EOL;
}
function addCssFile($file, $group = null, $first = false) {
if (is_string($file)) {
if (!$group){
if($first){
$this->_cssFiles = array($file => $file) + $this->_cssFiles;
}else{
$this->_cssFiles[$file] = $file;
}
}else{
if($first){
$this->_cssFilesGroup[$group] = array($file => $file) + $this->_cssFilesGroup[$group];
}else{
$this->_cssFilesGroup[$group][$file] = $file;
}
}
} else if (is_array($file)) {
if (!$group){
$this->_cssFiles[$file[0]] = $file;
}else{
$this->_cssFilesGroup[$group][$file[0]] = $file;
}
}
}
function addCssLibraryFile($file, $group = null) {
$this->addCssFile(NEXTENDLIBRARYASSETS . 'css' . DIRECTORY_SEPARATOR . $file, $group);
}
function generateCSS($group = null) {
if (!$group && class_exists('NextendFontsGoogle')) {
$fonts = NextendFontsGoogle::getInstance();
$fonts->generateFonts();
}
$cssfiles = !$group ? $this->_cssFiles : $this->_cssFilesGroup[$group];
$cache = !$group ? $this->_cache : $this->_cacheGroup[$group];
$lesscache = !$group ? $this->_lesscache : $this->_lesscacheGroup[$group];
if (count($cssfiles)) {
foreach ($cssfiles AS $file) {
if (is_array($file)) { // LESS
$lesscache->addContext($file[1], $file[2]);
} else if (substr($file, 0, 4) == 'http') {
$this->serveCSSFile($file);
} else {
if ($this->_cacheenabled) {
$cache->addFile($file);
} else {
$url = NextendFilesystem::pathToAbsoluteURL($file);
$this->serveCSSFile($url);
}
}
}
}
$filename = null;
if ($this->_cacheenabled) {
if ($lesscache) {
$cache->addFile(NextendFilesystem::absoluteURLToPath($lesscache->getCache()));
}
$this->serveCSSFile($filename = $cache->getCache());
} else {
if ($lesscache) {
$this->serveCSSFile($filename = $lesscache->getCache());
}
}
$this->serveCSS(true, $group);
return $filename;
}
/*
* Abstract, must redeclare
* This one only for testing purpose!
*/
function serveCSS($clear = true, $group = null) {
if (!$group && $this->_css != '') {
echo "<style type='text/css'>";
echo $this->_css;
echo "</style>";
if ($clear) $this->_css = '';
}
}
/*
* Abstract, must redeclare
* This one only for testing purpose!
*/
function serveCSSFile($url) {
echo '<link rel="stylesheet" href="' . $url . '" type="text/css" />';
}
function generateAjaxCSS($loadedCSS) {
$css = '';
if (count($this->_cssFiles)) {
foreach ($this->_cssFiles AS $file) {
if (!in_array($file, $loadedCSS)) {
$css .= preg_replace('#url\([\'"]([^"\'\)]+)[\'"]\)#', 'url(' . NextendFilesystem::pathToAbsoluteURL(dirname($file)) . '/$1)', NextendFilesystem::readFile($file));
}
}
}
$css .= $this->_css;
return $css;
}
function generateArrayCSS() {
$css = array();
$css = array_merge($css, $this->_cssFiles);
return $css;
}
}
| WietzeDuursma/slapen | wp-content/plugins/smart-slider-2/nextend/css/css.php | PHP | gpl-2.0 | 5,816 |
/**********************************************************************
Freeciv - Copyright (C) 1996 - A Kjeldberg, L Gregersen, P Unold
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
***********************************************************************/
/**********************************************************************
Functions for handling the tilespec files which describe
the files and contents of tilesets.
original author: David Pfitzner <dwp@mso.anu.edu.au>
***********************************************************************/
#ifdef HAVE_CONFIG_H
# include "../config.hh"
#endif
#include <assert.h>
#include <stdio.h>
#include <stdlib.h> /* exit */
#include <string.h>
#include "astring.hh"
#include "capability.hh"
#include "wc_intl.hh"
#include "game.hh" /* for fill_xxx */
#include "government.hh"
#include "hash.hh"
#include "log.hh"
#include "map.hh"
#include "mem.hh"
#include "nation.hh"
#include "player.hh"
#include "registry.hh"
#include "shared.hh"
#include "support.hh"
#include "unit.hh"
#include "include/dialogs_g.hh"
#include "include/graphics_g.hh"
#include "include/mapview_g.hh" /* for update_map_canvas_visible */
#include "include/plrdlg_g.hh"
#include "civclient.hh" /* for get_client_state() */
#include "climap.hh" /* for tile_get_known() */
#include "clinet.hh"
#include "control.hh" /* for fill_xxx */
#include "multiselect.hh"
#include "options.hh" /* for fill_xxx */
#include "tilespec.hh"
#define TILESPEC_SUFFIX ".tilespec"

/* Intro graphic filenames read from the tilespec file; malloc'ed,
 * released in tilespec_free_toplevel(). */
char *main_intro_filename;
char *minimap_intro_filename;

/* All named sprites assembled for the currently loaded tileset. */
struct named_sprites sprites;

/* Stores the currently loaded tileset. This differs from the value in
 * options.h since that variable is changed by the GUI code. */
static char current_tileset[512];

/* Maps the four cardinal directions onto their direction8 values. */
static const int DIR4_TO_DIR8[4] =
    { DIR8_NORTH, DIR8_SOUTH, DIR8_EAST, DIR8_WEST };

/* Tile dimensions in pixels. */
int NORMAL_TILE_WIDTH;
int NORMAL_TILE_HEIGHT;
int UNIT_TILE_WIDTH;
int UNIT_TILE_HEIGHT;
/* NOTE(review): SMALL_TILE_WIDTH is unsigned while SMALL_TILE_HEIGHT is
 * signed -- presumably accidental; confirm against the extern
 * declarations before unifying the types. */
unsigned int SMALL_TILE_WIDTH;
int SMALL_TILE_HEIGHT;

int OVERVIEW_TILE_SIZE = 2;

/* TRUE when the loaded tileset uses the isometric view. */
bool is_isometric;
/* Non-zero in hex topologies (see is_valid_tileset_dir() below). */
int hex_width, hex_height;

/* Font names used for city labels; malloc'ed, released in
 * tilespec_free_toplevel(). */
char *city_names_font;
char *city_productions_font_name;

int num_tiles_explode_unit=0;

static int roadstyle;
int fogstyle;
static int flag_offset_x, flag_offset_y;

#define NUM_CORNER_DIRS 4
#define TILES_PER_CORNER 4

/* Counts and lookup tables of the directions that are valid/cardinal
 * for the current tileset topology. */
static int num_valid_tileset_dirs;
static int num_cardinal_tileset_dirs;
static int num_index_valid;
static int num_index_cardinal;
static enum direction8 valid_tileset_dirs[8];
static enum direction8 cardinal_tileset_dirs[8];

/* Per-layer terrain matching configuration from the tilespec file. */
static struct layers_s {
  enum match_style match_style;
  int count;
  char **match_types;
} layers[MAX_NUM_LAYERS];
/* Darkness style. Don't reorder this enum since tilesets depend on it. */
static enum darkness_style_e {
  /* No darkness sprites are drawn. */
  DARKNESS_NONE = 0,

  /* 1 sprite that is split into 4 parts and treated as a darkness4. Only
   * works in iso-view. */
  DARKNESS_ISORECT = 1,

  /* 4 sprites, one per direction. More than one sprite per tile may be
   * drawn. */
  DARKNESS_CARD_SINGLE = 2,

  /* 15=2^4-1 sprites. A single sprite is drawn, chosen based on whether
   * there's darkness in _each_ of the cardinal directions. */
  DARKNESS_CARD_FULL = 3
} darkness_style;

/* Instantiate a typed list of specfiles via the speclist.hh template
 * (the SPECLIST_* macros parameterize the included header). */
struct specfile;
#define SPECLIST_TAG specfile
#define SPECLIST_TYPE struct specfile
#include "speclist.hh"

#define specfile_list_iterate(list, pitem) \
    TYPED_LIST_ITERATE(struct specfile, list, pitem)
#define specfile_list_iterate_end LIST_ITERATE_END

/* Likewise, instantiate a typed list of small_sprites. */
struct small_sprite;
#define SPECLIST_TAG small_sprite
#define SPECLIST_TYPE struct small_sprite
#include "speclist.hh"

#define small_sprite_list_iterate(list, pitem) \
    TYPED_LIST_ITERATE(struct small_sprite, list, pitem)
#define small_sprite_list_iterate_end LIST_ITERATE_END

/* All specfiles and all individual sprites of the loaded tileset. */
static struct specfile_list *specfiles;
static struct small_sprite_list *small_sprites;
/* One sprite image file of the tileset, plus the big sprite loaded
 * from it (loaded lazily; see the small_sprite machinery below). */
struct specfile {
  struct Sprite *big_sprite;
  char *file_name;
};

/*
 * Information about an individual sprite. All fields except 'sprite' are
 * filled at the time of the scan of the specfile. 'Sprite' is
 * set/cleared on demand in load_sprite/unload_sprite.
 */
struct small_sprite {
  int ref_count;

  /* The sprite is in this file. */
  char *file;

  /* Or, the sprite is in this file at the location. */
  struct specfile *specfile_;
  int x, y, width, height;

  struct Sprite *sprite;
};

/*
 * This hash table maps tilespec tags to struct small_sprites.
 */
static struct hash_table *sprite_hash = NULL;

/* This hash table maps terrain graphic strings to drawing data. */
static struct hash_table *terrain_hash;

#define TILESPEC_CAPSTR "+tilespec3 duplicates_ok"

/*
 * Tilespec capabilities acceptable to this program:
 *
 * +tilespec3     -  basic format; required
 *
 * duplicates_ok  -  we can handle existence of duplicate tags
 *                   (lattermost tag which appears is used; tilesets which
 *                   have duplicates should specify "+duplicates_ok")
 */

#define SPEC_CAPSTR "+spec3"

/*
 * Individual spec file capabilities acceptable to this program:
 * +spec3          -  basic format, required
 */

/*
  If focus_unit_hidden is true, then no units at
  the location of the foc unit are ever drawn.
*/
static bool focus_unit_hidden = FALSE;

/* The vector of overview color for every player. */
static enum color_std player_color[MAX_NUM_PLAYERS + MAX_NUM_BARBARIANS];

/* The maximal number of usable colors */
#define PLAYER_COLORS_NUM (COLOR_STD_RACE13 - COLOR_STD_RACE0 + 1)

/* Forward declaration; defined later in this file. */
static struct Sprite* lookup_sprite_tag_alt(const char *tag, const char *alt,
                                            bool required, const char *what,
                                            const char *name);
/**************************************************************************
Return the tileset name of the direction. This is similar to
dir_get_name but you shouldn't change this or all tilesets will break.
**************************************************************************/
static const char *get_name_tileset_direction8(enum direction8 dir)
{
switch (dir) {
case DIR8_NORTH:
return "n";
case DIR8_NORTHEAST:
return "ne";
case DIR8_EAST:
return "e";
case DIR8_SOUTHEAST:
return "se";
case DIR8_SOUTH:
return "s";
case DIR8_SOUTHWEST:
return "sw";
case DIR8_WEST:
return "w";
case DIR8_NORTHWEST:
return "nw";
}
assert(0);
return "";
}
/****************************************************************************
  Return TRUE iff the dir is valid in this tileset.
****************************************************************************/
static bool is_valid_tileset_dir(enum direction8 dir)
{
  /* In a hex topology, the pair of corner directions cut off by the hex
     grid does not exist; every direction is valid otherwise. */
  if (hex_width > 0) {
    return !(dir == DIR8_NORTHEAST || dir == DIR8_SOUTHWEST);
  }
  if (hex_height > 0) {
    return !(dir == DIR8_NORTHWEST || dir == DIR8_SOUTHEAST);
  }
  return TRUE;
}
/****************************************************************************
Return TRUE iff the dir is cardinal in this tileset.
"Cardinal", in this sense, means that a tile will share a border with
another tile in the direction rather than sharing just a single vertex.
****************************************************************************/
static bool is_cardinal_tileset_dir(enum direction8 dir)
{
if (hex_width > 0 || hex_height > 0) {
return is_valid_tileset_dir(dir);
} else {
return (dir == DIR8_NORTH || dir == DIR8_EAST
|| dir == DIR8_SOUTH || dir == DIR8_WEST);
}
}
/**********************************************************************
  Returns a static list of tilesets available on the system by
  searching all data directories for files matching TILESPEC_SUFFIX.
  The list is NULL-terminated.
***********************************************************************/
const char **get_tileset_list(void)
{
  static const char **cached_list = NULL;

  if (cached_list == NULL) {
    /* Scan the data directories only once per client run.  Note: this
       means you must restart the client after installing a new
       tileset. */
    cached_list = datafilelist(TILESPEC_SUFFIX);
  }

  return cached_list;
}
/**********************************************************************
  Return the name of the fallback tileset.  Warciv always defaults to
  "trident-26", regardless of whether the UI supports isometric view
  (stock Freeciv picked "isotrident" in that case).
  Do not translate the returned name.
***********************************************************************/
const char *get_default_tilespec_name(void)
{
  return "trident-26";
}
/**********************************************************************
  Gets full filename for tilespec file, based on input name.
  Returned data is allocated, and freed by user as required.
  Input name may be null, in which case uses default.
  Falls back to default if can't find specified name;
  dies if can't find default.
***********************************************************************/
static char *tilespec_fullname(const char *tileset_name)
{
  const char *tileset_default = get_default_tilespec_name();
  char *fname;   /* "<name>.tilespec"; malloc'ed and freed below */
  char *dname;   /* full path as returned by datafilename(), or NULL */

  /* NULL or empty name means "use the default tileset". */
  if (!tileset_name || tileset_name[0] == '\0') {
    tileset_name = tileset_default;
  }

  /* Hack: this is the name of the tileset we're about to load. We copy
   * it here, since this is the only place where we know it. Note this
   * also means if you do "civ -t foo" this will change your *default*
   * tileset to 'foo'. */
  /* The pointer comparison skips the copy when the caller passed
   * default_tileset_name itself (presumably to avoid an overlapping
   * sz_strlcpy self-copy). */
  if (default_tileset_name != tileset_name) {
    sz_strlcpy(default_tileset_name, tileset_name);
  }

  /* Exact-size buffer: name + suffix + terminating NUL. */
  fname = (char*)wc_malloc(strlen(tileset_name) + strlen(TILESPEC_SUFFIX) + 1);
  sprintf(fname, "%s%s", tileset_name, TILESPEC_SUFFIX);

  dname = datafilename(fname);
  free(fname);

  if (dname) {
    /* Caller owns the returned copy and must free it. */
    return mystrdup(dname);
  }

  /* Requested tileset not found.  If it already was the default there
   * is nothing left to fall back on: give up.  Otherwise recurse once
   * with the default name. */
  if (strcmp(tileset_name, tileset_default) == 0) {
    freelog(LOG_FATAL, _("No usable default tileset found, aborting!"));
    exit(EXIT_FAILURE);
  }

  freelog(LOG_ERROR, _("Trying \"%s\" tileset."), tileset_default);
  return tilespec_fullname(tileset_default);
}
/**********************************************************************
  Checks options in filename match what we require and support.
  Die if not.
  'which' should be "tilespec" or "spec".
***********************************************************************/
static bool check_tilespec_capabilities(struct section_file *file,
                                        const char *which,
                                        const char *us_capstr,
                                        const char *filename)
{
  char *file_capstr = secfile_lookup_str(file, "%s.options", which);

  /* Direction 1: the file must provide every capability we require. */
  if (!has_capabilities(us_capstr, file_capstr)) {
    freelog(LOG_ERROR, _("%s file appears incompatible:\n"
                         "file: \"%s\"\n"
                         "file options: %s\n"
                         "supported options: %s"),
            which, filename, file_capstr, us_capstr);
    return FALSE;
  }
  /* Direction 2: we must support every capability the file marks as
     required (its "+"-prefixed options). */
  if (!has_capabilities(file_capstr, us_capstr)) {
    freelog(LOG_ERROR, _("%s file claims required option(s)"
                         " which we don't support:\n"
                         "file: \"%s\"\n"
                         "file options: %s\n"
                         "supported options: %s"),
            which, filename, file_capstr, us_capstr);
    return FALSE;
  }

  return TRUE;
}
/* Release one malloc'ed string option and clear the pointer so a stale
 * value can never be reused after the tileset is re-read. */
static void free_toplevel_string(char **str)
{
  if (*str) {
    free(*str);
    *str = NULL;
  }
}

/**********************************************************************
  Frees the tilespec toplevel data, in preparation for re-reading it.
  See tilespec_read_toplevel().
***********************************************************************/
static void tilespec_free_toplevel(void)
{
  /* Release the string options and reset the pointers. */
  free_toplevel_string(&city_names_font);
  free_toplevel_string(&city_productions_font_name);
  free_toplevel_string(&main_intro_filename);
  free_toplevel_string(&minimap_intro_filename);

  /* Drain the terrain hash, releasing each drawing entry together with
   * the strings it owns. */
  while (hash_num_entries(terrain_hash) > 0) {
    const struct terrain_drawing_data *draw;

    draw = static_cast<const terrain_drawing_data*>(hash_value_by_number(terrain_hash, 0));
    hash_delete_entry(terrain_hash, draw->name);
    free(draw->name);
    if (draw->mine_tag) {
      free(draw->mine_tag);
    }
    /* Cast away const: the entry itself is heap-allocated and owned
     * solely by this table. */
    free((void *) draw);
  }
  hash_free(terrain_hash);
  terrain_hash = NULL;  /* Helpful for sanity. */
}
/**********************************************************************
  Read a new tilespec in from scratch.

  Unlike the initial reading code, which reads pieces one at a time,
  this gets rid of the old data and reads in the new all at once.  If
  the new tileset fails to load, the previously loaded tileset is
  re-read; if that also fails the client exits via die().

  It will also call the necessary functions to redraw the graphics.
***********************************************************************/
void tilespec_reread(const char *tileset_name)
{
  int id;
  tile_t *center_tile;
  enum client_states state = get_client_state();

  freelog(LOG_NORMAL, "Loading tileset %s.", tileset_name);

  /* Step 0: Record old data.
   *
   * We record the current mapcanvas center, etc., so the view can be
   * restored after the reload. */
  center_tile = get_center_tile_mapcanvas();

  /* Step 1: Cleanup.
   *
   * We free all old data in preparation for re-reading it. */
  tilespec_free_tiles();
  tilespec_free_city_tiles(game.ruleset_control.style_count);
  tilespec_free_toplevel();

  /* Step 2: Read.
   *
   * We read in the new tileset.  On failure, fall back to re-reading
   * the tileset that was loaded before (current_tileset). */
  if (!tilespec_read_toplevel(tileset_name)) {
    if (!tilespec_read_toplevel(current_tileset)) {
      die("Failed to re-read the currently loaded tileset.");
    }
  }
  tilespec_load_tiles();

  /* Step 3: Setup
   *
   * This is a seriously sticky problem.  On startup, we build a hash
   * from all the sprite data.  Then, when we connect to a server, the
   * server sends us ruleset data a piece at a time and we use this data
   * to assemble the sprite structures.  But if we change while connected
   * we have to reassemble all of these.  This should just involve
   * calling tilespec_setup_*** on everything.  But how do we tell what
   * "everything" is?
   *
   * The below code just does things straightforwardly, by setting up
   * each possible sprite again.  Hopefully it catches everything, and
   * doesn't mess up too badly if we change tilesets while not connected
   * to a server. */
  if (state < CLIENT_SELECT_RACE_STATE) {
    /* The ruleset data is not sent until this point. */
    return;
  }
  for (id = OLD_TERRAIN_FIRST; id < OLD_TERRAIN_COUNT; id++) {
    tilespec_setup_tile_type(id);
  }
  unit_type_iterate(id) {
    tilespec_setup_unit_type(id);
  } unit_type_iterate_end;
  government_iterate(gov) {
    tilespec_setup_government(gov->index);
  } government_iterate_end;
  for (id = 0; id < game.ruleset_control.nation_count; id++) {
    tilespec_setup_nation_flag(id);
  }
  impr_type_iterate(imp_id) {
    tilespec_setup_impr_type(imp_id);
  } impr_type_iterate_end;
  tech_type_iterate(tech_id) {
    if (tech_id != A_NONE && tech_exists(tech_id)) {
      tilespec_setup_tech_type(tech_id);
    }
  } tech_type_iterate_end;
  tilespec_setup_specialist_types();

  /* tilespec_load_tiles reverts the city tile pointers to 0.  This
     is a workaround. */
  tilespec_alloc_city_tiles(game.ruleset_control.style_count);
  for (id = 0; id < game.ruleset_control.style_count; id++) {
    tilespec_setup_city_tiles(id);
  }

  /* Step 4: Draw.
   *
   * Do any necessary redraws. */
  if (state < CLIENT_GAME_RUNNING_STATE) {
    /* Unless the client state is playing a game or in gameover,
       we don't want/need to redraw. */
    return;
  }
  popdown_all_game_dialogs_except_option_dialog();
  generate_citydlg_dimensions();
  tileset_changed();
  can_slide = FALSE;
  center_tile_mapcanvas(center_tile);
  /* update_map_canvas_visible forces a full redraw.  Otherwise with fast
   * drawing we might not get one.  Of course this is slower. */
  update_map_canvas_visible(MAP_UPDATE_NORMAL);
  can_slide = TRUE;
}
/**************************************************************************
  Wrapper around tilespec_reread() for use as the change callback of the
  tileset string option in options.c / the local options dialog.
**************************************************************************/
void tilespec_reread_callback(struct client_option *option)
{
  const char *tileset_name = option->u.string.pvalue;

  assert(option->type == CLIENT_OPTION_TYPE_STRING
         && *tileset_name != '\0');
  tilespec_reread(tileset_name);
}
/**************************************************************************
Loads the given graphics file (found in the data path) into a newly
allocated sprite.
**************************************************************************/
static struct Sprite *load_gfx_file(const char *gfx_filename)
{
const char **gfx_fileexts = gfx_fileextensions();
const char *gfx_fileext;
struct Sprite *s;
/* Try out all supported file extensions to find one that works. */
while ((gfx_fileext = *gfx_fileexts++)) {
char *real_full_name;
char full_name[strlen(gfx_filename) + strlen(gfx_fileext) + 2];
sprintf(full_name, "%s.%s", gfx_filename, gfx_fileext);
real_full_name = datafilename(full_name);
if ( real_full_name ) {
freelog(LOG_DEBUG, "trying to load gfx file %s", real_full_name);
s = load_gfxfile(real_full_name);
if (s) {
return s;
}
}
}
freelog(LOG_VERBOSE, "Could not load gfx file %s.", gfx_filename);
return NULL;
}
/**************************************************************************
  Ensure that the big sprite of the given spec file is loaded.
  Re-reads the spec file to find its "file.gfx" entry, then loads that
  graphics file.  Exits the client on any failure.
**************************************************************************/
static void ensure_big_sprite(struct specfile *specf)
{
  struct section_file the_file;
  struct section_file *file = &the_file;
  const char *gfx_filename;

  if (specf->big_sprite) {
    /* Looks like it's already loaded. */
    return;
  }

  /* Otherwise load it.  The big sprite will sometimes be freed and will have
   * to be reloaded, but most of the time it's just loaded once, the small
   * sprites are extracted, and then it's freed. */
  if (!section_file_load(file, specf->file_name)) {
    freelog(LOG_FATAL, _("Could not open \"%s\"."), specf->file_name);
    exit(EXIT_FAILURE);
  }
  if (!check_tilespec_capabilities(file, "spec",
                                   SPEC_CAPSTR, specf->file_name)) {
    exit(EXIT_FAILURE);
  }

  gfx_filename = secfile_lookup_str(file, "file.gfx");

  specf->big_sprite = load_gfx_file(gfx_filename);

  if (!specf->big_sprite) {
    freelog(LOG_FATAL, _("Couldn't load gfx file for the spec file %s"),
            specf->file_name);
    exit(EXIT_FAILURE);
  }
  section_file_free(file);
}
/**************************************************************************
  Scan all sprites declared in the given specfile.  This means that the
  positions of the sprites in the big_sprite are saved in the
  small_sprite structs.  Each small_sprite is also registered in the
  global sprite_hash under its tag(s); with duplicates_ok, later tags
  silently replace earlier ones.  Exits the client on parse failure.
**************************************************************************/
static void scan_specfile(struct specfile *specf, bool duplicates_ok)
{
  struct section_file the_file, *file = &the_file;
  char **gridnames;
  int num_grids, i;

  if (!section_file_load(file, specf->file_name)) {
    freelog(LOG_FATAL, _("Could not open \"%s\"."), specf->file_name);
    exit(EXIT_FAILURE);
  }
  if (!check_tilespec_capabilities(file, "spec",
                                   SPEC_CAPSTR, specf->file_name)) {
    exit(EXIT_FAILURE);
  }

  /* currently unused */
  (void) section_file_lookup(file, "info.artists");

  gridnames = secfile_get_secnames_prefix(file, "grid_", &num_grids);

  /* Each "grid_*" section describes a regular grid of small sprites
   * cut out of the spec file's big sprite. */
  for (i = 0; i < num_grids; i++) {
    int j, k;
    int x_top_left, y_top_left, dx, dy;
    int pixel_border;

    pixel_border =
      secfile_lookup_int_default(file, -1, "%s.pixel_border", gridnames[i]);
    if (pixel_border < 0) {
      /* is_pixel_border is used in old tilesets. */
      pixel_border =
        (secfile_lookup_bool_default(file, FALSE,
                                     "%s.is_pixel_border", gridnames[i])
         ? 1 : 0);
    }
    x_top_left = secfile_lookup_int(file, "%s.x_top_left", gridnames[i]);
    y_top_left = secfile_lookup_int(file, "%s.y_top_left", gridnames[i]);
    dx = secfile_lookup_int(file, "%s.dx", gridnames[i]);
    dy = secfile_lookup_int(file, "%s.dy", gridnames[i]);

    j = -1;
    while (section_file_lookup(file, "%s.tiles%d.tag", gridnames[i], ++j)) {
      struct small_sprite *ss = (small_sprite *)wc_malloc(sizeof(*ss));
      int row, column;
      int x1, y1;
      char **tags;
      int num_tags;

      row = secfile_lookup_int(file, "%s.tiles%d.row", gridnames[i], j);
      column = secfile_lookup_int(file, "%s.tiles%d.column", gridnames[i], j);
      tags = secfile_lookup_str_vec(file, &num_tags, "%s.tiles%d.tag",
                                    gridnames[i], j);

      /* there must be at least 1 because of the while(): */
      assert(num_tags > 0);

      /* Grid cell -> pixel rectangle inside the big sprite. */
      x1 = x_top_left + (dx + pixel_border) * column;
      y1 = y_top_left + (dy + pixel_border) * row;

      ss->ref_count = 0;
      ss->file = NULL;  /* grid sprites come from the big sprite */
      ss->x = x1;
      ss->y = y1;
      ss->width = dx;
      ss->height = dy;
      ss->specfile_ = specf;
      ss->sprite = NULL;

      small_sprite_list_prepend(small_sprites, ss);

      if (!duplicates_ok) {
        for (k = 0; k < num_tags; k++) {
          if (!hash_insert(sprite_hash, mystrdup(tags[k]), ss)) {
            freelog(LOG_ERROR, "warning: already have a sprite for %s", tags[k]);
          }
        }
      } else {
        for (k = 0; k < num_tags; k++) {
          (void) hash_replace(sprite_hash, mystrdup(tags[k]), ss);
        }
      }

      free(tags);
      tags = NULL;
    }
  }
  free(gridnames);
  gridnames = NULL;

  /* Load "extra" sprites.  Each sprite is one file. */
  i = -1;
  while (secfile_lookup_str_default(file, NULL, "extra.sprites%d.tag", ++i)) {
    struct small_sprite *ss = static_cast<small_sprite *>(wc_malloc(sizeof(*ss)));
    char **tags;
    char *filename;
    int num_tags, k;

    tags
      = secfile_lookup_str_vec(file, &num_tags, "extra.sprites%d.tag", i);
    filename = secfile_lookup_str(file, "extra.sprites%d.file", i);

    ss->ref_count = 0;
    ss->file = mystrdup(filename);  /* loaded from its own file, not a grid */
    ss->specfile_ = NULL;
    ss->sprite = NULL;

    small_sprite_list_prepend(small_sprites, ss);

    if (!duplicates_ok) {
      for (k = 0; k < num_tags; k++) {
        if (!hash_insert(sprite_hash, mystrdup(tags[k]), ss)) {
          freelog(LOG_ERROR, "warning: already have a sprite for %s", tags[k]);
        }
      }
    } else {
      for (k = 0; k < num_tags; k++) {
        (void) hash_replace(sprite_hash, mystrdup(tags[k]), ss);
      }
    }
    free(tags);
  }

  section_file_check_unused(file, specf->file_name);
  section_file_free(file);
}
/**********************************************************************
Returns the correct name of the gfx file (with path and extension)
Must be free'd when no longer used
***********************************************************************/
static char *tilespec_gfx_filename(const char *gfx_filename)
{
const char *gfx_current_fileext;
const char **gfx_fileexts = gfx_fileextensions();
while((gfx_current_fileext = *gfx_fileexts++))
{
char *full_name = static_cast<char*>(
wc_malloc(strlen(gfx_filename) + strlen(gfx_current_fileext) + 2));
char *real_full_name;
sprintf(full_name,"%s.%s",gfx_filename,gfx_current_fileext);
real_full_name = datafilename(full_name);
free(full_name);
if (real_full_name) {
return mystrdup(real_full_name);
}
}
freelog(LOG_FATAL, _("Couldn't find a supported gfx file extension for %s"),
gfx_filename);
exit(EXIT_FAILURE);
return NULL;
}
/**********************************************************************
  Finds and reads the toplevel tilespec file based on given name.
  Sets global variables, including tile sizes and full names for
  intro files.  Returns FALSE (after logging) on recoverable read
  errors; exits the client on fatal ones.  A NULL tileset_name loads
  the default tileset (via tilespec_fullname).
***********************************************************************/
bool tilespec_read_toplevel(const char *tileset_name)
{
  struct section_file the_file;
  struct section_file *file = &the_file;
  char *fname;
  char *c;
  int i;
  int num_spec_files, num_terrains, hex_side;
  char **spec_filenames, **terrains;
  char *file_capstr;
  bool duplicates_ok, is_hex;
  enum direction8 dir;

  fname = tilespec_fullname(tileset_name);
  freelog(LOG_VERBOSE, "tilespec file is %s", fname);

  if (!section_file_load(file, fname)) {
    /* BUGFIX: log before freeing fname; the original freed it first
     * and then passed the dangling pointer to freelog (use-after-free). */
    freelog(LOG_ERROR, _("Could not open \"%s\"."), fname);
    free(fname);
    return FALSE;
  }
  if (!check_tilespec_capabilities(file, "tilespec",
                                   TILESPEC_CAPSTR, fname)) {
    section_file_free(file);
    free(fname);
    return FALSE;
  }

  file_capstr = secfile_lookup_str(file, "%s.options", "tilespec");
  duplicates_ok = has_capabilities("+duplicates_ok", file_capstr);

  (void) section_file_lookup(file, "tilespec.name"); /* currently unused */
  (void) section_file_lookup(file, "tilespec.flags_directory");

  is_isometric = secfile_lookup_bool_default(file, FALSE, "tilespec.is_isometric");

  /* Read hex-tileset information. */
  is_hex = secfile_lookup_bool_default(file, FALSE, "tilespec.is_hex");
  hex_side = secfile_lookup_int_default(file, 0, "tilespec.hex_side");
  hex_width = hex_height = 0;
  if (is_hex) {
    if (is_isometric) {
      hex_height = hex_side;
    } else {
      hex_width = hex_side;
    }
    is_isometric = TRUE; /* Hex tilesets are drawn the same as isometric. */
  }

  if (is_isometric && !isometric_view_supported()) {
    freelog(LOG_ERROR, _("Client does not support isometric tilesets."
                         " Using default tileset instead."));
    assert(tileset_name != NULL);
    section_file_free(file);
    free(fname);
    return tilespec_read_toplevel(NULL);
  }
  if (!is_isometric && !overhead_view_supported()) {
    freelog(LOG_ERROR, _("Client does not support overhead view tilesets."
                         " Using default tileset instead."));
    assert(tileset_name != NULL);
    section_file_free(file);
    free(fname);
    return tilespec_read_toplevel(NULL);
  }

  /* Create arrays of valid and cardinal tileset dirs.  These depend
   * entirely on the tileset, not the topology.  They are also in clockwise
   * rotational ordering. */
  num_valid_tileset_dirs = num_cardinal_tileset_dirs = 0;
  dir = DIR8_NORTH;
  do {
    if (is_valid_tileset_dir(dir)) {
      valid_tileset_dirs[num_valid_tileset_dirs] = dir;
      num_valid_tileset_dirs++;
    }
    if (is_cardinal_tileset_dir(dir)) {
      cardinal_tileset_dirs[num_cardinal_tileset_dirs] = dir;
      num_cardinal_tileset_dirs++;
    }
    dir = dir_cw(dir);
  } while (dir != DIR8_NORTH);
  assert(num_valid_tileset_dirs % 2 == 0); /* Assumed elsewhere. */
  num_index_valid = 1 << num_valid_tileset_dirs;
  num_index_cardinal = 1 << num_cardinal_tileset_dirs;

  NORMAL_TILE_WIDTH = secfile_lookup_int(file, "tilespec.normal_tile_width");
  NORMAL_TILE_HEIGHT = secfile_lookup_int(file, "tilespec.normal_tile_height");
  if (is_isometric) {
    UNIT_TILE_WIDTH = NORMAL_TILE_WIDTH;
    UNIT_TILE_HEIGHT = 3 * NORMAL_TILE_HEIGHT/2;
  } else {
    UNIT_TILE_WIDTH = NORMAL_TILE_WIDTH;
    UNIT_TILE_HEIGHT = NORMAL_TILE_HEIGHT;
  }
  SMALL_TILE_WIDTH = secfile_lookup_int(file, "tilespec.small_tile_width");
  SMALL_TILE_HEIGHT = secfile_lookup_int(file, "tilespec.small_tile_height");
  /* BUGFIX: format string read "%d%d unit, %d%d small", mashing the
   * dimensions together in the log output. */
  freelog(LOG_VERBOSE, "tile sizes %dx%d, %dx%d unit, %dx%d small",
          NORMAL_TILE_WIDTH, NORMAL_TILE_HEIGHT,
          UNIT_TILE_WIDTH, UNIT_TILE_HEIGHT,
          SMALL_TILE_WIDTH, SMALL_TILE_HEIGHT);

  roadstyle = secfile_lookup_int_default(file, is_isometric ? 0 : 1,
                                         "tilespec.roadstyle");
  fogstyle = secfile_lookup_int_default(file, 0,
                                        "tilespec.fogstyle");
  darkness_style = (darkness_style_e)secfile_lookup_int(file, "tilespec.darkness_style");
  if (darkness_style < DARKNESS_NONE
      || darkness_style > DARKNESS_CARD_FULL
      || (darkness_style == DARKNESS_ISORECT
          && (!is_isometric || hex_width > 0 || hex_height > 0))) {
    freelog(LOG_FATAL, _("Invalid darkness style set in tileset."));
    exit(EXIT_FAILURE);
  }
  flag_offset_x = secfile_lookup_int_default(file, 0,
                                             "tilespec.flag_offset_x");
  flag_offset_y = secfile_lookup_int_default(file, 0,
                                             "tilespec.flag_offset_y");

  c = secfile_lookup_str_default(file, "10x20", "tilespec.city_names_font");
  city_names_font = mystrdup(c);

  c =
    secfile_lookup_str_default(file, "8x16",
                               "tilespec.city_productions_font");
  city_productions_font_name = mystrdup(c);

  c = secfile_lookup_str(file, "tilespec.main_intro_file");
  main_intro_filename = tilespec_gfx_filename(c);
  freelog(LOG_DEBUG, "intro file %s", main_intro_filename);

  c = secfile_lookup_str(file, "tilespec.minimap_intro_file");
  minimap_intro_filename = tilespec_gfx_filename(c);
  freelog(LOG_DEBUG, "radar file %s", minimap_intro_filename);

  /* Terrain layer info. */
  for (i = 0; i < MAX_NUM_LAYERS; i++) {
    char *style = secfile_lookup_str_default(file, "none",
                                             "layer%d.match_style", i);
    int j;

    if (mystrcasecmp(style, "full") == 0) {
      layers[i].match_style = MATCH_FULL;
    } else if (mystrcasecmp(style, "bool") == 0) {
      layers[i].match_style = MATCH_BOOLEAN;
    } else {
      layers[i].match_style = MATCH_NONE;
    }

    layers[i].match_types = secfile_lookup_str_vec(file, &layers[i].count,
                                                   "layer%d.match_types", i);
    for (j = 0; j < layers[i].count; j++) {
      layers[i].match_types[j] = mystrdup(layers[i].match_types[j]);
    }
  }

  /* Terrain drawing info. */
  terrains = secfile_get_secnames_prefix(file, "terrain_", &num_terrains);
  if (num_terrains == 0) {
    freelog(LOG_ERROR, "No terrain types supported by tileset.");
    section_file_free(file);
    free(fname);
    return FALSE;
  }

  assert(terrain_hash == NULL);
  terrain_hash = hash_new(hash_fval_string, hash_fcmp_string);

  for (i = 0; i < num_terrains; i++) {
    struct terrain_drawing_data *terr = (terrain_drawing_data*)wc_malloc(sizeof(*terr));
    char *cell_type;
    int l, j;

    memset(terr, 0, sizeof(*terr));
    terr->name = mystrdup(terrains[i] + strlen("terrain_"));
    terr->is_blended = secfile_lookup_bool(file, "%s.is_blended",
                                           terrains[i]);
    terr->num_layers = secfile_lookup_int(file, "%s.num_layers",
                                          terrains[i]);
    terr->num_layers = CLIP(1, terr->num_layers, MAX_NUM_LAYERS);

    for (l = 0; l < terr->num_layers; l++) {
      char *match_type, *match_style;

      terr->layer[l].is_tall
        = secfile_lookup_bool_default(file, FALSE, "%s.layer%d_is_tall",
                                      terrains[i], l);
      terr->layer[l].offset_x
        = secfile_lookup_int_default(file, 0, "%s.layer%d_offset_x",
                                     terrains[i], l);
      terr->layer[l].offset_y
        = secfile_lookup_int_default(file, 0, "%s.layer%d_offset_y",
                                     terrains[i], l);

      match_style = secfile_lookup_str_default(file, "none",
                                               "%s.layer%d_match_style",
                                               terrains[i], l);
      if (mystrcasecmp(match_style, "full") == 0) {
        terr->layer[l].match_style = MATCH_FULL;
      } else if (mystrcasecmp(match_style, "bool") == 0) {
        terr->layer[l].match_style = MATCH_BOOLEAN;
      } else {
        terr->layer[l].match_style = MATCH_NONE;
      }

      match_type = secfile_lookup_str_default(file, NULL,
                                              "%s.layer%d_match_type",
                                              terrains[i], l);
      if (match_type) {
        /* Set match_count */
        switch (terr->layer[l].match_style) {
        case MATCH_NONE:
          terr->layer[l].match_count = 0;
          break;
        case MATCH_FULL:
          terr->layer[l].match_count = layers[l].count;
          break;
        case MATCH_BOOLEAN:
          terr->layer[l].match_count = 2;
          break;
        }

        /* Determine our match_type. */
        for (j = 0; j < layers[l].count; j++) {
          if (mystrcasecmp(layers[l].match_types[j], match_type) == 0) {
            break;
          }
        }
        terr->layer[l].match_type = j;
        if (j >= layers[l].count) {
          freelog(LOG_ERROR, "Invalid match type given for %s.", terrains[i]);
          terr->layer[l].match_type = 0;
          terr->layer[l].match_style = MATCH_NONE;
        }
      } else {
        terr->layer[l].match_style = MATCH_NONE;
        if (layers[l].match_style != MATCH_NONE) {
          freelog(LOG_ERROR, "Layer %d has a match_style set; all terrains"
                  " must have a match_type. %s doesn't.", l, terrains[i]);
        }
      }
      if (terr->layer[l].match_style == MATCH_NONE
          && layers[l].match_style == MATCH_FULL) {
        freelog(LOG_ERROR, "Layer %d has match_type full set; all terrains"
                " must match this. %s doesn't.", l, terrains[i]);
      }

      cell_type
        = secfile_lookup_str_default(file, "single", "%s.layer%d_cell_type",
                                     terrains[i], l);
      if (mystrcasecmp(cell_type, "single") == 0) {
        terr->layer[l].cell_type = CELL_SINGLE;
      } else if (mystrcasecmp(cell_type, "rect") == 0) {
        terr->layer[l].cell_type = CELL_RECT;
        if (terr->layer[l].is_tall
            || terr->layer[l].offset_x > 0
            || terr->layer[l].offset_y > 0) {
          freelog(LOG_ERROR,
                  _("Error in %s layer %d: you cannot have tall terrain or\n"
                    "a sprite offset with a cell-based drawing method."),
                  terrains[i], l);
          terr->layer[l].is_tall = FALSE;
          terr->layer[l].offset_x = terr->layer[l].offset_y = 0;
        }
      } else {
        freelog(LOG_ERROR, "Unknown cell type %s for %s.",
                cell_type, terrains[i]);
        terr->layer[l].cell_type = CELL_SINGLE;
      }
    }

    terr->mine_tag = secfile_lookup_str_default(file, NULL, "%s.mine_sprite",
                                                terrains[i]);
    if (terr->mine_tag) {
      terr->mine_tag = mystrdup(terr->mine_tag);
    }

    if (!hash_insert(terrain_hash, terr->name, terr)) {
      /* NOTE(review): this path leaks terr (and its strings) and leaves
       * terrain_hash partially populated; a retry will then trip the
       * assert(terrain_hash == NULL) above.  Kept as-is pending a wider
       * error-path cleanup. */
      freelog(LOG_NORMAL, "warning: duplicate terrain entry %s.",
              terrains[i]);
      section_file_free(file);
      free(fname);
      return FALSE;
    }
  }
  free(terrains);

  spec_filenames = secfile_lookup_str_vec(file, &num_spec_files,
                                          "tilespec.files");
  if (num_spec_files == 0) {
    freelog(LOG_ERROR, "No tile files specified in \"%s\"", fname);
    section_file_free(file);
    free(fname);
    return FALSE;
  }

  sprite_hash = hash_new(hash_fval_string, hash_fcmp_string);
  specfiles = specfile_list_new();
  small_sprites = small_sprite_list_new();
  for (i = 0; i < num_spec_files; i++) {
    struct specfile *specf_ = (specfile*)wc_malloc(sizeof(struct specfile));

    freelog(LOG_DEBUG, "spec file %s", spec_filenames[i]);

    specf_->big_sprite = NULL;
    specf_->file_name = mystrdup(datafilename_required(spec_filenames[i]));
    scan_specfile(specf_, duplicates_ok);

    specfile_list_prepend(specfiles, specf_);
  }
  free(spec_filenames);

  section_file_check_unused(file, fname);
  section_file_free(file);
  freelog(LOG_VERBOSE, "finished reading %s", fname);
  free(fname);

  /* BUGFIX: tileset_name may be NULL (the explicit fallback calls above
   * pass NULL to load the default tileset); the unconditional copy would
   * dereference NULL.  current_tileset keeps its previous value then. */
  if (tileset_name != NULL) {
    sz_strlcpy(current_tileset, tileset_name);
  }

  return TRUE;
}
/**********************************************************************
  Returns a text name for the citizen, as used in the tileset.
  These names match sprite tags in the spec files, so they must
  never be translated.
***********************************************************************/
static const char *get_citizen_name(struct citizen_type citizen)
{
  switch (citizen.type) {
  case CITIZEN_HAPPY:
    return "happy";
  case CITIZEN_CONTENT:
    return "content";
  case CITIZEN_UNHAPPY:
    return "unhappy";
  case CITIZEN_ANGRY:
    return "angry";
  case CITIZEN_SPECIALIST:
    /* Specialists take their tag name from the ruleset. */
    return game.ruleset_game.specialist_name[citizen.spec_type];
  case CITIZEN_LAST:
    break;
  }
  die("unknown citizen type %d", (int) citizen.type);
  return NULL;
}
/****************************************************************************
  Return a directional string for the cardinal directions.  Normally the
  binary value 1000 will be converted into "n1e0s0w0".  This is in a
  clockwise ordering.  The returned string is static storage, overwritten
  on each call.
****************************************************************************/
static const char *cardinal_index_str(int idx)
{
  static char buf[64];
  int bit;

  buf[0] = '\0';
  for (bit = 0; bit < num_cardinal_tileset_dirs; bit++) {
    cat_snprintf(buf, sizeof(buf), "%s%d",
                 get_name_tileset_direction8(cardinal_tileset_dirs[bit]),
                 (idx >> bit) & 1);
  }

  return buf;
}
/****************************************************************************
  Do the same thing as cardinal_index_str, except including all valid
  directions.  The returned string is a pointer to static memory,
  overwritten on each call.
****************************************************************************/
static char *valid_index_str(int index)
{
  static char buf[64];
  int bit;

  buf[0] = '\0';
  for (bit = 0; bit < num_valid_tileset_dirs; bit++) {
    cat_snprintf(buf, sizeof(buf), "%s%d",
                 get_name_tileset_direction8(valid_tileset_dirs[bit]),
                 (index >> bit) & 1);
  }

  return buf;
}
/* Load the sprite for 'tag' into sprites.field; die if it is missing.
 * Not very safe, but convenient: */
#define SET_SPRITE(field, tag) do { \
sprites.field = load_sprite(tag); \
if (!sprites.field) { \
die("Sprite tag %s missing.", tag); \
} \
} while(FALSE)
/* Sets sprites.field to tag or (if tag isn't available) to alt;
 * dies only if both are missing. */
#define SET_SPRITE_ALT(field, tag, alt) do { \
sprites.field = load_sprite(tag); \
if (!sprites.field) { \
sprites.field = load_sprite(alt); \
} \
if (!sprites.field) { \
die("Sprite tag %s and alternate %s are both missing.", tag, alt); \
} \
} while(FALSE)
/* Sets sprites.field to tag, or NULL if not available */
#define SET_SPRITE_OPT(field, tag) \
sprites.field = load_sprite(tag)
/* Sets sprites.field to tag, falling back to alt; NULL if neither
 * exists (lookup_sprite_tag_alt is called with required == FALSE). */
#define SET_SPRITE_ALT_OPT(field, tag, alt) do { \
sprites.field = lookup_sprite_tag_alt(tag, alt, FALSE, \
"sprite", #field); \
} while (FALSE)
/****************************************************************************
  Setup the graphics for specialist types.  Counts the available sprite
  variants per specialist ("specialist.<name>_0", "_1", ...) and exits
  the client if a specialist has none at all.
****************************************************************************/
void tilespec_setup_specialist_types(void)
{
  /* Load the specialist sprite graphics. */
  specialist_type_iterate(i) {
    struct citizen_type c =
    {.type = CITIZEN_SPECIALIST,
     .spec_type = (Specialist_type_id)i};
    const char *name = get_citizen_name(c);
    char buffer[512];
    int j;

    for (j = 0; j < NUM_TILES_CITIZEN; j++) {
      my_snprintf(buffer, sizeof(buffer), "specialist.%s_%d", name, j);
      sprites.specialist[i].sprite[j] = load_sprite(buffer);
      if (!sprites.specialist[i].sprite[j]) {
        break;
      }
    }
    sprites.specialist[i].count = j;
    if (j == 0) {
      /* CONSISTENCY FIX: was LOG_NORMAL; every other unrecoverable
       * tileset error in this file logs LOG_FATAL before exiting. */
      freelog(LOG_FATAL, _("No graphics for specialist %s."), name);
      exit(EXIT_FAILURE);
    }
  } specialist_type_iterate_end;
}
/****************************************************************************
Setup the graphics for (non-specialist) citizen types.
****************************************************************************/
static void tilespec_setup_citizen_types(void)
{
int i, j;
char buffer[512];
/* Load the citizen sprite graphics. */
for (i = 0; i < NUM_TILES_CITIZEN; i++) {
struct citizen_type c = {.type = (citizen_type_type)i};
const char *name = get_citizen_name(c);
if (i == CITIZEN_SPECIALIST) {
continue; /* Handled separately. */
}
for (j = 0; j < NUM_TILES_CITIZEN; j++) {
my_snprintf(buffer, sizeof(buffer), "citizen.%s_%d", name, j);
sprites.citizen[i].sprite[j] = load_sprite(buffer);
if (!sprites.citizen[i].sprite[j]) {
break;
}
}
sprites.citizen[i].count = j;
if (j == 0) {
freelog(LOG_NORMAL, _("No graphics for citizen %s."), name);
exit(EXIT_FAILURE);
}
}
}
/**********************************************************************
  Initialize 'sprites' structure based on hardwired tags which
  warciv always requires.  Most tags are mandatory (SET_SPRITE dies if
  they are missing); a few are optional and may stay NULL.
***********************************************************************/
static void tilespec_lookup_sprite_tags(void)
{
  char buffer[512];
  const char dir_char[] = "nsew";
  int i;

  /* tilespec_read_toplevel() must already have populated sprite_hash. */
  assert(sprite_hash != NULL);

  SET_SPRITE(treaty_thumb[0], "treaty.disagree_thumb_down");
  SET_SPRITE(treaty_thumb[1], "treaty.agree_thumb_up");

  /* Progress indicators: science bulb, warming sun, cooling flake. */
  for(i=0; i<NUM_TILES_PROGRESS; i++) {
    my_snprintf(buffer, sizeof(buffer), "s.science_bulb_%d", i);
    SET_SPRITE(bulb[i], buffer);
    my_snprintf(buffer, sizeof(buffer), "s.warming_sun_%d", i);
    SET_SPRITE(warming[i], buffer);
    my_snprintf(buffer, sizeof(buffer), "s.cooling_flake_%d", i);
    SET_SPRITE(cooling[i], buffer);
  }

  SET_SPRITE(right_arrow, "s.right_arrow");
  if (is_isometric) {
    /* Only isometric tilesets use these helper tiles. */
    SET_SPRITE(black_tile, "t.black_tile");
    SET_SPRITE(dither_tile, "t.dither_tile");
  }

  SET_SPRITE(tax_luxury, "s.tax_luxury");
  SET_SPRITE(tax_science, "s.tax_science");
  SET_SPRITE(tax_gold, "s.tax_gold");

  tilespec_setup_citizen_types();

  SET_SPRITE(spaceship.solar_panels, "spaceship.solar_panels");
  SET_SPRITE(spaceship.life_support, "spaceship.life_support");
  SET_SPRITE(spaceship.habitation, "spaceship.habitation");
  SET_SPRITE(spaceship.structural, "spaceship.structural");
  SET_SPRITE(spaceship.fuel, "spaceship.fuel");
  SET_SPRITE(spaceship.propulsion, "spaceship.propulsion");

  /* Isolated road graphics are used by roadstyle 0 and 1*/
  if (roadstyle == 0 || roadstyle == 1) {
    SET_SPRITE(road.isolated, "r.road_isolated");
    SET_SPRITE(rail.isolated, "r.rail_isolated");
  }

  if (roadstyle == 0) {
    /* Roadstyle 0 has just 8 additional sprites for both road and rail:
     * one for the road/rail going off in each direction. */
    for (i = 0; i < num_valid_tileset_dirs; i++) {
      enum direction8 dir = valid_tileset_dirs[i];
      const char *dir_name = get_name_tileset_direction8(dir);

      my_snprintf(buffer, sizeof(buffer), "r.road_%s", dir_name);
      SET_SPRITE(road.dir[i], buffer);
      my_snprintf(buffer, sizeof(buffer), "r.rail_%s", dir_name);
      SET_SPRITE(rail.dir[i], buffer);
    }
  } else if (roadstyle == 1) {
    int num_index = 1 << (num_valid_tileset_dirs / 2), j;

    /* Roadstyle 1 has 32 additional sprites for both road and rail:
     * 16 each for cardinal and diagonal directions.  Each set
     * of 16 provides a NSEW-indexed sprite to provide connectors for
     * all rails in the cardinal/diagonal directions.  The 0 entry is
     * unused (the "isolated" sprite is used instead). */
    char c[64];
    char d[64];

    for (i = 1; i < num_index; i++) {
      /* 'c' collects the cardinal connector suffix, 'd' the diagonal. */
      c[0] = '\0';
      d[0] = '\0';
      for (j = 0; j < num_valid_tileset_dirs / 2; j++) {
        int value = (i >> j) & 1;

        cat_snprintf(c, sizeof(c), "%s%d",
                     get_name_tileset_direction8(valid_tileset_dirs[2 * j]), value);
        cat_snprintf(d, sizeof(d), "%s%d",
                     get_name_tileset_direction8(valid_tileset_dirs[2 * j + 1]), value);
      }
      my_snprintf(buffer, sizeof(buffer), "r.c_road_%s", c);
      SET_SPRITE(road.even[i], buffer);
      my_snprintf(buffer, sizeof(buffer), "r.d_road_%s", d);
      SET_SPRITE(road.odd[i], buffer);
      my_snprintf(buffer, sizeof(buffer), "r.c_rail_%s", c);
      SET_SPRITE(rail.even[i], buffer);
      my_snprintf(buffer, sizeof(buffer), "r.d_rail_%s", d);
      SET_SPRITE(rail.odd[i], buffer);
    }
  } else {
    /* Roadstyle 2 includes 256 sprites, one for every possibility.
     * Just go around clockwise, with all combinations. */
    for (i = 0; i < num_index_valid; i++) {
      my_snprintf(buffer, sizeof(buffer), "r.road_%s", valid_index_str(i));
      SET_SPRITE(road.total[i], buffer);
      my_snprintf(buffer, sizeof(buffer), "r.rail_%s", valid_index_str(i));
      SET_SPRITE(rail.total[i], buffer);
    }
  }

  /* Corner road/rail graphics are used by roadstyle 0 and 1.
   * They are optional (SET_SPRITE_OPT) and only defined for
   * non-cardinal (diagonal) directions. */
  if (roadstyle == 0 || roadstyle == 1) {
    for (i = 0; i < num_valid_tileset_dirs; i++) {
      enum direction8 dir = valid_tileset_dirs[i];

      if (0 <= dir && 8 > dir && !is_cardinal_tileset_dir(dir)) {
        my_snprintf(buffer, sizeof(buffer), "r.c_road_%s",
                    get_name_tileset_direction8(dir));
        SET_SPRITE_OPT(road.corner[dir], buffer);
        my_snprintf(buffer, sizeof(buffer), "r.c_rail_%s",
                    get_name_tileset_direction8(dir));
        SET_SPRITE_OPT(rail.corner[dir], buffer);
      }
    }
  }

  SET_SPRITE(explode.nuke, "explode.nuke");

  /* Count the unit explosion animation frames by probing successive
   * tags until one is missing. */
  num_tiles_explode_unit = 0;
  do {
    my_snprintf(buffer, sizeof(buffer), "explode.unit_%d",
                num_tiles_explode_unit++);
  } while (sprite_exists(buffer));
  num_tiles_explode_unit--;

  if (num_tiles_explode_unit==0) {
    sprites.explode.unit = NULL;
  } else {
    sprites.explode.unit = (Sprite**)wc_calloc(num_tiles_explode_unit,
                                               sizeof(struct Sprite *));
    for (i = 0; i < num_tiles_explode_unit; i++) {
      my_snprintf(buffer, sizeof(buffer), "explode.unit_%d", i);
      SET_SPRITE(explode.unit[i], buffer);
    }
  }

  /* Unit activity / state overlays. */
  SET_SPRITE(unit.auto_attack, "unit.auto_attack");
  SET_SPRITE(unit.auto_settler, "unit.auto_settler");
  SET_SPRITE(unit.auto_explore, "unit.auto_explore");
  SET_SPRITE(unit.fallout, "unit.fallout");
  SET_SPRITE(unit.fortified, "unit.fortified");
  SET_SPRITE(unit.fortifying, "unit.fortifying");
  SET_SPRITE(unit.fortress, "unit.fortress");
  SET_SPRITE(unit.airbase, "unit.airbase");
  SET_SPRITE(unit.go_to, "unit.goto");
  SET_SPRITE(unit.irrigate, "unit.irrigate");
  SET_SPRITE(unit.mine, "unit.mine");
  SET_SPRITE(unit.pillage, "unit.pillage");
  SET_SPRITE(unit.pollution, "unit.pollution");
  SET_SPRITE(unit.road, "unit.road");
  SET_SPRITE(unit.sentry, "unit.sentry");
  /* Optional overlays: may legitimately be absent from a tileset. */
  sprites.unit.sleeping = load_sprite("unit.sleeping");
  SET_SPRITE(unit.stack, "unit.stack");
  sprites.unit.loaded = load_sprite("unit.loaded");
  sprites.unit.trade = load_sprite("unit.trade");
  SET_SPRITE(unit.transform, "unit.transform");
  SET_SPRITE(unit.connect, "unit.connect");
  SET_SPRITE(unit.patrol, "unit.patrol");
  SET_SPRITE(unit.lowfuel, "unit.lowfuel");
  SET_SPRITE(unit.tired, "unit.tired");

  /* Hit-point bar, in 10% steps. */
  for(i=0; i<NUM_TILES_HP_BAR; i++) {
    my_snprintf(buffer, sizeof(buffer), "unit.hp_%d", i*10);
    SET_SPRITE(unit.hp_bar[i], buffer);
  }

  for (i = 0; i < MAX_VET_LEVELS; i++) {
    /* Veteran level sprites are optional.  For instance "green" units
     * usually have no special graphic. */
    my_snprintf(buffer, sizeof(buffer), "unit.vet_%d", i);
    sprites.unit.vet_lev[i] = load_sprite(buffer);
  }

  SET_SPRITE(city.disorder, "city.disorder");
  sprites.city.happy = load_sprite("city.happy"); /* optional */

  /* City size digits (units and tens), goto-path turn counters, and
   * tile output numbers. */
  for(i=0; i<NUM_TILES_DIGITS; i++) {
    char buffer2[512];

    my_snprintf(buffer, sizeof(buffer), "city.size_%d", i);
    SET_SPRITE(city.size[i], buffer);
    my_snprintf(buffer2, sizeof(buffer2), "path.turns_%d", i);
    SET_SPRITE_ALT_OPT(path.turns[i], buffer2, buffer);

    if(i!=0) {
      my_snprintf(buffer, sizeof(buffer), "city.size_%d", i*10);
      SET_SPRITE(city.size_tens[i], buffer);
      my_snprintf(buffer2, sizeof(buffer2), "path.turns_%d", i * 10);
      SET_SPRITE_ALT_OPT(path.turns_tens[i], buffer2, buffer);
    }

    my_snprintf(buffer, sizeof(buffer), "city.t_food_%d", i);
    SET_SPRITE(city.tile_foodnum[i], buffer);
    my_snprintf(buffer, sizeof(buffer), "city.t_shields_%d", i);
    SET_SPRITE(city.tile_shieldnum[i], buffer);
    my_snprintf(buffer, sizeof(buffer), "city.t_trade_%d", i);
    SET_SPRITE(city.tile_tradenum[i], buffer);
  }

  /* City dialog upkeep icons. */
  SET_SPRITE(upkeep.food[0], "upkeep.food");
  SET_SPRITE(upkeep.food[1], "upkeep.food2");
  SET_SPRITE(upkeep.unhappy[0], "upkeep.unhappy");
  SET_SPRITE(upkeep.unhappy[1], "upkeep.unhappy2");
  SET_SPRITE(upkeep.gold[0], "upkeep.gold");
  SET_SPRITE(upkeep.gold[1], "upkeep.gold2");
  SET_SPRITE(upkeep.shield, "upkeep.shield");

  SET_SPRITE(user.attention, "user.attention");

  /* Terrain extras / specials. */
  SET_SPRITE(tx.fallout, "tx.fallout");
  SET_SPRITE(tx.pollution, "tx.pollution");
  SET_SPRITE(tx.village, "tx.village");
  SET_SPRITE(tx.fortress, "tx.fortress");
  SET_SPRITE_ALT(tx.fortress_back, "tx.fortress_back", "tx.fortress");
  SET_SPRITE(tx.airbase, "tx.airbase");
  SET_SPRITE(tx.fog, "tx.fog");

  /* River sprites indexed by cardinal connectivity. */
  for (i = 0; i < num_index_cardinal; i++) {
    my_snprintf(buffer, sizeof(buffer), "tx.s_river_%s",
                cardinal_index_str(i));
    SET_SPRITE(tx.spec_river[i], buffer);
  }

  /* We use direction-specific irrigation and farmland graphics, if they
   * are available.  If not, we just fall back to the basic irrigation
   * graphics. */
  for (i = 0; i < num_index_cardinal; i++) {
    my_snprintf(buffer, sizeof(buffer), "tx.s_irrigation_%s",
                cardinal_index_str(i));
    SET_SPRITE_ALT(tx.irrigation[i], buffer, "tx.irrigation");
  }
  for (i = 0; i < num_index_cardinal; i++) {
    my_snprintf(buffer, sizeof(buffer), "tx.s_farmland_%s",
                cardinal_index_str(i));
    SET_SPRITE_ALT(tx.farmland[i], buffer, "tx.farmland");
  }

  /* Unknown-territory darkness; sprite layout depends on the tileset's
   * declared darkness_style (validated in tilespec_read_toplevel). */
  switch (darkness_style) {
  case DARKNESS_NONE:
    /* Nothing. */
    break;
  case DARKNESS_ISORECT:
    {
      /* Isometric: take a single tx.darkness tile and split it into 4. */
      struct Sprite *darkness = load_sprite("tx.darkness");
      const int W = NORMAL_TILE_WIDTH, H = NORMAL_TILE_HEIGHT;
      int offsets[4][2] = {{W / 2, 0}, {0, H / 2}, {W / 2, H / 2}, {0, 0}};

      if (!darkness) {
        freelog(LOG_FATAL, "Sprite tx.darkness missing.");
        exit(EXIT_FAILURE);
      }
      for (i = 0; i < 4; i++) {
        sprites.tx.darkness[i] = crop_sprite(darkness, offsets[i][0],
                                             offsets[i][1], W / 2, H / 2,
                                             NULL, 0, 0);
      }
    }
    break;
  case DARKNESS_CARD_SINGLE:
    /* One sprite per cardinal direction. */
    for (i = 0; i < num_cardinal_tileset_dirs; i++) {
      enum direction8 dir = cardinal_tileset_dirs[i];

      my_snprintf(buffer, sizeof(buffer), "tx.darkness_%s",
                  get_name_tileset_direction8(dir));
      SET_SPRITE(tx.darkness[i], buffer);
    }
    break;
  case DARKNESS_CARD_FULL:
    /* One sprite per cardinal connectivity combination (index 0 unused). */
    for(i = 1; i < num_index_cardinal; i++) {
      my_snprintf(buffer, sizeof(buffer), "tx.darkness_%s",
                  cardinal_index_str(i));
      SET_SPRITE(tx.darkness[i], buffer);
    }
    break;
  }

  /* River mouths, one per n/s/e/w (see dir_char). */
  for (i = 0; i < 4; i++) {
    my_snprintf(buffer, sizeof(buffer), "tx.river_outlet_%c", dir_char[i]);
    SET_SPRITE(tx.river_outlet[i], buffer);
  }

  sprites.city.tile_wall = NULL;      /* no place to initialize this variable */
  sprites.city.tile = NULL;           /* no place to initialize this variable */
}
/**********************************************************************
Load the tiles; requires tilespec_read_toplevel() called previously.
Leads to tile_sprites being allocated and filled with pointers
to sprites. Also sets up and populates sprite_hash, and calls func
to initialize 'sprites' structure.
***********************************************************************/
void tilespec_load_tiles(void)
{
  /* Populate sprite_hash and fill the 'sprites' structure from the
   * tileset's tag names (see the header comment above). */
  tilespec_lookup_sprite_tags();
  /* Complete any sprite loading still pending after the tag lookup. */
  finish_loading_sprites();
}
/**********************************************************************
Lookup sprite to match tag, or else to match alt if don't find,
or else return NULL, and emit log message.
***********************************************************************/
static struct Sprite* lookup_sprite_tag_alt(const char *tag, const char *alt,
                                            bool required, const char *what,
                                            const char *name)
{
  struct Sprite *result;

  /* sprite_hash must be set up before any lookup is attempted. */
  if (!sprite_hash) {
    die("attempt to lookup for %s %s before sprite_hash setup", what, name);
  }

  /* Try the primary tag first. */
  result = load_sprite(tag);
  if (result != NULL) {
    return result;
  }

  /* Fall back to the alternate tag. */
  result = load_sprite(alt);
  if (result != NULL) {
    freelog(LOG_VERBOSE,
            "Using alternate graphic %s (instead of %s) for %s %s",
            alt, tag, what, name);
    return result;
  }

  /* Neither tag is available; fatal only if the sprite is required. */
  freelog(required ? LOG_FATAL : LOG_VERBOSE,
          _("Don't have graphics tags %s or %s for %s %s"),
          tag, alt, what, name);
  if (required) {
    exit(EXIT_FAILURE);
  }
  return NULL;
}
/**********************************************************************
Set unit_type sprite value; should only happen after
tilespec_load_tiles().
***********************************************************************/
void tilespec_setup_unit_type(int id)
{
  struct unit_type *type = get_unit_type(id);

  /* The sprite is only mandatory for unit types that actually exist. */
  type->sprite = lookup_sprite_tag_alt(type->graphic_str, type->graphic_alt,
                                       unit_type_exists(id), "unit_type",
                                       type->name);
  /* should maybe do something if NULL, eg generic default? */
}
/**********************************************************************
Set improvement_type sprite value; should only happen after
tilespec_load_tiles().
***********************************************************************/
void tilespec_setup_impr_type(int id)
{
  struct impr_type *impr = get_improvement_type(id);

  /* Never required: a missing improvement graphic is not fatal. */
  impr->sprite = lookup_sprite_tag_alt(impr->graphic_str,
                                       impr->graphic_alt,
                                       FALSE, "impr_type", impr->name);
  /* should maybe do something if NULL, eg generic default? */
}
/**********************************************************************
Set tech_type sprite value; should only happen after
tilespec_load_tiles().
***********************************************************************/
void tilespec_setup_tech_type(int id)
{
  /* Never required: a tech may legitimately lack a graphic. */
  advances[id].sprite =
      lookup_sprite_tag_alt(advances[id].graphic_str,
                            advances[id].graphic_alt, FALSE, "tech_type",
                            get_tech_name(get_player_ptr(), id));
  /* should maybe do something if NULL, eg generic default? */
}
/**********************************************************************
Set tile_type sprite values; should only happen after
tilespec_load_tiles().
***********************************************************************/
void tilespec_setup_tile_type(Terrain_type_id terrain)
{
  struct tile_type *tt = get_tile_type(terrain);
  struct terrain_drawing_data *draw;
  char buffer1[MAX_LEN_NAME + 20];
  int i, l;

  /* Unused terrain slots have an empty name; nothing to set up. */
  if (tt->terrain_name[0] == '\0') {
    return;
  }

  /* Find the drawing data keyed by this terrain's graphic tag, falling
   * back to the alternate tag; missing both is fatal. */
  draw = (terrain_drawing_data*)hash_lookup_data(terrain_hash, tt->graphic_str);
  if (!draw) {
    draw = (terrain_drawing_data*)hash_lookup_data(terrain_hash, tt->graphic_alt);
    if (!draw) {
      freelog(LOG_FATAL, "No graphics %s or %s for %s terrain.",
              tt->graphic_str, tt->graphic_alt, tt->terrain_name);
      exit(EXIT_FAILURE);
    }
  }

  /* Set up each layer of the drawing. */
  for (l = 0; l < draw->num_layers; l++) {
    sprite_vector_init(&draw->layer[l].base);
    sprite_vector_reserve(&draw->layer[l].base, 1);

    if (draw->layer[l].match_style == MATCH_NONE) {
      /* Load single sprite for this terrain.  Variants are tagged
       * t.<name>1, t.<name>2, ... and collected until one is missing. */
      for (i = 0; ; i++) {
        struct Sprite *sprite;

        my_snprintf(buffer1, sizeof(buffer1), "t.%s%d", draw->name, i + 1);
        sprite = load_sprite(buffer1);
        if (!sprite) {
          break;
        }
        sprite_vector_reserve(&draw->layer[l].base, i + 1);
        draw->layer[l].base.p[i] = sprite;
      }
      /* At least one base sprite (t.<name>1) must exist. */
      if (i == 0) {
        /* TRANS: obscure tileset error. */
        freelog(LOG_FATAL, _("Missing base sprite tag \"%s1\"."),
                draw->name);
        exit(EXIT_FAILURE);
      }
    } else {
      switch (draw->layer[l].cell_type) {
      case CELL_SINGLE:
        /* Load 16 cardinally-matched sprites. */
        for (i = 0; i < num_index_cardinal; i++) {
          my_snprintf(buffer1, sizeof(buffer1),
                      "t.%s_%s", draw->name, cardinal_index_str(i));
          draw->layer[l].match[i] = lookup_sprite_tag_alt(buffer1, "", TRUE,
                                                          "tile_type",
                                                          tt->terrain_name);
        }
        /* The fully-unmatched sprite doubles as the base sprite. */
        draw->layer[l].base.p[0] = draw->layer[l].match[0];
        break;
      case CELL_RECT:
        {
          const int count = draw->layer[l].match_count;
          /* N directions (NSEW) * 3 dimensions of matching */
          /* FIXME: should use exp() or expi() here. */
          const int number = NUM_CORNER_DIRS * count * count * count;

          draw->layer[l].cells = (Sprite**)
              wc_malloc(number * sizeof(*draw->layer[l].cells));

          /* Each index i encodes a corner direction (low part) plus the
           * match values of the three adjacent tiles (high part). */
          for (i = 0; i < number; i++) {
            int value = i / NUM_CORNER_DIRS;
            enum direction4 dir = (direction4)(i % NUM_CORNER_DIRS);
            const char dirs[4] = { 'u', 'd', 'r', 'l'}; /* Matches direction4 ordering */

            switch (draw->layer[l].match_style) {
            case MATCH_NONE:
              assert(0); /* Impossible. */
              break;
            case MATCH_BOOLEAN:
              /* Tags carry one boolean per adjacent tile, e.g.
               * t.<name>_cell_u010. */
              my_snprintf(buffer1, sizeof(buffer1), "t.%s_cell_%c%d%d%d",
                          draw->name, dirs[dir],
                          (value >> 0) & 1,
                          (value >> 1) & 1,
                          (value >> 2) & 1);
              draw->layer[l].cells[i]
                = lookup_sprite_tag_alt(buffer1, "", TRUE, "tile_type",
                                        tt->terrain_name);
              break;
            case MATCH_FULL:
              {
                int n = 0, s = 0, e = 0, w = 0;
                int v1, v2, v3;
                int this_ = draw->layer[l].match_type;
                struct Sprite *sprite;

                /* Decode the three neighbour match values from the
                 * base-'count' encoding of 'value'. */
                v1 = value % count;
                value /= count;
                v2 = value % count;
                value /= count;
                v3 = value % count;

                assert(v1 < count && v2 < count && v3 < count);

                /* Assume merged cells. This should be a separate option. */
                switch (dir) {
                case DIR4_NORTH:
                  s = this_;
                  w = v1;
                  n = v2;
                  e = v3;
                  break;
                case DIR4_EAST:
                  w = this_;
                  n = v1;
                  e = v2;
                  s = v3;
                  break;
                case DIR4_SOUTH:
                  n = this_;
                  e = v1;
                  s = v2;
                  w = v3;
                  break;
                case DIR4_WEST:
                  e = this_;
                  s = v1;
                  w = v2;
                  n = v3;
                  break;
                }

                /* NOTE(review): this indexes the file-scope 'layers' array,
                 * not draw->layer[l] -- presumably the global per-layer
                 * match-type name table; confirm against the toplevel
                 * tileset parsing code. */
                my_snprintf(buffer1, sizeof(buffer1),
                            "t.cellgroup_%s_%s_%s_%s",
                            layers[l].match_types[n],
                            layers[l].match_types[e],
                            layers[l].match_types[s],
                            layers[l].match_types[w]);
                sprite = load_sprite(buffer1);

                if (sprite) {
                  /* Crop the sprite to separate this cell. */
                  const int W = NORMAL_TILE_WIDTH, H = NORMAL_TILE_HEIGHT;
                  int x[4] = {W / 4, W / 4, 0, W / 2};
                  int y[4] = {H / 2, 0, H / 4, H / 4};
                  int xo[4] = {0, 0, -W / 2, W / 2};
                  int yo[4] = {H / 2, -H / 2, 0, 0};

                  sprite = crop_sprite(sprite,
                                       x[dir], y[dir], W / 2, H / 2,
                                       sprites.black_tile, xo[dir], yo[dir]);
                }
                draw->layer[l].cells[i] = sprite;
                break;
              }
            }
          }
        }

        /* Whole-tile base sprite; optional for cell-based terrains. */
        my_snprintf(buffer1, sizeof(buffer1), "t.%s1", draw->name);
        draw->layer[l].base.p[0]
          = lookup_sprite_tag_alt(buffer1, "", FALSE, "tile_type",
                                  tt->terrain_name);
        break;
      }
    }
  }

  if (draw->is_blended && is_isometric) {
    /* Set up blending sprites. This only works in iso-view! */
    const int W = NORMAL_TILE_WIDTH, H = NORMAL_TILE_HEIGHT;
    /* Pixel offsets of the four half-size blend quadrants. */
    const int offsets[4][2] = {
      {W / 2, 0}, {0, H / 2}, {W / 2, H / 2}, {0, 0}
    };
    int /* enum direction4 */ dir;

    for (dir = 0; dir < 4; dir++) {
      assert(sprite_vector_size(&draw->layer[0].base) > 0);
      draw->blend[dir] = crop_sprite(draw->layer[0].base.p[0],
                                     offsets[dir][0], offsets[dir][1],
                                     W / 2, H / 2,
                                     sprites.dither_tile, 0, 0);
    }
  }

  /* Up to two special-resource sprites per terrain; required whenever
   * the corresponding special name is non-empty. */
  for (i = 0; i < 2; i++) {
    const char *name = (i != 0) ? tt->special_2_name : tt->special_1_name;

    if (name[0] != '\0') {
      draw->special[i]
        = lookup_sprite_tag_alt(tt->special[i].graphic_str,
                                tt->special[i].graphic_alt,
                                TRUE, "tile_type special", name);
      assert(draw->special[i] != NULL);
    } else {
      draw->special[i] = NULL;
    }
    /* should probably do something if NULL, eg generic default? */
  }

  /* Optional per-terrain mine graphic. */
  if (draw->mine_tag) {
    draw->mine = load_sprite(draw->mine_tag);
  } else {
    draw->mine = NULL;
  }

  sprites.terrain[terrain] = draw;
}
/**********************************************************************
Set government sprite value; should only happen after
tilespec_load_tiles().
***********************************************************************/
void tilespec_setup_government(int id)
{
  struct government *pgov = get_government(id);

  /* A government graphic is mandatory. */
  pgov->sprite = lookup_sprite_tag_alt(pgov->graphic_str, pgov->graphic_alt,
                                       TRUE, "government", pgov->name);
  /* should probably do something if NULL, eg generic default? */
}
/**********************************************************************
Set nation flag sprite value; should only happen after
tilespec_load_tiles().
***********************************************************************/
void tilespec_setup_nation_flag(int id)
{
  struct nation_type *nation = get_nation_by_idx(id);
  char *candidates[] = {nation->flag_graphic_str,
                        nation->flag_graphic_alt,
                        (char*)"f.unknown", NULL};
  int i;

  /* Try the primary tag, then the alternate, then the generic
   * "f.unknown" fallback; stop at the first sprite that loads. */
  nation->flag_sprite = NULL;
  for (i = 0; i < 3; i++) {
    if (candidates[i]
        && (nation->flag_sprite = load_sprite(candidates[i])) != NULL) {
      break;
    }
  }

  if (!nation->flag_sprite) {
    /* Should never get here because of the f.unknown fallback. */
    freelog(LOG_FATAL, "No national flag for %s.", nation->name);
    exit(EXIT_FAILURE);
  }
}
/**********************************************************************
  Return the flag sprite of the nation that owns the given city.
***********************************************************************/
static struct Sprite *get_city_nation_flag_sprite(city_t *pcity)
{
  struct nation_type *pnation = get_nation_by_plr(city_owner(pcity));

  return pnation->flag_sprite;
}
/**********************************************************************
  Return the flag sprite of the nation that owns the given unit.
***********************************************************************/
static struct Sprite *get_unit_nation_flag_sprite(unit_t *punit)
{
  struct nation_type *pnation = get_nation_by_plr(unit_owner(punit));

  return pnation->flag_sprite;
}
/**************************************************************************
Return the sprite needed to draw the city
**************************************************************************/
static struct Sprite *get_city_sprite(city_t *pcity)
{
  int style = get_city_style(pcity); /* get style and match the best tile */
  int size = 0;

  /* Advance to the first size class whose threshold exceeds the city's
   * population (tresh[] holds the per-size-class thresholds).
   * NOTE(review): if pop_size < tresh[0] this yields index -1 below --
   * presumably the first threshold is always satisfied; confirm. */
  while (size < city_styles[style].tiles_num
         && (int)pcity->common.pop_size >= city_styles[style].tresh[size]) {
    size++;
  }

  /* The walled variant only exists (and is only drawn) in iso-view. */
  if (is_isometric && city_got_citywalls(pcity)) {
    return sprites.city.tile_wall[style][size-1];
  }
  return sprites.city.tile[style][size-1];
}
/**************************************************************************
Return the sprite needed to draw the city wall
Not used for isometric view.
**************************************************************************/
static struct Sprite *get_city_wall_sprite(city_t *pcity)
{
  /* The wall sprite is stored directly after the size-class sprites. */
  int style = get_city_style(pcity);
  int wall_index = city_styles[style].tiles_num;

  return sprites.city.tile[style][wall_index];
}
/**************************************************************************
Return the sprite needed to draw the occupied tile
**************************************************************************/
static struct Sprite *get_city_occupied_sprite(city_t *pcity)
{
  /* The "occupied" sprite sits one slot after the wall sprite. */
  int style = get_city_style(pcity);
  int occupied_index = city_styles[style].tiles_num + 1;

  return sprites.city.tile[style][occupied_index];
}
/* Append sprite 's' to the output array via the 'sprs' cursor (which must
 * be in scope at the call site), recording draw style, foggability and
 * pixel offsets, then advance the cursor. */
#define ADD_SPRITE(s, draw_style, draw_fog, x_offset, y_offset)     \
  (assert(s != NULL),                                               \
   sprs->type = DRAWN_SPRITE,                                       \
   sprs->data.sprite.style = draw_style,                            \
   sprs->data.sprite.sprite = s,                                    \
   sprs->data.sprite.foggable = (draw_fog && fogstyle == 0),        \
   sprs->data.sprite.offset_x = x_offset,                           \
   sprs->data.sprite.offset_y = y_offset,                           \
   sprs++)
/* Shorthand forms.  Deliberately no trailing semicolon: call sites supply
 * their own, and a semicolon inside the macro would expand to a stray
 * empty statement, breaking unbraced if/else chains. */
#define ADD_SPRITE_SIMPLE(s) ADD_SPRITE(s, DRAW_NORMAL, TRUE, 0, 0)
#define ADD_SPRITE_FULL(s)   ADD_SPRITE(s, DRAW_FULL, TRUE, 0, 0)
/* Append a map-grid drawing request for 'ptile'. */
#define ADD_GRID(ptile, mode)                      \
  (sprs->type = DRAWN_GRID,                        \
   sprs->data.grid.tile = (ptile),                 \
   sprs->data.grid.citymode = (mode),              \
   sprs++)
/* Append a solid background-color drawing request. */
#define ADD_BG(bg_color)                           \
  (sprs->type = DRAWN_BG,                          \
   sprs->data.bg.color = (bg_color),               \
   sprs++)
/**************************************************************************
Assemble some data that is used in building the tile sprite arrays.
(map_x, map_y) : the (normalized) map position
The values we fill in:
ter_type : the terrain type of the tile
talteration : all alterations the tile has
ter_type_near : terrain types of all adjacent terrain
talteration_near : alterations of all adjacent terrain
**************************************************************************/
static void build_tile_data(tile_t *ptile,
                            Terrain_type_id *ter_type,
                            enum tile_alteration_type *talteration,
                            Terrain_type_id *ter_type_near,
                            enum tile_alteration_type *talteration_near)
{
  int /*enum direction8*/ dir;

  *ter_type = map_get_terrain(ptile);
  *talteration = map_get_alteration(ptile);

  /* Collect terrain and alteration data for every one of the eight
   * neighbouring tiles.  We should have an iterator for this. */
  for (dir = 0; dir < 8; dir++) {
    tile_t *neighbour = mapstep(ptile, (direction8)dir);

    if (neighbour && tile_get_known(neighbour) != TILE_UNKNOWN) {
      ter_type_near[dir] = map_get_terrain(neighbour);
      talteration_near[dir] = map_get_alteration(neighbour);
    } else {
      /* We draw the edges of the (known) map as if the same terrain just
       * continued off the edge of the map. */
      ter_type_near[dir] = *ter_type;
      talteration_near[dir] = S_NO_SPECIAL;
    }
  }
}
/**********************************************************************
Fill in the sprite array for the unit
***********************************************************************/
static int fill_unit_sprite_array(struct drawn_sprite *sprs,
unit_t *punit,
bool stack, bool backdrop)
{
struct drawn_sprite *save_sprs = sprs;
int ihp;
if (backdrop) {
if (!solid_color_behind_units) {
ADD_SPRITE(get_unit_nation_flag_sprite(punit),
DRAW_FULL, TRUE,
flag_offset_x, flag_offset_y);
} else {
ADD_BG(get_player_color(unit_owner(punit)));
}
}
ADD_SPRITE_FULL(unit_type(punit)->sprite);
if (sprites.unit.loaded && punit->transported_by != -1) {
ADD_SPRITE_FULL(sprites.unit.loaded);
}
if (punit->is_sleeping && sprites.unit.sleeping) {
ADD_SPRITE_FULL(sprites.unit.sleeping);
} else if (punit->activity != ACTIVITY_IDLE) {
struct Sprite *s = NULL;
switch (punit->activity) {
case ACTIVITY_MINE:
s = sprites.unit.mine;
break;
case ACTIVITY_POLLUTION:
s = sprites.unit.pollution;
break;
case ACTIVITY_FALLOUT:
s = sprites.unit.fallout;
break;
case ACTIVITY_PILLAGE:
s = sprites.unit.pillage;
break;
case ACTIVITY_ROAD:
case ACTIVITY_RAILROAD:
s = sprites.unit.road;
break;
case ACTIVITY_IRRIGATE:
s = sprites.unit.irrigate;
break;
case ACTIVITY_EXPLORE:
s = sprites.unit.auto_explore;
break;
case ACTIVITY_FORTIFIED:
s = sprites.unit.fortified;
break;
case ACTIVITY_FORTIFYING:
s = sprites.unit.fortifying;
break;
case ACTIVITY_FORTRESS:
s = sprites.unit.fortress;
break;
case ACTIVITY_AIRBASE:
s = sprites.unit.airbase;
break;
case ACTIVITY_SENTRY:
s = sprites.unit.sentry;
break;
case ACTIVITY_GOTO:
s = sprites.unit.go_to;
break;
case ACTIVITY_TRANSFORM:
s = sprites.unit.transform;
break;
default:
break;
}
ADD_SPRITE_FULL(s);
}
if (punit->ai.control && punit->activity != ACTIVITY_EXPLORE) {
if (is_military_unit(punit)) {
ADD_SPRITE_FULL(sprites.unit.auto_attack);
} else {
ADD_SPRITE_FULL(sprites.unit.auto_settler);
}
}
if (punit->ptr) {
if (sprites.unit.trade != NULL) {
ADD_SPRITE_FULL(sprites.unit.trade);
}
} else if (punit->air_patrol_tile) {
ADD_SPRITE_FULL(sprites.unit.patrol);
} else if (unit_has_orders(punit)) {
if (punit->orders.repeat) {
ADD_SPRITE_FULL(sprites.unit.patrol);
} else if (punit->activity != ACTIVITY_IDLE) {
ADD_SPRITE_SIMPLE(sprites.unit.connect);
} else {
ADD_SPRITE_FULL(sprites.unit.go_to);
}
}
if (sprites.unit.lowfuel
&& unit_type(punit)->fuel > 0
&& punit->fuel == 1
&& punit->moves_left <= 2 * SINGLE_MOVE) {
/* Show a low-fuel graphic if the plane has 2 or fewer moves left. */
ADD_SPRITE_FULL(sprites.unit.lowfuel);
}
if (sprites.unit.tired
&& punit->moves_left < SINGLE_MOVE) {
/* Show a "tired" graphic if the unit has fewer than one move
* remaining. */
ADD_SPRITE_FULL(sprites.unit.tired);
}
if (stack || punit->occupy) {
ADD_SPRITE_FULL(sprites.unit.stack);
}
if (sprites.unit.vet_lev[punit->veteran]) {
ADD_SPRITE_FULL(sprites.unit.vet_lev[punit->veteran]);
}
ihp = ((NUM_TILES_HP_BAR-1)*punit->hp) / unit_type(punit)->hp;
ihp = CLIP(0, ihp, NUM_TILES_HP_BAR-1);
ADD_SPRITE_FULL(sprites.unit.hp_bar[ihp]);
return sprs - save_sprs;
}
/**************************************************************************
Add any corner road sprites to the sprite array.
**************************************************************************/
static int fill_road_corner_sprites(struct drawn_sprite *sprs,
                                    bool road, bool *road_near,
                                    bool rail, bool *rail_near)
{
  struct drawn_sprite *saved_sprs = sprs;
  int i;

  assert(draw_roads_rails);
  /* Roads going diagonally adjacent to this tile need to be
   * partly drawn on this tile. */

  /* Draw the corner sprite if:
   *   - There is a diagonal road (not rail!) between two adjacent tiles.
   *   - There is no diagonal road (not rail!) that intersects this road.
   * The logic is simple: roads are drawn underneath railroads, but are
   * not always covered by them (even in the corners!).  But if a railroad
   * connects two tiles, only the railroad (no road) is drawn between
   * those tiles.
   */
  for (i = 0; i < num_valid_tileset_dirs; i++) {
    enum direction8 dir = valid_tileset_dirs[i];

    if (!is_cardinal_tileset_dir(dir)) {
      /* Draw corner sprites for this non-cardinal direction. */
      /* The two valid directions flanking this diagonal, clockwise and
       * counter-clockwise. */
      int cw = (i + 1) % num_valid_tileset_dirs;
      int ccw = (i + num_valid_tileset_dirs - 1) % num_valid_tileset_dirs;
      enum direction8 dir_cw = valid_tileset_dirs[cw];
      enum direction8 dir_ccw = valid_tileset_dirs[ccw];

      /* Corner needed when both flanking tiles have road (and not both
       * rail, which would cover it) and this tile has no direct diagonal
       * road connection that would already draw it. */
      if (sprites.road.corner[dir]
          && (road_near[dir_cw] && road_near[dir_ccw]
              && !(rail_near[dir_cw] && rail_near[dir_ccw]))
          && !(road && road_near[dir] && !(rail && rail_near[dir]))) {
        ADD_SPRITE_SIMPLE(sprites.road.corner[dir]);
      }
    }
  }

  return sprs - saved_sprs;
}
/**************************************************************************
Add any corner rail sprites to the sprite array.
**************************************************************************/
static int fill_rail_corner_sprites(struct drawn_sprite *sprs,
                                    bool rail, bool *rail_near)
{
  struct drawn_sprite *saved_sprs = sprs;
  int i;

  assert(draw_roads_rails);

  /* Rails running between two diagonally adjacent tiles have to be drawn
   * partly on this tile as corner pieces. */
  for (i = 0; i < num_valid_tileset_dirs; i++) {
    enum direction8 dir = valid_tileset_dirs[i];
    int cw, ccw;
    enum direction8 dir_cw, dir_ccw;

    if (is_cardinal_tileset_dir(dir)) {
      continue; /* Corner pieces only exist for diagonal directions. */
    }

    /* The two valid directions flanking this diagonal. */
    cw = (i + 1) % num_valid_tileset_dirs;
    ccw = (i + num_valid_tileset_dirs - 1) % num_valid_tileset_dirs;
    dir_cw = valid_tileset_dirs[cw];
    dir_ccw = valid_tileset_dirs[ccw];

    /* Draw the corner when both flanking tiles carry rail but this tile
     * has no direct diagonal rail connection of its own. */
    if (sprites.rail.corner[dir]
        && rail_near[dir_cw] && rail_near[dir_ccw]
        && !(rail && rail_near[dir])) {
      ADD_SPRITE_SIMPLE(sprites.rail.corner[dir]);
    }
  }

  return sprs - saved_sprs;
}
/**************************************************************************
Fill all road and rail sprites into the sprite array.
**************************************************************************/
static int fill_road_rail_sprite_array(struct drawn_sprite *sprs,
                                       enum tile_alteration_type talteration,
                                       enum tile_alteration_type *talteration_near,
                                       city_t *pcity)
{
  struct drawn_sprite *saved_sprs = sprs;
  bool road, road_near[8], rail, rail_near[8];
  bool draw_road[8],
       draw_single_road,
       draw_rail[8],
       draw_single_rail;
  int /* enum direction8 */ dir;

  if (!draw_roads_rails) {
    /* Don't draw anything. */
    return 0;
  }

  /* Fill some data arrays. rail_near and road_near store whether road/rail
   * is present in the given direction. draw_rail and draw_road store
   * whether road/rail is to be drawn in that direction. draw_single_road
   * and draw_single_rail store whether we need an isolated road/rail to be
   * drawn. */
  road = contains_alteration(talteration, S_ROAD);
  rail = contains_alteration(talteration, S_RAILROAD);
  /* Isolated markers are suppressed inside drawn cities; an isolated road
   * is also suppressed when a rail is present on the same tile. */
  draw_single_road = road && (!pcity || !draw_cities) && !rail;
  draw_single_rail = rail && (!pcity || !draw_cities);
  for (dir = 0; dir < 8; dir++) {
    /* Check if there is adjacent road/rail. */
    road_near[dir] = contains_alteration(talteration_near[dir], S_ROAD);
    rail_near[dir] = contains_alteration(talteration_near[dir], S_RAILROAD);

    /* Draw rail/road if there is a connection from this tile to the
     * adjacent tile. But don't draw road if there is also a rail
     * connection. */
    draw_rail[dir] = rail && rail_near[dir];
    draw_road[dir] = road && road_near[dir] && !draw_rail[dir];

    /* Don't draw an isolated road/rail if there's any connection. */
    draw_single_rail &= !draw_rail[dir];
    draw_single_road &= !draw_rail[dir] && !draw_road[dir];
  }

  /* Draw road corners underneath rails (styles 0 and 1). */
  sprs += fill_road_corner_sprites(sprs, road, road_near, rail, rail_near);

  if (roadstyle == 0) {
    /* With roadstyle 0, we simply draw one road/rail for every connection.
     * This means we only need a few sprites, but a lot of drawing is
     * necessary and it generally doesn't look very good. */
    int i;

    /* First draw roads under rails. */
    if (road) {
      for (i = 0; i < num_valid_tileset_dirs; i++) {
        if (draw_road[valid_tileset_dirs[i]]) {
          ADD_SPRITE_SIMPLE(sprites.road.dir[i]);
        }
      }
    }

    /* Then draw rails over roads. */
    if (rail) {
      for (i = 0; i < num_valid_tileset_dirs; i++) {
        if (draw_rail[valid_tileset_dirs[i]]) {
          ADD_SPRITE_SIMPLE(sprites.rail.dir[i]);
        }
      }
    }
  } else if (roadstyle == 1) {
    /* With roadstyle 1, we draw one sprite for cardinal road connections,
     * one sprite for diagonal road connections, and the same for rail.
     * This means we need about 4x more sprites than in style 0, but up to
     * 4x less drawing is needed. The drawing quality may also be
     * improved. */

    /* First draw roads under rails. */
    if (road) {
      int road_even_tileno = 0, road_odd_tileno = 0, i;

      /* Build two bit indexes: one over the even (cardinal) valid
       * directions, one over the odd (diagonal) ones. */
      for (i = 0; i < num_valid_tileset_dirs / 2; i++) {
        enum direction8 even = valid_tileset_dirs[2 * i];
        enum direction8 odd = valid_tileset_dirs[2 * i + 1];

        if (draw_road[even]) {
          road_even_tileno |= 1 << i;
        }
        if (draw_road[odd]) {
          road_odd_tileno |= 1 << i;
        }
      }

      /* Draw the cardinal/even roads first. */
      if (road_even_tileno != 0) {
        ADD_SPRITE_SIMPLE(sprites.road.even[road_even_tileno]);
      }
      if (road_odd_tileno != 0) {
        ADD_SPRITE_SIMPLE(sprites.road.odd[road_odd_tileno]);
      }
    }

    /* Then draw rails over roads. */
    if (rail) {
      int rail_even_tileno = 0, rail_odd_tileno = 0, i;

      for (i = 0; i < num_valid_tileset_dirs / 2; i++) {
        enum direction8 even = valid_tileset_dirs[2 * i];
        enum direction8 odd = valid_tileset_dirs[2 * i + 1];

        if (draw_rail[even]) {
          rail_even_tileno |= 1 << i;
        }
        if (draw_rail[odd]) {
          rail_odd_tileno |= 1 << i;
        }
      }

      /* Draw the cardinal/even rails first. */
      if (rail_even_tileno != 0) {
        ADD_SPRITE_SIMPLE(sprites.rail.even[rail_even_tileno]);
      }
      if (rail_odd_tileno != 0) {
        ADD_SPRITE_SIMPLE(sprites.rail.odd[rail_odd_tileno]);
      }
    }
  } else {
    /* Roadstyle 2 is a very simple method that lets us simply retrieve
     * entire finished tiles, with a bitwise index of the presence of
     * roads in each direction. */

    /* Draw roads first */
    if (road) {
      int road_tileno = 0, i;

      for (i = 0; i < num_valid_tileset_dirs; i++) {
        enum direction8 dir = valid_tileset_dirs[i];

        if (draw_road[dir]) {
          road_tileno |= 1 << i;
        }
      }

      if (road_tileno != 0 || draw_single_road) {
        ADD_SPRITE_SIMPLE(sprites.road.total[road_tileno]);
      }
    }

    /* Then draw rails over roads. */
    if (rail) {
      int rail_tileno = 0, i;

      for (i = 0; i < num_valid_tileset_dirs; i++) {
        enum direction8 dir = valid_tileset_dirs[i];

        if (draw_rail[dir]) {
          rail_tileno |= 1 << i;
        }
      }

      if (rail_tileno != 0 || draw_single_rail) {
        ADD_SPRITE_SIMPLE(sprites.rail.total[rail_tileno]);
      }
    }
  }

  /* Draw isolated rail/road separately (styles 0 and 1 only). */
  if (roadstyle == 0 || roadstyle == 1) {
    if (draw_single_rail) {
      ADD_SPRITE_SIMPLE(sprites.rail.isolated);
    } else if (draw_single_road) {
      ADD_SPRITE_SIMPLE(sprites.road.isolated);
    }
  }

  /* Draw rail corners over roads (styles 0 and 1). */
  sprs += fill_rail_corner_sprites(sprs, rail, rail_near);

  return sprs - saved_sprs;
}
/**************************************************************************
Return the index of the sprite to be used for irrigation or farmland in
this tile.
We assume that the current tile has farmland or irrigation. We then
choose a sprite (index) based upon which cardinally adjacent tiles have
either farmland or irrigation (the two are considered interchangable for
this).
**************************************************************************/
static int get_irrigation_index(enum tile_alteration_type *talteration_near)
{
  int i;
  int tileno = 0;

  /* Build a bitmask: bit i is set when the tile in cardinal direction i
   * is irrigated.  (A tile with S_FARMLAND also has S_IRRIGATION set.) */
  for (i = 0; i < num_cardinal_tileset_dirs; i++) {
    if (contains_alteration(talteration_near[cardinal_tileset_dirs[i]],
                            S_IRRIGATION)) {
      tileno |= 1 << i;
    }
  }

  return tileno;
}
/**************************************************************************
Fill in the farmland/irrigation sprite for the tile.
**************************************************************************/
static int fill_irrigation_sprite_array(struct drawn_sprite *sprs,
                                        enum tile_alteration_type talteration,
                                        enum tile_alteration_type *talteration_near,
                                        city_t *pcity)
{
  struct drawn_sprite *saved_sprs = sprs;

  /* Tiles with S_FARMLAND are always expected to carry S_IRRIGATION too. */
  assert(!contains_alteration(talteration, S_FARMLAND)
         || contains_alteration(talteration, S_IRRIGATION));

  /* We don't draw the irrigation if there's a city (it just gets overdrawn
   * anyway, and ends up looking bad). */
  if (draw_irrigation
      && !(pcity && draw_cities)
      && contains_alteration(talteration, S_IRRIGATION)) {
    int idx = get_irrigation_index(talteration_near);

    /* Farmland supersedes plain irrigation. */
    if (contains_alteration(talteration, S_FARMLAND)) {
      ADD_SPRITE_SIMPLE(sprites.tx.farmland[idx]);
    } else {
      ADD_SPRITE_SIMPLE(sprites.tx.irrigation[idx]);
    }
  }

  return sprs - saved_sprs;
}
/****************************************************************************
Fill in the sprite array for blended terrain.
****************************************************************************/
static int fill_blending_sprite_array(struct drawn_sprite *sprs,
                                      tile_t *ptile,
                                      Terrain_type_id *ttype_near)
{
  struct drawn_sprite *saved_sprs = sprs;
  Terrain_type_id ttype = map_get_terrain(ptile);

  /* Blending only happens in isometric view, and only for terrains the
   * tileset marks as blended. */
  if (is_isometric && sprites.terrain[ttype]->is_blended) {
    int dir;
    const int W = NORMAL_TILE_WIDTH, H = NORMAL_TILE_HEIGHT;
    /* Pixel offsets of the four half-size blend quadrants (direction4
     * ordering, matching the blend[] sprites). */
    const int offsets[4][2] = {
      {W/2, 0}, {0, H / 2}, {W / 2, H / 2}, {0, 0}
    };

    /*
     * We want to mark unknown tiles so that an unreal tile will be
     * given the same marking as our current tile - that way we won't
     * get the "unknown" dither along the edge of the map.
     */
    for (dir = 0; dir < 4; dir++) {
      tile_t *tile1 = mapstep(ptile, (direction8)DIR4_TO_DIR8[dir]);
      Terrain_type_id other = ttype_near[DIR4_TO_DIR8[dir]];

      /* Skip unreal/unknown neighbours, identical terrain, and
       * neighbours that do not blend themselves. */
      if (!tile1
          || tile_get_known(tile1) == TILE_UNKNOWN
          || other == ttype
          || !sprites.terrain[other]->is_blended) {
        continue;
      }

      ADD_SPRITE(sprites.terrain[other]->blend[dir], DRAW_NORMAL, TRUE,
                 offsets[dir][0], offsets[dir][1]);
    }
  }

  return sprs - saved_sprs;
}
/****************************************************************************
Add sprites for the base terrain to the sprite list. This doesn't
include specials or rivers.
****************************************************************************/
static int fill_terrain_sprite_array(struct drawn_sprite *sprs,
                                     tile_t *ptile,
                                     Terrain_type_id *ttype_near)
{
  struct drawn_sprite *saved_sprs = sprs;
  struct Sprite *sprite;
  Terrain_type_id ttype = ptile->terrain;
  struct terrain_drawing_data *draw = sprites.terrain[ttype];
  int l, i, tileno;
  tile_t *adjc_tile;

  if (!draw_terrain) {
    return 0;
  }

  /* Skip the normal drawing process. */
  /* A per-tile override sprite replaces all terrain layers. */
  if (ptile->spec_sprite) {
    sprite = load_sprite(ptile->spec_sprite);
    ADD_SPRITE_SIMPLE(sprite);
    return 1;
  }

  for (l = 0; l < draw->num_layers; l++) {
    if (draw->layer[l].match_style == MATCH_NONE) {
      int count = sprite_vector_size(&draw->layer[l].base);

      /* Pseudo-random reproducable algorithm to pick a sprite. */
#define LARGE_PRIME 10007
#define SMALL_PRIME 1009
      assert(count < SMALL_PRIME);
      assert((int)(LARGE_PRIME * MAX_MAP_INDEX) > 0);
      count = ((ptile->index
                * LARGE_PRIME) % SMALL_PRIME) % count;
      ADD_SPRITE(draw->layer[l].base.p[count],
                 draw->layer[l].is_tall ? DRAW_FULL : DRAW_NORMAL,
                 TRUE, draw->layer[l].offset_x, draw->layer[l].offset_y);
    } else {
      int match_type = draw->layer[l].match_type;

      /* Match type of the neighbour terrain's same layer, or -1 when that
       * terrain has fewer layers. */
#define MATCH(dir) \
      (sprites.terrain[ttype_near[(dir)]]->num_layers > l \
       ? sprites.terrain[ttype_near[(dir)]]->layer[l].match_type : -1)

      if (draw->layer[l].cell_type == CELL_SINGLE) {
        tileno = 0;
        assert(draw->layer[l].match_style == MATCH_BOOLEAN);
        /* One bit per matching cardinal neighbour. */
        for (i = 0; i < num_cardinal_tileset_dirs; i++) {
          enum direction8 dir = cardinal_tileset_dirs[i];

          if (MATCH(dir) == match_type) {
            tileno |= 1 << i;
          }
        }

        ADD_SPRITE(draw->layer[l].match[tileno],
                   draw->layer[l].is_tall ? DRAW_FULL : DRAW_NORMAL,
                   TRUE, draw->layer[l].offset_x, draw->layer[l].offset_y);
      } else if (draw->layer[l].cell_type == CELL_RECT) {
        /* Divide the tile up into four rectangular cells. Now each of these
         * cells covers one corner, and each is adjacent to 3 different
         * tiles. For each cell we pick a sprite based upon the adjacent
         * terrains at each of those tiles. Thus we have 8 different sprites
         * for each of the 4 cells (32 sprites total).
         *
         * These arrays correspond to the direction4 ordering. */
        const int W = NORMAL_TILE_WIDTH, H = NORMAL_TILE_HEIGHT;
        const int iso_offsets[4][2] = {
          {W / 4, 0}, {W / 4, H / 2}, {W / 2, H / 4}, {0, H / 4}
        };
        const int noniso_offsets[4][2] = {
          {0, 0}, {W / 2, H / 2}, {W / 2, 0}, {0, H / 2}
        };
        int i;

        /* put corner cells */
        for (i = 0; i < NUM_CORNER_DIRS; i++) {
          const int count = draw->layer[l].match_count;
          int array_index = 0;
          enum direction8 dir = dir_ccw((direction8)DIR4_TO_DIR8[i]);
          int x = (is_isometric ? iso_offsets[i][0] : noniso_offsets[i][0]);
          int y = (is_isometric ? iso_offsets[i][1] : noniso_offsets[i][1]);
          /* Match values of the three tiles touching this corner. */
          int m[3] = {MATCH(dir_ccw(dir)), MATCH(dir), MATCH(dir_cw(dir))};
          struct Sprite *s;

          switch (draw->layer[l].match_style) {
          case MATCH_NONE:
            /* Impossible */
            assert(0);
            break;
          case MATCH_BOOLEAN:
            assert(count == 2);
            /* Encode the three mismatches as a base-2 (3-bit) index. */
            array_index = array_index * count + (m[2] != match_type);
            array_index = array_index * count + (m[1] != match_type);
            array_index = array_index * count + (m[0] != match_type);
            break;
          case MATCH_FULL:
            if (m[0] == -1 || m[1] == -1 || m[2] == -1) {
              /* A neighbour lacks this layer; leave array_index at 0. */
              break;
            }
            /* Base-'count' encoding of the three neighbour match types. */
            array_index = array_index * count + m[2];
            array_index = array_index * count + m[1];
            array_index = array_index * count + m[0];
            break;
          }
          array_index = array_index * NUM_CORNER_DIRS + i;

          s = draw->layer[l].cells[array_index];
          if (s) {
            ADD_SPRITE(s, DRAW_NORMAL, TRUE, x, y);
          }
        }
      }
#undef MATCH
    }

    /* Add blending on top of the first layer. */
    if (l == 0 && draw->is_blended) {
      sprs += fill_blending_sprite_array(sprs, ptile, ttype_near);
    }

    /* Add darkness on top of the first layer. Note that darkness is always
     * drawn, even in citymode, etc. */
    if (l == 0) {
      /* TRUE iff the adjacent tile in 'dir' is real and unknown; as a
       * side effect sets adjc_tile. */
#define UNKNOWN(dir) \
      ((adjc_tile = mapstep(ptile, (dir))) \
       && tile_get_known(adjc_tile) == TILE_UNKNOWN)

      switch (darkness_style) {
      case DARKNESS_NONE:
        break;
      case DARKNESS_ISORECT:
        /* One half-tile darkness sprite per unknown direction4. */
        for (i = 0; i < 4; i++) {
          const int W = NORMAL_TILE_WIDTH, H = NORMAL_TILE_HEIGHT;
          int offsets[4][2] = {{W / 2, 0}, {0, H / 2}, {W / 2, H / 2}, {0, 0}};

          if (UNKNOWN((direction8)DIR4_TO_DIR8[i])) {
            ADD_SPRITE(sprites.tx.darkness[i], DRAW_NORMAL, TRUE,
                       offsets[i][0], offsets[i][1]);
          }
        }
        break;
      case DARKNESS_CARD_SINGLE:
        /* One overlay sprite per unknown cardinal direction. */
        for (i = 0; i < num_cardinal_tileset_dirs; i++) {
          if (UNKNOWN(cardinal_tileset_dirs[i])) {
            ADD_SPRITE_SIMPLE(sprites.tx.darkness[i]);
          }
        }
        break;
      case DARKNESS_CARD_FULL:
        /* We're looking to find the INDEX_NSEW for the directions that
         * are unknown. We want to mark unknown tiles so that an unreal
         * tile will be given the same marking as our current tile - that
         * way we won't get the "unknown" dither along the edge of the
         * map. */
        tileno = 0;
        for (i = 0; i < num_cardinal_tileset_dirs; i++) {
          if (UNKNOWN(cardinal_tileset_dirs[i])) {
            tileno |= 1 << i;
          }
        }

        if (tileno != 0) {
          ADD_SPRITE_SIMPLE(sprites.tx.darkness[tileno]);
        }
        break;
      }
#undef UNKNOWN
    }
  }

  return sprs - saved_sprs;
}
/****************************************************************************
Fill in the sprite array for the given tile, city, and unit.
ptile, if specified, gives the tile. If specified the terrain and specials
will be drawn for this tile. In this case (map_x,map_y) should give the
location of the tile.
punit, if specified, gives the unit. For tile drawing this should
generally be get_drawable_unit(); otherwise it can be any unit.
pcity, if specified, gives the city. For tile drawing this should
generally be ptile->city; otherwise it can be any city.
citymode specifies whether this is part of a citydlg. If so some drawing
is done differently.
****************************************************************************/
int fill_sprite_array(struct drawn_sprite *sprs, tile_t *ptile,
                      unit_t *punit, city_t *pcity,
                      bool citymode)
{
  Terrain_type_id ttype, ttype_near[8];
  enum tile_alteration_type talteration = S_NO_SPECIAL,
      talteration_near[8];
  int tileno, dir;
  unit_t *pfocus = get_unit_in_focus();
  struct drawn_sprite *save_sprs = sprs;
  /* Unit drawing is disabled if the view options is turned off, but only
   * if we're drawing on the mapview. */
  bool do_draw_unit = (punit && (draw_units || !ptile
                                 || (draw_focus_unit && pfocus == punit)));

  /* Nothing is drawn for tiles the player has never seen. */
  if (ptile && tile_get_known(ptile) == TILE_UNKNOWN) {
    return sprs - save_sprs;
  }

  /* Set up background color. */
  if (solid_color_behind_units) {
    if (do_draw_unit) {
      ADD_BG(get_player_color(unit_owner(punit)));
    } else if (pcity && draw_cities) {
      ADD_BG(get_player_color(city_owner(pcity)));
    }
  } else if (!draw_terrain) {
    if (ptile) {
      ADD_BG(COLOR_STD_BACKGROUND);
    }
  }

  /* Terrain and specials.  NOTE: the order of the ADD_* calls below defines
   * the bottom-to-top layering of the drawn sprites; do not reorder. */
  if (ptile) {
    build_tile_data(ptile,
                    &ttype, &talteration, ttype_near, talteration_near);
    sprs += fill_terrain_sprite_array(sprs, ptile, ttype_near);
    if (is_ocean(ttype) && draw_terrain) {
      /* River mouths on the four cardinal edges of an ocean tile. */
      for (dir = 0; dir < 4; dir++) {
        if (contains_alteration(talteration_near[DIR4_TO_DIR8[dir]], S_RIVER)) {
          ADD_SPRITE_SIMPLE(sprites.tx.river_outlet[dir]);
        }
      }
    }
    sprs += fill_irrigation_sprite_array(sprs, talteration, talteration_near,
                                         pcity);
    if (draw_terrain && contains_alteration(talteration, S_RIVER)) {
      int i;

      /* Draw rivers on top of irrigation. */
      tileno = 0;
      for (i = 0; i < num_cardinal_tileset_dirs; i++) {
        /* NOTE: this 'dir' shadows the outer int 'dir' declared above. */
        enum direction8 dir = cardinal_tileset_dirs[i];

        /* Rivers connect to adjacent rivers and to the ocean. */
        if (contains_alteration(talteration_near[dir], S_RIVER)
            || is_ocean(ttype_near[dir])) {
          tileno |= 1 << i;
        }
      }
      ADD_SPRITE_SIMPLE(sprites.tx.spec_river[tileno]);
    }
    sprs += fill_road_rail_sprite_array(sprs,
                                        talteration, talteration_near, pcity);
    if (draw_specials) {
      if (contains_alteration(talteration, S_SPECIAL_1)) {
        ADD_SPRITE_SIMPLE(sprites.terrain[ttype]->special[0]);
      } else if (contains_alteration(talteration, S_SPECIAL_2)) {
        ADD_SPRITE_SIMPLE(sprites.terrain[ttype]->special[1]);
      }
    }
    if (draw_fortress_airbase && contains_alteration(talteration, S_FORTRESS)
        && sprites.tx.fortress_back) {
      /* Fortress "back" goes under units; the matching front is added
       * further down (iso-view only). */
      ADD_SPRITE_FULL(sprites.tx.fortress_back);
    }
    if (draw_mines && contains_alteration(talteration, S_MINE)
        && sprites.terrain[ttype]->mine) {
      ADD_SPRITE_SIMPLE(sprites.terrain[ttype]->mine);
    }
    if (draw_specials && contains_alteration(talteration, S_HUT)) {
      ADD_SPRITE_SIMPLE(sprites.tx.village);
    }
  }
  if (ptile && is_isometric) {
    /* Add grid. In classic view this is done later. */
    ADD_GRID(ptile, citymode);
  }

  /* City. Some city sprites are drawn later. */
  if (pcity && draw_cities) {
    if (!solid_color_behind_units) {
      ADD_SPRITE(get_city_nation_flag_sprite(pcity),
                 DRAW_FULL, TRUE, flag_offset_x, flag_offset_y);
    } else {
      ADD_BG(get_player_color(city_owner(pcity)));
    }
    ADD_SPRITE_FULL(get_city_sprite(pcity));
    if (pcity->u.client.occupied) {
      ADD_SPRITE_FULL(get_city_occupied_sprite(pcity));
    }
    if (!is_isometric && city_got_citywalls(pcity)) {
      /* In iso-view the city wall is a part of the city sprite. */
      ADD_SPRITE_SIMPLE(get_city_wall_sprite(pcity));
    }
    if (pcity->u.client.unhappy) {
      ADD_SPRITE_FULL(sprites.city.disorder);
    } else if (city_celebrating(pcity) && sprites.city.happy) {
      ADD_SPRITE_SIMPLE(sprites.city.happy);
    }
  }

  /* Tile alterations drawn over the city sprite. */
  if (ptile) {
    if (draw_fortress_airbase && contains_alteration(talteration, S_AIRBASE)) {
      ADD_SPRITE_FULL(sprites.tx.airbase);
    }
    if (draw_pollution && contains_alteration(talteration, S_POLLUTION)) {
      ADD_SPRITE_SIMPLE(sprites.tx.pollution);
    }
    if (draw_pollution && contains_alteration(talteration, S_FALLOUT)) {
      ADD_SPRITE_SIMPLE(sprites.tx.fallout);
    }
  }
  if (fogstyle == 1 && draw_fog_of_war
      && ptile && tile_get_known(ptile) == TILE_KNOWN_FOGGED) {
    /* With fogstyle 1, fog is done this way. */
    ADD_SPRITE_SIMPLE(sprites.tx.fog);
  }

  /* City size. Drawing this under fog makes it hard to read. */
  if (pcity && draw_cities) {
    if (pcity->common.pop_size >= 10) {
      /* Tens digit first, then the units digit on top. */
      ADD_SPRITE(sprites.city.size_tens[pcity->common.pop_size / 10], DRAW_FULL,
                 FALSE, 0, 0);
    }
    ADD_SPRITE(sprites.city.size[pcity->common.pop_size % 10], DRAW_FULL,
               FALSE, 0, 0);
  }
  if (do_draw_unit) {
    bool stacked = ptile && (unit_list_size(ptile->units) > 1);
    bool backdrop = !pcity;

    sprs += fill_unit_sprite_array(sprs, punit, stacked, backdrop);
  }
  if (ptile) {
    if (is_isometric && draw_fortress_airbase
        && contains_alteration(talteration, S_FORTRESS)) {
      /* Draw fortress front in iso-view (non-iso view only has a fortress
       * back). */
      ADD_SPRITE_FULL(sprites.tx.fortress);
    }
  }
  if (ptile && !is_isometric) {
    /* Add grid. In iso-view this is done earlier. */
    ADD_GRID(ptile, citymode);
  }
  /* Return the number of sprites we appended to 'sprs'. */
  return sprs - save_sprs;
}
/**********************************************************************
Set city tiles sprite values; should only happen after
tilespec_load_tiles().
***********************************************************************/
static void tilespec_setup_style_tile(int style, char *graphics)
{
  /* Load the numbered city tiles (and, in iso-view, their wall variants)
   * for one city style, then the extra wall/occupied tiles.  Fills
   * sprites.city.tile[style][] and city_styles[style].tresh[]/tiles_num. */
  struct Sprite *sprite2;
  char buffer[128];
  int j;
  struct Sprite *sp_wall = NULL;
  char buffer_wall[128];

  city_styles[style].tiles_num = 0;

  for (j = 0; j < 32 && city_styles[style].tiles_num < MAX_CITY_TILES; j++) {
    my_snprintf(buffer, sizeof(buffer), "%s_%d", graphics, j);
    sprite2 = load_sprite(buffer);
    if (is_isometric) {
      /* Fixed: the wall tag used to be formatted into 'buffer' while being
       * sized by sizeof(buffer_wall), leaving 'buffer_wall' unused.  Use
       * 'buffer_wall' consistently so buffer and size always match. */
      my_snprintf(buffer_wall, sizeof(buffer_wall), "%s_%d_wall", graphics, j);
      sp_wall = load_sprite(buffer_wall);
    }
    if (sprite2) {
      sprites.city.tile[style][city_styles[style].tiles_num] = sprite2;
      if (is_isometric) {
        assert(sp_wall != NULL);
        sprites.city.tile_wall[style][city_styles[style].tiles_num] = sp_wall;
      }
      /* 'tresh' records the city-size threshold for this tile graphic. */
      city_styles[style].tresh[city_styles[style].tiles_num] = j;
      city_styles[style].tiles_num++;
      freelog(LOG_DEBUG, "Found tile %s_%d", graphics, j);
    }
  }

  if (city_styles[style].tiles_num == 0) { /* don't waste more time */
    return;
  }

  if (!is_isometric) {
    /* the wall tile (in iso-view walls are part of the city sprites) */
    my_snprintf(buffer, sizeof(buffer), "%s_wall", graphics);
    sprite2 = load_sprite(buffer);
    if (sprite2) {
      sprites.city.tile[style][city_styles[style].tiles_num] = sprite2;
    } else {
      freelog(LOG_NORMAL, "Warning: no wall tile for graphic %s", graphics);
    }
  }

  /* occupied tile */
  my_snprintf(buffer, sizeof(buffer), "%s_occupied", graphics);
  sprite2 = load_sprite(buffer);
  if (sprite2) {
    sprites.city.tile[style][city_styles[style].tiles_num + 1] = sprite2;
  } else {
    freelog(LOG_NORMAL, "Warning: no occupied tile for graphic %s", graphics);
  }
}
/**********************************************************************
Set city tiles sprite values; should only happen after
tilespec_load_tiles().
***********************************************************************/
void tilespec_setup_city_tiles(int style)
{
  /* First choice: the style's primary graphic set. */
  tilespec_setup_style_tile(style, city_styles[style].graphic);
  if (city_styles[style].tiles_num != 0) {
    return;
  }

  /* Nothing found; fall back to the alternate graphic set. */
  freelog(LOG_NORMAL, "No tiles for %s style, trying alternate %s style",
          city_styles[style].graphic, city_styles[style].graphic_alt);
  tilespec_setup_style_tile(style, city_styles[style].graphic_alt);
  if (city_styles[style].tiles_num != 0) {
    return;
  }

  /* Still nothing; last resort is the hard-coded default tile set. */
  freelog(LOG_NORMAL,
          "No tiles for alternate %s style, using default tiles",
          city_styles[style].graphic_alt);
  sprites.city.tile[style][0] = load_sprite("cd.city");
  sprites.city.tile[style][1] = load_sprite("cd.city_wall");
  sprites.city.tile[style][2] = load_sprite("cd.occupied");
  city_styles[style].tiles_num = 1;
  city_styles[style].tresh[0] = 0;
}
/**********************************************************************
alloc memory for city tiles sprites
***********************************************************************/
void tilespec_alloc_city_tiles(int count)
{
  int style;

  /* Each style gets MAX_CITY_TILES numbered tiles plus two slots for the
   * wall and occupied sprites; wall arrays exist only in iso-view. */
  if (is_isometric) {
    sprites.city.tile_wall =
        (Sprite***)wc_calloc(count, sizeof(struct Sprite**));
  }
  sprites.city.tile = (Sprite***)wc_calloc(count, sizeof(struct Sprite**));

  for (style = 0; style < count; style++) {
    if (is_isometric) {
      sprites.city.tile_wall[style] =
          (Sprite**)wc_calloc(MAX_CITY_TILES + 2, sizeof(struct Sprite*));
    }
    sprites.city.tile[style] =
        (Sprite**)wc_calloc(MAX_CITY_TILES + 2, sizeof(struct Sprite*));
  }
}
/**********************************************************************
free memory for city tiles sprites
***********************************************************************/
void tilespec_free_city_tiles(int count)
{
  int style;

  /* Release the per-style arrays first, then the outer arrays.  The wall
   * arrays are only allocated in iso-view (see tilespec_alloc_city_tiles). */
  for (style = 0; style < count; style++) {
    if (is_isometric) {
      free(sprites.city.tile_wall[style]);
      sprites.city.tile_wall[style] = NULL;
    }
    free(sprites.city.tile[style]);
    sprites.city.tile[style] = NULL;
  }

  if (is_isometric) {
    free(sprites.city.tile_wall);
    sprites.city.tile_wall = NULL;
  }
  free(sprites.city.tile);
  sprites.city.tile = NULL;
}
/**********************************************************************
Not sure which module to put this in...
It used to be that each nation had a color, when there was
fixed number of nations. Now base on player number instead,
since still limited to less than 14. Could possibly improve
to allow players to choose their preferred color etc.
A hack added to avoid returning more that COLOR_STD_RACE13.
But really there should be more colors available -- jk.
***********************************************************************/
enum color_std get_player_color(player_t *pplayer)
{
  /* NULL player (e.g. no owner) maps to the sentinel color. */
  if (!pplayer) {
    return COLOR_STD_LAST;
  }
  return player_color[pplayer->player_no];
}
/**********************************************************************
...
***********************************************************************/
static void classic_player_colors_init(void)
{
  unsigned int slot;

  /* Assign race colors round-robin by player number. */
  for (slot = 0; slot < ARRAY_SIZE(player_color); slot++) {
    player_color[slot] =
        static_cast<color_std>(COLOR_STD_RACE0 + (slot % PLAYER_COLORS_NUM));
  }
}
/**********************************************************************
Return color for overview map tile in classic warciv style.
***********************************************************************/
static enum color_std classic_overview_tile_color(tile_t *ptile)
{
  unit_t *punit;
  city_t *pcity;
  player_t *pplayer;
  player_t *me = get_player_ptr();

  /* Tiles we have never seen are drawn black. */
  if (!ptile || tile_get_known(ptile) == TILE_UNKNOWN) {
    return COLOR_STD_BLACK;
  }
  /* Cities take precedence over units, which take precedence over terrain. */
  if ((pcity = map_get_city(ptile))) {
    pplayer = city_owner(pcity);
    if (!pplayer || client_is_global_observer() || pplayer == me) {
      return COLOR_STD_WHITE;
    } else {
      switch (pplayer_get_diplstate(pplayer, me)->type) {
      case DIPLSTATE_NO_CONTACT:
        if (game.info.diplomacy >= 2) {
          return COLOR_STD_FORANGE;
        }
        /* Deliberate fall through: under other diplomacy settings a
         * no-contact player is colored like a neutral one. */
      case DIPLSTATE_NEUTRAL:
      case DIPLSTATE_PEACE:
      case DIPLSTATE_CEASEFIRE:
        return COLOR_STD_CYAN;
      case DIPLSTATE_ALLIANCE:
      case DIPLSTATE_TEAM:
        return COLOR_STD_FGREEN;
      default: /* DS_WAR */
        return COLOR_STD_FORANGE;
      }
    }
  } else if ((punit=find_visible_unit(ptile))) {
    pplayer = unit_owner(punit);
    if (!pplayer || client_is_global_observer() || pplayer == me) {
      return COLOR_STD_YELLOW;
    } else {
      switch (pplayer_get_diplstate(pplayer, me)->type) {
      case DIPLSTATE_NO_CONTACT:
        if (game.info.diplomacy >= 2) {
          return COLOR_STD_RED;
        }
        /* Deliberate fall through, mirroring the city branch above. */
      case DIPLSTATE_NEUTRAL:
      case DIPLSTATE_PEACE:
      case DIPLSTATE_CEASEFIRE:
        return COLOR_STD_ORANGE;
      case DIPLSTATE_ALLIANCE:
      case DIPLSTATE_TEAM:
        return COLOR_STD_GREEN;
      default: /* DS_WAR */
        return COLOR_STD_RED;
      }
    }
  }
  /* Terrain; fogged tiles get dimmed variants. */
  if (is_ocean(ptile->terrain)) {
    if (tile_get_known(ptile) == TILE_KNOWN_FOGGED && draw_fog_of_war) {
      return COLOR_STD_RACE4;
    } else {
      return COLOR_STD_OCEAN;
    }
  } else {
    if (tile_get_known(ptile) == TILE_KNOWN_FOGGED && draw_fog_of_war) {
      return COLOR_STD_BACKGROUND;
    } else {
      return COLOR_STD_GROUND;
    }
  }
  /* Unreachable: both branches above return. */
  return COLOR_STD_LAST;
}
/**********************************************************************
As long as the team couldn't be changed while the game is running,
we can init those colors when the game starts.
***********************************************************************/
static void team_player_colors_init(void)
{
  unsigned int i;
  int n = 0;  /* number of distinct color slots handed out so far */

  /* Give team colors: every member of a non-empty team shares one color. */
  team_iterate(pteam) {
    if (pteam->member_count > 0) {
      players_iterate(pplayer) {
        if (pplayer->team == pteam->id) {
          player_color[pplayer->player_no] = static_cast<color_std>(
              COLOR_STD_RACE0 + (n % PLAYER_COLORS_NUM));
        }
      } players_iterate_end;
      n++;
    }
  } team_iterate_end;

  /* Other colors: teamless (or nonexistent) player slots each get their
   * own color, continuing the round-robin from where the teams stopped. */
  for (i = 0; i < ARRAY_SIZE(player_color); i++) {
    if (i >= game.info.nplayers || game.players[i].team == TEAM_NONE) {
      player_color[i] = static_cast<color_std>(
          COLOR_STD_RACE0 + (n % PLAYER_COLORS_NUM));
      n++;
    }
  }
}
/**********************************************************************
Return color for overview map tile with better emphasis on teams.
***********************************************************************/
static enum color_std team_overview_tile_color(tile_t *ptile)
{
  unit_t *punit;
  city_t *pcity;
  player_t *pplayer;
  player_t *me;
  bool is_fogged;

  /* Tiles we have never seen are drawn black. */
  if (!ptile || tile_get_known(ptile) == TILE_UNKNOWN) {
    return COLOR_STD_BLACK;
  }
  me = get_player_ptr();

  /* Cities: green for teammates, red for everyone else.  Observers see
   * the owner's own player color. */
  pcity = map_get_city(ptile);
  if (pcity) {
    pplayer = city_owner(pcity);
    if (me) {
      if (players_on_same_team(pplayer, me)) {
        return COLOR_STD_GREEN;
      } else {
        return COLOR_STD_RED;
      }
    } else {
      return get_player_color(pplayer);
    }
  }

  /* Units: same scheme as cities but with the "F" (full?) color variants. */
  punit = find_visible_unit(ptile);
  if (punit) {
    pplayer = unit_owner(punit);
    if (me) {
      if (players_on_same_team(pplayer, me)) {
        return COLOR_STD_FGREEN;
      } else {
        return COLOR_STD_FRED;
      }
    } else {
      return get_player_color(pplayer);
    }
  }

  /* Terrain; fogged tiles get dimmed variants. */
  is_fogged = (tile_get_known(ptile) == TILE_KNOWN_FOGGED && draw_fog_of_war);
  /* Bug fix: this condition was written "if is_ocean(...)" without the
   * parentheses the if statement requires; now matches the classic
   * variant's "if (is_ocean(...))". */
  if (is_ocean(ptile->terrain)) {
    if (is_fogged) {
      return COLOR_STD_RACE4;
    } else {
      return COLOR_STD_OCEAN;
    }
  } else {
    if (is_fogged) {
      return COLOR_STD_BACKGROUND;
    } else {
      return COLOR_STD_GROUND;
    }
  }
}
/**********************************************************************
...
***********************************************************************/
void player_colors_init(void)
{
  /* Rebuild the player_color[] palette according to the current mode. */
  switch (player_colors_mode) {
  case PLAYER_COLORS_MODES_CLASSIC:
    classic_player_colors_init();
    return;
  case PLAYER_COLORS_MODES_TEAM:
    team_player_colors_init();
    return;
  case PLAYER_COLORS_MODES_NUM:
    /* Not a valid case */
    break;
  }
  /* Don't set as default case to be warned if we forgot to add a value */
  /* Fixed typos in the log message ("Unkown ... varriant"). */
  freelog(LOG_ERROR, "Unknown player colors mode variant (%d)",
          player_colors_mode);
  classic_player_colors_init();
}
/**********************************************************************
Return color for overview map tile.
***********************************************************************/
enum color_std overview_tile_color(tile_t *ptile)
{
  /* Dispatch to the color scheme selected by player_colors_mode. */
  switch (player_colors_mode) {
  case PLAYER_COLORS_MODES_CLASSIC:
    return classic_overview_tile_color(ptile);
  case PLAYER_COLORS_MODES_TEAM:
    return team_overview_tile_color(ptile);
  case PLAYER_COLORS_MODES_NUM:
    /* Not a valid case */
    break;
  }
  /* Don't set as default case to be warned if we forgot to add a value */
  /* Fixed typos in the log message ("Unkown ... varriant"). */
  freelog(LOG_ERROR, "Unknown player colors mode variant (%d)",
          player_colors_mode);
  return classic_overview_tile_color(ptile);
}
/**************************************************************************
Return the string name (untranslated) corresponding to the
player colors mode.
**************************************************************************/
const char *player_colors_mode_get_name(enum player_colors_modes mode)
{
  /* N_() only marks the string for translation; callers are expected to
   * pass the result through gettext when displaying it. */
  switch (mode) {
  case PLAYER_COLORS_MODES_CLASSIC:
    return N_("Classic");
  case PLAYER_COLORS_MODES_TEAM:
    return N_("Team");
  case PLAYER_COLORS_MODES_NUM:
    /* Not a valid case */
    break;
  }
  /* Don't set as default case to be warned if we forgot to add a value */
  return NULL;  /* invalid mode */
}
/**************************************************************************
Update all it should be updated.
**************************************************************************/
void player_colors_mode_changed(void)
{
  /* Recompute the palette first, then repaint everything that renders
   * player colors. */
  /* Reset colors */
  player_colors_init();
  update_player_colors_mode_label();
  refresh_overview_canvas();
  update_map_canvas_visible(MAP_UPDATE_NORMAL); /* for borders color */
  update_players_dialog(); /* for borders color */
}
/****************************************************************
...
*****************************************************************/
void player_colors_mode_option_callback(struct client_option *poption)
{
  /* 'poption' is unused; the parameter exists because the client option
   * callback interface dictates this signature. */
  player_colors_mode_changed();
}
/**********************************************************************
Set focus_unit_hidden (q.v.) variable to given value.
***********************************************************************/
void set_focus_unit_hidden_state(bool hide)
{
  /* When TRUE, get_drawable_unit() suppresses drawing of blinking
   * focus units (the "off" phase of the blink). */
  focus_unit_hidden = hide;
}
/**********************************************************************
...
***********************************************************************/
unit_t *get_drawable_unit(tile_t *ptile, bool citymode)
{
  unit_t *punit = find_visible_unit(ptile);

  if (!punit) {
    return NULL;
  }
  /* In the city dialog, our own units are not drawn on the map part. */
  if (citymode && punit->owner == get_player_idx()) {
    return NULL;
  }
  /* Units outside the multi-selection, or any unit while the blink is in
   * its visible phase, are always drawn. */
  if (!is_unit_in_multi_select(0, punit)
      || !focus_unit_hidden) {
    return punit;
  } else if (multi_select_blink_all) {
    /* Blink-all mode: hide every selected unit during the hidden phase. */
    return NULL;
  } else if (punit->focus_status == FOCUS_DONE) {
    /* Units that already moved this turn don't blink. */
    return punit;
  }
  /* for unit in stack or city */
  else if (punit == get_unit_in_focus()
           || (multi_select_blink
               && unit_satisfies_filter(punit,
                                        multi_select_inclusive_filter,
                                        multi_select_exclusive_filter))) {
    /* The focus unit itself (and, with multi_select_blink, any unit
     * matching the selection filters) is hidden during this phase. */
    return NULL;
  } else {
    return punit;
  }
}
/* Drop every outstanding reference on every tag; the last unload of a
 * tag frees its sprite. */
static void unload_all_sprites(void)
{
  int idx;
  const int total = hash_num_entries(sprite_hash);

  for (idx = 0; idx < total; idx++) {
    const char *tag = static_cast<const char *>(
        hash_key_by_number(sprite_hash, idx));
    struct small_sprite *ss = static_cast<struct small_sprite *>(
        hash_lookup_data(sprite_hash, tag));

    while (ss->ref_count > 0) {
      unload_sprite(tag);
    }
  }
}
/**********************************************************************
...
***********************************************************************/
void tilespec_free_tiles(void)
{
  int i, entries = hash_num_entries(sprite_hash);

  freelog(LOG_DEBUG, "tilespec_free_tiles");
  unload_all_sprites();
  /* Empty the tag hash.  NOTE(review): the constant index 0 below looks
   * deliberate, not a typo for 'i' — presumably each hash_delete_entry()
   * renumbers the table so the next remaining key is always at position 0.
   * Confirm against the hash implementation before changing it. */
  for (i = 0; i < entries; i++) {
    const char *key = (const char *)hash_key_by_number(sprite_hash, 0);

    hash_delete_entry(sprite_hash, key);
    free((void *) key);
  }
  hash_free(sprite_hash);
  sprite_hash = NULL;
  /* Free the small-sprite bookkeeping; the sprites themselves must already
   * have been unloaded (hence the assert below). */
  small_sprite_list_iterate(small_sprites, ss) {
    if (ss->file) {
      free(ss->file);
    }
    assert(ss->sprite == NULL);
    free(ss);
  } small_sprite_list_iterate_end;
  small_sprite_list_free(small_sprites);
  specfile_list_iterate(specfiles, sf) {
    free(sf->file_name);
    if (sf->big_sprite) {
      free_sprite(sf->big_sprite);
      sf->big_sprite = NULL;
    }
    free(sf);
  } specfile_list_iterate_end;
  specfile_list_free(specfiles);
  if (num_tiles_explode_unit > 0) {
    free(sprites.explode.unit);
  }
}
/**************************************************************************
Return a sprite for the given citizen. The citizen's type is given,
as well as their index (in the range [0..pcity->common.size)). The
citizen's city can be used to determine which sprite to use (a NULL
value indicates there is no city; i.e., the sprite is just being
used as a picture).
**************************************************************************/
struct Sprite *get_citizen_sprite(struct citizen_type type,
                                  int citizen_index,
                                  const city_t *pcity)
{
  /* Specialists have their own sprite table; everyone else is looked up
   * by citizen type. */
  struct named_sprites::ns_citizen_graphic *graphic =
      (type.type == CITIZEN_SPECIALIST)
          ? &sprites.specialist[type.spec_type]
          : &sprites.citizen[type.type];

  /* Wrap the index so any citizen position maps onto an available sprite. */
  return graphic->sprite[citizen_index % graphic->count];
}
/**************************************************************************
Loads the sprite. If the sprite is already loaded a reference
counter is increased. Can return NULL if the sprite couldn't be
loaded.
**************************************************************************/
struct Sprite *load_sprite(const char *tag_name)
{
  /* Lookup information about where the sprite is found. */
  struct small_sprite *ss = static_cast<struct small_sprite *>(
      hash_lookup_data(sprite_hash, tag_name));

  freelog(LOG_DEBUG, "load_sprite(tag='%s')", tag_name);
  if (!ss) {
    /* Unknown tag; callers handle NULL. */
    return NULL;
  }
  assert(ss->ref_count >= 0);
  if (!ss->sprite) {
    /* If the sprite hasn't been loaded already, then load it. */
    assert(ss->ref_count == 0);
    if (ss->file) {
      /* The sprite is an entire standalone graphics file. */
      ss->sprite = load_gfx_file(ss->file);
      if (!ss->sprite) {
        freelog(LOG_FATAL, _("Couldn't load gfx file %s for sprite %s"),
                ss->file, tag_name);
        exit(EXIT_FAILURE);
      }
    } else {
      /* The sprite is a rectangle cropped out of a specfile's big sprite. */
      int specf_w, specf_h;

      ensure_big_sprite(ss->specfile_);
      get_sprite_dimensions(ss->specfile_->big_sprite, &specf_w, &specf_h);
      /* Reject coordinates that fall outside the source image. */
      if (ss->x < 0 || ss->x + ss->width > specf_w
          || ss->y < 0 || ss->y + ss->height > specf_h) {
        freelog(LOG_ERROR,
                "Sprite '%s' in file '%s' isn't within the image!",
                tag_name, ss->specfile_->file_name);
        return NULL;
      }
      ss->sprite =
          crop_sprite(ss->specfile_->big_sprite, ss->x, ss->y, ss->width, ss->height,
                      NULL, -1, -1);
    }
  }
  /* Track the reference count so we know when to free the sprite. */
  ss->ref_count++;
  return ss->sprite;
}
/**************************************************************************
Unloads the sprite. Decrease the reference counter. If the last
reference is removed the sprite is freed.
**************************************************************************/
void unload_sprite(const char *tag_name)
{
  struct small_sprite *ss = static_cast<struct small_sprite *>(
      hash_lookup_data(sprite_hash, tag_name));

  /* Unloading a tag that was never loaded (or is already fully unloaded)
   * is a programming error. */
  assert(ss);
  assert(ss->ref_count >= 1);
  assert(ss->sprite);

  ss->ref_count--;
  if (ss->ref_count == 0) {
    /* Nobody's using the sprite anymore, so we should free it. We know
     * where to find it if we need it again. */
    freelog(LOG_DEBUG, "freeing sprite '%s'", tag_name);
    free_sprite(ss->sprite);
    ss->sprite = NULL;
  }
}
/**************************************************************************
Return TRUE iff the specified sprite exists in the tileset (whether
or not it is currently loaded).
**************************************************************************/
bool sprite_exists(const char *tag_name)
{
  /* The tag hash knows every sprite in the tileset, loaded or not. */
  return hash_lookup_data(sprite_hash, tag_name) != NULL;
}
/**************************************************************************
Frees any internal buffers which are created by load_sprite. Should
be called after the last (for a given period of time) load_sprite
call. This saves a fair amount of memory, but it will take extra time
the next time we start loading sprites again.
**************************************************************************/
void finish_loading_sprites(void)
{
  /* Drop each specfile's cached "big sprite"; the small sprites already
   * cropped from it stay valid.  Presumably ensure_big_sprite() reloads
   * it on demand — see load_sprite(). */
  specfile_list_iterate(specfiles, sf) {
    if (sf->big_sprite) {
      free_sprite(sf->big_sprite);
      sf->big_sprite = NULL;
    }
  } specfile_list_iterate_end;
}
| seggil/warciv | client/tilespec.cc | C++ | gpl-2.0 | 118,508 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.