| code (string, 3–1.01M chars) | repo_name (string, 5–116 chars) | path (string, 3–311 chars) | language (30 classes) | license (15 classes) | size (int64, 3–1.01M) |
|---|---|---|---|---|---|
html {
-webkit-font-smoothing: antialiased;
}
/* This is the default Tooltipster theme (feel free to modify or duplicate and create multiple themes!): */
.tooltipster-default {
max-width: 250px !important;
border: 1px solid #cfcfcf;
background: #fff;
border-radius: 3px;
color: #8e8e8e;
-webkit-box-shadow: 2px 2px 8px rgba(0,0,0,0.5);
-moz-box-shadow: 2px 2px 8px rgba(0,0,0,0.5);
box-shadow: 2px 2px 8px rgba(0,0,0,0.5);
}
.tooltipster-arrow-border{border-color:#cfcfcf !important;}
/* Use this next selector to style things like font-size and line-height: */
.tooltipster-default .tooltipster-content {
font-family: Arial, sans-serif;
font-size: 13px;
line-height: 16px;
padding: 12px 19px 20px 19px;
overflow: hidden;
}
/* This next selector defines the color of the border on the outside of the arrow. This will automatically match the color and size of the border set on the main tooltip styles. Set display: none; if you would like a border around the tooltip but no border around the arrow */
.tooltipster-default .tooltipster-arrow .tooltipster-arrow-border {
/* border-color: ... !important; */
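/* display: none; */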
}
/* If you're using the icon option, use this next selector to style them */
.tooltipster-icon {
cursor: help;
margin-left: 4px;
}
/* This is the base styling required to make all Tooltipsters work */
.tooltipster-base {
padding: 0;
font-size: 0;
line-height: 0;
position: absolute;
z-index: 9999999;
pointer-events: none;
width: auto;
overflow: visible;
}
.tooltipster-base .tooltipster-content {
overflow: hidden;
}
/* These next classes handle the styles for the little arrow attached to the tooltip. By default, the arrow will inherit the same colors and border as what is set on the main tooltip itself. */
.tooltipster-arrow {
display: block;
text-align: center;
width: 100%;
height: 100%;
position: absolute;
top: 0;
left: 0;
z-index: -1;
}
.tooltipster-arrow span, .tooltipster-arrow-border {
display: block;
width: 0;
height: 0;
position: absolute;
}
.tooltipster-arrow-top span, .tooltipster-arrow-top-right span, .tooltipster-arrow-top-left span {
border-left: 8px solid transparent !important;
border-right: 8px solid transparent !important;
border-top: 8px solid;
bottom: -7px;
}
.tooltipster-arrow-top .tooltipster-arrow-border, .tooltipster-arrow-top-right .tooltipster-arrow-border, .tooltipster-arrow-top-left .tooltipster-arrow-border {
border-left: 9px solid transparent !important;
border-right: 9px solid transparent !important;
border-top: 9px solid;
bottom: -7px;
}
.tooltipster-arrow-bottom span, .tooltipster-arrow-bottom-right span, .tooltipster-arrow-bottom-left span {
border-left: 8px solid transparent !important;
border-right: 8px solid transparent !important;
border-bottom: 8px solid;
top: -7px;
}
.tooltipster-arrow-bottom .tooltipster-arrow-border, .tooltipster-arrow-bottom-right .tooltipster-arrow-border, .tooltipster-arrow-bottom-left .tooltipster-arrow-border {
border-left: 9px solid transparent !important;
border-right: 9px solid transparent !important;
border-bottom: 9px solid;
top: -7px;
}
.tooltipster-arrow-top span, .tooltipster-arrow-top .tooltipster-arrow-border, .tooltipster-arrow-bottom span, .tooltipster-arrow-bottom .tooltipster-arrow-border {
left: 0;
right: 0;
margin: 0 auto;
}
.tooltipster-arrow-top-left span, .tooltipster-arrow-bottom-left span {
left: 6px;
}
.tooltipster-arrow-top-left .tooltipster-arrow-border, .tooltipster-arrow-bottom-left .tooltipster-arrow-border {
left: 5px;
}
.tooltipster-arrow-top-right span, .tooltipster-arrow-bottom-right span {
right: 6px;
}
.tooltipster-arrow-top-right .tooltipster-arrow-border, .tooltipster-arrow-bottom-right .tooltipster-arrow-border {
right: 5px;
}
.tooltipster-arrow-left span, .tooltipster-arrow-left .tooltipster-arrow-border {
border-top: 8px solid transparent !important;
border-bottom: 8px solid transparent !important;
border-left: 8px solid;
top: 50%;
margin-top: -7px;
right: -7px;
}
.tooltipster-arrow-left .tooltipster-arrow-border {
border-top: 9px solid transparent !important;
border-bottom: 9px solid transparent !important;
border-left: 9px solid;
margin-top: -8px;
}
.tooltipster-arrow-right span, .tooltipster-arrow-right .tooltipster-arrow-border {
border-top: 8px solid transparent !important;
border-bottom: 8px solid transparent !important;
border-right: 8px solid;
top: 50%;
margin-top: -7px;
left: -7px;
}
.tooltipster-arrow-right .tooltipster-arrow-border {
border-top: 9px solid transparent !important;
border-bottom: 9px solid transparent !important;
border-right: 9px solid;
margin-top: -8px;
}
/* Some CSS magic for the awesome animations - feel free to make your own custom animations and reference it in your Tooltipster settings! */
.tooltipster-fade {
opacity: 0;
-webkit-transition-property: opacity;
-moz-transition-property: opacity;
-o-transition-property: opacity;
-ms-transition-property: opacity;
transition-property: opacity;
}
.tooltipster-fade-show {
opacity: 1;
}
.tooltipster-grow {
-webkit-transform: scale(0,0);
-moz-transform: scale(0,0);
-o-transform: scale(0,0);
-ms-transform: scale(0,0);
transform: scale(0,0);
-webkit-transition-property: -webkit-transform;
-moz-transition-property: -moz-transform;
-o-transition-property: -o-transform;
-ms-transition-property: -ms-transform;
transition-property: transform;
-webkit-backface-visibility: hidden;
}
.tooltipster-grow-show {
-webkit-transform: scale(1,1);
-moz-transform: scale(1,1);
-o-transform: scale(1,1);
-ms-transform: scale(1,1);
transform: scale(1,1);
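/* The duplicated -webkit-transition-timing-function declaration below is most likely a fallback: older WebKit builds reject cubic-bezier() values outside the 0-1 range, so the clamped curve is declared first and the overshooting curve second. */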
-webkit-transition-timing-function: cubic-bezier(0.175, 0.885, 0.320, 1);
-webkit-transition-timing-function: cubic-bezier(0.175, 0.885, 0.320, 1.15);
-moz-transition-timing-function: cubic-bezier(0.175, 0.885, 0.320, 1.15);
-ms-transition-timing-function: cubic-bezier(0.175, 0.885, 0.320, 1.15);
-o-transition-timing-function: cubic-bezier(0.175, 0.885, 0.320, 1.15);
transition-timing-function: cubic-bezier(0.175, 0.885, 0.320, 1.15);
}
.tooltipster-swing {
opacity: 0;
-webkit-transform: rotateZ(4deg);
-moz-transform: rotateZ(4deg);
-o-transform: rotateZ(4deg);
-ms-transform: rotateZ(4deg);
transform: rotateZ(4deg);
-webkit-transition-property: -webkit-transform, opacity;
-moz-transition-property: -moz-transform, opacity;
-o-transition-property: -o-transform, opacity;
-ms-transition-property: -ms-transform, opacity;
transition-property: transform, opacity;
}
.tooltipster-swing-show {
opacity: 1;
-webkit-transform: rotateZ(0deg);
-moz-transform: rotateZ(0deg);
-o-transform: rotateZ(0deg);
-ms-transform: rotateZ(0deg);
transform: rotateZ(0deg);
-webkit-transition-timing-function: cubic-bezier(0.230, 0.635, 0.495, 1);
-webkit-transition-timing-function: cubic-bezier(0.230, 0.635, 0.495, 2.4);
-moz-transition-timing-function: cubic-bezier(0.230, 0.635, 0.495, 2.4);
-ms-transition-timing-function: cubic-bezier(0.230, 0.635, 0.495, 2.4);
-o-transition-timing-function: cubic-bezier(0.230, 0.635, 0.495, 2.4);
transition-timing-function: cubic-bezier(0.230, 0.635, 0.495, 2.4);
}
.tooltipster-fall {
top: 0;
-webkit-transition-property: top;
-moz-transition-property: top;
-o-transition-property: top;
-ms-transition-property: top;
transition-property: top;
-webkit-transition-timing-function: cubic-bezier(0.175, 0.885, 0.320, 1);
-webkit-transition-timing-function: cubic-bezier(0.175, 0.885, 0.320, 1.15);
-moz-transition-timing-function: cubic-bezier(0.175, 0.885, 0.320, 1.15);
-ms-transition-timing-function: cubic-bezier(0.175, 0.885, 0.320, 1.15);
-o-transition-timing-function: cubic-bezier(0.175, 0.885, 0.320, 1.15);
transition-timing-function: cubic-bezier(0.175, 0.885, 0.320, 1.15);
}
.tooltipster-fall-show {
}
.tooltipster-fall.tooltipster-dying {
-webkit-transition-property: all;
-moz-transition-property: all;
-o-transition-property: all;
-ms-transition-property: all;
transition-property: all;
top: 0px !important;
opacity: 0;
}
.tooltipster-slide {
left: -40px;
-webkit-transition-property: left;
-moz-transition-property: left;
-o-transition-property: left;
-ms-transition-property: left;
transition-property: left;
-webkit-transition-timing-function: cubic-bezier(0.175, 0.885, 0.320, 1);
-webkit-transition-timing-function: cubic-bezier(0.175, 0.885, 0.320, 1.15);
-moz-transition-timing-function: cubic-bezier(0.175, 0.885, 0.320, 1.15);
-ms-transition-timing-function: cubic-bezier(0.175, 0.885, 0.320, 1.15);
-o-transition-timing-function: cubic-bezier(0.175, 0.885, 0.320, 1.15);
transition-timing-function: cubic-bezier(0.175, 0.885, 0.320, 1.15);
}
.tooltipster-slide.tooltipster-slide-show {
}
.tooltipster-slide.tooltipster-dying {
-webkit-transition-property: all;
-moz-transition-property: all;
-o-transition-property: all;
-ms-transition-property: all;
transition-property: all;
left: 0px !important;
opacity: 0;
}
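/* A minimal sketch of a custom animation, following the same pattern as the shipped ones above (a hidden base class plus a "-show" class). The class name "tooltipster-drop" is purely illustrative and would have to be referenced from the plugin's animation setting, which is not shown here. */
.tooltipster-drop {
opacity: 0;
-webkit-transform: translateY(-10px);
-moz-transform: translateY(-10px);
-o-transform: translateY(-10px);
-ms-transform: translateY(-10px);
transform: translateY(-10px);
-webkit-transition-property: -webkit-transform, opacity;
-moz-transition-property: -moz-transform, opacity;
-o-transition-property: -o-transform, opacity;
-ms-transition-property: -ms-transform, opacity;
transition-property: transform, opacity;
}
.tooltipster-drop-show {
opacity: 1;
-webkit-transform: translateY(0);
-moz-transform: translateY(0);
-o-transform: translateY(0);
-ms-transform: translateY(0);
transform: translateY(0);
}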
/* CSS transition for when contenting is changing in a tooltip that is still open. The only properties that will NOT transition are: width, height, top, and left */
.tooltipster-content-changing {
opacity: 0.5;
-webkit-transform: scale(1.1, 1.1);
-moz-transform: scale(1.1, 1.1);
-o-transform: scale(1.1, 1.1);
-ms-transform: scale(1.1, 1.1);
transform: scale(1.1, 1.1);
}
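/* A minimal sketch of a duplicated theme, as invited at the top of this file. The class name "tooltipster-dark" is purely illustrative and would need to be referenced from the plugin's theme option; only the colors differ from .tooltipster-default. */
.tooltipster-dark {
max-width: 250px !important;
border: 1px solid #000;
background: #2a2a2a;
border-radius: 3px;
color: #fff;
}
.tooltipster-dark .tooltipster-content {
font-family: Arial, sans-serif;
font-size: 13px;
line-height: 16px;
padding: 12px 19px 20px 19px;
overflow: hidden;
}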
| davidHuanghw/david_blog | wp-content/themes/truemag/css/tooltipster.css | CSS | gpl-2.0 | 9,332 |
#pragma once
#include <windows.h>
#include <vector>
#include "KeyboardEvent.h"
#include "log.h"
#include <thread>
class KeyboardHelper
{
public:
KeyboardHelper()
{
}
~KeyboardHelper()
{
}
// Synthesises a key press: queues key-down events for every modifier recorded in
// the event, a down/up pair for the main key, then key-up events for the modifiers,
// and dispatches the whole batch with a single SendInput call.
void PressAndReleaseKey(const KeyboardEvent& ke)
{
// Queue one zero-initialised keyboard INPUT (flags 0 = key down, KEYEVENTF_KEYUP = key up).
auto queueKey = [this](WORD vk, DWORD flags)
{
INPUT inp = {};
inp.type = INPUT_KEYBOARD;
inp.ki.wVk = vk;
inp.ki.dwFlags = flags;
_vinp.push_back(inp);
};
// Press modifiers.
if (ke._bRShift) queueKey(VK_RSHIFT, 0);
if (ke._bLShift) queueKey(VK_LSHIFT, 0);
if (ke._bRCtrl) queueKey(VK_RCONTROL, 0);
if (ke._bLCtrl) queueKey(VK_LCONTROL, 0);
if (ke._bRWin) queueKey(VK_RWIN, 0);
if (ke._bLWin) queueKey(VK_LWIN, 0);
if (ke._bRAlt) queueKey(VK_RMENU, 0);
if (ke._bLAlt) queueKey(VK_LMENU, 0);
// Tap the main key (down, then up).
if (ke._vkCode)
{
queueKey(static_cast<WORD>(ke._vkCode), 0);
queueKey(static_cast<WORD>(ke._vkCode), KEYEVENTF_KEYUP);
}
// Release modifiers.
if (ke._bRShift) queueKey(VK_RSHIFT, KEYEVENTF_KEYUP);
if (ke._bLShift) queueKey(VK_LSHIFT, KEYEVENTF_KEYUP);
if (ke._bRCtrl) queueKey(VK_RCONTROL, KEYEVENTF_KEYUP);
if (ke._bLCtrl) queueKey(VK_LCONTROL, KEYEVENTF_KEYUP);
if (ke._bRWin) queueKey(VK_RWIN, KEYEVENTF_KEYUP);
if (ke._bLWin) queueKey(VK_LWIN, KEYEVENTF_KEYUP);
if (ke._bRAlt) queueKey(VK_RMENU, KEYEVENTF_KEYUP);
if (ke._bLAlt) queueKey(VK_LMENU, KEYEVENTF_KEYUP);
if (!_vinp.empty())
SendInput(static_cast<UINT>(_vinp.size()), _vinp.data(), sizeof(INPUT));
_vinp.clear();
}
// Releases the key described by the event, plus any of its modifiers that are still
// physically held down, batched into a single SendInput call. Modifier-only events
// carry _vkCode == 0, so only the modifier key-ups are emitted for them.
void UnpressKey(KeyboardEvent& ke)
{
// Queue one zero-initialised key-up INPUT for the given virtual-key code.
auto queueKeyUp = [this](WORD vk)
{
INPUT inp = {};
inp.type = INPUT_KEYBOARD;
inp.ki.wVk = vk;
inp.ki.dwFlags = KEYEVENTF_KEYUP;
_vinp.push_back(inp);
};
if (ke._vkCode != VK_RSHIFT &&
ke._vkCode != VK_LSHIFT &&
ke._vkCode != VK_LCONTROL &&
ke._vkCode != VK_RCONTROL &&
ke._vkCode != VK_LMENU &&
ke._vkCode != VK_RMENU &&
ke._vkCode != VK_LWIN &&
ke._vkCode != VK_RWIN &&
ke._vkCode != 0)
queueKeyUp(static_cast<WORD>(ke._vkCode));
if (ke._bRShift && KeyPressed(VK_RSHIFT)) queueKeyUp(VK_RSHIFT);
if (ke._bLShift && KeyPressed(VK_LSHIFT)) queueKeyUp(VK_LSHIFT);
if (ke._bRCtrl && KeyPressed(VK_RCONTROL)) queueKeyUp(VK_RCONTROL);
if (ke._bLCtrl && KeyPressed(VK_LCONTROL)) queueKeyUp(VK_LCONTROL);
if (ke._bRWin && KeyPressed(VK_RWIN)) queueKeyUp(VK_RWIN);
if (ke._bLWin && KeyPressed(VK_LWIN)) queueKeyUp(VK_LWIN);
if (ke._bRAlt && KeyPressed(VK_RMENU)) queueKeyUp(VK_RMENU);
if (ke._bLAlt && KeyPressed(VK_LMENU)) queueKeyUp(VK_LMENU);
if (_vinp.empty() == false)
{
if (SendInput(static_cast<UINT>(_vinp.size()), _vinp.data(), sizeof(INPUT)) == 0)
LOG("releasing key 0x" << std::hex << ke._vkCode << " failed with " << GetLastError());
_vinp.clear();
LOG("releasing key 0x" << std::hex << ke._vkCode);
return;
}
LOG("nothing to release in UnpressKey");
}
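// Toggles the Scroll Lock key with a synthetic press so the keyboard LED reflects the requested state.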
void FlashScrollLockLed(bool bEnableLed)
{
bool ScrollLock = KeyToggled(VK_SCROLL);
if (bEnableLed && !ScrollLock)
{
LOG("scroll on");
PressAndReleaseKey(KeyboardEvent(VK_SCROLL));
}
if (!bEnableLed && ScrollLock)
{
LOG("scroll off");
PressAndReleaseKey(KeyboardEvent(VK_SCROLL));
}
}
bool KeyPressed(DWORD vkCode)
{
return ((GetAsyncKeyState(vkCode) & 0x8000) != 0);
}
bool KeyToggled(DWORD vkCode)
{
return ((GetKeyState(vkCode) & 0x0001));
}
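// Builds a KeyboardEvent from a virtual-key code, capturing the current state of all
// modifier keys. If the code itself is a modifier, only the matching flag is set and
// _vkCode is cleared, so the event describes a modifier-only press.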
KeyboardEvent FillKeybardEvent(DWORD vkCode)
{
KeyboardEvent ke;
ke._vkCode = vkCode;
ke._bRAlt = KeyPressed(VK_RMENU);
ke._bLAlt = KeyPressed(VK_LMENU);
ke._bRCtrl = KeyPressed(VK_RCONTROL);
ke._bLCtrl = KeyPressed(VK_LCONTROL);
ke._bRShift = KeyPressed(VK_RSHIFT);
ke._bLShift = KeyPressed(VK_LSHIFT);
ke._bRWin = KeyPressed(VK_RWIN);
ke._bLWin = KeyPressed(VK_LWIN);
if (vkCode == VK_RMENU)
{
ke._bRAlt = true;
ke._vkCode = 0;
}
if (vkCode == VK_LMENU)
{
ke._bLAlt = true;
ke._vkCode = 0;
}
if (vkCode == VK_RCONTROL)
{
ke._bRCtrl = true;
ke._vkCode = 0;
}
if (vkCode == VK_LCONTROL)
{
ke._bLCtrl = true;
ke._vkCode = 0;
}
if (vkCode == VK_RSHIFT)
{
ke._bRShift = true;
ke._vkCode = 0;
}
if (vkCode == VK_LSHIFT)
{
ke._bLShift = true;
ke._vkCode = 0;
}
if (vkCode == VK_RWIN)
{
ke._bRWin = true;
ke._vkCode = 0;
}
if (vkCode == VK_LWIN)
{
ke._bLWin = true;
ke._vkCode = 0;
}
return ke;
}
private:
std::vector<INPUT> _vinp;
};
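// Usage sketch (illustrative only; it relies on the KeyboardEvent fields used above):
//   KeyboardHelper kb;
//   KeyboardEvent ke = kb.FillKeybardEvent('A'); // snapshot current modifiers + the 'A' key
//   kb.PressAndReleaseKey(ke);                   // replay the combination via SendInput
//   kb.UnpressKey(ke);                           // make sure nothing is left held down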
| nextfullstorm/uswitcher | KeyboardHelper.h | C | gpl-2.0 | 8,262 |
<?php
/*
Plugin Name: WPBakery Visual Composer (shared on http://urokwp.ru)
Plugin URI: http://vc.wpbakery.com
Description: Drag and drop page builder for WordPress. Take full control over your WordPress site, build any layout you can imagine – no programming knowledge required.
Version: 4.7
Author: Michael M - WPBakery.com
Author URI: http://wpbakery.com
*/
// don't load directly
if ( ! defined( 'ABSPATH' ) ) {
die( '-1' );
}
/**
* Current visual composer version
*/
if ( ! defined( 'WPB_VC_VERSION' ) ) {
/**
*
*/
define( 'WPB_VC_VERSION', '4.7' );
}
/**
* VC starts here. The manager sets the mode, adds the required WP hooks and loads the required objects.
*
* The manager controls, and provides access to, all modules and classes of VC.
*
* @package WPBakeryVisualComposer
* @since 4.2
*/
class Vc_Manager {
/**
* Set status/mode for VC.
*
* The mode depends on what functionality VC needs for the current page/part of WP.
*
* Possible values:
* none - current status is unknown, default mode;
* page - simple wp page;
* admin_page - wp dashboard;
* admin_frontend_editor - visual composer front end editor version;
* admin_settings_page - settings page
* page_editable - inline version for iframe in front end editor;
*
* @since 4.2
* @var string
*/
private $mode = 'none';
/**
* Enables Visual Composer to act as the theme plugin.
*
* @since 4.2
* @var bool
*/
private $is_as_theme = false;
/**
* Whether VC is a network plugin or not.
* @since 4.2
* @var bool
*/
private $is_network_plugin = null;
/**
* List of paths.
*
* @since 4.2
* @var array
*/
private $paths = array();
/**
* Default post types for which the Visual Composer meta box settings are activated.
* @since 4.2
* @var array
*/
private $editor_default_post_types = array( 'page' ); // TODO: move to Vc settings
/**
* Directory name in the theme folder where the composer should search for alternative shortcode templates.
* @since 4.2
* @var string
*/
private $custom_user_templates_dir = false;
/**
* Set updater mode
* @since 4.2
* @var bool
*/
private $disable_updater = false;
/**
* Modules and objects instances list
* @since 4.2
* @var array
*/
private $factory = array();
/**
* File name for components manifest file.
*
* @since 4.4
* @var string
*/
private $components_manifest = 'components.json';
/**
* @var string
*/
private $plugin_name = 'js_composer/js_composer.php';
/**
* Core singleton class
* @var self - pattern realization
*/
private static $_instance;
/**
* Constructor loads API functions, defines paths and adds required wp actions
*
* @since 4.2
*/
private function __construct() {
$dir = dirname( __FILE__ );
/**
* Define path settings for visual composer.
*
* APP_ROOT - plugin directory.
* WP_ROOT - WP application root directory.
* APP_DIR - plugin directory name.
* CONFIG_DIR - configuration directory.
* ASSETS_DIR - asset directory full path.
* ASSETS_DIR_NAME - directory name for assets. Used when building asset URLs.
* CORE_DIR - classes directory for core vc files.
* HELPERS_DIR - directory with helpers functions files.
* SHORTCODES_DIR - shortcodes classes.
* SETTINGS_DIR - main dashboard settings classes.
* TEMPLATES_DIR - directory where all html templates are stored.
* EDITORS_DIR - editors for the post contents
* PARAMS_DIR - complex params for shortcodes editor form.
* UPDATERS_DIR - automatic notifications and updating classes.
*/
$this->setPaths( array(
'APP_ROOT' => $dir,
'WP_ROOT' => preg_replace( '/\/$/', '', ABSPATH ), // strip the trailing slash
'APP_DIR' => basename( $dir ),
'CONFIG_DIR' => $dir . '/config',
'ASSETS_DIR' => $dir . '/assets',
'ASSETS_DIR_NAME' => 'assets',
'AUTOLOAD_DIR' => $dir . '/include/autoload',
'CORE_DIR' => $dir . '/include/classes/core',
'HELPERS_DIR' => $dir . '/include/helpers',
'SHORTCODES_DIR' => $dir . '/include/classes/shortcodes',
'SETTINGS_DIR' => $dir . '/include/classes/settings',
'TEMPLATES_DIR' => $dir . '/include/templates',
'EDITORS_DIR' => $dir . '/include/classes/editors',
'PARAMS_DIR' => $dir . '/include/params',
'UPDATERS_DIR' => $dir . '/include/classes/updaters',
'VENDORS_DIR' => $dir . '/include/classes/vendors',
) );
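/*
* Example (sketch): other parts of the plugin resolve files against these names
* via $this->path() or the vc_path_dir() helper, e.g.
*   $this->path( 'CONFIG_DIR', 'map.php' )
* (the file name here is purely illustrative).
*/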
// Load API
require_once $this->path( 'HELPERS_DIR', 'helpers_factory.php' );
require_once $this->path( 'HELPERS_DIR', 'helpers.php' );
require_once $this->path( 'CORE_DIR', 'interfaces.php' );
require_once $this->path( 'CORE_DIR', 'class-vc-sort.php' ); // used by wpb-map
require_once $this->path( 'CORE_DIR', 'class-wpb-map.php' );
require_once $this->path( 'HELPERS_DIR', 'helpers_api.php' );
require_once $this->path( 'HELPERS_DIR', 'filters.php' );
require_once $this->path( 'PARAMS_DIR', 'params.php' );
require_once $this->path( 'AUTOLOAD_DIR', 'vc-shortcode-autoloader.php' );
require_once $this->path( 'SHORTCODES_DIR', 'shortcodes.php' );
// Add hooks
add_action( 'plugins_loaded', array( &$this, 'pluginsLoaded' ), 9 );
add_action( 'init', array( &$this, 'init' ), 9 );
register_activation_hook( __FILE__, array( $this, 'activationHook' ) );
}
/**
* Get the instance of Vc_Manager.
*
* @return self
*/
public static function getInstance() {
if ( ! ( self::$_instance instanceof self ) ) {
self::$_instance = new self();
}
return self::$_instance;
}
/**
* Cloning disabled
*/
private function __clone() {
}
/**
* Serialization disabled
*/
private function __sleep() {
}
/**
* De-serialization disabled
*/
private function __wakeup() {
}
/**
* Callback function WP plugin_loaded action hook. Loads locale
*
* @since 4.2
* @access public
*/
public function pluginsLoaded() {
// Setup locale
do_action( 'vc_plugins_loaded' );
load_plugin_textdomain( 'js_composer', false, $this->path( 'APP_DIR', 'locale' ) );
}
/**
* Callback function for WP init action hook. Sets Vc mode and loads required objects.
*
* @since 4.2
* @access public
*
* @return void
*/
public function init() {
do_action( 'vc_before_init' );
$this->setMode();
do_action( 'vc_after_set_mode' );
/**
* Set version of VC if required.
*/
$this->setVersion();
// Load required
! vc_is_updater_disabled() && vc_updater()->init();
/**
* Init default hooks and options to load.
*/
$this->vc()->init();
/**
* if is admin and not front end editor.
*/
is_admin() && ! vc_is_frontend_editor() && $this->asAdmin();
/**
* if frontend editor is enabled init editor.
*/
vc_enabled_frontend() && vc_frontend_editor()->init();
// Load Automapper
vc_automapper()->addAjaxActions();
do_action( 'vc_before_mapping' ); // VC ACTION
// Include default shortcodes.
$this->mapper()->init(); //execute all required
do_action( 'vc_after_mapping' ); // VC ACTION
// Load && Map shortcodes from Automapper.
vc_automapper()->map();
do_action( 'vc_after_init' );
}
/**
* Allows adding hooks to the activation process.
* @since 4.5
*/
public function activationHook() {
do_action( 'vc_activation_hook' );
}
/**
* Load required components to enable useful functionality.
*
* @access public
* @since 4.4
*/
public function loadComponents() {
$manifest_file = apply_filters(
'vc_autoload_components_manifest_file',
vc_path_dir( 'AUTOLOAD_DIR', $this->components_manifest )
);
if ( is_file( $manifest_file ) ) {
ob_start();
require_once $manifest_file;
$data = ob_get_clean();
if ( $data ) {
$components = (array) json_decode( $data );
$components = apply_filters(
'vc_autoload_components_list',
$components
);
foreach ( $components as $component => $description ) {
$component_path = vc_path_dir( 'AUTOLOAD_DIR', $component );
if ( strpos( $component_path, '*' ) === false && is_file( $component_path ) ) {
require $component_path;
} else {
$components_paths = glob( $component_path );
if ( is_array( $components_paths ) && ! empty( $components_paths ) ) {
foreach ( $components_paths as $path ) {
if ( strpos( $path, '*' ) === false && is_file( $path ) ) {
require $path;
}
}
}
}
}
}
}
}
/**
* Load required logic for operating in Wp Admin dashboard.
*
* @since 4.2
* @access protected
*
* @return void
*/
protected function asAdmin() {
// License management and activation/deactivation methods.
vc_license()->addAjaxHooks();
// Settings page. Adds menu page in admin panel.
// vc_settings()->addMenuPageHooks();
// Load backend editor hooks
vc_backend_editor()->addHooksSettings();
// If auto updater is enabled initialize updating notifications service.
}
/**
* Set VC mode.
*
* Mode depends on which page is requested by client from server and request parameters like vc_action.
*
* @since 4.2
* @access protected
*
* @return void
*/
protected function setMode() {
/**
* TODO: Create another system (When ajax rebuild).
* Use vc_action param to define mode.
* 1. admin_frontend_editor - set by editor or request param
* 2. admin_backend_editor - set by editor or request param
* 3. admin_frontend_editor_ajax - set by request param
* 4. admin_backend_editor_ajax - set by request param
* 5. admin_updater - by vc_action
* 6. page_editable - by vc_action
*/
if ( is_admin() ) {
if ( vc_action() === 'vc_inline' ) {
$this->mode = 'admin_frontend_editor';
} elseif ( vc_action() === 'vc_upgrade' || ( vc_get_param( 'action' ) === 'update-selected' && vc_get_param( 'plugins' ) === $this->pluginName() ) ) {
$this->mode = 'admin_updater';
} elseif ( isset( $_GET['page'] ) && $_GET['page'] === $this->settings()->page() ) {
$this->mode = 'admin_settings_page';
} else {
$this->mode = 'admin_page';
}
} else {
if ( isset( $_GET['vc_editable'] ) && 'true' === $_GET['vc_editable'] ) {
$this->mode = 'page_editable';
} else {
$this->mode = 'page';
}
}
}
/**
* Sets version of the VC in DB as option `vc_version`
*
* @since 4.3.2
* @access protected
*
* @return void
*/
protected function setVersion() {
$version = get_option( 'vc_version' );
if ( ! is_string( $version ) || version_compare( $version, WPB_VC_VERSION ) !== 0 ) {
add_action( 'vc_after_init', array( vc_settings(), 'rebuild' ) );
update_option( 'vc_version', WPB_VC_VERSION );
}
}
/**
* Get current mode for VC.
*
* @since 4.2
* @access public
*
* @return string
*/
public function mode() {
return $this->mode;
}
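/*
* Example (sketch): code hooked after init can branch on the current mode, e.g.
*
*   global $vc_manager;
*   if ( 'admin_frontend_editor' === $vc_manager->mode() ) {
*       // enqueue assets only for the front end editor (illustrative).
*   }
*/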
/**
* Setter for paths
*
* @since 4.2
* @access protected
*
* @param $paths
*/
protected function setPaths( $paths ) {
$this->paths = $paths;
}
/**
* Gets absolute path for file/directory in filesystem.
*
* @since 4.2
* @access public
*
* @param $name - name of path dir
* @param string $file - file name or directory inside path
*
* @return string
*/
public function path( $name, $file = '' ) {
$path = $this->paths[ $name ] . ( strlen( $file ) > 0 ? '/' . preg_replace( '/^\//', '', $file ) : '' );
return apply_filters( 'vc_path_filter', $path );
}
/**
* Set default post types. VC editors are enabled for these post types.
*
* @param array $type - list of default post types.
*/
public function setEditorDefaultPostTypes( array $type ) {
$this->editor_default_post_types = $type;
}
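/*
* Example (sketch, e.g. from a theme's functions.php once the global manager exists):
*   global $vc_manager;
*   $vc_manager->setEditorDefaultPostTypes( array( 'page', 'post', 'portfolio' ) );
* ('portfolio' is an illustrative custom post type.)
*/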
/**
* Returns list of default post types where user can use visual composer editors.
*
* @since 4.2
* @access public
*
* @return array
*/
public function editorDefaultPostTypes() {
return $this->editor_default_post_types;
}
/**
* Get post types where VC editors are enabled.
*
* @since 4.2
* @access public
*
* @return array
*/
public function editorPostTypes() {
if ( ! isset( $this->editor_post_types ) ) {
$pt_array = vc_settings()->get( 'content_types' );
$this->editor_post_types = $pt_array ? $pt_array : $this->editorDefaultPostTypes();
}
return $this->editor_post_types;
}
/**
* Set post types where VC editors are enabled.
*
* @since 4.4
* @access public
*
* @param array $post_types
*/
public function setEditorPostTypes( array $post_types ) {
$this->editor_post_types = ! empty( $post_types ) ? $post_types : $this->editorDefaultPostTypes();
vc_settings()->set( 'content_types', $this->editor_post_types );
}
/**
* Setter for as-theme-plugin status for VC.
*
* @since 4.2
* @access public
*
* @param bool $value
*/
public function setIsAsTheme( $value = true ) {
$this->is_as_theme = (boolean) $value;
}
/**
* Get as-theme-plugin status
*
* The as-theme-plugin status is used by theme developers. It disables certain plugin settings.
*
* @since 4.2
* @access public
*
* @return bool
*/
public function isAsTheme() {
return (boolean) $this->is_as_theme;
}
/**
* Setter for the network-plugin status (WordPress multisite).
*
* @since 4.2
* @access public
*
* @param bool $value
*/
public function setAsNetworkPlugin( $value = true ) {
$this->is_network_plugin = $value;
}
/**
* Gets whether VC is activated as a network plugin.
*
* @since 4.2
* @access public
*
* @return bool
*/
public function isNetworkPlugin() {
if ( is_null( $this->is_network_plugin ) ) {
// Check is VC as network plugin
if ( is_multisite() && ( is_plugin_active_for_network( 'js_composer/js_composer.php' )
|| is_network_only_plugin( 'js_composer/js_composer.php' ) )
) {
$this->setAsNetworkPlugin( true );
}
}
return $this->is_network_plugin ? true : false;
}
/**
* Setter for the disable-updater flag.
* @since 4.2
*
* @param bool $value
*/
public function disableUpdater( $value = true ) {
$this->disable_updater = $value;
}
/**
* Gets whether the VC updater is disabled.
*
* @since 4.2
*
* @return bool
*/
public function isUpdaterDisabled() {
return $this->disable_updater;
}
/**
* Set user directory name.
*
* This is the directory name VC should scan for custom shortcode templates.
*
* @since 4.2
* @access public
*
* @param $dir - path to shortcodes templates inside developers theme
*/
public function setCustomUserShortcodesTemplateDir( $dir ) {
$dir = preg_replace( '/\/$/', '', $dir ); // strip the trailing slash
$this->custom_user_templates_dir = $dir;
}
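/*
* Example (sketch, from a theme): point VC at a theme directory of shortcode templates.
* get_template_directory() is a WordPress core function; the directory name is illustrative.
*   global $vc_manager;
*   $vc_manager->setCustomUserShortcodesTemplateDir( get_template_directory() . '/vc_templates' );
*/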
/**
* Get the default directory where shortcode templates are placed.
*
* @since 4.2
* @access public
*
* @return string - path to default shortcodes
*/
public function getDefaultShortcodesTemplatesDir() {
return vc_path_dir( 'TEMPLATES_DIR', 'shortcodes' );
}
/**
*
* Get shortcodes template dir.
*
* @since 4.2
* @access public
*
* @param $template
*
* @return string
*/
public function getShortcodesTemplateDir( $template ) {
return $this->custom_user_templates_dir !== false ? $this->custom_user_templates_dir . '/' . $template : locate_template( 'vc_templates/' . $template );
}
/**
* Directory name where template files will be stored.
*
* @since 4.2
* @access public
*
* @return string
*/
public function uploadDir() {
return 'js_composer';
}
/**
* Getter for VC_Mapper instance
*
* @since 4.2
* @access public
*
* @return Vc_Mapper
*/
public function mapper() {
if ( ! isset( $this->factory['mapper'] ) ) {
require_once $this->path( 'CORE_DIR', 'class-vc-mapper.php' );
$this->factory['mapper'] = new Vc_Mapper();
}
return $this->factory['mapper'];
}
/**
* Visual Composer.
*
* @since 4.2
* @access public
*
* @return Vc_Base
*/
public function vc() {
if ( ! isset( $this->factory['vc'] ) ) {
do_action( 'vc_before_init_vc' );
require_once $this->path( 'CORE_DIR', 'class-vc-base.php' );
$vc = new Vc_Base();
// DI Set template editor. @deprecated and will be removed
require_once $this->path( 'EDITORS_DIR', 'popups/class-vc-templates-editor.php' );
$vc->setTemplatesEditor( new Vc_Templates_Editor() );
// DI Set template new modal editor.
require_once $this->path( 'EDITORS_DIR', 'popups/class-vc-templates-panel-editor.php' );
$vc->setTemplatesPanelEditor( new Vc_Templates_Panel_Editor() );
// DI Set edit form
require_once $this->path( 'EDITORS_DIR', 'popups/class-vc-shortcode-edit-form.php' );
$vc->setEditForm( new Vc_Shortcode_Edit_Form() );
// DI for third-party plugins manager.
require_once $this->path( 'VENDORS_DIR', 'class-vc-vendors-manager.php' );
$vc->setVendorsManager( new Vc_Vendors_Manager() );
$this->factory['vc'] = $vc;
do_action( 'vc_after_init_vc' );
}
return $this->factory['vc'];
}
/**
* Vc options.
*
* @since 4.2
* @access public
*
* @return Vc_Settings
*/
public function settings() {
if ( ! isset( $this->factory['settings'] ) ) {
do_action( 'vc_before_init_settings' );
require_once $this->path( 'SETTINGS_DIR', 'class-vc-settings.php' );
$this->factory['settings'] = new Vc_Settings();
do_action( 'vc_after_init_settings' );
}
return $this->factory['settings'];
}
/**
* Vc license settings.
*
* @since 4.2
* @access public
*
* @return Vc_License
*/
public function license() {
if ( ! isset( $this->factory['license'] ) ) {
do_action( 'vc_before_init_license' );
require_once $this->path( 'SETTINGS_DIR', 'class-vc-license.php' );
$this->factory['license'] = new Vc_License();
do_action( 'vc_after_init_license' );
}
return $this->factory['license'];
}
/**
* Get frontend VC editor.
*
* @since 4.2
* @access public
*
* @return Vc_Frontend_Editor
*/
public function frontendEditor() {
if ( ! isset( $this->factory['frontend_editor'] ) ) {
do_action( 'vc_before_init_frontend_editor' );
require_once $this->path( 'EDITORS_DIR', 'class-vc-frontend-editor.php' );
$this->factory['frontend_editor'] = new Vc_Frontend_Editor();
}
return $this->factory['frontend_editor'];
}
/**
* Get backend VC editor. Edit page version.
*
* @since 4.2
*
* @return Vc_Backend_Editor
*/
public function backendEditor() {
if ( ! isset( $this->factory['backend_editor'] ) ) {
do_action( 'vc_before_init_backend_editor' );
require_once $this->path( 'EDITORS_DIR', 'class-vc-backend-editor.php' );
$this->factory['backend_editor'] = new Vc_Backend_Editor();
}
return $this->factory['backend_editor'];
}
/**
* Gets automapper instance.
*
* @since 4.2
* @access public
*
* @return Vc_Automapper
*/
public function automapper() {
if ( ! isset( $this->factory['automapper'] ) ) {
do_action( 'vc_before_init_automapper' );
require_once $this->path( 'SETTINGS_DIR', 'class-vc-automapper.php' );
$this->factory['automapper'] = new Vc_Automapper();
do_action( 'vc_after_init_automapper' );
}
return $this->factory['automapper'];
}
/**
* Gets updater instance.
* @since 4.2
*
* @return Vc_Updater
*/
public function updater() {
if ( ! isset( $this->factory['updater'] ) ) {
do_action( 'vc_before_init_updater' );
require_once $this->path( 'UPDATERS_DIR', 'class-vc-updater.php' );
$updater = new Vc_Updater();
require_once vc_path_dir( 'UPDATERS_DIR', 'class-vc-updating-manager.php' );
$updater->setUpdateManager( new Vc_Updating_Manager( WPB_VC_VERSION, $updater->versionUrl(), vc_plugin_name() ) );
$this->factory['updater'] = $updater;
do_action( 'vc_after_init_updater' );
}
return $this->factory['updater'];
}
/**
* Getter for plugin name variable.
* @since 4.2
*
* @return string
*/
public function pluginName() {
return $this->plugin_name;
}
/**
* Get absolute url for VC asset file.
*
* Assets are css, javascript, less files and images.
*
* @since 4.2
*
* @param $file
*
* @return string
*/
public function assetUrl( $file ) {
return preg_replace( '/\s/', '%20', plugins_url( $this->path( 'ASSETS_DIR_NAME', $file ), __FILE__ ) );
}
}
/**
* Main Visual composer manager.
* @var Vc_Manager $vc_manager - instance of composer management.
* @since 4.2
*/
global $vc_manager;
if ( ! $vc_manager ) {
$vc_manager = Vc_Manager::getInstance();
// Load components
$vc_manager->loadComponents();
}
| VitaAprel/mynotebook | wp-content/plugins/js_composer4-7/js_composer.php | PHP | gpl-2.0 | 20,440 |
#ifndef DOCK_SOURCE_BUTTON_H // :)
#define DOCK_SOURCE_BUTTON_H
#include "tintedbuttonbase.h"
class SourceButton : public TintedButtonBase
{
Q_OBJECT
public:
SourceButton(const QModelIndex& index, QWidget* parent = NULL);
virtual ~SourceButton();
protected:
virtual void indexChanged();
};
#endif
| Elv13/Umbrello-ng2 | umbrello/dock/pages/controller/delegateWidgets/sourcebutton.h | C | gpl-2.0 | 333 |
#include "Config/Config.h"
#include "config.h"
#include "../Player.h"
#include "PlayerbotAI.h"
#include "PlayerbotMgr.h"
#include "WorldPacket.h"
#include "../Chat.h"
#include "../ObjectMgr.h"
#include "../GossipDef.h"
#include "../Chat.h"
#include "../Language.h"
#include "../WaypointMovementGenerator.h"
#include "../Guild.h"
#include "../LootMgr.h"
class LoginQueryHolder;
class CharacterHandler;
Config botConfig;
void PlayerbotMgr::SetInitialWorldSettings()
{
//Get playerbot configuration file
if (!botConfig.SetSource(_PLAYERBOT_CONFIG))
sLog.outError("Playerbot: Unable to open configuration file. Database will be unaccessible. Configuration values will use default.");
else
sLog.outString("Playerbot: Using configuration file %s",_PLAYERBOT_CONFIG);
//Check playerbot config file version
if (botConfig.GetIntDefault("ConfVersion", 0) != PLAYERBOT_CONF_VERSION)
sLog.outError("Playerbot: Configuration file version doesn't match expected version. Some config variables may be wrong or missing.");
}
PlayerbotMgr::PlayerbotMgr(Player* const master) : m_master(master)
{
// load config variables
m_confMaxNumBots = botConfig.GetIntDefault("PlayerbotAI.MaxNumBots", 9);
m_confDebugWhisper = botConfig.GetBoolDefault("PlayerbotAI.DebugWhisper", false);
m_confFollowDistance[0] = botConfig.GetFloatDefault("PlayerbotAI.FollowDistanceMin", 0.5f);
m_confFollowDistance[1] = botConfig.GetFloatDefault("PlayerbotAI.FollowDistanceMax", 1.0f);
m_confCollectCombat = botConfig.GetBoolDefault("PlayerbotAI.Collect.Combat", true);
m_confCollectQuest = botConfig.GetBoolDefault("PlayerbotAI.Collect.Quest", true);
m_confCollectProfession = botConfig.GetBoolDefault("PlayerbotAI.Collect.Profession", true);
m_confCollectLoot = botConfig.GetBoolDefault("PlayerbotAI.Collect.Loot", true);
m_confCollectSkin = botConfig.GetBoolDefault("PlayerbotAI.Collect.Skin", true);
m_confCollectObjects = botConfig.GetBoolDefault("PlayerbotAI.Collect.Objects", true);
m_confCollectDistanceMax = botConfig.GetIntDefault("PlayerbotAI.Collect.DistanceMax", 50);
gConfigSellLevelDiff = botConfig.GetIntDefault("PlayerbotAI.SellAll.LevelDiff", 10);
if (m_confCollectDistanceMax > 100)
{
sLog.outError("Playerbot: PlayerbotAI.Collect.DistanceMax higher than allowed. Using 100");
m_confCollectDistanceMax = 100;
}
m_confCollectDistance = botConfig.GetIntDefault("PlayerbotAI.Collect.Distance", 25);
if (m_confCollectDistance > m_confCollectDistanceMax)
{
sLog.outError("Playerbot: PlayerbotAI.Collect.Distance higher than PlayerbotAI.Collect.DistanceMax. Using DistanceMax value");
m_confCollectDistance = m_confCollectDistanceMax;
}
}
PlayerbotMgr::~PlayerbotMgr()
{
LogoutAllBots();
}
void PlayerbotMgr::UpdateAI(const uint32 p_time) {}
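// Mirrors selected packets that the master sends to the server onto his bots, so they
// react to petitions, taxi rides, quest pick-up/turn-in, loot rolls, gossip and vendor
// interactions alongside him.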
void PlayerbotMgr::HandleMasterIncomingPacket(const WorldPacket& packet)
{
switch (packet.GetOpcode())
{
case CMSG_OFFER_PETITION:
{
WorldPacket p(packet);
p.rpos(0); // reset reader
ObjectGuid petitionGuid;
ObjectGuid playerGuid;
uint32 junk;
p >> junk; // this is not petition type!
p >> petitionGuid; // petition guid
p >> playerGuid; // player guid
Player* player = ObjectAccessor::FindPlayer(playerGuid);
if (!player)
return;
uint32 petitionLowGuid = petitionGuid.GetCounter();
QueryResult *result = CharacterDatabase.PQuery("SELECT * FROM petition_sign WHERE playerguid = '%u' AND petitionguid = '%u'", player->GetGUIDLow(), petitionLowGuid);
if(result)
{
ChatHandler(m_master).PSendSysMessage("%s has already signed the petition",player->GetName());
delete result;
return;
}
CharacterDatabase.PExecute("INSERT INTO petition_sign (ownerguid,petitionguid, playerguid, player_account) VALUES ('%u', '%u', '%u','%u')",
GetMaster()->GetGUIDLow(), petitionLowGuid, player->GetGUIDLow(), GetMaster()->GetSession()->GetAccountId());
p.Initialize(SMSG_PETITION_SIGN_RESULTS, (8+8+4));
p << ObjectGuid(petitionGuid);
p << ObjectGuid(playerGuid);
p << uint32(PETITION_SIGN_OK);
// close at signer side
GetMaster()->GetSession()->SendPacket(&p);
return;
}
case CMSG_ACTIVATETAXI:
{
WorldPacket p(packet);
p.rpos(0); // reset reader
ObjectGuid guid;
std::vector<uint32> nodes;
nodes.resize(2);
uint8 delay = 9;
p >> guid >> nodes[0] >> nodes[1];
// DEBUG_LOG ("[PlayerbotMgr]: HandleMasterIncomingPacket - Received CMSG_ACTIVATETAXI from %d to %d", nodes[0], nodes[1]);
for (PlayerBotMap::const_iterator it = GetPlayerBotsBegin(); it != GetPlayerBotsEnd(); ++it)
{
delay = delay + 3;
Player* const bot = it->second;
if (!bot)
return;
Group* group = bot->GetGroup();
if (!group)
continue;
Unit *target = ObjectAccessor::GetUnit(*bot, guid);
bot->GetPlayerbotAI()->SetIgnoreUpdateTime(delay);
bot->GetMotionMaster()->Clear(true);
bot->GetMotionMaster()->MoveFollow(target, INTERACTION_DISTANCE, bot->GetOrientation());
bot->GetPlayerbotAI()->GetTaxi(guid, nodes);
}
return;
}
case CMSG_ACTIVATETAXIEXPRESS:
{
WorldPacket p(packet);
p.rpos(0); // reset reader
ObjectGuid guid;
uint32 node_count;
uint8 delay = 9;
p >> guid;
p.read_skip<uint32>();
p >> node_count;
std::vector<uint32> nodes;
for (uint32 i = 0; i < node_count; ++i)
{
uint32 node;
p >> node;
nodes.push_back(node);
}
if (nodes.empty())
return;
// DEBUG_LOG ("[PlayerbotMgr]: HandleMasterIncomingPacket - Received CMSG_ACTIVATETAXIEXPRESS from %d to %d", nodes.front(), nodes.back());
for (PlayerBotMap::const_iterator it = GetPlayerBotsBegin(); it != GetPlayerBotsEnd(); ++it)
{
delay = delay + 3;
Player* const bot = it->second;
if (!bot)
return;
Group* group = bot->GetGroup();
if (!group)
continue;
Unit *target = ObjectAccessor::GetUnit(*bot, guid);
bot->GetPlayerbotAI()->SetIgnoreUpdateTime(delay);
bot->GetMotionMaster()->Clear(true);
bot->GetMotionMaster()->MoveFollow(target, INTERACTION_DISTANCE, bot->GetOrientation());
bot->GetPlayerbotAI()->GetTaxi(guid, nodes);
}
return;
}
case CMSG_MOVE_SPLINE_DONE:
{
// DEBUG_LOG ("[PlayerbotMgr]: HandleMasterIncomingPacket - Received CMSG_MOVE_SPLINE_DONE");
WorldPacket p(packet);
p.rpos(0); // reset reader
ObjectGuid guid; // used only for proper packet read
MovementInfo movementInfo; // used only for proper packet read
p >> guid.ReadAsPacked();
p >> movementInfo;
p >> Unused<uint32>(); // unk
for (PlayerBotMap::const_iterator it = GetPlayerBotsBegin(); it != GetPlayerBotsEnd(); ++it)
{
Player* const bot = it->second;
if (!bot)
return;
// in taxi flight this packet is received in 2 cases:
// 1) end of the taxi path in a far (multi-node) flight
// 2) switch from one map to another on a multi-map taxi path
// we only need to process (1)
uint32 curDest = bot->m_taxi.GetTaxiDestination();
if (!curDest)
return;
TaxiNodesEntry const* curDestNode = sTaxiNodesStore.LookupEntry(curDest);
// far teleport case
if (curDestNode && curDestNode->map_id != bot->GetMapId())
{
if (bot->GetMotionMaster()->GetCurrentMovementGeneratorType() == FLIGHT_MOTION_TYPE)
{
// short preparations to continue flight
FlightPathMovementGenerator* flight = (FlightPathMovementGenerator *) (bot->GetMotionMaster()->top());
flight->Interrupt(*bot); // will reset at map landing
flight->SetCurrentNodeAfterTeleport();
TaxiPathNodeEntry const& node = flight->GetPath()[flight->GetCurrentNode()];
flight->SkipCurrentNode();
bot->TeleportTo(curDestNode->map_id, node.x, node.y, node.z, bot->GetOrientation());
}
return;
}
uint32 destinationnode = bot->m_taxi.NextTaxiDestination();
if (destinationnode > 0) // if more destinations to go
{
// current source node for next destination
uint32 sourcenode = bot->m_taxi.GetTaxiSource();
// Add intermediate hubs to the taximask in taxicheat mode (otherwise a player who later disables taxicheat would not have the return flight path)
if (bot->isTaxiCheater())
if (bot->m_taxi.SetTaximaskNode(sourcenode))
{
WorldPacket data(SMSG_NEW_TAXI_PATH, 0);
bot->GetSession()->SendPacket(&data);
}
// DEBUG_LOG ("[PlayerbotMgr]: HandleMasterIncomingPacket - Received CMSG_MOVE_SPLINE_DONE Taxi has to go from %u to %u", sourcenode, destinationnode);
uint32 mountDisplayId = sObjectMgr.GetTaxiMountDisplayId(sourcenode, bot->GetTeam());
uint32 path, cost;
sObjectMgr.GetTaxiPath(sourcenode, destinationnode, path, cost);
if (path && mountDisplayId)
bot->GetSession()->SendDoFlight(mountDisplayId, path, 1); // skip start fly node
else
bot->m_taxi.ClearTaxiDestinations(); // clear problematic path and next
}
else
{
/* std::ostringstream out;
out << "Destination reached" << bot->GetName();
ChatHandler ch(m_master);
ch.SendSysMessage(out.str().c_str()); */
bot->m_taxi.ClearTaxiDestinations(); // Destination reached, clear source node
}
}
return;
}
// if master is logging out, log out all bots
case CMSG_LOGOUT_REQUEST:
{
LogoutAllBots();
return;
}
// If master inspects one of his bots, give the master useful info in chat window
// such as inventory that can be equipped
case CMSG_INSPECT:
{
WorldPacket p(packet);
p.rpos(0); // reset reader
ObjectGuid guid;
p >> guid;
Player* const bot = GetPlayerBot(guid);
if (bot) bot->GetPlayerbotAI()->SendNotEquipList(*bot);
return;
}
// handle emotes from the master
//case CMSG_EMOTE:
case CMSG_TEXT_EMOTE:
{
WorldPacket p(packet);
p.rpos(0); // reset reader
uint32 emoteNum;
p >> emoteNum;
/* std::ostringstream out;
out << "emote is: " << emoteNum;
ChatHandler ch(m_master);
ch.SendSysMessage(out.str().c_str()); */
switch (emoteNum)
{
case TEXTEMOTE_BOW:
{
// Buff anyone who bows before me. Useful for players not in bot's group
// How do I get correct target???
//Player* const pPlayer = GetPlayerBot(m_master->GetSelection());
//if (pPlayer->GetPlayerbotAI()->GetClassAI())
// pPlayer->GetPlayerbotAI()->GetClassAI()->BuffPlayer(pPlayer);
return;
}
/*
case TEXTEMOTE_BONK:
{
Player* const pPlayer = GetPlayerBot(m_master->GetSelection());
if (!pPlayer || !pPlayer->GetPlayerbotAI())
return;
PlayerbotAI* const pBot = pPlayer->GetPlayerbotAI();
ChatHandler ch(m_master);
{
std::ostringstream out;
out << "CurrentTime: " << CurrentTime()
<< " m_ignoreAIUpdatesUntilTime: " << pBot->m_ignoreAIUpdatesUntilTime;
ch.SendSysMessage(out.str().c_str());
}
{
std::ostringstream out;
out << "m_TimeDoneEating: " << pBot->m_TimeDoneEating
<< " m_TimeDoneDrinking: " << pBot->m_TimeDoneDrinking;
ch.SendSysMessage(out.str().c_str());
}
{
std::ostringstream out;
out << "m_CurrentlyCastingSpellId: " << pBot->m_CurrentlyCastingSpellId;
ch.SendSysMessage(out.str().c_str());
}
{
std::ostringstream out;
out << "IsBeingTeleported() " << pBot->GetPlayer()->IsBeingTeleported();
ch.SendSysMessage(out.str().c_str());
}
{
std::ostringstream out;
bool tradeActive = (pBot->GetPlayer()->GetTrader()) ? true : false;
out << "tradeActive: " << tradeActive;
ch.SendSysMessage(out.str().c_str());
}
{
std::ostringstream out;
out << "IsCharmed() " << pBot->getPlayer()->isCharmed();
ch.SendSysMessage(out.str().c_str());
}
return;
}
*/
case TEXTEMOTE_EAT:
case TEXTEMOTE_DRINK:
{
for (PlayerBotMap::const_iterator it = GetPlayerBotsBegin(); it != GetPlayerBotsEnd(); ++it)
{
Player* const bot = it->second;
bot->GetPlayerbotAI()->Feast();
}
return;
}
// emote to attack selected target
case TEXTEMOTE_POINT:
{
ObjectGuid attackOnGuid = m_master->GetSelectionGuid();
if (!attackOnGuid)
return;
Unit* thingToAttack = ObjectAccessor::GetUnit(*m_master, attackOnGuid);
if (!thingToAttack) return;
Player *bot = 0;
for (PlayerBotMap::iterator itr = m_playerBots.begin(); itr != m_playerBots.end(); ++itr)
{
bot = itr->second;
if (!bot->IsFriendlyTo(thingToAttack) && !bot->IsWithinLOSInMap(thingToAttack))
{
bot->GetPlayerbotAI()->DoTeleport(*m_master);
if (bot->IsWithinLOSInMap(thingToAttack))
bot->GetPlayerbotAI()->GetCombatTarget(thingToAttack);
}
else if (!bot->IsFriendlyTo(thingToAttack) && bot->IsWithinLOSInMap(thingToAttack))
bot->GetPlayerbotAI()->GetCombatTarget(thingToAttack);
}
return;
}
// emote to stay
case TEXTEMOTE_STAND:
{
Player* const bot = GetPlayerBot(m_master->GetSelectionGuid());
if (bot)
bot->GetPlayerbotAI()->SetMovementOrder(PlayerbotAI::MOVEMENT_STAY);
else
for (PlayerBotMap::const_iterator it = GetPlayerBotsBegin(); it != GetPlayerBotsEnd(); ++it)
{
Player* const bot = it->second;
bot->GetPlayerbotAI()->SetMovementOrder(PlayerbotAI::MOVEMENT_STAY);
}
return;
}
// 324 is the followme emote (not defined in enum)
// if master has bot selected then only bot follows, else all bots follow
case 324:
case TEXTEMOTE_WAVE:
{
Player* const bot = GetPlayerBot(m_master->GetSelectionGuid());
if (bot)
bot->GetPlayerbotAI()->SetMovementOrder(PlayerbotAI::MOVEMENT_FOLLOW, m_master);
else
for (PlayerBotMap::const_iterator it = GetPlayerBotsBegin(); it != GetPlayerBotsEnd(); ++it)
{
Player* const bot = it->second;
bot->GetPlayerbotAI()->SetMovementOrder(PlayerbotAI::MOVEMENT_FOLLOW, m_master);
}
return;
}
}
return;
} /* EMOTE ends here */
case CMSG_GAMEOBJ_USE: // not sure if we still need this one
{
WorldPacket p(packet);
p.rpos(0); // reset reader
ObjectGuid objGUID;
p >> objGUID;
GameObject *obj = m_master->GetMap()->GetGameObject(objGUID);
if (!obj)
return;
for (PlayerBotMap::const_iterator it = GetPlayerBotsBegin(); it != GetPlayerBotsEnd(); ++it)
{
Player* const bot = it->second;
bot->GetPlayerbotAI()->FollowAutoReset();
if (obj->GetGoType() == GAMEOBJECT_TYPE_QUESTGIVER)
bot->GetPlayerbotAI()->TurnInQuests(obj);
// add other go types here, i.e.:
// GAMEOBJECT_TYPE_CHEST - loot quest items of chest
}
}
break;
case CMSG_QUESTGIVER_HELLO:
{
WorldPacket p(packet);
p.rpos(0); // reset reader
ObjectGuid npcGUID;
p >> npcGUID;
WorldObject* pNpc = m_master->GetMap()->GetWorldObject(npcGUID);
if (!pNpc)
return;
// for all master's bots
for (PlayerBotMap::const_iterator it = GetPlayerBotsBegin(); it != GetPlayerBotsEnd(); ++it)
{
Player* const bot = it->second;
bot->GetPlayerbotAI()->FollowAutoReset();
bot->GetPlayerbotAI()->TurnInQuests(pNpc);
}
return;
}
// if master accepts a quest, bots should also try to accept quest
case CMSG_QUESTGIVER_ACCEPT_QUEST:
{
WorldPacket p(packet);
p.rpos(0); // reset reader
ObjectGuid guid;
uint32 quest;
// uint32 unk1;
p >> guid >> quest; // >> unk1;
// DEBUG_LOG ("[PlayerbotMgr]: HandleMasterIncomingPacket - Received CMSG_QUESTGIVER_ACCEPT_QUEST npc = %s, quest = %u, unk1 = %u", guid.GetString().c_str(), quest, unk1);
Quest const* qInfo = sObjectMgr.GetQuestTemplate(quest);
if (qInfo)
for (PlayerBotMap::const_iterator it = GetPlayerBotsBegin(); it != GetPlayerBotsEnd(); ++it)
{
Player* const bot = it->second;
bot->GetPlayerbotAI()->FollowAutoReset();
if (bot->GetQuestStatus(quest) == QUEST_STATUS_COMPLETE)
bot->GetPlayerbotAI()->TellMaster("I already completed that quest.");
else if (!bot->CanTakeQuest(qInfo, false))
{
if (!bot->SatisfyQuestStatus(qInfo, false))
bot->GetPlayerbotAI()->TellMaster("I already have that quest.");
else
bot->GetPlayerbotAI()->TellMaster("I can't take that quest.");
}
else if (!bot->SatisfyQuestLog(false))
bot->GetPlayerbotAI()->TellMaster("My quest log is full.");
else if (!bot->CanAddQuest(qInfo, false))
bot->GetPlayerbotAI()->TellMaster("I can't take that quest because it requires that I take items, but my bags are full!");
else
{
p.rpos(0); // reset reader
bot->GetSession()->HandleQuestgiverAcceptQuestOpcode(p);
bot->GetPlayerbotAI()->TellMaster("Got the quest.");
// build needed items if quest contains any
for (int i = 0; i < QUEST_ITEM_OBJECTIVES_COUNT; i++)
if (qInfo->ReqItemCount[i]>0)
{
bot->GetPlayerbotAI()->SetQuestNeedItems();
break;
}
// build needed creatures if quest contains any
for (int i = 0; i < QUEST_OBJECTIVES_COUNT; i++)
if (qInfo->ReqCreatureOrGOCount[i] > 0)
{
bot->GetPlayerbotAI()->SetQuestNeedCreatures();
break;
}
}
}
return;
}
case CMSG_AREATRIGGER:
{
WorldPacket p(packet);
for (PlayerBotMap::const_iterator it = GetPlayerBotsBegin(); it != GetPlayerBotsEnd(); ++it)
{
Player* const bot = it->second;
if (!bot)
continue;
if (bot->IsWithinDistInMap(GetMaster(), 50))
{
p.rpos(0); // reset reader
bot->GetSession()->HandleAreaTriggerOpcode(p);
}
}
return;
}
case CMSG_QUESTGIVER_COMPLETE_QUEST:
{
WorldPacket p(packet);
p.rpos(0); // reset reader
uint32 quest;
ObjectGuid npcGUID;
p >> npcGUID >> quest;
// DEBUG_LOG ("[PlayerbotMgr]: HandleMasterIncomingPacket - Received CMSG_QUESTGIVER_COMPLETE_QUEST npc = %s, quest = %u", npcGUID.GetString().c_str(), quest);
WorldObject* pNpc = m_master->GetMap()->GetWorldObject(npcGUID);
if (!pNpc)
return;
// for all master's bots
for (PlayerBotMap::const_iterator it = GetPlayerBotsBegin(); it != GetPlayerBotsEnd(); ++it)
{
Player* const bot = it->second;
bot->GetPlayerbotAI()->FollowAutoReset();
bot->GetPlayerbotAI()->TurnInQuests(pNpc);
}
return;
}
case CMSG_LOOT_ROLL:
{
WorldPacket p(packet); //WorldPacket packet for CMSG_LOOT_ROLL, (8+4+1)
ObjectGuid Guid;
uint32 itemSlot;
uint8 rollType;
p.rpos(0); //reset packet pointer
p >> Guid; //guid of the lootable target
p >> itemSlot; //loot index
p >> rollType; //need,greed or pass on roll
for (PlayerBotMap::const_iterator it = GetPlayerBotsBegin(); it != GetPlayerBotsEnd(); ++it)
{
uint32 choice = 0;
Player* const bot = it->second;
if (!bot)
return;
Group* group = bot->GetGroup();
if (!group)
return;
// check that the bot did not already vote
if (rollType >= ROLL_NOT_EMITED_YET)
return;
Loot* loot = sLootMgr.GetLoot(bot, Guid);
if (!loot)
{
sLog.outError("LootMgr::PlayerVote> Error cannot get loot object info!");
return;
}
LootItem* lootItem = loot->GetLootItemInSlot(itemSlot);
if (!lootItem)
return;
ItemPrototype const *pProto = lootItem->itemProto;
if (!pProto)
return;
if (bot->GetPlayerbotAI()->CanStore())
{
if (bot->CanUseItem(pProto) == EQUIP_ERR_OK && bot->GetPlayerbotAI()->IsItemUseful(lootItem->itemId))
choice = 1; // Need
else if (bot->HasSkill(SKILL_ENCHANTING))
choice = 3; // Disenchant
else
choice = 2; // Greed
}
else
choice = 0; // Pass
sLootMgr.PlayerVote(bot, Guid, itemSlot, RollVote(choice));
}
return;
}
// Handle GOSSIP activate actions, prior to GOSSIP select menu actions
case CMSG_GOSSIP_HELLO:
{
// DEBUG_LOG ("[PlayerbotMgr]: HandleMasterIncomingPacket - Received CMSG_GOSSIP_HELLO");
WorldPacket p(packet); //WorldPacket packet for CMSG_GOSSIP_HELLO, (8)
ObjectGuid guid;
p.rpos(0); //reset packet pointer
p >> guid;
for (PlayerBotMap::const_iterator it = GetPlayerBotsBegin(); it != GetPlayerBotsEnd(); ++it)
{
Player* const bot = it->second;
if (!bot)
continue;
bot->GetPlayerbotAI()->FollowAutoReset();
Creature *pCreature = bot->GetNPCIfCanInteractWith(guid, UNIT_NPC_FLAG_NONE);
if (!pCreature)
{
DEBUG_LOG ("[PlayerbotMgr]: HandleMasterIncomingPacket - Received CMSG_GOSSIP_HELLO %s not found or you can't interact with him.", guid.GetString().c_str());
continue;
}
GossipMenuItemsMapBounds pMenuItemBounds = sObjectMgr.GetGossipMenuItemsMapBounds(pCreature->GetCreatureInfo()->GossipMenuId);
for (GossipMenuItemsMap::const_iterator itr = pMenuItemBounds.first; itr != pMenuItemBounds.second; ++itr)
{
uint32 npcflags = pCreature->GetUInt32Value(UNIT_NPC_FLAGS);
if (!(itr->second.npc_option_npcflag & npcflags))
continue;
switch (itr->second.option_id)
{
case GOSSIP_OPTION_TAXIVENDOR:
{
// bot->GetPlayerbotAI()->TellMaster("PlayerbotMgr:GOSSIP_OPTION_TAXIVENDOR");
bot->GetSession()->SendLearnNewTaxiNode(pCreature);
break;
}
case GOSSIP_OPTION_QUESTGIVER:
{
// bot->GetPlayerbotAI()->TellMaster("PlayerbotMgr:GOSSIP_OPTION_QUESTGIVER");
bot->GetPlayerbotAI()->TurnInQuests(pCreature);
break;
}
case GOSSIP_OPTION_VENDOR:
{
// bot->GetPlayerbotAI()->TellMaster("PlayerbotMgr:GOSSIP_OPTION_VENDOR");
if (!botConfig.GetBoolDefault("PlayerbotAI.SellGarbage", true))
continue;
// changed the SellGarbage() function to support ch.SendSysMessage()
bot->GetPlayerbotAI()->SellGarbage(*bot);
break;
}
case GOSSIP_OPTION_STABLEPET:
{
// bot->GetPlayerbotAI()->TellMaster("PlayerbotMgr:GOSSIP_OPTION_STABLEPET");
break;
}
case GOSSIP_OPTION_AUCTIONEER:
{
// bot->GetPlayerbotAI()->TellMaster("PlayerbotMgr:GOSSIP_OPTION_AUCTIONEER");
break;
}
case GOSSIP_OPTION_BANKER:
{
// bot->GetPlayerbotAI()->TellMaster("PlayerbotMgr:GOSSIP_OPTION_BANKER");
break;
}
case GOSSIP_OPTION_INNKEEPER:
{
// bot->GetPlayerbotAI()->TellMaster("PlayerbotMgr:GOSSIP_OPTION_INNKEEPER");
break;
}
}
}
}
return;
}
case CMSG_SPIRIT_HEALER_ACTIVATE:
{
// DEBUG_LOG ("[PlayerbotMgr]: HandleMasterIncomingPacket - Received CMSG_SPIRIT_HEALER_ACTIVATE SpiritHealer is resurrecting the Player %s",m_master->GetName());
for (PlayerBotMap::iterator itr = m_playerBots.begin(); itr != m_playerBots.end(); ++itr)
{
Player* const bot = itr->second;
Group *grp = bot->GetGroup();
if (grp)
grp->RemoveMember(bot->GetObjectGuid(), 1);
}
return;
}
case CMSG_LIST_INVENTORY:
{
if (!botConfig.GetBoolDefault("PlayerbotAI.SellGarbage", true))
return;
WorldPacket p(packet);
p.rpos(0); // reset reader
ObjectGuid npcGUID;
p >> npcGUID;
Object* const pNpc = (WorldObject *) m_master->GetObjectByTypeMask(npcGUID, TYPEMASK_CREATURE_OR_GAMEOBJECT);
if (!pNpc)
return;
// for all master's bots
for(PlayerBotMap::const_iterator it = GetPlayerBotsBegin(); it != GetPlayerBotsEnd(); ++it)
{
Player* const bot = it->second;
if (!bot->IsInMap(static_cast<WorldObject *>(pNpc)))
{
bot->GetPlayerbotAI()->TellMaster("I'm too far away to sell items!");
continue;
}
else
{
// changed the SellGarbage() function to support ch.SendSysMessage()
bot->GetPlayerbotAI()->FollowAutoReset();
bot->GetPlayerbotAI()->SellGarbage(*bot);
}
}
return;
}
/*
case CMSG_NAME_QUERY:
case MSG_MOVE_START_FORWARD:
case MSG_MOVE_STOP:
case MSG_MOVE_SET_FACING:
case MSG_MOVE_START_STRAFE_LEFT:
case MSG_MOVE_START_STRAFE_RIGHT:
case MSG_MOVE_STOP_STRAFE:
case MSG_MOVE_START_BACKWARD:
case MSG_MOVE_HEARTBEAT:
case CMSG_STANDSTATECHANGE:
case CMSG_QUERY_TIME:
case CMSG_CREATURE_QUERY:
case CMSG_GAMEOBJECT_QUERY:
case MSG_MOVE_JUMP:
case MSG_MOVE_FALL_LAND:
return;
default:
{
const char* oc = LookupOpcodeName(packet.GetOpcode());
// ChatHandler ch(m_master);
// ch.SendSysMessage(oc);
std::ostringstream out;
out << "masterin: " << oc;
sLog.outError(out.str().c_str());
}
*/
}
}
void PlayerbotMgr::HandleMasterOutgoingPacket(const WorldPacket& packet)
{
/*
switch (packet.GetOpcode())
{
// maybe our bots should only start looting after the master loots?
//case SMSG_LOOT_RELEASE_RESPONSE: {}
case SMSG_NAME_QUERY_RESPONSE:
case SMSG_MONSTER_MOVE:
case SMSG_COMPRESSED_UPDATE_OBJECT:
case SMSG_DESTROY_OBJECT:
case SMSG_UPDATE_OBJECT:
case SMSG_STANDSTATE_UPDATE:
case MSG_MOVE_HEARTBEAT:
case SMSG_QUERY_TIME_RESPONSE:
case SMSG_AURA_UPDATE_ALL:
case SMSG_CREATURE_QUERY_RESPONSE:
case SMSG_GAMEOBJECT_QUERY_RESPONSE:
return;
default:
{
const char* oc = LookupOpcodeName(packet.GetOpcode());
std::ostringstream out;
out << "masterout: " << oc;
sLog.outError(out.str().c_str());
}
}
*/
}
void PlayerbotMgr::LogoutAllBots()
{
while (true)
{
PlayerBotMap::const_iterator itr = GetPlayerBotsBegin();
if (itr == GetPlayerBotsEnd()) break;
Player* bot = itr->second;
LogoutPlayerBot(bot->GetObjectGuid());
}
RemoveAllBotsFromGroup();
}
void PlayerbotMgr::Stay()
{
for (PlayerBotMap::const_iterator itr = GetPlayerBotsBegin(); itr != GetPlayerBotsEnd(); ++itr)
{
Player* bot = itr->second;
bot->GetMotionMaster()->Clear();
}
}
// Playerbot mod: logs out a Playerbot.
void PlayerbotMgr::LogoutPlayerBot(ObjectGuid guid)
{
Player* bot = GetPlayerBot(guid);
if (bot)
{
WorldSession * botWorldSessionPtr = bot->GetSession();
m_playerBots.erase(guid); // deletes bot player ptr inside this WorldSession PlayerBotMap
botWorldSessionPtr->LogoutPlayer(true); // this will delete the bot Player object and PlayerbotAI object
delete botWorldSessionPtr; // finally delete the bot's WorldSession
}
}
// Playerbot mod: Gets a player bot Player object for this WorldSession master
Player* PlayerbotMgr::GetPlayerBot(ObjectGuid playerGuid) const
{
PlayerBotMap::const_iterator it = m_playerBots.find(playerGuid);
return (it == m_playerBots.end()) ? 0 : it->second;
}
void PlayerbotMgr::OnBotLogin(Player * const bot)
{
// give the bot some AI, object is owned by the player class
PlayerbotAI* ai = new PlayerbotAI(this, bot);
bot->SetPlayerbotAI(ai);
// tell the world session that they now manage this new bot
m_playerBots[bot->GetObjectGuid()] = bot;
// if bot is in a group and master is not in group then
// have bot leave their group
if (bot->GetGroup() &&
(m_master->GetGroup() == nullptr ||
m_master->GetGroup()->IsMember(bot->GetObjectGuid()) == false))
bot->RemoveFromGroup();
// sometimes master can lose leadership, pass leadership to master check
const ObjectGuid masterGuid = m_master->GetObjectGuid();
if (m_master->GetGroup() &&
!m_master->GetGroup()->IsLeader(masterGuid))
{
// But only do so if one of the master's bots is leader
for (PlayerBotMap::const_iterator itr = GetPlayerBotsBegin(); itr != GetPlayerBotsEnd(); ++itr)
{
Player* const groupBot = itr->second; // renamed to avoid shadowing the 'bot' parameter
if (m_master->GetGroup()->IsLeader(groupBot->GetObjectGuid()))
{
m_master->GetGroup()->ChangeLeader(masterGuid);
break;
}
}
}
}
void PlayerbotMgr::RemoveAllBotsFromGroup()
{
for (PlayerBotMap::const_iterator it = GetPlayerBotsBegin(); m_master->GetGroup() && it != GetPlayerBotsEnd(); ++it)
{
Player* const bot = it->second;
if (bot->IsInSameGroupWith(m_master))
m_master->GetGroup()->RemoveMember(bot->GetObjectGuid(), 0);
}
}
void Creature::LoadBotMenu(Player *pPlayer)
{
if (pPlayer->GetPlayerbotAI()) return;
ObjectGuid guid = pPlayer->GetObjectGuid();
uint32 accountId = sObjectMgr.GetPlayerAccountIdByGUID(guid);
QueryResult *result = CharacterDatabase.PQuery("SELECT guid, name FROM characters WHERE account='%d'", accountId);
if (!result) // guard against an empty result before entering the do/while loop
return;
do
{
Field *fields = result->Fetch();
ObjectGuid guidlo = ObjectGuid(fields[0].GetUInt64());
std::string name = fields[1].GetString();
std::string word = "";
if ((guid == ObjectGuid()) || (guid == guidlo))
{
//not found or himself
}
else
{
// if(sConfig.GetBoolDefault("PlayerbotAI.DisableBots", false)) return;
// create the manager if it doesn't already exist
if (!pPlayer->GetPlayerbotMgr())
pPlayer->SetPlayerbotMgr(new PlayerbotMgr(pPlayer));
if (pPlayer->GetPlayerbotMgr()->GetPlayerBot(guidlo) == nullptr) // add (if not already in game)
{
word += "Recruit ";
word += name;
word += " as a Bot.";
pPlayer->PlayerTalkClass->GetGossipMenu().AddMenuItem((uint8) 9, word, guidlo, GOSSIP_OPTION_BOT, word, false);
}
else if (pPlayer->GetPlayerbotMgr()->GetPlayerBot(guidlo) != nullptr) // remove (if in game)
{
word += "Dismiss ";
word += name;
word += " from duty.";
pPlayer->PlayerTalkClass->GetGossipMenu().AddMenuItem((uint8) 0, word, guidlo, GOSSIP_OPTION_BOT, word, false);
}
}
}
while (result->NextRow());
delete result;
}
void Player::skill(std::list<uint32>& m_spellsToLearn)
{
for (SkillStatusMap::const_iterator itr = mSkillStatus.begin(); itr != mSkillStatus.end(); ++itr)
{
if (itr->second.uState == SKILL_DELETED)
continue;
uint32 pskill = itr->first;
m_spellsToLearn.push_back(pskill);
}
}
void Player::chompAndTrim(std::string& str)
{
// strip trailing CR/LF, spaces and quotes first...
while (str.length() > 0)
{
char lc = str[str.length() - 1];
if (lc == '\r' || lc == '\n' || lc == ' ' || lc == '"' || lc == '\'')
str = str.substr(0, str.length() - 1);
else
break;
}
// ...then strip leading spaces and quotes (previously nested inside the loop above,
// so strings with only leading junk were never trimmed)
while (str.length() > 0)
{
char lc = str[0];
if (lc == ' ' || lc == '"' || lc == '\'')
str = str.substr(1, str.length() - 1);
else
break;
}
}
bool Player::getNextQuestId(const std::string& pString, unsigned int& pStartPos, unsigned int& pId)
{
bool result = false;
unsigned int i;
for (i = pStartPos; i < pString.size(); ++i)
{
if (pString[i] == ',')
break;
}
if (i > pStartPos)
{
std::string idString = pString.substr(pStartPos, i - pStartPos);
pStartPos = i + 1;
chompAndTrim(idString);
pId = atoi(idString.c_str());
result = true;
}
return(result);
}
bool Player::requiredQuests(const char* pQuestIdString)
{
if (pQuestIdString != nullptr)
{
unsigned int pos = 0;
unsigned int id;
std::string confString(pQuestIdString);
chompAndTrim(confString);
while (getNextQuestId(confString, pos, id))
{
QuestStatus status = GetQuestStatus(id);
if (status == QUEST_STATUS_COMPLETE)
return true;
}
}
return false;
}
void Player::UpdateMail()
{
// save money,items and mail to prevent cheating
CharacterDatabase.BeginTransaction();
this->SaveGoldToDB();
this->SaveInventoryAndGoldToDB();
this->_SaveMail();
CharacterDatabase.CommitTransaction();
}
bool ChatHandler::HandlePlayerbotCommand(char* args)
{
if (!m_session)
{
PSendSysMessage("|cffff0000You may only add bots from an active session");
SetSentErrorMessage(true);
return false;
}
// the session is checked before it is dereferenced; the disabled-bots check only applies to regular players
if (!(m_session->GetSecurity() > SEC_PLAYER))
if (botConfig.GetBoolDefault("PlayerbotAI.DisableBots", false))
{
PSendSysMessage("|cffff0000Playerbot system is currently disabled!");
SetSentErrorMessage(true);
return false;
}
if (!*args)
{
PSendSysMessage("|cffff0000usage: add PLAYERNAME or remove PLAYERNAME");
SetSentErrorMessage(true);
return false;
}
char *cmd = strtok ((char *) args, " ");
char *charname = strtok (nullptr, " ");
if (!cmd || !charname)
{
PSendSysMessage("|cffff0000usage: add PLAYERNAME or remove PLAYERNAME");
SetSentErrorMessage(true);
return false;
}
std::string cmdStr = cmd;
std::string charnameStr = charname;
if (!normalizePlayerName(charnameStr))
return false;
ObjectGuid guid = sObjectMgr.GetPlayerGuidByName(charnameStr.c_str());
if (guid == ObjectGuid() || (guid == m_session->GetPlayer()->GetObjectGuid()))
{
SendSysMessage(LANG_PLAYER_NOT_FOUND);
SetSentErrorMessage(true);
return false;
}
uint32 accountId = sObjectMgr.GetPlayerAccountIdByGUID(guid);
if (accountId != m_session->GetAccountId())
{
PSendSysMessage("|cffff0000You may only add bots from the same account.");
SetSentErrorMessage(true);
return false;
}
// create the playerbot manager if it doesn't already exist
PlayerbotMgr* mgr = m_session->GetPlayer()->GetPlayerbotMgr();
if (!mgr)
{
mgr = new PlayerbotMgr(m_session->GetPlayer());
m_session->GetPlayer()->SetPlayerbotMgr(mgr);
}
QueryResult *resultchar = CharacterDatabase.PQuery("SELECT COUNT(*) FROM characters WHERE online = '1' AND account = '%u'", m_session->GetAccountId());
if (resultchar)
{
Field *fields = resultchar->Fetch();
int acctcharcount = fields[0].GetUInt32();
int maxnum = botConfig.GetIntDefault("PlayerbotAI.MaxNumBots", 9);
if (!(m_session->GetSecurity() > SEC_PLAYER))
if (acctcharcount > maxnum && (cmdStr == "add" || cmdStr == "login"))
{
PSendSysMessage("|cffff0000You cannot summon any more bots. (Current max: |cffffffff%u)", maxnum);
SetSentErrorMessage(true);
delete resultchar;
return false;
}
delete resultchar;
}
QueryResult *resultlvl = CharacterDatabase.PQuery("SELECT level,name FROM characters WHERE guid = '%u'", guid.GetCounter());
if (resultlvl)
{
Field *fields = resultlvl->Fetch();
int charlvl = fields[0].GetUInt32();
int maxlvl = botConfig.GetIntDefault("PlayerbotAI.RestrictBotLevel", 80);
if (!(m_session->GetSecurity() > SEC_PLAYER))
if (charlvl > maxlvl)
{
PSendSysMessage("|cffff0000You cannot summon |cffffffff[%s]|cffff0000, its level is too high. (Current max: lvl |cffffffff%u)", fields[1].GetString(), maxlvl);
SetSentErrorMessage(true);
delete resultlvl;
return false;
}
delete resultlvl;
}
// end of gmconfig patch
if (cmdStr == "add" || cmdStr == "login")
{
if (mgr->GetPlayerBot(guid))
{
PSendSysMessage("Bot already exists in world.");
SetSentErrorMessage(true);
return false;
}
CharacterDatabase.DirectPExecute("UPDATE characters SET online = 1 WHERE guid = '%u'", guid.GetCounter());
mgr->LoginPlayerBot(guid);
PSendSysMessage("Bot added successfully.");
}
else if (cmdStr == "remove" || cmdStr == "logout")
{
if (!mgr->GetPlayerBot(guid))
{
PSendSysMessage("|cffff0000Bot cannot be removed because it does not exist in the world.");
SetSentErrorMessage(true);
return false;
}
CharacterDatabase.DirectPExecute("UPDATE characters SET online = 0 WHERE guid = '%u'", guid.GetCounter());
mgr->LogoutPlayerBot(guid);
PSendSysMessage("Bot removed successfully.");
}
return true;
}
| cala/portaltbc | src/game/playerbot/PlayerbotMgr.cpp | C++ | gpl-2.0 | 44,869 |
<?php
/**
* Template Name: Portfolio > Trinity
*
* @package WordPress
*/
get_header(); ?>
<section class="port-fullWidth" id="top">
<div class="wrapper port-ts">
<h1>Trinity Brooks</h1>
<div class="divide"></div>
<p>A collection of images reflecting contemporary urban fashion.</p>
</div>
</section><!-- close port-fullWidth -->
<section class="port-detail">
<div class="wrapper">
<img src="<?php bloginfo('template_directory'); ?>/img/trin-1.jpg" />
<div class="col-xs-12 col-sm-6 col-md-6 col-lg-6 port-desc">
<h3>About this project</h3>
<div class="divide"></div>
<p>Our team was inspired by the geometric lines and shapes found in the Los Angeles urban landscape. In addition to photography, we styled, cast, and location scouted.</p>
</div>
<div class="col-xs-12 col-sm-6 col-md-6 col-lg-6 port-desc">
<img src="<?php bloginfo('template_directory'); ?>/img/detail-ph.gif" class="port-project-image"/>
</div>
<div class="clearfix"></div>
<div class="col-xs-12 col-sm-6 col-md-6 col-lg-6 port-desc">
<img src="<?php bloginfo('template_directory'); ?>/img/trin-2.jpg" />
</div>
<div class="col-xs-12 col-sm-6 col-md-6 col-lg-6 port-desc">
<img src="<?php bloginfo('template_directory'); ?>/img/trin-3.jpg" />
</div>
<div class="clearfix"></div>
</div>
</section><!-- close port-photo -->
<?php get_footer(); ?>
| amunadesigns/amunadesigns | wp-content/themes/amuna_wordpress/trin-detail.php | PHP | gpl-2.0 | 1,772 |
<?php
/**
* Is Valid Geo Coord
*
* @todo This function can be deprecated.
*
* @param float $lat Latitude.
 * @param float $lng Longitude.
* @return bool
*/
function wpgeo_is_valid_geo_coord( $lat, $lng ) {
$coord = new WPGeo_Coord( $lat, $lng );
return $coord->is_valid_coord();
}
/**
* CSS Dimension
* If numeric assumes pixels and adds 'px', otherwise treated as string.
*
* @param string|int $str Dimension.
* @return string Dimension as string.
*/
function wpgeo_css_dimension( $str = false ) {
if ( is_numeric( $str ) )
$str .= 'px';
return $str;
}
/**
* Check Domain
 * This function checks that the domain name of the page matches the blog's site URL.
 * If it doesn't match we can prevent maps from showing, as the Google API key will not be valid.
 * This prevents warnings if the site is accessed through the Google cache.
*
* @return boolean
*/
function wpgeo_check_domain() {
$http = is_ssl() ? 'https' : 'http';
$host = $http . '://' . rtrim( $_SERVER["HTTP_HOST"], '/' );
// Blog might not be in site root so strip to domain (accept both http and https)
$blog = preg_replace( "/(https?:\/\/[^\/]*).*/", "$1", get_bloginfo( 'url' ) );
return $host == $blog;
}
/**
* Check Version
* Check if WP Geo version is greater or equal to parameters.
*
* @param string $version Version number in the form 2.1.3.a.
* @return boolean
*/
function wpgeo_check_version( $version ) {
global $wpgeo;
if ( version_compare( $version, $wpgeo->version, '>=' ) )
return true;
return false;
}
/**
* Check DB Version
* Check if WP Geo database version is greater or equal to parameter.
*
* @param numeric $version Database version number.
* @return boolean
*/
function wpgeo_check_db_version( $version ) {
global $wpgeo;
if ( $version >= $wpgeo->db_version )
return true;
return false;
}
/**
* Show Polylines Options
* Polylines options menu for the map.
*
* @param array $args Array of arguments.
* @return array|string Array or HTML select menu.
*/
function wpgeo_show_polylines_options( $args = null ) {
$args = wp_parse_args( $args, array(
'id' => 'show_polylines',
'name' => 'show_polylines',
'return' => 'array',
'selected' => null
) );
$menu_options = array(
'' => __( 'Default', 'wp-geo' ),
'Y' => __( 'Show Polylines', 'wp-geo' ),
'N' => __( 'Hide Polylines', 'wp-geo' )
);
if ( $args['return'] == 'menu' )
return wpgeo_select( $args['name'], $menu_options, $args['selected'] );
return $menu_options;
}
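/*
 * Usage sketch for wpgeo_show_polylines_options() (illustrative only; the calls
 * below assume the corrected '==' comparison above and are not part of the
 * plugin's documented examples):
 *
 *     // Default: returns the raw value => label array.
 *     $options = wpgeo_show_polylines_options();
 *
 *     // 'menu': returns a ready-made <select> element named 'show_polylines'
 *     // with the 'Y' (Show Polylines) entry preselected.
 *     echo wpgeo_show_polylines_options( array( 'return' => 'menu', 'selected' => 'Y' ) );
 */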
/**
* Checkbox HTML
*
* @param string $name Field ID.
* @param string $val Field value.
* @param string $checked Checked value.
* @param bool $disabled (optional) Is disabled?
* @param int $id (optional) Field ID. Defaults to $name.
* @return string Checkbox HTML.
*/
function wpgeo_checkbox( $name, $val, $checked, $disabled = false, $id = '' ) {
if ( empty( $id ) )
$id = $name;
return '<input name="' . esc_attr( $name ) . '" type="checkbox" id="' . esc_attr( $id ) . '" value="' . esc_attr( $val ) . '"' . checked( $val, $checked, false ) . disabled( true, $disabled, false ) . ' />';
}
/**
* Select HTML
*
* @param string $name Field ID.
* @param string $options Option values.
* @param string $selected (optional) Select value.
* @param bool $disabled (optional) Is disabled?
* @param int $id (optional) Field ID. Defaults to $name.
* @return string Select HTML.
*/
function wpgeo_select( $name, $options, $selected = '', $disabled = false, $id = '' ) {
if ( empty( $id ) )
$id = $name;
$options_html = '';
foreach ( $options as $value => $label ) {
$options_html .= '<option value="' . esc_attr( $value ) . '"' . selected( $selected, $value, false ) . '>' . $label . '</option>';
}
return '<select name="' . esc_attr( $name ) . '" id="' . esc_attr( $id ) . '"' . disabled( true, $disabled, false ) . '>' . $options_html . '</select>';
}
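/*
 * Minimal usage sketch for the form helpers above. The option names
 * ('wpgeo_show_maps', 'wpgeo_zoom') are hypothetical placeholders, not real
 * plugin settings:
 *
 *     // Checkbox that renders checked when the stored value equals 'Y'.
 *     echo wpgeo_checkbox( 'wpgeo_show_maps', 'Y', get_option( 'wpgeo_show_maps' ) );
 *
 *     // Select menu built from a value => label array, with '10' preselected.
 *     echo wpgeo_select( 'wpgeo_zoom', array( '1' => 'Far', '10' => 'Near' ), '10' );
 */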
| DrPollo/wp-commons | wp-content/plugins/wp-geo/includes/functions.php | PHP | gpl-2.0 | 4,172 |
/**
* Ionize Extend Media Manager
*
*/
ION.ExtendMediaManager = new Class({
Implements: [Events, Options],
options: {
thumbSize: 120,
resizeOnUpload: false,
uploadAutostart: false,
uploadMode: '',
standalone: false
},
/**
*
* @param options
*/
initialize: function(options)
{
this.setOptions(options);
this.baseUrl = ION.baseUrl;
this.adminUrl = ION.adminUrl;
this.themeUrl = ION.themeUrl;
this.addMediaUrl = this.adminUrl + 'media/add_media_to_extend';
this.id_parent = null;
this.parent = null;
this.id_extend = null;
this.lang = null;
this.filemanager = null;
if (options)
this.init(options);
return this;
},
/**
*
* @param options
*/
init: function(options)
{
this.parent = options.parent;
this.id_parent = options.id_parent;
this.id_extend = options.id_extend;
this.lang = options.lang;
this.container = (typeOf(options.container) == 'string') ? $(options.container) : options.container;
if (options.tab) this.tab = options.tab;
if (options.extend_label) this.extend_label = options.extend_label;
this.buildContainer();
if (this.filemanager) this.setFilemanagerTargetInfo();
},
buildContainer: function()
{
var self = this;
// Button bar
var p = new Element('p', {'class':'h30'}).inject(this.container);
// Create Button
this.btnAddMedia = new Element('a', {
'class':'button light',
text: Lang.get('ionize_label_add_media')
}).adopt(new Element('i', {'class':'icon-pictures'})).inject(p);
this.btnAddMedia.addEvent('click', function()
{
self.open();
});
// Media List Container
this.mediaContainer = new Element('div', {
'class':''
}).inject(this.container);
// Add video URL Button
/*
* @todo :
* 1. Build the window
* 2. Call extendMediaManager.addMedia()
* with add of the type, which is "external"
* 3. Modify the controller Media->add_media_to_extend()
* to check for type='external' and do the insert
*/
/*
var addVideo = new Element('a', {
'class':'button light',
'data-id': field.id_extend_field,
text: Lang.get('ionize_label_add_video')
}).adopt(new Element('i', {'class':'icon-video'})).inject(p);
addVideo.addEvent('click', function()
{
ION.dataWindow(
'addExternalMedia',
'ionize_label_add_video',
'media/add_external_media_window',
{width:600, height:150},
{
parent: self.parent,
id_parent: self.id_parent
}
)
});
*/
},
getOptions: function()
{
return {
container: this.container,
tab: this.tab,
parent: this.parent,
id_parent: this.id_parent,
id_extend: this.id_extend,
extend_label: this.extend_label,
lang: this.lang
}
},
getExistingInstance: function()
{
var self = this;
if ($('filemanagerWindow'))
{
// Window
var inst = $('filemanagerWindow').retrieve('instance');
// FM instance
this.filemanager = inst.filemanager;
// Set the onComplete target : This class !
this.filemanager.removeEvents('complete');
this.filemanager.setOptions({'onComplete': self.addMedia.bind(self)});
this.setFilemanagerTargetInfo();
// Re-open window if minimized, or shake if trying to open another FM
if (inst.isMinimized)
{
inst.restore();
}
else
{
inst.focus();
$('filemanagerWindow').shake();
}
return true;
}
return false;
},
/**
* Adds Target info to the Filemanager window
*/
setFilemanagerTargetInfo: function()
{
if (this.filemanager)
{
var text = Lang.get('ionize_label_filemanager_target') + ' : ' + this.parent + ' ' + this.id_parent;
if (this.id_extend != null)
text = text + ' - Extend : ' + this.extend_label;
if (this.lang != null)
text = text + ' - Lang : ' + this.lang;
this.filemanager.setTargetInfo(text);
}
},
/**
* Adds one medium to the current parent
 * Called as a callback by the file / image manager
*
* @param file_url Complete URL to the media. Slashes ('/') were replaced by ~ to permit CI management
* @param file
*/
addMedia:function(file_url, file)
{
var data = {
path: file_url,
parent: this.parent,
id_parent: this.id_parent,
id_extend: this.id_extend,
lang: this.lang
};
// Extend Field
new Request.JSON(
{
'url': this.addMediaUrl,
'method': 'post',
'data': data,
'onSuccess': this.successAddMedia.bind(this),
'onFailure': this.failure.bind(this)
}).send();
},
/**
* called after 'addMedia()' success
* calls 'loadList'
*
* @param responseJSON
*/
successAddMedia: function(responseJSON)
{
ION.notification(responseJSON.message_type, responseJSON.message);
this.loadList();
},
/**
 * Loads the media list of the current parent / extend through XHR.
 * The request data is built from getOptions() (parent, id_parent, id_extend, lang).
*/
loadList: function()
{
var self = this;
new Request.JSON(
{
url : this.adminUrl + 'media/get_extend_media_list',
data: this.getOptions(),
'method': 'post',
'onFailure': this.failure.bind(this),
'onComplete': function(responseJSON)
{
self.completeLoadList(responseJSON);
}
}).send();
},
/**
 * Initializes the media list according to its type
* called after a media list loading through 'loadList'
*
* @param responseJSON JSON response object.
* responseJSON.type : media type. Can be 'picture', 'video', 'music', 'file'
*/
completeLoadList: function(responseJSON)
{
var self = this;
// Hides the spinner
MUI.hideSpinner();
this.mediaContainer.empty();
if (responseJSON && responseJSON.content)
{
// Feed the mediaContainer with responseJSON content
this.mediaContainer.set('html', responseJSON.content);
// Init the sortable
var sortableMedia = new Sortables(
this.mediaContainer,
{
revert: true,
handle: '.drag',
clone: true,
opacity: 0.5,
onComplete: function()
{
var serialized = this.serialize(0, function(element)
{
if (element.getProperty('id'))
return element.getProperty('data-id');
});
self.sortItemList(responseJSON.type, serialized);
}
}
);
// Store the first ordering after picture list load
this.mediaContainer.store('sortableOrder', sortableMedia.serialize(0, function(element)
{
return element.getProperty('data-id');
}));
// Events on items
var medias = this.mediaContainer.getElements('div.drag');
medias.each(function(media)
{
// Set it to init the values
var parent = self.parent,
id_parent = self.id_parent,
id_extend = self.id_extend,
lang = self.lang
;
// Unlink
var unlink = media.getElement('a.unlink');
if (unlink)
{
unlink.addEvent('click', function()
{
self.detachMedia(this.getProperty('data-id'), parent, id_parent, id_extend, lang);
});
}
// Edit
var edit = media.getElement('a.edit');
if (edit)
{
edit.addEvent('click', function()
{
var id = this.getProperty('data-id');
ION.formWindow(
'media' + id,
'mediaForm' + id,
this.getProperty('data-title'),
ION.adminUrl + 'media/edit/' + id,
{width:520,height:430,resize:false}
);
});
}
// Refresh thumb
var refresh = media.getElement('a.refresh');
if (refresh)
{
refresh.addEvent('click', function()
{
self.initThumbs(this.getProperty('data-id'));
});
}
});
}
// Update the tab's info (number of media)
// if (tab) ION.updateTabNumber(tab, this.container);
},
/**
* Items list ordering
* called on items sorting complete
* calls the XHR server ordering method
*
* @param type Media type. Can be 'picture', 'video', 'music', 'file'
* @param serialized new order as a string. coma separated
*/
sortItemList: function(type, serialized)
{
var sortableOrder = this.mediaContainer.retrieve('sortableOrder');
// Remove "undefined" entries from serialized, which can come from the clone.
var serie = new Array();
serialized.each(function(item)
{
if (typeOf(item) != 'null') serie.push(item);
});
// If current <> new ordering : Save it !
if (sortableOrder.toString() != serie.toString() )
{
// Store the new ordering
this.mediaContainer.store('sortableOrder', serie);
serie = serie.join(',');
var data = {
parent: this.parent,
id_parent: this.id_parent,
id_extend: this.id_extend,
lang: this.lang,
order: serie
};
// Save the new ordering
new Request.JSON(
{
url: this.adminUrl + 'media/save_extend_ordering',
method: 'post',
data: data,
onSuccess: function(responseJSON)
{
MUI.hideSpinner();
ION.notification(responseJSON.message_type, responseJSON.message);
}
}).post();
}
},
/*
* Keep for future release
*
getTab: function()
{
var selector = '.' + this.tab + '[data-id=' + this.id_extend + ']';
if(this.lang != null)
selector = selector + '[data-lang=' + this.lang + ']';
var tab = $$(selector);
if (tab.length > 0)
return tab[0];
return null;
},
*/
/**
* On request fail
*
* @param xhr
*/
failure: function(xhr)
{
ION.notification('error', xhr.responseText );
// Hide the spinner
MUI.hideSpinner();
},
/**
 * Unlinks one media from its parent
 *
 * @param id_media Media ID
 * @param parent Parent type
 * @param id_parent Parent ID
 * @param id_extend Extend field ID
 * @param lang Language code
*/
detachMedia: function(id_media, parent, id_parent, id_extend, lang)
{
MUI.showSpinner();
var data = {
id_media: id_media,
parent: parent,
id_parent: id_parent,
id_extend: id_extend,
lang: lang
};
new Request.JSON(
{
url: this.adminUrl + 'media/detach_extend_media',
method: 'post',
data: data,
onSuccess: function()
{
this.loadList();
}.bind(this),
onFailure: this.failure.bind(this)
}).send();
},
/**
* Init thumbnails for one picture
* to be called on pictures list
*
* @param id_picture
*/
initThumbs:function(id_picture)
{
MUI.showSpinner();
new Request.JSON(
{
url: this.adminUrl + 'media/init_thumbs/' + id_picture,
method: 'post',
onSuccess: function(responseJSON)
{
ION.notification(responseJSON.message_type, responseJSON.message );
if (responseJSON.message_type == 'success')
{
this.loadList();
}
}.bind(this)
}).send();
},
/**
* Opens fileManager
*
*/
open:function()
{
// No parent
if ( ! this.id_parent || this.id_parent == '')
{
ION.notification('error', Lang.get('ionize_message_please_save_first'));
}
else
{
// Exit here : no instance needed
if (this.getExistingInstance()) return;
// Create one instance (FM + Window)
this.createInstance();
this.setFilemanagerTargetInfo();
}
},
createInstance: function()
{
var self = this;
// Correct windows levels : Get the current highest level.
MUI.Windows._getWithHighestZIndex();
var zidx = (MUI.Windows.highestZindex).toInt();
MUI.Windows.indexLevel = zidx + 100;
this.filemanager = new Filemanager(
{
url: this.adminUrl + 'media/filemanager',
assetsUrl: this.themeUrl + 'javascript/filemanager/assets',
standalone: false,
createFolders: true,
destroy: ION.Authority.can('delete', 'admin/filemanager'),
rename: ION.Authority.can('rename', 'admin/filemanager'),
upload: ION.Authority.can('upload', 'admin/filemanager'),
move_or_copy: ION.Authority.can('move', 'admin/filemanager'),
resizeOnUpload: self.options.resizeOnUpload,
uploadAutostart: self.options.uploadAutostart,
uploadMode: self.options.uploadMode,
language: Lang.current,
selectable: true,
hideOnSelect: false,
'onComplete': self.addMedia.bind(self),
parentContainer: 'filemanagerWindow_contentWrapper',
mkServerRequestURL: function(fm_obj, request_code, post_data)
{
return {
url: fm_obj.options.url + '/' + request_code,
data: post_data
};
}
});
// MUI Window creation
var winOptions = ION.getFilemanagerWindowOptions();
winOptions.content = this.filemanager.show();
winOptions.onResizeOnDrag = function(){
this.filemanager.fitSizes();
};
self.fileManagerWindow = new MUI.Window(winOptions);
self.fileManagerWindow.filemanager = this.filemanager;
}
});
| jkzleond/school | themes/admin/javascript/ionize/ionize_extendmediamanager.js | JavaScript | gpl-2.0 | 12,485 |
<?php
require_once('lib_books.php');
require_once('constants.php');
function get_books_with_syntax() {
$res = sql_query("SELECT book_id, status, user_id FROM anaphora_syntax_annotators");
$syntax = array();
while ($r = sql_fetch_array($res)) {
if (!isset($syntax[$r['book_id']]))
$syntax[$r['book_id']] = array(1 => 0, 2 => 0);
if ($r['user_id'] == $_SESSION['user_id'])
$syntax[$r['book_id']]['self'] = $r['status'];
$syntax[$r['book_id']][$r['status']] += 1;
}
$res = sql_query("
SELECT book_id, book_name, old_syntax_moder_id, COUNT(tf_id) AS token_count, syntax_on
FROM books
JOIN paragraphs
USING (book_id)
JOIN sentences
USING (par_id)
JOIN tokens
USING (sent_id)
WHERE syntax_on > 0
GROUP BY book_id
ORDER BY book_id
");
$out = array(
'books' => array(),
'token_count' => 0
);
while ($r = sql_fetch_array($res)) {
$out['books'][] = array(
'id' => $r['book_id'],
'name' => $r['book_name'],
'first_sentence_id' => get_book_first_sentence_id($r['book_id']),
'syntax_moder_id' => $r['old_syntax_moder_id'],
'status' => array(
'syntax' => array(
'self' => isset($syntax[$r['book_id']]['self']) ? $syntax[$r['book_id']]['self'] : 0,
'total' => isset($syntax[$r['book_id']]) ? $syntax[$r['book_id']] : array(1 => 0, 2 => 0),
'moderated' => $r['syntax_on'] > 1 ? true : false
),
'anaphor' => 0
)
);
$out['token_count'] += $r['token_count'];
}
return $out;
}
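/*
 * Sketch of the array returned by get_books_with_syntax(), reconstructed from
 * the code above (the concrete values are illustrative):
 *
 * array(
 *     'token_count' => 12345,
 *     'books' => array(
 *         array(
 *             'id'                => 7,
 *             'name'              => 'Book title',
 *             'first_sentence_id' => 100,
 *             'syntax_moder_id'   => 3,
 *             'status' => array(
 *                 'syntax' => array(
 *                     'self'      => 1,                     // current user's own status, 0 if none
 *                     'total'     => array(1 => 2, 2 => 1), // annotator counts per status
 *                     'moderated' => false
 *                 ),
 *                 'anaphor' => 0
 *             )
 *         ),
 *         // ... one entry per book with syntax_on > 0
 *     )
 * )
 */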
function get_syntax_group_types() {
$res = sql_query("SELECT type_id, type_name FROM anaphora_syntax_group_types ORDER BY type_name");
$out = array();
while ($r = sql_fetch_array($res))
$out[$r['type_id']] = $r['type_name'];
return $out;
}
function group_type_exists($type) {
if ($type == 0)
return true;
$res = sql_pe("SELECT type_id FROM anaphora_syntax_group_types WHERE type_id=? LIMIT 1", array($type));
return sizeof($res) > 0;
}
function get_group_text($group_id) {
$texts = array();
$res = sql_query("SELECT * FROM anaphora_syntax_groups_simple WHERE group_id = $group_id");
$r = sql_fetchall($res);
if (!empty($r)) {
$token_ids = array_reduce($r, function($ids, $el) {
if ($ids) return $ids.','.$el['token_id'];
return $el['token_id'];
});
$tokens_res = sql_query("SELECT tf_text FROM tokens WHERE tf_id IN ($token_ids)");
while ($r = sql_fetch_array($tokens_res)) {
$texts[] = $r['tf_text'];
}
return join(" ", $texts);
}
$res = sql_query("SELECT * FROM anaphora_syntax_groups_complex WHERE parent_gid = $group_id");
$r = sql_fetch_array($res);
if (!empty($r)) {
$token_ids = get_group_tokens($group_id);
$token_ids = join(',', $token_ids);
$tokens_res = sql_query("SELECT tf_text FROM tokens WHERE tf_id IN ($token_ids)");
while ($r = sql_fetch_array($tokens_res)) {
$texts[] = $r['tf_text'];
}
return join(" ", $texts);
}
}
function get_group_tokens($group_id) {
$tokens = array();
$simple_groups = get_simple_groups_by_complex($group_id);
$gr_ids = join(',', $simple_groups);
$tokens_res = sql_query($gr_ids ?
"SELECT token_id FROM anaphora_syntax_groups_simple WHERE group_id IN ($gr_ids)" :
"SELECT token_id FROM anaphora_syntax_groups_simple WHERE group_id = $group_id");
while ($r = sql_fetch_array($tokens_res)) {
$tokens[] = (int)$r['token_id'];
}
return $tokens;
}
function get_simple_groups_by_complex($group_id) {
$simple = array();
$frontier = array();
$get_children = "SELECT child_gid FROM anaphora_syntax_groups_complex WHERE parent_gid = ";
$res = sql_query($get_children . $group_id);
$frontier = array_map(function($row) {
return $row['child_gid'];
}, sql_fetchall($res));
while (!empty($frontier)) {
$gid = array_pop($frontier);
$res = sql_query($get_children . $gid);
$r = sql_fetchall($res);
if ($r) {
foreach ($r as $row) {
$frontier[] = $row['child_gid'];
}
} else {
$simple[] = $gid;
}
}
return $simple;
}
function get_simple_groups_by_sentence($sent_id, $user_id) {
$out = array();
$res = sql_pe("
SELECT group_id, group_type, token_id, tf_text, head_id, tf.pos
FROM anaphora_syntax_groups_simple sg
JOIN anaphora_syntax_groups g USING (group_id)
JOIN tokens tf ON (sg.token_id = tf.tf_id)
WHERE sent_id = ?
AND user_id = ?
ORDER BY group_id, tf.pos
", array($sent_id, $user_id));
$last_r = NULL;
$token_ids = array();
$token_texts = array();
$token_pos = array();
foreach ($res as $r) {
if ($last_r && $r['group_id'] != $last_r['group_id']) {
$out[] = array(
'id' => $last_r['group_id'],
'type' => $last_r['group_type'],
'tokens' => $token_ids,
'token_texts' => $token_texts,
'head_id' => $last_r['head_id'],
'text' => join(' ', array_values($token_texts)),
'start_pos' => min($token_pos),
'end_pos' => max($token_pos)
);
$token_ids = $token_texts = $token_pos = array();
}
$token_ids[] = $r['token_id'];
$token_pos[] = $r['pos'];
$token_texts[$r['token_id']] = $r['tf_text'];
$last_r = $r;
}
if (sizeof($token_ids) > 0) {
$out[] = array(
'id' => $last_r['group_id'],
'type' => $last_r['group_type'],
'tokens' => $token_ids,
'token_texts' => $token_texts,
'head_id' => $last_r['head_id'],
'text' => join(' ', array_values($token_texts)),
'start_pos' => min($token_pos),
'end_pos' => max($token_pos) // added for consistency with the groups emitted inside the loop
);
}
return $out;
}
function get_complex_groups_by_simple($simple_groups, $user_id) {
$groups = array();
$possible_children = array(0);
$groups_pos = array();
$groups_text = array();
foreach ($simple_groups as $g) {
$possible_children[] = $g['id'];
$groups_pos[$g['id']] = $g['start_pos'];
$groups_text[$g['id']] = $g['text'];
}
$new_added = true;
while ($new_added) {
$new_added = false;
$res = sql_query("
SELECT parent_gid, child_gid, group_type, head_id
FROM anaphora_syntax_groups g
JOIN anaphora_syntax_groups_complex gc
ON (g.group_id = gc.parent_gid)
WHERE user_id = $user_id
AND child_gid IN (".join(',', $possible_children).")
ORDER BY parent_gid
");
while ($r = sql_fetch_array($res)) {
if (isset($groups[$r['parent_gid']])) {
$groups[$r['parent_gid']]['children'] = array_unique(array_merge($groups[$r['parent_gid']]['children'], array($r['child_gid'])));
$groups[$r['parent_gid']]['start_pos'] = min($groups[$r['parent_gid']]['start_pos'], $groups_pos[$r['child_gid']]);
}
else {
// new group
$new_added = true;
// make sure that all the children are already added before their parent is added
$res1 = sql_query("
SELECT child_gid
FROM anaphora_syntax_groups_complex
WHERE parent_gid = ".$r['parent_gid']."
AND child_gid NOT IN (".join(',', $possible_children).")
");
if (sql_num_rows($res1) == 0) {
$possible_children[] = $r['parent_gid'];
$groups[$r['parent_gid']] = array(
'type' => $r['group_type'],
'children' => array($r['child_gid']),
'head_id' => $r['head_id'],
'start_pos' => $groups_pos[$r['child_gid']]
);
}
}
$groups_pos[$r['parent_gid']] = $groups[$r['parent_gid']]['start_pos'];
}
}
$out = array();
foreach ($groups as $id => $g) {
$atext = array();
foreach ($g['children'] as $ch)
$atext[$ch] = array($groups_pos[$ch], $groups_text[$ch]);
uasort($atext, function($a, $b) {
if ($a[0] < $b[0])
return -1;
if ($a[0] > $b[0])
return 1;
return 0;
});
$groups_text[$id] = join(' ', array_map(function($ar) {return $ar[1];}, $atext));
$out[] = array_merge($g, array(
'id' => $id,
'text' => $groups_text[$id],
'children_texts' => $atext,
));
}
return $out;
}
function get_groups_by_sentence($sent_id, $user_id) {
$simple = get_simple_groups_by_sentence($sent_id, $user_id);
return array(
'simple' => $simple,
'complex' => get_complex_groups_by_simple($simple, $user_id)
);
}
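/*
 * Consumption sketch (a hypothetical caller, shown only to illustrate the
 * structure assembled above; $sent_id and $user_id are assumed to exist):
 *
 *     $groups = get_groups_by_sentence($sent_id, $user_id);
 *     foreach ($groups['simple'] as $g)
 *         echo $g['id'] . ': ' . $g['text'] . "\n";   // text joined from the group's tokens
 *     foreach ($groups['complex'] as $g)
 *         echo $g['id'] . ': ' . $g['text'] . "\n";   // text joined from the child groups
 */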
function get_moderated_groups_by_token($token_id, $in_head = FALSE) {
$res = sql_query("
SELECT sent_id, tf_text
FROM tokens
WHERE tf_id = $token_id
");
$r = sql_fetch_array($res);
$sent_id = $r['sent_id'];
$token = $r['tf_text'];
$groups = get_moderated_groups_by_sentence($sent_id);
$simple_groups = array();
$complex_groups = array();
foreach ($groups['simple'] as $k => $group) {
if (in_array($token_id, $group['tokens'])) {
$simple_groups[] = $group;
}
}
foreach ($groups['complex'] as $k => $group) {
// kludge: match the token by surrounding spaces within the assembled group text
$text = ' '. $group['text']. ' ';
$t = ' '. $token. ' ';
if (mb_strpos($text, $t) !== FALSE) {
$complex_groups[] = $group;
}
}
return array(
'simple' => $simple_groups,
'complex' => $complex_groups
);
}
function get_all_groups_by_sentence($sent_id) {
$res = sql_query("
SELECT DISTINCT user_id
FROM anaphora_syntax_groups_simple sgs
JOIN anaphora_syntax_groups sg USING (group_id)
JOIN tokens tf ON (sgs.token_id = tf.tf_id)
WHERE sent_id = $sent_id
");
$out = array();
while ($r = sql_fetch_array($res)) {
$out[$r['user_id']] = get_groups_by_sentence($sent_id, $r['user_id']);
}
return $out;
}
function get_pronouns_by_sentence($sent_id) {
$token_ids = array();
$res = sql_query("
SELECT tf_id
FROM tokens
LEFT JOIN tf_revisions
USING (tf_id)
WHERE
sent_id=$sent_id
AND is_last = 1
AND rev_text LIKE '%<g v=\"Anph\"/>%'
");
while ($r = sql_fetch_array($res)) {
array_push($token_ids, $r['tf_id']);
}
return $token_ids;
}
function add_group($parts, $type) {
check_permission(PERM_SYNTAX);
$is_complex = false;
$ids = array();
foreach ($parts as $i => $el) {
if ($el['is_group'])
$is_complex = true;
$parts[$i]['id'] = (int)$el['id'];
$ids[] = (int)$el['id'];
}
// TODO check complex groups too
if (!$is_complex && !check_for_same_sentence($ids))
throw new Exception();
sql_begin();
$revset_id = current_revset();
if (!group_type_exists($type))
throw new Exception();
sql_query("INSERT INTO anaphora_syntax_groups VALUES (NULL, $type, $revset_id, 0, ".$_SESSION['user_id'].", '')");
$group_id = sql_insert_id();
foreach ($parts as $el) {
$token_id = $el['id'];
if ($is_complex && !$el['is_group'])
$token_id = get_dummy_group_for_token($token_id, true);
sql_query("INSERT INTO anaphora_syntax_groups_".($is_complex ? "complex" : "simple")." VALUES ($group_id, $token_id)");
}
sql_commit();
return $group_id;
}
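/*
 * Expected shape of $parts (a sketch; the token ids are made up). A simple
 * group is built from tokens of one sentence; a complex group is built from
 * existing groups, and any plain tokens passed alongside them are wrapped into
 * dummy groups by get_dummy_group_for_token():
 *
 *     $group_id = add_group(array(
 *         array('id' => 1017, 'is_group' => false),
 *         array('id' => 1018, 'is_group' => false),
 *     ), $type_id);
 */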
function check_for_same_sentence($token_ids) {
$res = sql_query("
SELECT DISTINCT sent_id
FROM tokens
WHERE tf_id IN (".join(',', $token_ids).")
");
return (sql_num_rows($res) == 1);
}
function add_dummy_group($token_id) {
sql_begin();
$revset_id = current_revset();
$gid = add_group(array(array('id' => $token_id, 'is_group' => false)), 16);
sql_commit();
return $gid;
}
function get_dummy_group_for_token($token_id, $create_if_absent=true) {
$res = sql_query("SELECT group_id FROM anaphora_syntax_groups_simple WHERE group_type=16 AND token_id=$token_id");
if (sql_num_rows($res) > 1)
throw new Exception();
if (sql_num_rows($res) == 1) {
$r = sql_fetch_array($res);
return $r['group_id'];
}
// therefore there is none
if ($create_if_absent)
return add_dummy_group($token_id);
else
throw new Exception();
}
function delete_group($group_id) {
check_permission(PERM_SYNTAX);
if (!is_group_owner($group_id, $_SESSION['user_id']))
throw new Exception();
// forbid deletion if group is part of another group
$res = sql_pe("SELECT * FROM anaphora_syntax_groups_complex WHERE child_gid=? LIMIT 1", array($group_id));
if (sizeof($res) > 0)
throw new Exception();
sql_begin();
sql_pe("DELETE FROM anaphora_syntax_groups_simple WHERE group_id=?", array($group_id));
sql_pe("DELETE FROM anaphora_syntax_groups_complex WHERE parent_gid=?", array($group_id));
sql_pe("DELETE FROM anaphora_syntax_groups WHERE group_id=? LIMIT 1", array($group_id));
sql_commit();
}
function set_group_head($group_id, $head_id) {
check_permission(PERM_SYNTAX);
// assume that the head of a complex group is also a group
if (!is_group_owner($group_id, $_SESSION['user_id']))
throw new Exception();
// check if head belongs to the group
$res = sql_pe(
"SELECT * FROM anaphora_syntax_groups_simple WHERE group_id=? AND token_id=? LIMIT 1",
array($group_id, $head_id)
);
if (!sizeof($res)) {
// perhaps the group is complex then
$res = sql_pe(
"SELECT * FROM anaphora_syntax_groups_complex WHERE parent_gid=? AND child_gid=?",
array($group_id, $head_id)
);
if (!sizeof($res))
throw new Exception();
}
// set the head
sql_pe(
"UPDATE anaphora_syntax_groups SET head_id=? WHERE group_id=? LIMIT 1",
array($head_id, $group_id)
);
}
function set_group_type($group_id, $type_id) {
check_permission(PERM_SYNTAX);
if (!is_group_owner($group_id, $_SESSION['user_id'])) {
throw new Exception();
}
if (!group_type_exists($type_id)) {
throw new UnexpectedValueException();
}
sql_pe(
"UPDATE anaphora_syntax_groups SET group_type=? WHERE group_id=? LIMIT 1",
array($type_id, $group_id)
);
}
function is_group_owner($group_id, $user_id) {
$res = sql_pe("SELECT * FROM anaphora_syntax_groups WHERE group_id=? AND user_id=? LIMIT 1", array($group_id, $user_id));
return sizeof($res) > 0;
}
function set_syntax_annot_status($book_id, $status) {
if (!$book_id || !in_array($status, array(0, 1, 2)))
throw new UnexpectedValueException();
check_permission(PERM_SYNTAX);
$user_id = $_SESSION['user_id'];
sql_begin();
sql_pe("DELETE FROM anaphora_syntax_annotators WHERE user_id=? AND book_id=?", array($user_id, $book_id));
if ($status > 0)
sql_pe("INSERT INTO anaphora_syntax_annotators VALUES (?, ?, ?)", array($user_id, $book_id, $status));
sql_commit();
}
// SYNTAX MODERATION
function become_syntax_moderator($book_id) {
if (!$book_id)
throw new UnexpectedValueException();
check_permission(PERM_SYNTAX);
$res = sql_pe("
SELECT old_syntax_moder_id AS mid
FROM books
WHERE book_id=?
LIMIT 1
", array($book_id));
if ($res[0]['mid'] > 0)
throw new Exception("Место модератора занято"); // "The moderator seat is already taken"
sql_pe("
UPDATE books
SET old_syntax_moder_id = ?
WHERE book_id = ?
LIMIT 1
", array($_SESSION['user_id'], $book_id));
}
function finish_syntax_moderation($book_id) {
if (!$book_id)
throw new UnexpectedValueException();
check_permission(PERM_SYNTAX);
$res = sql_pe("
SELECT old_syntax_moder_id AS mid
FROM books
WHERE book_id = ?
LIMIT 1
", array($book_id));
if ($res[0]['mid'] != $_SESSION['user_id'])
throw new Exception("Вы не модератор"); // "You are not the moderator"
sql_pe("
UPDATE books
SET syntax_on = 2
WHERE book_id = ?
LIMIT 1
", array($book_id));
}
function copy_group($source_group_id, $dest_user) {
check_permission(PERM_SYNTAX);
if (!$source_group_id || !$dest_user)
throw new UnexpectedValueException();
sql_begin();
$revset_id = current_revset();
sql_query("
INSERT INTO anaphora_syntax_groups
(
SELECT NULL, group_type, $revset_id, head_id, $dest_user, marks
FROM anaphora_syntax_groups
WHERE group_id = $source_group_id
LIMIT 1
)
");
$copy_id = sql_insert_id();
// save head
$r = sql_fetch_array(sql_query("SELECT head_id FROM anaphora_syntax_groups WHERE group_id = $copy_id LIMIT 1"));
$head_id = $r['head_id'];
// simple group
copy_simple_group($source_group_id, $copy_id);
// complex group (recursive)
$res = sql_query("
SELECT child_gid
FROM anaphora_syntax_groups_complex
WHERE parent_gid = $source_group_id
");
while ($r = sql_fetch_array($res)) {
$gid = copy_group($r['child_gid'], $dest_user);
sql_query("INSERT INTO anaphora_syntax_groups_complex VALUES ($copy_id, $gid)");
if ($r['child_gid'] == $head_id)
$head_id = $gid;
}
// update head
sql_query("UPDATE anaphora_syntax_groups SET head_id=$head_id WHERE group_id=$copy_id LIMIT 1");
sql_commit();
return $copy_id;
}
function copy_simple_group($source_group_id, $dest_group_id) {
sql_query("
INSERT INTO anaphora_syntax_groups_simple
(
SELECT $dest_group_id, token_id
FROM anaphora_syntax_groups_simple
WHERE group_id = $source_group_id
)
");
}
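/*
 * copy_group() duplicates a whole group tree for another annotator: it copies
 * the group row, recursively copies every child group, and remaps head_id to
 * the newly created copies. A hedged usage sketch (the ids are illustrative):
 *
 *     $copy_id = copy_group($moderator_group_id, $annotator_user_id);
 */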
function get_sentence_moderator($sent_id) {
$res = sql_pe("
SELECT old_syntax_moder_id AS mid
FROM sentences
JOIN paragraphs USING (par_id)
JOIN books USING (book_id)
WHERE sent_id=?
LIMIT 1
", array($sent_id));
return $res[0]['mid'];
}
function get_moderated_groups_by_sentence($sent_id) {
return get_groups_by_sentence($sent_id, get_sentence_moderator($sent_id));
}
// ANAPHORA
function add_anaphora($anaphor_id, $antecedent_id) {
check_permission(PERM_SYNTAX);
// check that anaphor exists and has Anph grammeme
$res = sql_pe("SELECT rev_text FROM tf_revisions WHERE tf_id=? AND is_last=1 LIMIT 1", array($anaphor_id));
if (sizeof($res) == 0)
throw new Exception();
$r = $res[0];
if (strpos($r['rev_text'], '<g v="Anph"/>') === false)
throw new Exception();
// check that antecedent exists
$res = sql_pe("SELECT * FROM anaphora_syntax_groups WHERE group_id=? LIMIT 1", array($antecedent_id));
if (sizeof($res) == 0)
throw new Exception();
// TODO check that the group belongs to the moderator
// TODO check that both token and group are within one book
sql_begin();
$revset_id = current_revset();
sql_pe("INSERT INTO anaphora VALUES (NULL, ?, ?, ?, ?)", array($anaphor_id, $antecedent_id, $revset_id, $_SESSION['user_id']));
$id = sql_insert_id();
sql_commit();
return $id;
}
function delete_anaphora($ref_id) {
check_permission(PERM_SYNTAX);
sql_pe("DELETE FROM anaphora WHERE ref_id=? LIMIT 1", array($ref_id));
}
function get_anaphora_by_book($book_id) {
$res = sql_pe("
SELECT token_id, group_id, ref_id, tf.tf_text as token
FROM anaphora a
JOIN tokens tf ON (a.token_id = tf.tf_id)
JOIN sentences USING (sent_id)
JOIN paragraphs USING (par_id)
WHERE book_id = ?
", array($book_id));
$out = array();
foreach ($res as $r) {
$out[$r['ref_id']] = $r;
$out[$r['ref_id']]['group_text'] = get_group_text($r['group_id']);
$out[$r['ref_id']]['group_tokens'] = get_group_tokens($r['group_id']);
}
return $out;
}
| OpenCorpora/opencorpora | lib/lib_anaphora_syntax.php | PHP | gpl-2.0 | 20,904 |
<?php
/**
* Social Login
*
* @version 1.0
* @author Arkadiy, Joomline
* @copyright © 2012. All rights reserved.
* @license GNU/GPL v.3 or later.
*/
// No direct access
defined('_JEXEC') or die;
class plgSlogin_authVkontakte extends JPlugin
{
private $provider = 'vkontakte';
public function onSloginAuth()
{
$redirect = JURI::base().'?option=com_slogin&task=check&plugin=vkontakte';
$scope = 'offline';
if($this->params->get('repost_comments', 0))
{
$scope .= ',wall';
//$scope .= ',groups';
}
$params = array(
'client_id=' . $this->params->get('id'),
'response_type=code',
'redirect_uri=' . urlencode($redirect),
'scope=' . $scope
);
$params = implode('&', $params);
$url = 'http://oauth.vk.com/authorize?' . $params;
return $url;
}
public function onSloginCheck()
{
require_once JPATH_BASE.'/components/com_slogin/controller.php';
$controller = new SLoginController();
$input = JFactory::getApplication()->input;
$code = $input->get('code', null, 'STRING');
$returnRequest = new SloginRequest();
if ($code) {
$data = $this->getToken($code);
if (empty($data->access_token) || !empty($data->error)) {
$error = (!empty($data->error_description)) ? $data->error_description : $data->info;
die($error);
}
// Fetch the user profile data; the 'fields' parameter lists which fields to request.
// Only request what is actually needed!
// uid, first_name, last_name, nickname, screen_name, sex, bdate (birthdate), city, country,
// timezone, photo, photo_medium, photo_big, has_mobile, rate, contacts, education, online, counters.
// By default the API returns uid, first_name and last_name.
// name_case is an optional parameter:
// the grammatical case used to decline the user's first and last name.
// Possible values:
// nominative - nom,
// genitive - gen,
// dative - dat,
// accusative - acc,
// instrumental - ins,
// prepositional - abl.
// Defaults to nom.
$ResponseUrl = 'https://api.vk.com/method/getProfiles?uid='.$data->user_id.'&access_token='.$data->access_token.'&fields=nickname,contacts,photo_big';
$request = json_decode($controller->open_http($ResponseUrl))->response[0];
if(empty($request)){
echo 'Error - empty user data';
exit;
}
else if(!empty($request->error)){
echo 'Error - '. $request->error;
exit;
}
// save the token data in the session
// expire - token expiry time, a Unix timestamp
JFactory::getApplication()->setUserState('slogin.token', array(
'provider' => $this->provider,
'token' => $data->access_token,
'expire' => $data->expires_in,
'repost_comments' => $this->params->get('repost_comments', 0),
'slogin_user' => $data->user_id,
'app_id' => $this->params->get('id', 0),
'app_secret' => $this->params->get('password', 0)
));
$returnRequest->first_name = $request->first_name;
$returnRequest->last_name = $request->last_name;
$returnRequest->id = $request->uid;
$returnRequest->real_name = $request->first_name.' '.$request->last_name;
$returnRequest->display_name = $request->nickname;
$returnRequest->all_request = $request;
return $returnRequest;
}
else{
echo 'Error - empty code';
exit;
}
}
public function getToken($code)
{
require_once JPATH_BASE.'/components/com_slogin/controller.php';
$controller = new SLoginController();
$redirect = urlencode(JURI::base().'?option=com_slogin&task=check&plugin=vkontakte');
// connect to the API
$params = array(
'client_id=' . $this->params->get('id'),
'client_secret=' . $this->params->get('password'),
'code=' . $code,
'redirect_uri=' . $redirect
);
$params = implode('&', $params);
$url = 'https://oauth.vk.com/access_token?' . $params;
$data = json_decode($controller->open_http($url));
return $data;
}
public function onCreateSloginLink(&$links, $add = '')
{
$i = count($links);
$links[$i]['link'] = 'index.php?option=com_slogin&task=auth&plugin=vkontakte' . $add;
$links[$i]['class'] = 'vkontakteslogin';
$links[$i]['plugin_name'] = $this->provider;
}
}
| raimov/broneering | plugins/slogin_auth/vkontakte/vkontakte.php | PHP | gpl-2.0 | 5,087 |
#include <linux/version.h>
#include <linux/dvb/version.h>
#include <lib/dvb/dvb.h>
#include <lib/dvb/frontendparms.h>
#include <lib/base/cfile.h>
#include <lib/base/eerror.h>
#include <lib/base/nconfig.h> // access to python config
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sstream>
#include "absdiff.h"
#ifndef I2C_SLAVE_FORCE
#define I2C_SLAVE_FORCE 0x0706
#endif
#define eDebugNoSimulate(x...) \
do { \
if (!m_simulate) \
eDebug(x); \
} while(0)
#if 0
else \
{ \
eDebugNoNewLineStart("SIMULATE:"); \
eDebugNoNewLine(x); \
eDebugNoNewLine("\n"); \
}
#endif
#define eDebugNoSimulateNoNewLineStart(x...) \
do { \
if (!m_simulate) \
eDebugNoNewLineStart(x); \
} while(0)
#if 0
else \
{ \
eDebugNoNewLineStart("SIMULATE:"); \
eDebugNoNewLine(x); \
}
#endif
void eDVBDiseqcCommand::setCommandString(const char *str)
{
if (!str)
return;
len=0;
int slen = strlen(str);
if (slen % 2)
{
eDebug("[eDVBDiseqcCommand] invalid diseqc command string length (not 2 byte aligned)");
return;
}
if (slen > MAX_DISEQC_LENGTH*2)
{
eDebug("[eDVBDiseqcCommand] invalid diseqc command string length (string is too long)");
return;
}
unsigned char val=0;
for (int i=0; i < slen; ++i)
{
unsigned char c = str[i];
switch(c)
{
case '0' ... '9': c-=48; break;
case 'a' ... 'f': c-=87; break;
case 'A' ... 'F': c-=55; break;
default:
eDebug("[eDVBDiseqcCommand] invalid character in hex string, ignoring complete diseqc command!");
return;
}
if ( i % 2 )
{
val |= c;
data[i/2] = val;
}
else
val = c << 4;
}
len = slen/2;
}
void eDVBFrontendParametersSatellite::set(const SatelliteDeliverySystemDescriptor &descriptor)
{
frequency = descriptor.getFrequency() * 10;
symbol_rate = descriptor.getSymbolRate() * 100;
polarisation = descriptor.getPolarization();
fec = descriptor.getFecInner();
if (fec != FEC_None && fec > FEC_9_10 )
fec = FEC_Auto;
inversion = eDVBFrontendParametersSatellite::Inversion_Unknown;
pilot = eDVBFrontendParametersSatellite::Pilot_Unknown;
orbital_position = ((descriptor.getOrbitalPosition() >> 12) & 0xF) * 1000;
orbital_position += ((descriptor.getOrbitalPosition() >> 8) & 0xF) * 100;
orbital_position += ((descriptor.getOrbitalPosition() >> 4) & 0xF) * 10;
orbital_position += ((descriptor.getOrbitalPosition()) & 0xF);
if (orbital_position && (!descriptor.getWestEastFlag()))
orbital_position = 3600 - orbital_position;
system = descriptor.getModulationSystem();
modulation = descriptor.getModulation();
if (system == System_DVB_S && modulation != Modulation_QPSK)
{
eDebug("[eDVBFrontendParametersSatellite] satellite_delivery_descriptor invalid modulation type.. force QPSK");
modulation = Modulation_QPSK;
}
rolloff = descriptor.getRollOff();
if (system == System_DVB_S2)
{
eDebug("[eDVBFrontendParametersSatellite] SAT DVB-S2 freq %d, %s, pos %d, sr %d, fec %d, modulation %d, rolloff %d",
frequency,
polarisation ? "hor" : "vert",
orbital_position,
symbol_rate, fec,
modulation,
rolloff);
}
else
{
eDebug("[eDVBFrontendParametersSatellite] SAT DVB-S freq %d, %s, pos %d, sr %d, fec %d",
frequency,
polarisation ? "hor" : "vert",
orbital_position,
symbol_rate, fec);
}
}
void eDVBFrontendParametersCable::set(const CableDeliverySystemDescriptor &descriptor)
{
frequency = descriptor.getFrequency() / 10;
symbol_rate = descriptor.getSymbolRate() * 100;
switch (descriptor.getFecInner())
{
default:
case 0: fec_inner = FEC_Auto; break;
case 1: fec_inner = FEC_1_2; break;
case 2: fec_inner = FEC_2_3; break;
case 3: fec_inner = FEC_3_4; break;
case 4: fec_inner = FEC_5_6; break;
case 5: fec_inner = FEC_7_8; break;
case 6: fec_inner = FEC_8_9; break;
case 7: fec_inner = FEC_3_5; break;
case 8: fec_inner = FEC_4_5; break;
case 9: fec_inner = FEC_9_10; break;
}
modulation = descriptor.getModulation();
if (modulation > Modulation_QAM256)
modulation = Modulation_Auto;
inversion = Inversion_Unknown;
system = System_DVB_C_ANNEX_A;
eDebug("[eDVBFrontendParametersCable] Cable freq %d, mod %d, sr %d, fec %d",
frequency,
modulation, symbol_rate, fec_inner);
}
void eDVBFrontendParametersTerrestrial::set(const TerrestrialDeliverySystemDescriptor &descriptor)
{
frequency = descriptor.getCentreFrequency() * 10;
switch (descriptor.getBandwidth())
{
case 0: bandwidth = 8000000; break;
case 1: bandwidth = 7000000; break;
case 2: bandwidth = 6000000; break;
case 3: bandwidth = 5000000; break;
default: bandwidth = 0; break;
}
code_rate_HP = descriptor.getCodeRateHpStream();
if (code_rate_HP > FEC_Auto)
code_rate_HP = FEC_Auto;
code_rate_LP = descriptor.getCodeRateLpStream();
if (code_rate_LP > FEC_Auto)
code_rate_LP = FEC_Auto;
switch (descriptor.getTransmissionMode())
{
case 0: transmission_mode = TransmissionMode_2k; break;
case 1: transmission_mode = TransmissionMode_8k; break;
case 2: transmission_mode = TransmissionMode_4k; break;
default: transmission_mode = TransmissionMode_Auto; break;
}
guard_interval = descriptor.getGuardInterval();
if (guard_interval > GuardInterval_1_4)
guard_interval = GuardInterval_Auto;
hierarchy = descriptor.getHierarchyInformation();
if (hierarchy > Hierarchy_Auto)
hierarchy = Hierarchy_Auto;
modulation = descriptor.getConstellation();
if (modulation > Modulation_Auto)
modulation = Modulation_Auto;
inversion = Inversion_Unknown;
system = System_DVB_T;
plp_id = 0;
eDebug("[eDVBFrontendParametersTerrestrial] Terr freq %d, bw %d, cr_hp %d, cr_lp %d, tm_mode %d, guard %d, hierarchy %d, const %d",
frequency, bandwidth, code_rate_HP, code_rate_LP, transmission_mode,
guard_interval, hierarchy, modulation);
}
void eDVBFrontendParametersTerrestrial::set(const T2DeliverySystemDescriptor &descriptor)
{
switch (descriptor.getBandwidth())
{
case 0: bandwidth = 8000000; break;
case 1: bandwidth = 7000000; break;
case 2: bandwidth = 6000000; break;
case 3: bandwidth = 5000000; break;
case 4: bandwidth = 1712000; break;
case 5: bandwidth = 10000000; break;
default: bandwidth = 0; break;
}
switch (descriptor.getTransmissionMode())
{
case 0: transmission_mode = TransmissionMode_2k; break;
case 1: transmission_mode = TransmissionMode_8k; break;
case 2: transmission_mode = TransmissionMode_4k; break;
case 3: transmission_mode = TransmissionMode_1k; break;
case 4: transmission_mode = TransmissionMode_16k; break;
case 5: transmission_mode = TransmissionMode_32k; break;
default: transmission_mode = TransmissionMode_Auto; break;
}
switch (descriptor.getGuardInterval())
{
case 0: guard_interval = GuardInterval_1_32; break;
case 1: guard_interval = GuardInterval_1_16; break;
case 2: guard_interval = GuardInterval_1_8; break;
case 3: guard_interval = GuardInterval_1_4; break;
case 4: guard_interval = GuardInterval_1_128; break;
case 5: guard_interval = GuardInterval_19_128; break;
case 6: guard_interval = GuardInterval_19_256; break;
case 7: guard_interval = GuardInterval_Auto; break;
}
plp_id = descriptor.getPlpId();
code_rate_HP = code_rate_LP = FEC_Auto;
hierarchy = Hierarchy_Auto;
modulation = Modulation_Auto;
inversion = Inversion_Unknown;
system = System_DVB_T2;
eDebug("[eDVBFrontendParametersTerrestrial] T2 bw %d, tm_mode %d, guard %d, plp_id %d",
bandwidth, transmission_mode, guard_interval, plp_id);
}
eDVBFrontendParameters::eDVBFrontendParameters()
:m_type(-1), m_flags(0)
{
}
DEFINE_REF(eDVBFrontendParameters);
RESULT eDVBFrontendParameters::getSystem(int &t) const
{
t = m_type;
return (m_type == -1) ? -1 : 0;
}
RESULT eDVBFrontendParameters::getDVBS(eDVBFrontendParametersSatellite &p) const
{
if (m_type != iDVBFrontend::feSatellite)
return -1;
p = sat;
return 0;
}
RESULT eDVBFrontendParameters::getDVBC(eDVBFrontendParametersCable &p) const
{
if (m_type != iDVBFrontend::feCable)
return -1;
p = cable;
return 0;
}
RESULT eDVBFrontendParameters::getDVBT(eDVBFrontendParametersTerrestrial &p) const
{
if (m_type != iDVBFrontend::feTerrestrial)
return -1;
p = terrestrial;
return 0;
}
RESULT eDVBFrontendParameters::getATSC(eDVBFrontendParametersATSC &p) const
{
if (m_type != iDVBFrontend::feATSC)
return -1;
p = atsc;
return 0;
}
RESULT eDVBFrontendParameters::setDVBS(const eDVBFrontendParametersSatellite &p, bool no_rotor_command_on_tune)
{
sat = p;
sat.no_rotor_command_on_tune = no_rotor_command_on_tune;
m_type = iDVBFrontend::feSatellite;
return 0;
}
RESULT eDVBFrontendParameters::setDVBC(const eDVBFrontendParametersCable &p)
{
cable = p;
m_type = iDVBFrontend::feCable;
return 0;
}
RESULT eDVBFrontendParameters::setDVBT(const eDVBFrontendParametersTerrestrial &p)
{
terrestrial = p;
m_type = iDVBFrontend::feTerrestrial;
return 0;
}
RESULT eDVBFrontendParameters::setATSC(const eDVBFrontendParametersATSC &p)
{
atsc = p;
m_type = iDVBFrontend::feATSC;
return 0;
}
RESULT eDVBFrontendParameters::calculateDifference(const iDVBFrontendParameters *parm, int &diff, bool exact) const
{
if (!parm)
return -1;
int type;
parm->getSystem(type);
if (type != m_type)
{
diff = 1<<30; // big difference
return 0;
}
switch (type)
{
case iDVBFrontend::feSatellite:
{
eDVBFrontendParametersSatellite osat;
if (parm->getDVBS(osat))
return -2;
if (sat.orbital_position != osat.orbital_position)
diff = 1<<29;
else if (sat.polarisation != osat.polarisation)
diff = 1<<28;
else if (exact && sat.fec != osat.fec && sat.fec != eDVBFrontendParametersSatellite::FEC_Auto && osat.fec != eDVBFrontendParametersSatellite::FEC_Auto)
diff = 1<<27;
else if (exact && sat.modulation != osat.modulation && sat.modulation != eDVBFrontendParametersSatellite::Modulation_Auto && osat.modulation != eDVBFrontendParametersSatellite::Modulation_Auto)
diff = 1<<27;
else
{
diff = absdiff(sat.frequency, osat.frequency);
diff += absdiff(sat.symbol_rate, osat.symbol_rate);
}
return 0;
}
case iDVBFrontend::feCable:
{
eDVBFrontendParametersCable ocable;
if (parm->getDVBC(ocable))
return -2;
if (exact && cable.modulation != ocable.modulation
&& cable.modulation != eDVBFrontendParametersCable::Modulation_Auto
&& ocable.modulation != eDVBFrontendParametersCable::Modulation_Auto)
diff = 1 << 29;
else if (exact && cable.fec_inner != ocable.fec_inner && cable.fec_inner != eDVBFrontendParametersCable::FEC_Auto && ocable.fec_inner != eDVBFrontendParametersCable::FEC_Auto)
diff = 1 << 27;
else
{
diff = absdiff(cable.frequency, ocable.frequency);
diff += absdiff(cable.symbol_rate, ocable.symbol_rate);
}
return 0;
}
case iDVBFrontend::feTerrestrial:
{
eDVBFrontendParametersTerrestrial oterrestrial;
if (parm->getDVBT(oterrestrial))
return -2;
if (exact && oterrestrial.bandwidth != terrestrial.bandwidth &&
oterrestrial.bandwidth && terrestrial.bandwidth)
diff = 1 << 30;
else if (exact && oterrestrial.modulation != terrestrial.modulation &&
oterrestrial.modulation != eDVBFrontendParametersTerrestrial::Modulation_Auto &&
terrestrial.modulation != eDVBFrontendParametersTerrestrial::Modulation_Auto)
diff = 1 << 30;
else if (exact && oterrestrial.transmission_mode != terrestrial.transmission_mode &&
oterrestrial.transmission_mode != eDVBFrontendParametersTerrestrial::TransmissionMode_Auto &&
terrestrial.transmission_mode != eDVBFrontendParametersTerrestrial::TransmissionMode_Auto)
diff = 1 << 30;
else if (exact && oterrestrial.guard_interval != terrestrial.guard_interval &&
oterrestrial.guard_interval != eDVBFrontendParametersTerrestrial::GuardInterval_Auto &&
terrestrial.guard_interval != eDVBFrontendParametersTerrestrial::GuardInterval_Auto)
diff = 1 << 30;
else if (exact && oterrestrial.hierarchy != terrestrial.hierarchy &&
oterrestrial.hierarchy != eDVBFrontendParametersTerrestrial::Hierarchy_Auto &&
terrestrial.hierarchy != eDVBFrontendParametersTerrestrial::Hierarchy_Auto)
diff = 1 << 30;
else if (exact && oterrestrial.code_rate_LP != terrestrial.code_rate_LP &&
oterrestrial.code_rate_LP != eDVBFrontendParametersTerrestrial::FEC_Auto &&
terrestrial.code_rate_LP != eDVBFrontendParametersTerrestrial::FEC_Auto)
diff = 1 << 30;
else if (exact && oterrestrial.code_rate_HP != terrestrial.code_rate_HP &&
oterrestrial.code_rate_HP != eDVBFrontendParametersTerrestrial::FEC_Auto &&
terrestrial.code_rate_HP != eDVBFrontendParametersTerrestrial::FEC_Auto)
diff = 1 << 30;
else if (oterrestrial.plp_id != terrestrial.plp_id)
diff = 1 << 27;
else if (oterrestrial.system != terrestrial.system)
diff = 1 << 30;
else
diff = absdiff(terrestrial.frequency, oterrestrial.frequency) / 1000;
return 0;
}
case iDVBFrontend::feATSC:
{
eDVBFrontendParametersATSC oatsc;
if (parm->getATSC(oatsc))
return -2;
if (exact && atsc.modulation != oatsc.modulation
&& atsc.modulation != eDVBFrontendParametersATSC::Modulation_Auto
&& oatsc.modulation != eDVBFrontendParametersATSC::Modulation_Auto)
diff = 1 << 29;
else
{
diff = absdiff(atsc.frequency, oatsc.frequency);
}
return 0;
}
default:
return -1;
}
return 0;
}
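/* Build a 32 bit transponder hash: the upper 16 bits hold the orbital position (DVB-S)
   or a fixed per-system marker, the lower 16 bits are derived from the frequency. */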
RESULT eDVBFrontendParameters::getHash(unsigned long &hash) const
{
switch (m_type)
{
case iDVBFrontend::feSatellite:
{
hash = (sat.orbital_position << 16);
hash |= ((sat.frequency/1000)&0xFFFF)|((sat.polarisation&1) << 15);
return 0;
}
case iDVBFrontend::feCable:
{
hash = 0xFFFF0000;
hash |= (cable.frequency/1000)&0xFFFF;
return 0;
}
case iDVBFrontend::feTerrestrial:
{
hash = 0xEEEE0000;
hash |= (terrestrial.frequency/1000000)&0xFFFF;
return 0;
}
case iDVBFrontend::feATSC:
{
hash = atsc.system == eDVBFrontendParametersATSC::System_ATSC ? 0xEEEE0000 : 0xFFFF0000;
hash |= (atsc.frequency/1000000)&0xFFFF;
return 0;
}
default:
{
return -1;
}
}
}
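/* Pick a lock timeout for tuning. DVB-S zigzag tuning takes longer at low symbol rates,
   so the timeout scales with the symbol rate; all other systems use a fixed 5 seconds. */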
RESULT eDVBFrontendParameters::calcLockTimeout(unsigned int &timeout) const
{
switch (m_type)
{
case iDVBFrontend::feSatellite:
{
/* high symbol rate transponders tune faster, due to
requiring less zigzag and giving more symbols faster.
5s are definitely not enough on really low SR when
zigzag has to find the exact frequency first.
*/
if (sat.symbol_rate > 20000000)
timeout = 5000;
else if (sat.symbol_rate > 10000000)
timeout = 10000;
else
timeout = 20000;
return 0;
}
case iDVBFrontend::feCable:
{
timeout = 5000;
return 0;
}
case iDVBFrontend::feTerrestrial:
{
timeout = 5000;
return 0;
}
case iDVBFrontend::feATSC:
{
timeout = 5000;
return 0;
}
default:
{
return -1;
}
}
}
DEFINE_REF(eDVBFrontend);
int eDVBFrontend::PriorityOrder=0;
int eDVBFrontend::PreferredFrontendIndex = -1;
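/* The constructor opens the frontend device once to probe it (DVB API version, supported
   delivery systems, FBC capability) and closes it again immediately; 'ok' reports whether
   that probe succeeded. */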
eDVBFrontend::eDVBFrontend(const char *devicenodename, int fe, int &ok, bool simulate, eDVBFrontend *simulate_fe)
:m_simulate(simulate), m_enabled(false), m_fbc(false), m_simulate_fe(simulate_fe), m_type(-1), m_dvbid(fe), m_slotid(fe)
,m_fd(-1), m_dvbversion(0), m_rotor_mode(false), m_need_rotor_workaround(false), m_multitype(false)
,m_state(stateClosed), m_timeout(0), m_tuneTimer(0)
{
char filename[64];
m_filename = devicenodename;
m_timeout = eTimer::create(eApp);
CONNECT(m_timeout->timeout, eDVBFrontend::timeout);
m_tuneTimer = eTimer::create(eApp);
CONNECT(m_tuneTimer->timeout, eDVBFrontend::tuneLoop);
for (int i=0; i<eDVBFrontend::NUM_DATA_ENTRIES; ++i)
m_data[i] = -1;
m_data[FREQ_OFFSET] = 0;
m_idleInputpower[0]=m_idleInputpower[1]=0;
snprintf(filename, sizeof(filename), "/proc/stb/frontend/%d/fbc_id", m_slotid);
if (access(filename, F_OK) == 0)
m_fbc = true;
ok = !openFrontend();
closeFrontend();
}
void eDVBFrontend::reopenFrontend()
{
sleep(1);
m_delsys.clear();
openFrontend();
}
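/* Open the frontend device node, query the DVB API version and the supported delivery
   systems (via DTV_ENUM_DELSYS when available, otherwise derived from the legacy
   FE_GET_INFO type), and attach the event notifier. */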
int eDVBFrontend::openFrontend()
{
if (m_state != stateClosed)
return -1; // already opened
m_state=stateIdle;
m_tuning=0;
if (!m_simulate)
{
eDebug("[eDVBFrontend] opening frontend %d", m_dvbid);
if (m_fd < 0)
{
m_fd = ::open(m_filename.c_str(), O_RDWR | O_NONBLOCK | O_CLOEXEC);
if (m_fd < 0)
{
eWarning("[eDVBFrontend] opening %s failed: %m", m_filename.c_str());
return -1;
}
}
else
eWarning("[eDVBFrontend] frontend %d already opened", m_dvbid);
if (m_dvbversion == 0)
{
m_dvbversion = DVB_VERSION(3, 0);
#if defined DTV_API_VERSION
struct dtv_property p;
struct dtv_properties cmdseq;
cmdseq.props = &p;
cmdseq.num = 1;
p.cmd = DTV_API_VERSION;
if (ioctl(m_fd, FE_GET_PROPERTY, &cmdseq) >= 0)
{
m_dvbversion = p.u.data;
eDebug("[eDVBFrontend] frontend %d has DVB API %02x ", m_dvbid, m_dvbversion);
}
#endif
}
if (m_delsys.empty())
{
if (::ioctl(m_fd, FE_GET_INFO, &fe_info) < 0)
{
eWarning("[eDVBFrontend] ioctl FE_GET_INFO failed");
::close(m_fd);
m_fd = -1;
return -1;
}
strncpy(m_description, fe_info.name, sizeof(m_description) - 1);
m_description[sizeof(m_description) - 1] = '\0'; // make sure the description is always null terminated
#if defined DTV_ENUM_DELSYS
struct dtv_property p[1];
p[0].cmd = DTV_ENUM_DELSYS;
struct dtv_properties cmdseq;
cmdseq.num = 1;
cmdseq.props = p;
if (::ioctl(m_fd, FE_GET_PROPERTY, &cmdseq) >= 0)
{
m_delsys.clear();
for (; p[0].u.buffer.len > 0; p[0].u.buffer.len--)
{
fe_delivery_system_t delsys = (fe_delivery_system_t)p[0].u.buffer.data[p[0].u.buffer.len - 1];
m_delsys[delsys] = true;
}
}
else
#else
/* no DTV_ENUM_DELSYS support */
if (1)
#endif
{
/* old DVB API, fill delsys map with some defaults */
switch (fe_info.type)
{
case FE_QPSK:
{
m_delsys[SYS_DVBS] = true;
#if DVB_API_VERSION >= 5
if (m_dvbversion >= DVB_VERSION(5, 0))
{
if (fe_info.caps & FE_CAN_2G_MODULATION) m_delsys[SYS_DVBS2] = true;
}
#endif
break;
}
case FE_QAM:
{
#if DVB_API_VERSION > 5 || DVB_API_VERSION == 5 && DVB_API_VERSION_MINOR >= 6
/* no need for a m_dvbversion check, SYS_DVBC_ANNEX_A replaced SYS_DVBC_ANNEX_AC (same value) */
m_delsys[SYS_DVBC_ANNEX_A] = true;
#else
m_delsys[SYS_DVBC_ANNEX_AC] = true;
#endif
break;
}
case FE_OFDM:
{
m_delsys[SYS_DVBT] = true;
#if DVB_API_VERSION > 5 || DVB_API_VERSION == 5 && DVB_API_VERSION_MINOR >= 3
if (m_dvbversion >= DVB_VERSION(5, 3))
{
if (fe_info.caps & FE_CAN_2G_MODULATION) m_delsys[SYS_DVBT2] = true;
}
#endif
break;
}
case FE_ATSC:
{
m_delsys[SYS_ATSC] = true;
break;
}
}
}
}
if (m_simulate_fe)
{
m_simulate_fe->m_delsys = m_delsys;
}
m_sn = eSocketNotifier::create(eApp, m_fd, eSocketNotifier::Read, false);
CONNECT(m_sn->activated, eDVBFrontend::feEvent);
}
else
{
fe_info.frequency_min = 900000;
fe_info.frequency_max = 2200000;
}
m_multitype = m_delsys[SYS_DVBS] && (m_delsys[SYS_DVBT] || m_delsys[SYS_DVBC_ANNEX_A]);
if (!m_multitype)
m_type = feSatellite;
setTone(iDVBFrontend::toneOff);
setVoltage(iDVBFrontend::voltageOff);
return 0;
}
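/* Close the frontend device. Refuses to close while a linked frontend is still in use;
   if a SatCR/unicable channel is active and no_delayed is false, the SatCR shutdown
   sequence is scheduled first and the actual close is deferred until it finishes. */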
int eDVBFrontend::closeFrontend(bool force, bool no_delayed)
{
if (!force && m_data[CUR_VOLTAGE] != -1 && m_data[CUR_VOLTAGE] != iDVBFrontend::voltageOff)
{
long tmp = m_data[LINKED_NEXT_PTR];
while (tmp != -1)
{
eDVBRegisteredFrontend *linked_fe = (eDVBRegisteredFrontend*)tmp;
if (linked_fe->m_inuse)
{
eDebugNoSimulate("[eDVBFrontend] dont close frontend %d until the linked frontend %d in slot %d is still in use",
m_dvbid, linked_fe->m_frontend->getDVBID(), linked_fe->m_frontend->getSlotID());
return -1;
}
linked_fe->m_frontend->getData(LINKED_NEXT_PTR, tmp);
}
}
if (m_fd >= 0)
{
eDebugNoSimulate("[eDVBFrontend] close frontend %d", m_dvbid);
if (m_data[SATCR] != -1)
{
if (!no_delayed)
{
m_sec->prepareTurnOffSatCR(*this);
m_tuneTimer->start(0, true);
if(!m_tuneTimer->isActive())
{
int timeout=0;
eDebug("[eDVBFrontend] [turnOffSatCR] no mainloop");
while(true)
{
timeout = tuneLoopInt();
if (timeout == -1)
break;
usleep(timeout*1000); // blocking wait.. the eTimer does not exist anymore at this point
}
}
else
eDebug("[eDVBFrontend] [turnOffSatCR] running mainloop");
return 0;
}
else
m_data[ROTOR_CMD] = -1;
}
setTone(iDVBFrontend::toneOff);
setVoltage(iDVBFrontend::voltageOff);
m_tuneTimer->stop();
if (m_sec && !m_simulate)
m_sec->setRotorMoving(m_slotid, false);
if (!::close(m_fd))
m_fd=-1;
else
eWarning("[eDVBFrontend] couldnt close frontend %d", m_dvbid);
}
else if (m_simulate)
{
setTone(iDVBFrontend::toneOff);
setVoltage(iDVBFrontend::voltageOff);
}
m_sn=0;
m_state = stateClosed;
return 0;
}
eDVBFrontend::~eDVBFrontend()
{
m_data[LINKED_PREV_PTR] = m_data[LINKED_NEXT_PTR] = -1;
closeFrontend();
}
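/* Drain all pending frontend events: first walk the LINKED_PREV_PTR chain to find the
   frontend that actually owns the SEC state, then update the tune state (lock, tuning,
   lost lock) from the status bits of each event. */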
void eDVBFrontend::feEvent(int w)
{
eDVBFrontend *sec_fe = this;
long tmp = m_data[LINKED_PREV_PTR];
while (tmp != -1)
{
eDVBRegisteredFrontend *linked_fe = (eDVBRegisteredFrontend*)tmp;
sec_fe = linked_fe->m_frontend;
sec_fe->getData(LINKED_NEXT_PTR, tmp);
}
while (1)
{
dvb_frontend_event event;
int res;
int state;
res = ::ioctl(m_fd, FE_GET_EVENT, &event);
if (res && (errno == EAGAIN))
break;
if (w < 0)
continue;
eDebug("[eDVBFrontend] (%d)fe event: status %x, inversion %s, m_tuning %d", m_dvbid, event.status, (event.parameters.inversion == INVERSION_ON) ? "on" : "off", m_tuning);
if (event.status & FE_HAS_LOCK)
{
state = stateLock;
}
else
{
if (m_tuning) {
state = stateTuning;
if (event.status & FE_TIMEDOUT) {
eDebug("[eDVBFrontend] FE_TIMEDOUT! ..abort");
m_tuneTimer->stop();
timeout();
return;
}
++m_tuning;
}
else
{
eDebug("[eDVBFrontend] stateLostLock");
state = stateLostLock;
if (!m_rotor_mode)
sec_fe->m_data[CSW] = sec_fe->m_data[UCSW] = sec_fe->m_data[TONEBURST] = -1; // reset diseqc
}
}
if (m_state != state)
{
m_state = state;
m_stateChanged(this);
}
}
}
void eDVBFrontend::timeout()
{
m_tuning = 0;
if (m_state == stateTuning)
{
m_state = stateFailed;
m_data[CSW] = m_data[UCSW] = m_data[TONEBURST] = -1; // reset diseqc
m_stateChanged(this);
}
}
#define INRANGE(X,Y,Z) (((X<=Y) && (Y<=Z))||((Z<=Y) && (Y<=X)) ? 1 : 0)
/* unsigned 32 bit division */
static inline uint32_t fe_udiv(uint32_t a, uint32_t b)
{
return (a + b / 2) / b;
}
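/* Convert the raw driver SNR reading into signalqualitydb (SNR in units of 0.01 dB) and
   signalquality (normalised to 0..65536). The conversion is tuner specific and selected
   by matching the frontend description string. */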
void eDVBFrontend::calculateSignalQuality(int snr, int &signalquality, int &signalqualitydb)
{
int sat_max = 1600; // for stv0288 / bsbe2
int ret = 0x12345678;
int ter_max = 2900;
int atsc_max = 4200;
if (!strcmp(m_description, "AVL2108")) // ET9000
{
ret = (int)(snr / 40.5);
sat_max = 1618;
}
if (!strcmp(m_description, "AVL6211")) // ET10000
{
ret = (int)(snr / 37.5);
sat_max = 1700;
}
else if (strstr("Nova-T StickNovaT 500StickDTB03", m_description)) // dib0700
{
if ( snr > 300 )
ret = 0; //error condition
else
ret = (int)(snr * 10);
}
else if (!strcmp(m_description, "BCM4501 (internal)"))
{
eDVBFrontendParametersSatellite parm;
float SDS_SNRE = snr << 16;
float snr_in_db;
oparm.getDVBS(parm);
if (parm.system == eDVBFrontendParametersSatellite::System_DVB_S) // DVB-S1 / QPSK
{
static float SNR_COEFF[6] = {
100.0 / 4194304.0,
-7136.0 / 4194304.0,
197418.0 / 4194304.0,
-2602183.0 / 4194304.0,
20377212.0 / 4194304.0,
-37791203.0 / 4194304.0,
};
float fval1 = 12.44714 - (2.0 * log10(SDS_SNRE / 256.0)),
fval2 = pow(10.0, fval1)-1;
fval1 = 10.0 * log10(fval2);
if (fval1 < 10.0)
{
fval2 = SNR_COEFF[0];
for (int i=1; i<6; ++i)
{
fval2 *= fval1;
fval2 += SNR_COEFF[i];
}
fval1 = fval2;
}
snr_in_db = fval1;
}
else
{
float fval1 = SDS_SNRE / 268435456.0,
fval2, fval3, fval4;
if (parm.modulation == eDVBFrontendParametersSatellite::Modulation_QPSK)
{
fval2 = 6.76;
fval3 = 4.35;
}
else // 8PSK
{
fval1 *= 0.5;
fval2 = 8.06;
fval3 = 6.18;
}
fval4 = -10.0 * log10(fval1);
fval1 = fval4;
for (int i=0; i < 5; ++i)
fval1 = fval4 - fval2 * log10(1.0+pow(10.0, (fval3-fval1)/fval2));
snr_in_db = fval1;
}
sat_max = 1750;
ret = (int)(snr_in_db * 100);
}
else if (strstr(m_description, "Alps BSBE1 C01A") ||
strstr(m_description, "Alps -S(STV0288)"))
{
if (snr == 0)
ret = 0;
else if (snr == 0xFFFF) // this should not happen
ret = 100*100;
else
{
enum { REALVAL, REGVAL };
const long CN_lookup[31][2] = {
{20,8900}, {25,8680}, {30,8420}, {35,8217}, {40,7897},
{50,7333}, {60,6747}, {70,6162}, {80,5580}, {90,5029},
{100,4529}, {110,4080}, {120,3685}, {130,3316}, {140,2982},
{150,2688}, {160,2418}, {170,2188}, {180,1982}, {190,1802},
{200,1663}, {210,1520}, {220,1400}, {230,1295}, {240,1201},
{250,1123}, {260,1058}, {270,1004}, {280,957}, {290,920},
{300,890}
};
int add=strchr(m_description, '.') ? 0xA250 : 0xA100;
long regval = 0xFFFF - ((snr / 3) + add), // revert some dvb api calculations to get the real register value
Imin=0,
Imax=30,
i;
if(INRANGE(CN_lookup[Imin][REGVAL],regval,CN_lookup[Imax][REGVAL]))
{
while((Imax-Imin)>1)
{
i=(Imax+Imin)/2;
if(INRANGE(CN_lookup[Imin][REGVAL],regval,CN_lookup[i][REGVAL]))
Imax = i;
else
Imin = i;
}
ret = (((regval - CN_lookup[Imin][REGVAL])
* (CN_lookup[Imax][REALVAL] - CN_lookup[Imin][REALVAL])
/ (CN_lookup[Imax][REGVAL] - CN_lookup[Imin][REGVAL]))
+ CN_lookup[Imin][REALVAL]) * 10;
}
else
ret = 100;
}
}
else if (!strcmp(m_description, "Alps BSBE1 702A") || // some frontends with STV0299
!strcmp(m_description, "Alps -S") ||
!strcmp(m_description, "Philips -S") ||
!strcmp(m_description, "LG -S") )
{
sat_max = 1500;
ret = (int)((snr-39075)/17.647);
}
else if (!strcmp(m_description, "Alps BSBE2"))
{
ret = (int)((snr >> 7) * 10);
}
else if (!strcmp(m_description, "Philips CU1216Mk3"))
{
eDVBFrontendParametersCable parm;
int mse = (~snr) & 0xFF;
oparm.getDVBC(parm);
switch (parm.modulation)
{
case eDVBFrontendParametersCable::Modulation_QAM16: ret = fe_udiv(1950000, (32 * mse) + 138) + 1000; break;
case eDVBFrontendParametersCable::Modulation_QAM32: ret = fe_udiv(2150000, (40 * mse) + 500) + 1350; break;
case eDVBFrontendParametersCable::Modulation_QAM64: ret = fe_udiv(2100000, (40 * mse) + 500) + 1250; break;
case eDVBFrontendParametersCable::Modulation_QAM128: ret = fe_udiv(1850000, (38 * mse) + 400) + 1380; break;
case eDVBFrontendParametersCable::Modulation_QAM256: ret = fe_udiv(1800000, (100 * mse) + 40) + 2030; break;
default: break;
}
}
else if (!strcmp(m_description, "Philips TU1216"))
{
snr = 0xFF - (snr & 0xFF);
if (snr != 0)
ret = 10 * (int)(-100 * (log10(snr) - log10(255)));
}
else if (strstr(m_description, "Si2166B")) // DM7080HD/DM7020HD/DM820/DM800se DVB-S2 Dual NIM
{
ret = (snr * 240) >> 8;
}
else if (strstr(m_description, "BCM4506") || strstr(m_description, "BCM4505"))
{
ret = (snr * 100) >> 8;
}
else if (!strcmp(m_description, "Vuplus DVB-S NIM(AVL2108)")) // VU+Ultimo/VU+Uno DVB-S2 NIM
{
ret = (int)((((double(snr) / (65536.0 / 100.0)) * 0.1600) + 0.2100) * 100);
}
else if (!strcmp(m_description, "Vuplus DVB-S NIM(AVL6222)")) // VU+ DVB-S2 Dual NIM
{
ret = (int)((((double(snr) / (65536.0 / 100.0)) * 0.1244) + 2.5079) * 100);
sat_max = 1490;
}
else if (!strcmp(m_description, "Vuplus DVB-S NIM(AVL6211)")) // VU+ DVB-S2 Dual NIM
{
ret = (int)((((double(snr) / (65536.0 / 100.0)) * 0.1244) + 2.5079) * 100);
}
else if (!strcmp(m_description, "BCM7335 DVB-S2 NIM (internal)")) // VU+DUO DVB-S2 NIM
{
ret = (int)((((double(snr) / (65536.0 / 100.0)) * 0.1244) + 2.5079) * 100);
}
else if (!strcmp(m_description, "BCM7346 (internal)")) // MaxDigital XP1000
{
ret = (int)((((double(snr) / (65536.0 / 100.0)) * 0.1880) + 0.1959) * 100);
}
else if (!strcmp(m_description, "BCM7356 DVB-S2 NIM (internal)")) // VU+ Solo2
{
ret = (int)((((double(snr) / (65536.0 / 100.0)) * 0.1800) - 1.0000) * 100);
}
else if (!strcmp(m_description, "Vuplus DVB-S NIM(7376 FBC)")) // VU+ Solo4k
{
ret = (int)((((double(snr) / (65536.0 / 100.0)) * 0.1480) + 0.9560) * 100);
}
else if (!strcmp(m_description, "BCM7362 (internal) DVB-S2")) // Xsarius
{
ret = (int)((((double(snr) / (65536.0 / 100.0)) * 0.28) - 10.0) * 100);
}
else if (!strcmp(m_description, "Genpix"))
{
ret = (int)((snr << 1) / 5);
}
else if (!strcmp(m_description, "CXD1981"))
{
eDVBFrontendParametersCable parm;
int mse = (~snr) & 0xFF;
oparm.getDVBC(parm);
switch (parm.modulation)
{
case eDVBFrontendParametersCable::Modulation_QAM16:
case eDVBFrontendParametersCable::Modulation_QAM64:
case eDVBFrontendParametersCable::Modulation_QAM256: ret = (int)(-950 * log(((double)mse) / 760)); break;
case eDVBFrontendParametersCable::Modulation_QAM32:
case eDVBFrontendParametersCable::Modulation_QAM128: ret = (int)(-875 * log(((double)mse) / 650)); break;
default: break;
}
}
else if (!strcmp(m_description, "Broadcom BCM73XX") ||
!strcmp(m_description, "FTS-260 (Montage RS6000)") ||
!strcmp(m_description, "Panasonic MN88472") ||
!strcmp(m_description, "Panasonic MN88473")) // xcore
{
ret = snr * 100 / 256;
if (!strcmp(m_description, "FTS-260 (Montage RS6000)"))
sat_max = 1490;
}
else if (!strcmp(m_description, "Si216x"))
{
eDVBFrontendParametersTerrestrial parm;
oparm.getDVBT(parm);
switch (parm.system)
{
case eDVBFrontendParametersTerrestrial::System_DVB_T:
case eDVBFrontendParametersTerrestrial::System_DVB_T2:
case eDVBFrontendParametersTerrestrial::System_DVB_T_T2: ret = (int)(snr / 58); ter_max = 1700; break;
default: break;
}
}
else if (strstr(m_description, "Sundtek DVB-T (III)")) // Sundtek MediaTV Digital Home III...dvb-t/t2 mode
{
ret = (int)(snr / 75);
ter_max = 1700;
}
else if (strstr(m_description, "Sundtek DVB-S/S2 (IV)"))
{
ret = (int)(snr / 40.5);
sat_max = 1900;
}
else if(!strcmp(m_description, "TBS-5925") || !strcmp(m_description, "DVBS2BOX"))
{
ret = (snr * 2000) / 0xFFFF;
sat_max = 2000;
}
else if(!strcmp(m_description, "WinTV HVR-850") || !strcmp(m_description, "Hauppauge"))
{
eDVBFrontendParametersATSC parm;
oparm.getATSC(parm);
switch (parm.modulation)
{
case eDVBFrontendParametersATSC::Modulation_QAM256: atsc_max = 4000; break;
case eDVBFrontendParametersATSC::Modulation_QAM64: atsc_max = 2900; break;
case eDVBFrontendParametersATSC::Modulation_VSB_8: atsc_max = 2700; break;
default: break;
}
ret = snr * 10;
}
signalqualitydb = ret;
if (ret == 0x12345678) // no SNR dB calculation available.. return the untouched SNR value
{
signalquality = snr;
}
else
{
int type = -1;
oparm.getSystem(type);
switch (type)
{
case feSatellite:
signalquality = (ret >= sat_max ? 65536 : ret * 65536 / sat_max);
break;
case feCable: // we assume a max of 42db here
signalquality = (ret >= 4200 ? 65536 : ret * 65536 / 4200);
break;
case feTerrestrial: // we assume a max of 29db here
signalquality = (ret >= ter_max ? 65536 : ret * 65536 / ter_max);
break;
case feATSC: // we assume a max of 42db here
signalquality = (ret >= atsc_max ? 65536 : ret * 65536 / atsc_max);
break;
}
}
}
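/* Read one value from the frontend: BER, raw SNR, signal quality / strength (preferring
   the DVB API >= 5.10 statistics properties, falling back to the legacy ioctls), lock and
   sync state, or the currently tuned frequency. */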
int eDVBFrontend::readFrontendData(int type)
{
switch(type)
{
case iFrontendInformation_ENUMS::bitErrorRate:
if (m_state == stateLock)
{
uint32_t ber=0;
if (!m_simulate)
{
if (ioctl(m_fd, FE_READ_BER, &ber) < 0 && errno != ERANGE)
eDebug("[eDVBFrontend] FE_READ_BER failed: %m");
}
return ber;
}
break;
case iFrontendInformation_ENUMS::snrValue:
if (m_state == stateLock)
{
uint16_t snr = 0;
if (!m_simulate)
{
if (ioctl(m_fd, FE_READ_SNR, &snr) < 0 && errno != ERANGE)
eDebug("[eDVBFrontend] FE_READ_SNR failed: %m");
}
return snr;
}
break;
case iFrontendInformation_ENUMS::signalQuality:
case iFrontendInformation_ENUMS::signalQualitydB: /* this moved into the driver on DVB API 5.10 */
if (m_state == stateLock)
{
int signalquality = 0;
int signalqualitydb = 0;
#if DVB_API_VERSION > 5 || DVB_API_VERSION == 5 && DVB_API_VERSION_MINOR >= 10
if (m_dvbversion >= DVB_VERSION(5, 10))
{
dtv_property prop[1];
prop[0].cmd = DTV_STAT_CNR;
dtv_properties props;
props.props = prop;
props.num = 1;
if (::ioctl(m_fd, FE_GET_PROPERTY, &props) < 0 && errno != ERANGE)
{
eDebug("[eDVBFrontend] DTV_STAT_CNR failed: %m");
}
else
{
for(unsigned int i=0; i<prop[0].u.st.len; i++)
{
if (prop[0].u.st.stat[i].scale == FE_SCALE_DECIBEL &&
type == iFrontendInformation_ENUMS::signalQualitydB)
{
signalqualitydb = prop[0].u.st.stat[i].svalue / 10;
return signalqualitydb;
}
else if (prop[0].u.st.stat[i].scale == FE_SCALE_RELATIVE &&
type == iFrontendInformation_ENUMS::signalQuality)
{
signalquality = prop[0].u.st.stat[i].svalue;
return signalquality;
}
}
}
}
#endif
/* fallback to old DVB API */
int snr = readFrontendData(iFrontendInformation_ENUMS::snrValue);
calculateSignalQuality(snr, signalquality, signalqualitydb);
if (type == iFrontendInformation_ENUMS::signalQuality)
{
return signalquality;
}
else
{
return signalqualitydb;
}
}
break;
case iFrontendInformation_ENUMS::signalPower:
if (m_state == stateLock)
{
uint16_t strength=0;
if (!m_simulate)
{
#if DVB_API_VERSION > 5 || DVB_API_VERSION == 5 && DVB_API_VERSION_MINOR >= 10
if (m_dvbversion >= DVB_VERSION(5, 10))
{
dtv_property prop[1];
prop[0].cmd = DTV_STAT_SIGNAL_STRENGTH;
dtv_properties props;
props.props = prop;
props.num = 1;
if (::ioctl(m_fd, FE_GET_PROPERTY, &props) < 0 && errno != ERANGE)
{
eDebug("[eDVBFrontend] DTV_STAT_SIGNAL_STRENGTH failed: %m");
}
else
{
for(unsigned int i=0; i<prop[0].u.st.len; i++)
{
if (prop[0].u.st.stat[i].scale == FE_SCALE_RELATIVE)
strength = prop[0].u.st.stat[i].uvalue;
}
}
}
#endif
// fallback to old DVB API
if (!strength && ioctl(m_fd, FE_READ_SIGNAL_STRENGTH, &strength) < 0 && errno != ERANGE)
eDebug("[eDVBFrontend] FE_READ_SIGNAL_STRENGTH failed: %m");
}
return strength;
}
break;
case iFrontendInformation_ENUMS::lockState:
return !!(readFrontendData(iFrontendInformation_ENUMS::frontendStatus) & FE_HAS_LOCK);
case iFrontendInformation_ENUMS::syncState:
return !!(readFrontendData(iFrontendInformation_ENUMS::frontendStatus) & FE_HAS_SYNC);
case iFrontendInformation_ENUMS::frontendNumber:
return m_slotid;
case iFrontendInformation_ENUMS::frontendStatus:
{
fe_status_t status;
if (!m_simulate)
{
if ( ioctl(m_fd, FE_READ_STATUS, &status) < 0 && errno != ERANGE )
eDebug("[eDVBFrontend] FE_READ_STATUS failed: %m");
return (int)status;
}
return (FE_HAS_SYNC | FE_HAS_LOCK);
}
case iFrontendInformation_ENUMS::frequency:
{
struct dtv_property p;
struct dtv_properties cmdseq;
oparm.getSystem(type);
cmdseq.props = &p;
cmdseq.num = 1;
p.cmd = DTV_FREQUENCY;
if (ioctl(m_fd, FE_GET_PROPERTY, &cmdseq) < 0)
{
return 0;
}
return type == feSatellite ? p.u.data + m_data[FREQ_OFFSET] : p.u.data;
}
}
return 0;
}
void eDVBFrontend::getFrontendStatus(ePtr<iDVBFrontendStatus> &dest)
{
ePtr<eDVBFrontend> fe = this;
dest = new eDVBFrontendStatus(fe);
}
void eDVBFrontend::getTransponderData(ePtr<iDVBTransponderData> &dest, bool original)
{
int type = -1;
struct dtv_property p[16];
struct dtv_properties cmdseq;
oparm.getSystem(type);
cmdseq.props = p;
cmdseq.num = 0;
if (m_simulate || m_fd == -1 || original)
{
original = true;
}
else
{
p[cmdseq.num++].cmd = DTV_DELIVERY_SYSTEM;
p[cmdseq.num++].cmd = DTV_FREQUENCY;
p[cmdseq.num++].cmd = DTV_INVERSION;
p[cmdseq.num++].cmd = DTV_MODULATION;
if (type == feSatellite)
{
p[cmdseq.num++].cmd = DTV_SYMBOL_RATE;
p[cmdseq.num++].cmd = DTV_INNER_FEC;
p[cmdseq.num++].cmd = DTV_ROLLOFF;
p[cmdseq.num++].cmd = DTV_PILOT;
}
else if (type == feCable)
{
p[cmdseq.num++].cmd = DTV_SYMBOL_RATE;
p[cmdseq.num++].cmd = DTV_INNER_FEC;
}
else if (type == feTerrestrial)
{
p[cmdseq.num++].cmd = DTV_BANDWIDTH_HZ;
p[cmdseq.num++].cmd = DTV_CODE_RATE_HP;
p[cmdseq.num++].cmd = DTV_CODE_RATE_LP;
p[cmdseq.num++].cmd = DTV_TRANSMISSION_MODE;
p[cmdseq.num++].cmd = DTV_GUARD_INTERVAL;
p[cmdseq.num++].cmd = DTV_HIERARCHY;
}
else if (type == feATSC)
{
}
if (ioctl(m_fd, FE_GET_PROPERTY, &cmdseq) < 0)
{
eDebug("[eDVBFrontend] FE_GET_PROPERTY failed: %m");
original = true;
}
}
switch (type)
{
case feSatellite:
{
eDVBFrontendParametersSatellite s;
oparm.getDVBS(s);
dest = new eDVBSatelliteTransponderData(cmdseq.props, cmdseq.num, s, m_data[FREQ_OFFSET], original);
break;
}
case feCable:
{
eDVBFrontendParametersCable c;
oparm.getDVBC(c);
dest = new eDVBCableTransponderData(cmdseq.props, cmdseq.num, c, original);
break;
}
case feTerrestrial:
{
eDVBFrontendParametersTerrestrial t;
oparm.getDVBT(t);
dest = new eDVBTerrestrialTransponderData(cmdseq.props, cmdseq.num, t, original);
break;
}
case feATSC:
{
eDVBFrontendParametersATSC a;
oparm.getATSC(a);
dest = new eDVBATSCTransponderData(cmdseq.props, cmdseq.num, a, original);
break;
}
}
}
void eDVBFrontend::getFrontendData(ePtr<iDVBFrontendData> &dest)
{
ePtr<eDVBFrontend> fe = this;
dest = new eDVBFrontendData(fe);
}
#ifndef FP_IOCTL_GET_ID
#define FP_IOCTL_GET_ID 0
#endif
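/* Read the LNB input power (used to detect rotor movement), trying the per-tuner /proc
   interfaces first and falling back to the front processor device /dev/dbox/fp0. */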
int eDVBFrontend::readInputpower()
{
if (m_simulate)
return 0;
int power=m_slotid; // pass the slot id so the input power is read from the correct tuner
char proc_name[64];
sprintf(proc_name, "/proc/stb/frontend/%d/lnb_sense", m_slotid);
if (CFile::parseInt(&power, proc_name) == 0)
return power;
sprintf(proc_name, "/proc/stb/fp/lnb_sense%d", m_slotid);
if (CFile::parseInt(&power, proc_name) == 0)
return power;
// open front processor
int fp=::open("/dev/dbox/fp0", O_RDWR);
if (fp < 0)
{
eDebug("[eDVBFrontend] Failed to open /dev/dbox/fp0");
return -1;
}
static bool old_fp = (::ioctl(fp, FP_IOCTL_GET_ID) < 0);
if ( ioctl( fp, old_fp ? 9 : 0x100, &power ) < 0 )
{
eDebug("[eDVBFrontend] FP_IOCTL_GET_LNB_CURRENT failed: %m");
power = -1;
}
::close(fp);
return power;
}
bool eDVBFrontend::setSecSequencePos(int steps)
{
eDebugNoSimulate("[eDVBFrontend] set sequence pos %d", steps);
if (!steps)
return false;
while( steps > 0 )
{
if (m_sec_sequence.current() != m_sec_sequence.end())
++m_sec_sequence.current();
--steps;
}
while( steps < 0 )
{
if (m_sec_sequence.current() != m_sec_sequence.begin() && m_sec_sequence.current() != m_sec_sequence.end())
--m_sec_sequence.current();
++steps;
}
return true;
}
void eDVBFrontend::tuneLoop()
{
tuneLoopInt();
}
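/* Execute the SEC command sequence one step per call: voltage and tone switching, DiSEqC
   and toneburst commands, rotor movement detection and the final SET_FRONTEND. Returns the
   delay in ms until the next step, or -1 once the sequence has finished. */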
int eDVBFrontend::tuneLoopInt() // called by m_tuneTimer
{
int delay=-1;
eDVBFrontend *sec_fe = this;
eDVBRegisteredFrontend *regFE = 0;
long tmp = m_data[LINKED_PREV_PTR];
while ( tmp != -1 )
{
eDVBRegisteredFrontend *prev = (eDVBRegisteredFrontend *)tmp;
sec_fe = prev->m_frontend;
tmp = prev->m_frontend->m_data[LINKED_PREV_PTR];
if (tmp == -1 && sec_fe != this && !prev->m_inuse) {
int state = sec_fe->m_state;
// workaround to put the kernel frontend thread into idle state!
if (state != eDVBFrontend::stateIdle && state != stateClosed)
{
sec_fe->closeFrontend(true);
state = sec_fe->m_state;
}
// sec_fe is closed... we must reopen it here..
if (state == stateClosed)
{
regFE = prev;
prev->inc_use();
}
}
}
if ( m_sec_sequence && m_sec_sequence.current() != m_sec_sequence.end() )
{
long *sec_fe_data = sec_fe->m_data;
// eDebugNoSimulate("[eDVBFrontend] tuneLoop %d\n", m_sec_sequence.current()->cmd);
delay = 0;
switch (m_sec_sequence.current()->cmd)
{
case eSecCommand::SLEEP:
delay = m_sec_sequence.current()++->msec;
eDebugNoSimulate("[eDVBFrontend] sleep %dms", delay);
break;
case eSecCommand::GOTO:
if ( !setSecSequencePos(m_sec_sequence.current()->steps) )
++m_sec_sequence.current();
break;
case eSecCommand::SET_VOLTAGE:
{
int voltage = m_sec_sequence.current()++->voltage;
eDebugNoSimulate("[eDVBFrontend] setVoltage %d", voltage);
sec_fe->setVoltage(voltage);
break;
}
case eSecCommand::IF_VOLTAGE_GOTO:
{
eSecCommand::pair &compare = m_sec_sequence.current()->compare;
if ( compare.voltage == sec_fe_data[CUR_VOLTAGE] && setSecSequencePos(compare.steps) )
break;
++m_sec_sequence.current();
break;
}
case eSecCommand::IF_NOT_VOLTAGE_GOTO:
{
eSecCommand::pair &compare = m_sec_sequence.current()->compare;
if ( compare.voltage != sec_fe_data[CUR_VOLTAGE] && setSecSequencePos(compare.steps) )
break;
++m_sec_sequence.current();
break;
}
case eSecCommand::IF_TONE_GOTO:
{
eSecCommand::pair &compare = m_sec_sequence.current()->compare;
if ( compare.tone == sec_fe_data[CUR_TONE] && setSecSequencePos(compare.steps) )
break;
++m_sec_sequence.current();
break;
}
case eSecCommand::IF_NOT_TONE_GOTO:
{
eSecCommand::pair &compare = m_sec_sequence.current()->compare;
if ( compare.tone != sec_fe_data[CUR_TONE] && setSecSequencePos(compare.steps) )
break;
++m_sec_sequence.current();
break;
}
case eSecCommand::SET_TONE:
eDebugNoSimulate("[eDVBFrontend] setTone %d", m_sec_sequence.current()->tone);
sec_fe->setTone(m_sec_sequence.current()++->tone);
break;
case eSecCommand::SEND_DISEQC:
sec_fe->sendDiseqc(m_sec_sequence.current()->diseqc);
eDebugNoSimulateNoNewLineStart("[eDVBFrontend] sendDiseqc: ");
for (int i=0; i < m_sec_sequence.current()->diseqc.len; ++i)
eDebugNoNewLine("%02x", m_sec_sequence.current()->diseqc.data[i]);
if (!memcmp(m_sec_sequence.current()->diseqc.data, "\xE0\x00\x00", 3))
eDebugNoNewLine("(DiSEqC reset)\n");
else if (!memcmp(m_sec_sequence.current()->diseqc.data, "\xE0\x00\x03", 3))
eDebugNoNewLine("(DiSEqC peripherial power on)\n");
else
eDebugNoNewLine("(?)\n");
++m_sec_sequence.current();
break;
case eSecCommand::SEND_TONEBURST:
eDebugNoSimulate("[eDVBFrontend] sendToneburst: %d", m_sec_sequence.current()->toneburst);
sec_fe->sendToneburst(m_sec_sequence.current()++->toneburst);
break;
case eSecCommand::SET_FRONTEND:
{
int enableEvents = (m_sec_sequence.current()++)->val;
eDebugNoSimulate("[eDVBFrontend] setFrontend %d", enableEvents);
setFrontend(enableEvents);
break;
}
case eSecCommand::START_TUNE_TIMEOUT:
{
int tuneTimeout = m_sec_sequence.current()->timeout;
eDebugNoSimulate("[eDVBFrontend] startTuneTimeout %d", tuneTimeout);
if (!m_simulate)
m_timeout->start(tuneTimeout, 1);
++m_sec_sequence.current();
break;
}
case eSecCommand::SET_TIMEOUT:
m_timeoutCount = m_sec_sequence.current()++->val;
eDebugNoSimulate("[eDVBFrontend] set timeout %d", m_timeoutCount);
break;
case eSecCommand::IF_TIMEOUT_GOTO:
if (!m_timeoutCount)
{
eDebugNoSimulate("[eDVBFrontend] rotor timout");
setSecSequencePos(m_sec_sequence.current()->steps);
}
else
++m_sec_sequence.current();
break;
case eSecCommand::MEASURE_IDLE_INPUTPOWER:
{
int idx = m_sec_sequence.current()++->val;
if ( idx == 0 || idx == 1 )
{
m_idleInputpower[idx] = sec_fe->readInputpower();
eDebugNoSimulate("[eDVBFrontend] idleInputpower[%d] is %d", idx, m_idleInputpower[idx]);
}
else
eDebugNoSimulate("[eDVBFrontend] idleInputpower measure index(%d) out of bound !!!", idx);
break;
}
case eSecCommand::IF_MEASURE_IDLE_WAS_NOT_OK_GOTO:
{
eSecCommand::pair &compare = m_sec_sequence.current()->compare;
int idx = compare.val;
if ( !m_simulate && (idx == 0 || idx == 1) )
{
int idle = sec_fe->readInputpower();
int diff = abs(idle-m_idleInputpower[idx]);
if ( diff > 0)
{
eDebugNoSimulate("[eDVBFrontend] measure idle(%d) was not okay.. (%d - %d = %d) retry", idx, m_idleInputpower[idx], idle, diff);
setSecSequencePos(compare.steps);
break;
}
}
++m_sec_sequence.current();
break;
}
case eSecCommand::IF_TUNER_LOCKED_GOTO:
{
eSecCommand::rotor &cmd = m_sec_sequence.current()->measure;
if (m_simulate)
{
setSecSequencePos(cmd.steps);
break;
}
int signal = 0;
int isLocked = readFrontendData(iFrontendInformation_ENUMS::lockState);
m_idleInputpower[0] = m_idleInputpower[1] = 0;
--m_timeoutCount;
if (!m_timeoutCount && m_retryCount > 0)
--m_retryCount;
if (isLocked && ((abs((signal = readFrontendData(iFrontendInformation_ENUMS::signalQualitydB)) - cmd.lastSignal) < 40) || !cmd.lastSignal))
{
if (cmd.lastSignal)
eDebugNoSimulate("[eDVBFrontend] locked step %d ok (%d %d)", cmd.okcount, signal, cmd.lastSignal);
else
{
eDebugNoSimulate("[eDVBFrontend] locked step %d ok", cmd.okcount);
if (!cmd.okcount)
cmd.lastSignal = signal;
}
++cmd.okcount;
if (cmd.okcount > 4)
{
eDebugNoSimulate("[eDVBFrontend] ok > 4 .. goto %d\n", cmd.steps);
setSecSequencePos(cmd.steps);
m_state = stateLock;
m_stateChanged(this);
feEvent(-1); // flush events
m_sn->start();
break;
}
}
else
{
if (isLocked)
eDebugNoSimulate("[eDVBFrontend] rotor locked step %d failed (oldSignal %d, curSignal %d)", cmd.okcount, signal, cmd.lastSignal);
else
eDebugNoSimulate("[eDVBFrontend] rotor locked step %d failed (not locked)", cmd.okcount);
cmd.okcount=0;
cmd.lastSignal=0;
}
++m_sec_sequence.current();
break;
}
case eSecCommand::MEASURE_RUNNING_INPUTPOWER:
m_runningInputpower = sec_fe->readInputpower();
eDebugNoSimulate("[eDVBFrontend] runningInputpower is %d", m_runningInputpower);
++m_sec_sequence.current();
break;
case eSecCommand::SET_ROTOR_MOVING:
if (!m_simulate)
m_sec->setRotorMoving(m_slotid, true);
++m_sec_sequence.current();
break;
case eSecCommand::SET_ROTOR_STOPPED:
if (!m_simulate)
m_sec->setRotorMoving(m_slotid, false);
++m_sec_sequence.current();
break;
case eSecCommand::IF_INPUTPOWER_DELTA_GOTO:
{
eSecCommand::rotor &cmd = m_sec_sequence.current()->measure;
if (m_simulate)
{
setSecSequencePos(cmd.steps);
break;
}
int idleInputpower = m_idleInputpower[ (sec_fe_data[CUR_VOLTAGE]&1) ? 0 : 1];
const char *txt = cmd.direction ? "running" : "stopped";
--m_timeoutCount;
if (!m_timeoutCount && m_retryCount > 0)
--m_retryCount;
eDebugNoSimulate("[eDVBFrontend] waiting for rotor %s %d, idle %d, delta %d",
txt,
m_runningInputpower,
idleInputpower,
cmd.deltaA);
if ( (cmd.direction && abs(m_runningInputpower - idleInputpower) >= cmd.deltaA)
|| (!cmd.direction && abs(m_runningInputpower - idleInputpower) <= cmd.deltaA) )
{
++cmd.okcount;
eDebugNoSimulate("[eDVBFrontend] rotor %s step %d ok", txt, cmd.okcount);
if ( cmd.okcount > 6 )
{
eDebugNoSimulate("[eDVBFrontend] rotor is %s", txt);
if (setSecSequencePos(cmd.steps))
break;
}
}
else
{
eDebugNoSimulate("[eDVBFrontend] rotor not %s... reset counter.. increase timeout", txt);
cmd.okcount=0;
}
++m_sec_sequence.current();
break;
}
case eSecCommand::IF_ROTORPOS_VALID_GOTO:
if (sec_fe_data[ROTOR_CMD] != -1 && sec_fe_data[ROTOR_POS] != -1)
setSecSequencePos(m_sec_sequence.current()->steps);
else
++m_sec_sequence.current();
break;
case eSecCommand::INVALIDATE_CURRENT_SWITCHPARMS:
eDebugNoSimulate("[eDVBFrontend] invalidate current switch params");
sec_fe_data[CSW] = -1;
sec_fe_data[UCSW] = -1;
sec_fe_data[TONEBURST] = -1;
++m_sec_sequence.current();
break;
case eSecCommand::UPDATE_CURRENT_SWITCHPARMS:
sec_fe_data[CSW] = sec_fe_data[NEW_CSW];
sec_fe_data[UCSW] = sec_fe_data[NEW_UCSW];
sec_fe_data[TONEBURST] = sec_fe_data[NEW_TONEBURST];
eDebugNoSimulate("[eDVBFrontend] update current switch params");
++m_sec_sequence.current();
break;
case eSecCommand::INVALIDATE_CURRENT_ROTORPARMS:
eDebugNoSimulate("[eDVBFrontend] invalidate current rotorparams");
sec_fe_data[ROTOR_CMD] = -1;
sec_fe_data[ROTOR_POS] = -1;
++m_sec_sequence.current();
break;
case eSecCommand::UPDATE_CURRENT_ROTORPARAMS:
sec_fe_data[ROTOR_CMD] = sec_fe_data[NEW_ROTOR_CMD];
sec_fe_data[ROTOR_POS] = sec_fe_data[NEW_ROTOR_POS];
eDebugNoSimulate("[eDVBFrontend] update current rotorparams %d %04lx %ld", m_timeoutCount, sec_fe_data[ROTOR_CMD], sec_fe_data[ROTOR_POS]);
++m_sec_sequence.current();
break;
case eSecCommand::SET_ROTOR_DISEQC_RETRYS:
m_retryCount = m_sec_sequence.current()++->val;
eDebugNoSimulate("[eDVBFrontend] set rotor retries %d", m_retryCount);
break;
case eSecCommand::IF_NO_MORE_ROTOR_DISEQC_RETRYS_GOTO:
if (!m_retryCount)
{
eDebugNoSimulate("[eDVBFrontend] no more rotor retrys");
setSecSequencePos(m_sec_sequence.current()->steps);
}
else
++m_sec_sequence.current();
break;
case eSecCommand::SET_POWER_LIMITING_MODE:
{
if (!m_simulate)
{
char proc_name[64];
sprintf(proc_name, "/proc/stb/frontend/%d/static_current_limiting", sec_fe->m_dvbid);
CFile f(proc_name, "w");
if (f) // does the new interface exist?
{
bool slimiting = m_sec_sequence.current()->mode == eSecCommand::modeStatic;
if (fprintf(f, "%s", slimiting ? "on" : "off") <= 0)
eDebugNoSimulate("[eDVBFrontend] write %s failed: %m", proc_name);
else
eDebugNoSimulate("[eDVBFrontend] set %s current limiting", slimiting ? "static" : "dynamic");
}
else if (sec_fe->m_need_rotor_workaround)
{
char dev[16];
int slotid = sec_fe->m_slotid;
// FIXME: hardcoded i2c devices for dm7025 and dm8000
if (slotid < 2)
sprintf(dev, "/dev/i2c-%d", slotid);
else if (slotid == 2)
sprintf(dev, "/dev/i2c-2"); // first nim socket on DM8000 use /dev/i2c-2
else if (slotid == 3)
sprintf(dev, "/dev/i2c-4"); // second nim socket on DM8000 use /dev/i2c-4
int fd = ::open(dev, O_RDWR);
if (fd >= 0)
{
unsigned char data[2];
::ioctl(fd, I2C_SLAVE_FORCE, 0x10 >> 1);
if(::read(fd, data, 1) != 1)
eDebugNoSimulate("[eDVBFrontend] error read lnbp: %m");
if ( m_sec_sequence.current()->mode == eSecCommand::modeStatic )
{
data[0] |= 0x80; // enable static current limiting
eDebugNoSimulate("[eDVBFrontend] set static current limiting");
}
else
{
data[0] &= ~0x80; // enable dynamic current limiting
eDebugNoSimulate("[eDVBFrontend] set dynamic current limiting");
}
if(::write(fd, data, 1) != 1)
eDebugNoSimulate("[eDVBFrontend] error write lnbp: %m");
::close(fd);
}
}
}
++m_sec_sequence.current();
break;
}
case eSecCommand::DELAYED_CLOSE_FRONTEND:
{
eDebugNoSimulate("[eDVBFrontend] delayed close frontend");
closeFrontend(false, true);
++m_sec_sequence.current();
break;
}
default:
eDebugNoSimulate("[eDVBFrontend] unhandled sec command %d",
++m_sec_sequence.current()->cmd);
++m_sec_sequence.current();
}
if (!m_simulate)
m_tuneTimer->start(delay,true);
}
if (regFE)
regFE->dec_use();
if (m_simulate && m_sec_sequence.current() != m_sec_sequence.end())
tuneLoop();
return delay;
}
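/* Translate the prepared tuning parameters (oparm) into a DTV property list for the
   selected delivery system and hand it to the driver via FE_SET_PROPERTY with DTV_TUNE. */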
void eDVBFrontend::setFrontend(bool recvEvents)
{
if (!m_simulate)
{
int type = -1;
oparm.getSystem(type);
eDebug("[eDVBFrontend] setting frontend %d", m_dvbid);
if (recvEvents)
m_sn->start();
feEvent(-1); // flush events
struct dtv_property p[16];
struct dtv_properties cmdseq;
cmdseq.props = p;
cmdseq.num = 0;
p[cmdseq.num].cmd = DTV_CLEAR, cmdseq.num++;
if (type == iDVBFrontend::feSatellite)
{
eDVBFrontendParametersSatellite parm;
fe_rolloff_t rolloff = ROLLOFF_35;
fe_pilot_t pilot = PILOT_OFF;
fe_modulation_t modulation = QPSK;
fe_delivery_system_t system = SYS_DVBS;
oparm.getDVBS(parm);
switch (parm.system)
{
default:
case eDVBFrontendParametersSatellite::System_DVB_S: system = SYS_DVBS; break;
case eDVBFrontendParametersSatellite::System_DVB_S2: system = SYS_DVBS2; break;
}
p[cmdseq.num].cmd = DTV_DELIVERY_SYSTEM, p[cmdseq.num].u.data = system, cmdseq.num++;
p[cmdseq.num].cmd = DTV_INVERSION;
switch (parm.inversion)
{
case eDVBFrontendParametersSatellite::Inversion_Off: p[cmdseq.num].u.data = INVERSION_OFF; break;
case eDVBFrontendParametersSatellite::Inversion_On: p[cmdseq.num].u.data = INVERSION_ON; break;
default:
case eDVBFrontendParametersSatellite::Inversion_Unknown: p[cmdseq.num].u.data = INVERSION_AUTO; break;
}
cmdseq.num++;
switch (parm.modulation)
{
case eDVBFrontendParametersSatellite::Modulation_QPSK: modulation = QPSK; break;
case eDVBFrontendParametersSatellite::Modulation_8PSK: modulation = PSK_8; break;
case eDVBFrontendParametersSatellite::Modulation_QAM16: modulation = QAM_16; break;
case eDVBFrontendParametersSatellite::Modulation_16APSK: modulation = APSK_16; break;
case eDVBFrontendParametersSatellite::Modulation_32APSK: modulation = APSK_32; break;
}
switch (parm.pilot)
{
case eDVBFrontendParametersSatellite::Pilot_Off: pilot = PILOT_OFF; break;
case eDVBFrontendParametersSatellite::Pilot_On: pilot = PILOT_ON; break;
default:
case eDVBFrontendParametersSatellite::Pilot_Unknown: pilot = PILOT_AUTO; break;
}
switch (parm.rolloff)
{
case eDVBFrontendParametersSatellite::RollOff_alpha_0_20: rolloff = ROLLOFF_20; break;
case eDVBFrontendParametersSatellite::RollOff_alpha_0_25: rolloff = ROLLOFF_25; break;
case eDVBFrontendParametersSatellite::RollOff_alpha_0_35: rolloff = ROLLOFF_35; break;
default:
case eDVBFrontendParametersSatellite::RollOff_auto: rolloff = ROLLOFF_AUTO; break;
}
p[cmdseq.num].cmd = DTV_FREQUENCY, p[cmdseq.num].u.data = satfrequency, cmdseq.num++;
p[cmdseq.num].cmd = DTV_MODULATION, p[cmdseq.num].u.data = modulation, cmdseq.num++;
p[cmdseq.num].cmd = DTV_SYMBOL_RATE, p[cmdseq.num].u.data = parm.symbol_rate, cmdseq.num++;
p[cmdseq.num].cmd = DTV_INNER_FEC;
switch (parm.fec)
{
case eDVBFrontendParametersSatellite::FEC_1_2: p[cmdseq.num].u.data = FEC_1_2; break;
case eDVBFrontendParametersSatellite::FEC_2_3: p[cmdseq.num].u.data = FEC_2_3; break;
case eDVBFrontendParametersSatellite::FEC_3_4: p[cmdseq.num].u.data = FEC_3_4; break;
case eDVBFrontendParametersSatellite::FEC_3_5: p[cmdseq.num].u.data = FEC_3_5; break;
case eDVBFrontendParametersSatellite::FEC_4_5: p[cmdseq.num].u.data = FEC_4_5; break;
case eDVBFrontendParametersSatellite::FEC_5_6: p[cmdseq.num].u.data = FEC_5_6; break;
case eDVBFrontendParametersSatellite::FEC_6_7: p[cmdseq.num].u.data = FEC_6_7; break;
case eDVBFrontendParametersSatellite::FEC_7_8: p[cmdseq.num].u.data = FEC_7_8; break;
case eDVBFrontendParametersSatellite::FEC_8_9: p[cmdseq.num].u.data = FEC_8_9; break;
case eDVBFrontendParametersSatellite::FEC_9_10: p[cmdseq.num].u.data = FEC_9_10; break;
case eDVBFrontendParametersSatellite::FEC_None: p[cmdseq.num].u.data = FEC_NONE; break;
default:
case eDVBFrontendParametersSatellite::FEC_Auto: p[cmdseq.num].u.data = FEC_AUTO; break;
}
cmdseq.num++;
if (system == SYS_DVBS2)
{
p[cmdseq.num].cmd = DTV_ROLLOFF, p[cmdseq.num].u.data = rolloff, cmdseq.num++;
p[cmdseq.num].cmd = DTV_PILOT, p[cmdseq.num].u.data = pilot, cmdseq.num++;
}
}
else if (type == iDVBFrontend::feCable)
{
eDVBFrontendParametersCable parm;
oparm.getDVBC(parm);
p[cmdseq.num].cmd = DTV_DELIVERY_SYSTEM;
#if DVB_API_VERSION > 5 || DVB_API_VERSION == 5 && DVB_API_VERSION_MINOR >= 6
if (m_dvbversion >= DVB_VERSION(5, 6))
{
switch (parm.system)
{
default:
case eDVBFrontendParametersCable::System_DVB_C_ANNEX_A: p[cmdseq.num].u.data = SYS_DVBC_ANNEX_A; break;
case eDVBFrontendParametersCable::System_DVB_C_ANNEX_C: p[cmdseq.num].u.data = SYS_DVBC_ANNEX_C; break;
}
}
else
{
p[cmdseq.num].u.data = SYS_DVBC_ANNEX_A; /* old value for SYS_DVBC_ANNEX_AC */
}
#else
p[cmdseq.num].u.data = SYS_DVBC_ANNEX_AC;
#endif
cmdseq.num++;
p[cmdseq.num].cmd = DTV_FREQUENCY, p[cmdseq.num].u.data = parm.frequency * 1000, cmdseq.num++;
p[cmdseq.num].cmd = DTV_INVERSION;
switch (parm.inversion)
{
case eDVBFrontendParametersCable::Inversion_Off: p[cmdseq.num].u.data = INVERSION_OFF; break;
case eDVBFrontendParametersCable::Inversion_On: p[cmdseq.num].u.data = INVERSION_ON; break;
default:
case eDVBFrontendParametersCable::Inversion_Unknown: p[cmdseq.num].u.data = INVERSION_AUTO; break;
}
cmdseq.num++;
p[cmdseq.num].cmd = DTV_SYMBOL_RATE, p[cmdseq.num].u.data = parm.symbol_rate, cmdseq.num++;
p[cmdseq.num].cmd = DTV_INNER_FEC;
switch (parm.fec_inner)
{
default:
case eDVBFrontendParametersCable::FEC_Auto: p[cmdseq.num].u.data = FEC_AUTO; break;
case eDVBFrontendParametersCable::FEC_1_2: p[cmdseq.num].u.data = FEC_1_2; break;
case eDVBFrontendParametersCable::FEC_2_3: p[cmdseq.num].u.data = FEC_2_3; break;
case eDVBFrontendParametersCable::FEC_3_4: p[cmdseq.num].u.data = FEC_3_4; break;
case eDVBFrontendParametersCable::FEC_5_6: p[cmdseq.num].u.data = FEC_5_6; break;
case eDVBFrontendParametersCable::FEC_7_8: p[cmdseq.num].u.data = FEC_7_8; break;
case eDVBFrontendParametersCable::FEC_8_9: p[cmdseq.num].u.data = FEC_8_9; break;
case eDVBFrontendParametersCable::FEC_3_5: p[cmdseq.num].u.data = FEC_3_5; break;
case eDVBFrontendParametersCable::FEC_4_5: p[cmdseq.num].u.data = FEC_4_5; break;
case eDVBFrontendParametersCable::FEC_9_10: p[cmdseq.num].u.data = FEC_9_10; break;
}
cmdseq.num++;
p[cmdseq.num].cmd = DTV_MODULATION;
switch (parm.modulation)
{
default:
case eDVBFrontendParametersCable::Modulation_Auto: p[cmdseq.num].u.data = QAM_AUTO; break;
case eDVBFrontendParametersCable::Modulation_QAM16: p[cmdseq.num].u.data = QAM_16; break;
case eDVBFrontendParametersCable::Modulation_QAM32: p[cmdseq.num].u.data = QAM_32; break;
case eDVBFrontendParametersCable::Modulation_QAM64: p[cmdseq.num].u.data = QAM_64; break;
case eDVBFrontendParametersCable::Modulation_QAM128: p[cmdseq.num].u.data = QAM_128; break;
case eDVBFrontendParametersCable::Modulation_QAM256: p[cmdseq.num].u.data = QAM_256; break;
}
cmdseq.num++;
}
else if (type == iDVBFrontend::feTerrestrial)
{
eDVBFrontendParametersTerrestrial parm;
fe_delivery_system_t system = SYS_DVBT;
oparm.getDVBT(parm);
switch (parm.system)
{
default:
case eDVBFrontendParametersTerrestrial::System_DVB_T: system = SYS_DVBT; break;
case eDVBFrontendParametersTerrestrial::System_DVB_T2: system = SYS_DVBT2; break;
}
p[cmdseq.num].cmd = DTV_DELIVERY_SYSTEM, p[cmdseq.num].u.data = system, cmdseq.num++;
p[cmdseq.num].cmd = DTV_FREQUENCY, p[cmdseq.num].u.data = parm.frequency, cmdseq.num++;
p[cmdseq.num].cmd = DTV_INVERSION;
switch (parm.inversion)
{
case eDVBFrontendParametersTerrestrial::Inversion_Off: p[cmdseq.num].u.data = INVERSION_OFF; break;
case eDVBFrontendParametersTerrestrial::Inversion_On: p[cmdseq.num].u.data = INVERSION_ON; break;
default:
case eDVBFrontendParametersTerrestrial::Inversion_Unknown: p[cmdseq.num].u.data = INVERSION_AUTO; break;
}
cmdseq.num++;
p[cmdseq.num].cmd = DTV_CODE_RATE_LP;
switch (parm.code_rate_LP)
{
case eDVBFrontendParametersTerrestrial::FEC_1_2: p[cmdseq.num].u.data = FEC_1_2; break;
case eDVBFrontendParametersTerrestrial::FEC_2_3: p[cmdseq.num].u.data = FEC_2_3; break;
case eDVBFrontendParametersTerrestrial::FEC_3_4: p[cmdseq.num].u.data = FEC_3_4; break;
case eDVBFrontendParametersTerrestrial::FEC_5_6: p[cmdseq.num].u.data = FEC_5_6; break;
case eDVBFrontendParametersTerrestrial::FEC_6_7: p[cmdseq.num].u.data = FEC_6_7; break;
case eDVBFrontendParametersTerrestrial::FEC_7_8: p[cmdseq.num].u.data = FEC_7_8; break;
case eDVBFrontendParametersTerrestrial::FEC_8_9: p[cmdseq.num].u.data = FEC_8_9; break;
default:
case eDVBFrontendParametersTerrestrial::FEC_Auto: p[cmdseq.num].u.data = FEC_AUTO; break;
}
cmdseq.num++;
p[cmdseq.num].cmd = DTV_CODE_RATE_HP;
switch (parm.code_rate_HP)
{
case eDVBFrontendParametersTerrestrial::FEC_1_2: p[cmdseq.num].u.data = FEC_1_2; break;
case eDVBFrontendParametersTerrestrial::FEC_2_3: p[cmdseq.num].u.data = FEC_2_3; break;
case eDVBFrontendParametersTerrestrial::FEC_3_4: p[cmdseq.num].u.data = FEC_3_4; break;
case eDVBFrontendParametersTerrestrial::FEC_5_6: p[cmdseq.num].u.data = FEC_5_6; break;
case eDVBFrontendParametersTerrestrial::FEC_6_7: p[cmdseq.num].u.data = FEC_6_7; break;
case eDVBFrontendParametersTerrestrial::FEC_7_8: p[cmdseq.num].u.data = FEC_7_8; break;
case eDVBFrontendParametersTerrestrial::FEC_8_9: p[cmdseq.num].u.data = FEC_8_9; break;
default:
case eDVBFrontendParametersTerrestrial::FEC_Auto: p[cmdseq.num].u.data = FEC_AUTO; break;
}
cmdseq.num++;
p[cmdseq.num].cmd = DTV_MODULATION;
switch (parm.modulation)
{
case eDVBFrontendParametersTerrestrial::Modulation_QPSK: p[cmdseq.num].u.data = QPSK; break;
case eDVBFrontendParametersTerrestrial::Modulation_QAM16: p[cmdseq.num].u.data = QAM_16; break;
case eDVBFrontendParametersTerrestrial::Modulation_QAM64: p[cmdseq.num].u.data = QAM_64; break;
case eDVBFrontendParametersTerrestrial::Modulation_QAM256: p[cmdseq.num].u.data = QAM_256; break;
default:
case eDVBFrontendParametersTerrestrial::Modulation_Auto: p[cmdseq.num].u.data = QAM_AUTO; break;
}
cmdseq.num++;
p[cmdseq.num].cmd = DTV_TRANSMISSION_MODE;
switch (parm.transmission_mode)
{
case eDVBFrontendParametersTerrestrial::TransmissionMode_2k: p[cmdseq.num].u.data = TRANSMISSION_MODE_2K; break;
case eDVBFrontendParametersTerrestrial::TransmissionMode_4k: p[cmdseq.num].u.data = TRANSMISSION_MODE_4K; break;
case eDVBFrontendParametersTerrestrial::TransmissionMode_8k: p[cmdseq.num].u.data = TRANSMISSION_MODE_8K; break;
#if defined TRANSMISSION_MODE_1K
case eDVBFrontendParametersTerrestrial::TransmissionMode_1k: p[cmdseq.num].u.data = TRANSMISSION_MODE_1K; break;
case eDVBFrontendParametersTerrestrial::TransmissionMode_16k: p[cmdseq.num].u.data = TRANSMISSION_MODE_16K; break;
case eDVBFrontendParametersTerrestrial::TransmissionMode_32k: p[cmdseq.num].u.data = TRANSMISSION_MODE_32K; break;
#endif
default:
case eDVBFrontendParametersTerrestrial::TransmissionMode_Auto: p[cmdseq.num].u.data = TRANSMISSION_MODE_AUTO; break;
}
cmdseq.num++;
p[cmdseq.num].cmd = DTV_GUARD_INTERVAL;
switch (parm.guard_interval)
{
case eDVBFrontendParametersTerrestrial::GuardInterval_1_32: p[cmdseq.num].u.data = GUARD_INTERVAL_1_32; break;
case eDVBFrontendParametersTerrestrial::GuardInterval_1_16: p[cmdseq.num].u.data = GUARD_INTERVAL_1_16; break;
case eDVBFrontendParametersTerrestrial::GuardInterval_1_8: p[cmdseq.num].u.data = GUARD_INTERVAL_1_8; break;
case eDVBFrontendParametersTerrestrial::GuardInterval_1_4: p[cmdseq.num].u.data = GUARD_INTERVAL_1_4; break;
#if defined GUARD_INTERVAL_1_128
case eDVBFrontendParametersTerrestrial::GuardInterval_1_128: p[cmdseq.num].u.data = GUARD_INTERVAL_1_128; break;
case eDVBFrontendParametersTerrestrial::GuardInterval_19_128: p[cmdseq.num].u.data = GUARD_INTERVAL_19_128; break;
case eDVBFrontendParametersTerrestrial::GuardInterval_19_256: p[cmdseq.num].u.data = GUARD_INTERVAL_19_256; break;
#endif
default:
case eDVBFrontendParametersTerrestrial::GuardInterval_Auto: p[cmdseq.num].u.data = GUARD_INTERVAL_AUTO; break;
}
cmdseq.num++;
p[cmdseq.num].cmd = DTV_HIERARCHY;
switch (parm.hierarchy)
{
case eDVBFrontendParametersTerrestrial::Hierarchy_None: p[cmdseq.num].u.data = HIERARCHY_NONE; break;
case eDVBFrontendParametersTerrestrial::Hierarchy_1: p[cmdseq.num].u.data = HIERARCHY_1; break;
case eDVBFrontendParametersTerrestrial::Hierarchy_2: p[cmdseq.num].u.data = HIERARCHY_2; break;
case eDVBFrontendParametersTerrestrial::Hierarchy_4: p[cmdseq.num].u.data = HIERARCHY_4; break;
default:
case eDVBFrontendParametersTerrestrial::Hierarchy_Auto: p[cmdseq.num].u.data = HIERARCHY_AUTO; break;
}
cmdseq.num++;
p[cmdseq.num].cmd = DTV_BANDWIDTH_HZ, p[cmdseq.num].u.data = parm.bandwidth, cmdseq.num++;
if (system == SYS_DVBT2)
{
if (m_dvbversion >= DVB_VERSION(5, 3))
{
#if defined DTV_STREAM_ID
p[cmdseq.num].cmd = DTV_STREAM_ID, p[cmdseq.num].u.data = parm.plp_id, cmdseq.num++;
#elif defined DTV_DVBT2_PLP_ID
p[cmdseq.num].cmd = DTV_DVBT2_PLP_ID, p[cmdseq.num].u.data = parm.plp_id, cmdseq.num++;
#endif
}
}
}
else if (type == iDVBFrontend::feATSC)
{
eDVBFrontendParametersATSC parm;
oparm.getATSC(parm);
p[cmdseq.num].cmd = DTV_DELIVERY_SYSTEM;
switch (parm.system)
{
default:
case eDVBFrontendParametersATSC::System_ATSC: p[cmdseq.num].u.data = SYS_ATSC; break;
case eDVBFrontendParametersATSC::System_DVB_C_ANNEX_B: p[cmdseq.num].u.data = SYS_DVBC_ANNEX_B; break;
}
cmdseq.num++;
p[cmdseq.num].cmd = DTV_FREQUENCY, p[cmdseq.num].u.data = parm.frequency, cmdseq.num++;
p[cmdseq.num].cmd = DTV_INVERSION;
switch (parm.inversion)
{
case eDVBFrontendParametersATSC::Inversion_Off: p[cmdseq.num].u.data = INVERSION_OFF; break;
case eDVBFrontendParametersATSC::Inversion_On: p[cmdseq.num].u.data = INVERSION_ON; break;
default:
case eDVBFrontendParametersATSC::Inversion_Unknown: p[cmdseq.num].u.data = INVERSION_AUTO; break;
}
cmdseq.num++;
p[cmdseq.num].cmd = DTV_MODULATION;
switch (parm.modulation)
{
default:
case eDVBFrontendParametersATSC::Modulation_Auto: p[cmdseq.num].u.data = QAM_AUTO; break;
case eDVBFrontendParametersATSC::Modulation_QAM16: p[cmdseq.num].u.data = QAM_16; break;
case eDVBFrontendParametersATSC::Modulation_QAM32: p[cmdseq.num].u.data = QAM_32; break;
case eDVBFrontendParametersATSC::Modulation_QAM64: p[cmdseq.num].u.data = QAM_64; break;
case eDVBFrontendParametersATSC::Modulation_QAM128: p[cmdseq.num].u.data = QAM_128; break;
case eDVBFrontendParametersATSC::Modulation_QAM256: p[cmdseq.num].u.data = QAM_256; break;
case eDVBFrontendParametersATSC::Modulation_VSB_8: p[cmdseq.num].u.data = VSB_8; break;
case eDVBFrontendParametersATSC::Modulation_VSB_16: p[cmdseq.num].u.data = VSB_16; break;
}
cmdseq.num++;
}
p[cmdseq.num].cmd = DTV_TUNE, cmdseq.num++;
if (ioctl(m_fd, FE_SET_PROPERTY, &cmdseq) == -1)
{
perror("FE_SET_PROPERTY failed");
return;
}
}
}
RESULT eDVBFrontend::prepare_sat(const eDVBFrontendParametersSatellite &feparm, unsigned int tunetimeout)
{
int res;
satfrequency = feparm.frequency;
if (!m_sec)
{
eWarning("[eDVBFrontend] no SEC module active!");
return -ENOENT;
}
res = m_sec->prepare(*this, feparm, satfrequency, 1 << m_slotid, tunetimeout);
if (!res)
{
eDebugNoSimulate("[eDVBFrontend] prepare_sat System %d Freq %d Pol %d SR %d INV %d FEC %d orbpos %d system %d modulation %d pilot %d, rolloff %d",
feparm.system,
feparm.frequency,
feparm.polarisation,
feparm.symbol_rate,
feparm.inversion,
feparm.fec,
feparm.orbital_position,
feparm.system,
feparm.modulation,
feparm.pilot,
feparm.rolloff);
if ((unsigned int)satfrequency < fe_info.frequency_min || (unsigned int)satfrequency > fe_info.frequency_max)
{
eDebugNoSimulate("[eDVBFrontend] %d mhz out of tuner range.. dont tune", satfrequency / 1000);
return -EINVAL;
}
eDebugNoSimulate("[eDVBFrontend] tuning to %d mhz", satfrequency / 1000);
}
oparm.setDVBS(feparm, feparm.no_rotor_command_on_tune);
return res;
}
RESULT eDVBFrontend::prepare_cable(const eDVBFrontendParametersCable &feparm)
{
eDebugNoSimulate("[eDVBFrontend] tuning to %d khz, sr %d, fec %d, modulation %d, inversion %d",
feparm.frequency,
feparm.symbol_rate,
feparm.fec_inner,
feparm.modulation,
feparm.inversion);
oparm.setDVBC(feparm);
return 0;
}
RESULT eDVBFrontend::prepare_terrestrial(const eDVBFrontendParametersTerrestrial &feparm)
{
oparm.setDVBT(feparm);
return 0;
}
RESULT eDVBFrontend::prepare_atsc(const eDVBFrontendParametersATSC &feparm)
{
oparm.setATSC(feparm);
return 0;
}
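/* Main tuning entry point: validates the parameters, prepares the frontend for the
   requested delivery system, builds the matching SEC command sequence and starts the
   tune timer that executes it (or runs it synchronously when simulating). */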
RESULT eDVBFrontend::tune(const iDVBFrontendParameters &where)
{
unsigned int timeout = 5000;
int type;
eDebugNoSimulate("[eDVBFrontend] (%d)tune", m_dvbid);
m_timeout->stop();
int res=0;
if (where.getSystem(type) < 0)
{
res = -EINVAL;
goto tune_error;
}
if (!m_sn && !m_simulate)
{
eDebug("[eDVBFrontend] no frontend device opened... do not try to tune !!!");
res = -ENODEV;
goto tune_error;
}
if (m_type == feSatellite && type != feSatellite)
setTone(iDVBFrontend::toneOff);
else if (type == feSatellite && m_type != feSatellite)
setDeliverySystem("DVB-S");
if (!m_simulate)
m_sn->stop();
m_sec_sequence.clear();
where.calcLockTimeout(timeout);
switch (type)
{
case feSatellite:
{
eDVBFrontendParametersSatellite feparm;
if (where.getDVBS(feparm))
{
eDebug("[eDVBFrontend] no dvbs data!");
res = -EINVAL;
goto tune_error;
}
if (m_rotor_mode != feparm.no_rotor_command_on_tune && !feparm.no_rotor_command_on_tune)
{
eDVBFrontend *sec_fe = this;
long tmp = m_data[LINKED_PREV_PTR];
while (tmp != -1)
{
eDVBRegisteredFrontend *linked_fe = (eDVBRegisteredFrontend*)tmp;
sec_fe = linked_fe->m_frontend;
sec_fe->getData(LINKED_NEXT_PTR, tmp);
}
eDebug("[eDVBFrontend] (fe%d) reset diseqc after leave rotor mode!", m_dvbid);
sec_fe->m_data[CSW] = sec_fe->m_data[UCSW] = sec_fe->m_data[TONEBURST] = sec_fe->m_data[ROTOR_CMD] = sec_fe->m_data[ROTOR_POS] = -1; // reset diseqc
}
m_rotor_mode = feparm.no_rotor_command_on_tune;
if (!m_simulate)
m_sec->setRotorMoving(m_slotid, false);
res=prepare_sat(feparm, timeout);
if (res)
goto tune_error;
break;
}
case feCable:
{
eDVBFrontendParametersCable feparm;
if (where.getDVBC(feparm))
{
res = -EINVAL;
goto tune_error;
}
res=prepare_cable(feparm);
if (res)
goto tune_error;
m_sec_sequence.push_back( eSecCommand(eSecCommand::START_TUNE_TIMEOUT, timeout) );
m_sec_sequence.push_back( eSecCommand(eSecCommand::SET_FRONTEND, 1) );
break;
}
case feTerrestrial:
{
eDVBFrontendParametersTerrestrial feparm;
if (where.getDVBT(feparm))
{
eDebug("[eDVBFrontend] no -T data");
res = -EINVAL;
goto tune_error;
}
res=prepare_terrestrial(feparm);
if (res)
goto tune_error;
char configStr[255];
snprintf(configStr, 255, "config.Nims.%d.terrestrial_5V", m_slotid);
m_sec_sequence.push_back( eSecCommand(eSecCommand::START_TUNE_TIMEOUT, timeout) );
if (eConfigManager::getConfigBoolValue(configStr))
m_sec_sequence.push_back( eSecCommand(eSecCommand::SET_VOLTAGE, iDVBFrontend::voltage13) );
else
m_sec_sequence.push_back( eSecCommand(eSecCommand::SET_VOLTAGE, iDVBFrontend::voltageOff) );
m_sec_sequence.push_back( eSecCommand(eSecCommand::SET_FRONTEND, 1) );
break;
}
case feATSC:
{
eDVBFrontendParametersATSC feparm;
if (where.getATSC(feparm))
{
res = -EINVAL;
goto tune_error;
}
res=prepare_atsc(feparm);
if (res)
goto tune_error;
m_sec_sequence.push_back( eSecCommand(eSecCommand::START_TUNE_TIMEOUT, timeout) );
m_sec_sequence.push_back( eSecCommand(eSecCommand::SET_FRONTEND, 1) );
break;
}
default:
res = -EINVAL;
goto tune_error;
}
m_sec_sequence.current() = m_sec_sequence.begin();
if (!m_simulate)
{
if(m_type != type)
{
eDebug("[eDVBFrontend] tune setting type to %d from %d", type, m_type);
m_type = type;
}
m_tuneTimer->start(0,true);
m_tuning = 1;
if (m_state != stateTuning)
{
m_state = stateTuning;
m_stateChanged(this);
}
}
else
tuneLoop();
return res;
tune_error:
m_tuneTimer->stop();
return res;
}
RESULT eDVBFrontend::connectStateChange(const Slot1<void,iDVBFrontend*> &stateChange, ePtr<eConnection> &connection)
{
connection = new eConnection(this, m_stateChanged.connect(stateChange));
return 0;
}
RESULT eDVBFrontend::setVoltage(int voltage)
{
bool increased=false;
fe_sec_voltage_t vlt;
m_data[CUR_VOLTAGE]=voltage;
switch (voltage)
{
case voltageOff:
m_data[CSW]=m_data[UCSW]=m_data[TONEBURST]=-1; // reset diseqc
vlt = SEC_VOLTAGE_OFF;
break;
case voltage13_5:
increased = true;
case voltage13:
vlt = SEC_VOLTAGE_13;
break;
case voltage18_5:
increased = true;
case voltage18:
vlt = SEC_VOLTAGE_18;
break;
default:
return -ENODEV;
}
if (m_simulate)
return 0;
::ioctl(m_fd, FE_ENABLE_HIGH_LNB_VOLTAGE, increased);
return ::ioctl(m_fd, FE_SET_VOLTAGE, vlt);
}
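/* Note added for clarity (not in the original source): voltage13_5 and
 * voltage18_5 intentionally fall through to the plain 13V/18V cases above;
 * the only difference is that they also set the "increased" flag, which is
 * passed to FE_ENABLE_HIGH_LNB_VOLTAGE to compensate for voltage drop on
 * long cable runs. */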
RESULT eDVBFrontend::getState(int &state)
{
state = m_state;
return 0;
}
RESULT eDVBFrontend::setTone(int t)
{
fe_sec_tone_mode_t tone;
if (m_simulate)
return 0;
if (m_type != feSatellite)
{
eDebug("[eDVBFrontend] sendTone allowed only in feSatellite (%d)", m_type);
return 0;
}
m_data[CUR_TONE]=t;
switch (t)
{
case toneOn:
tone = SEC_TONE_ON;
break;
case toneOff:
tone = SEC_TONE_OFF;
break;
default:
return -ENODEV;
}
return ::ioctl(m_fd, FE_SET_TONE, tone);
}
RESULT eDVBFrontend::sendDiseqc(const eDVBDiseqcCommand &diseqc)
{
struct dvb_diseqc_master_cmd cmd;
if (m_simulate)
return 0;
if (m_type != feSatellite)
{
eDebug("[eDVBFrontend] sendDiseqc allowed only in feSatellite (%d)", m_type);
return 0;
}
memcpy(cmd.msg, diseqc.data, diseqc.len);
cmd.msg_len = diseqc.len;
if (::ioctl(m_fd, FE_DISEQC_SEND_MASTER_CMD, &cmd))
return -EINVAL;
return 0;
}
RESULT eDVBFrontend::sendToneburst(int burst)
{
fe_sec_mini_cmd_t cmd;
if (m_simulate)
return 0;
if (m_type != feSatellite)
{
eDebug("[eDVBFrontend] sendToneburst allowed only in feSatellite (%d)", m_type);
return 0;
}
if (burst == eDVBSatelliteDiseqcParameters::B)
cmd = SEC_MINI_B;
else
cmd = SEC_MINI_A;
if (::ioctl(m_fd, FE_DISEQC_SEND_BURST, cmd))
return -EINVAL;
return 0;
}
RESULT eDVBFrontend::setSEC(iDVBSatelliteEquipmentControl *sec)
{
m_sec = sec;
return 0;
}
RESULT eDVBFrontend::setSecSequence(eSecCommandList &list)
{
if (m_data[SATCR] != -1 && m_sec_sequence.current() != m_sec_sequence.end())
m_sec_sequence.push_back(list);
else
m_sec_sequence = list;
return 0;
}
RESULT eDVBFrontend::getData(int num, long &data)
{
if ( num < NUM_DATA_ENTRIES )
{
data = m_data[num];
return 0;
}
return -EINVAL;
}
RESULT eDVBFrontend::setData(int num, long val)
{
if ( num < NUM_DATA_ENTRIES )
{
m_data[num] = val;
return 0;
}
return -EINVAL;
}
int eDVBFrontend::isCompatibleWith(ePtr<iDVBFrontendParameters> &feparm)
{
int type;
int score = 0;
bool preferred = (eDVBFrontend::getPreferredFrontend() >= 0 && m_slotid == eDVBFrontend::getPreferredFrontend());
if (feparm->getSystem(type) || !m_enabled)
{
return 0;
}
if (type == eDVBFrontend::feSatellite)
{
eDVBFrontendParametersSatellite parm;
bool can_handle_dvbs, can_handle_dvbs2;
if (feparm->getDVBS(parm) < 0)
{
return 0;
}
can_handle_dvbs = supportsDeliverySystem(SYS_DVBS, !m_multitype);
can_handle_dvbs2 = supportsDeliverySystem(SYS_DVBS2, !m_multitype);
if (parm.system == eDVBFrontendParametersSatellite::System_DVB_S2 && !can_handle_dvbs2)
{
return 0;
}
if (parm.system == eDVBFrontendParametersSatellite::System_DVB_S && !can_handle_dvbs)
{
return 0;
}
score = m_sec ? m_sec->canTune(parm, this, 1 << m_slotid) : 0;
if (score > 1 && parm.system == eDVBFrontendParametersSatellite::System_DVB_S && can_handle_dvbs2)
{
/* prefer to use an S tuner, try to keep S2 free for S2 transponders */
score--;
}
}
else if (type == eDVBFrontend::feCable)
{
eDVBFrontendParametersCable parm;
bool can_handle_dvbc_annex_a, can_handle_dvbc_annex_c;
if (feparm->getDVBC(parm) < 0)
{
return 0;
}
#if DVB_API_VERSION > 5 || DVB_API_VERSION == 5 && DVB_API_VERSION_MINOR >= 6
if (m_dvbversion >= DVB_VERSION(5, 6))
{
can_handle_dvbc_annex_a = supportsDeliverySystem(SYS_DVBC_ANNEX_A, !m_multitype);
can_handle_dvbc_annex_c = supportsDeliverySystem(SYS_DVBC_ANNEX_C, !m_multitype);
}
else
{
can_handle_dvbc_annex_a = can_handle_dvbc_annex_c = supportsDeliverySystem(SYS_DVBC_ANNEX_A, !m_multitype); /* new value for SYS_DVB_ANNEX_AC */
}
#else
can_handle_dvbc_annex_a = can_handle_dvbc_annex_c = supportsDeliverySystem(SYS_DVBC_ANNEX_AC, !m_multitype);
#endif
if (parm.system == eDVBFrontendParametersCable::System_DVB_C_ANNEX_A && !can_handle_dvbc_annex_a)
{
return 0;
}
if (parm.system == eDVBFrontendParametersCable::System_DVB_C_ANNEX_C && !can_handle_dvbc_annex_c)
{
return 0;
}
score = 2;
}
else if (type == eDVBFrontend::feTerrestrial)
{
eDVBFrontendParametersTerrestrial parm;
bool can_handle_dvbt, can_handle_dvbt2;
can_handle_dvbt = supportsDeliverySystem(SYS_DVBT, !m_multitype);
can_handle_dvbt2 = supportsDeliverySystem(SYS_DVBT2, !m_multitype);
if (feparm->getDVBT(parm) < 0)
{
return 0;
}
if (parm.system == eDVBFrontendParametersTerrestrial::System_DVB_T && !can_handle_dvbt)
{
return 0;
}
if (parm.system == eDVBFrontendParametersTerrestrial::System_DVB_T2 && !can_handle_dvbt2)
{
return 0;
}
if (parm.system == eDVBFrontendParametersTerrestrial::System_DVB_T_T2 && !(can_handle_dvbt || can_handle_dvbt2))
{
return 0;
}
score = 2;
if (parm.system == eDVBFrontendParametersTerrestrial::System_DVB_T && can_handle_dvbt2)
{
/* prefer to use a T tuner, try to keep T2 free for T2 transponders */
score--;
}
}
else if (type == eDVBFrontend::feATSC)
{
eDVBFrontendParametersATSC parm;
bool can_handle_atsc, can_handle_dvbc_annex_b;
can_handle_dvbc_annex_b = supportsDeliverySystem(SYS_DVBC_ANNEX_B, !m_multitype);
can_handle_atsc = supportsDeliverySystem(SYS_ATSC, !m_multitype);
if (feparm->getATSC(parm) < 0)
{
return 0;
}
if (!can_handle_atsc && !can_handle_dvbc_annex_b)
{
return 0;
}
if (parm.system == eDVBFrontendParametersATSC::System_DVB_C_ANNEX_B && !can_handle_dvbc_annex_b)
{
return 0;
}
if (parm.system == eDVBFrontendParametersATSC::System_ATSC && !can_handle_atsc)
{
return 0;
}
score = 2;
}
if (score > 1 && m_multitype)
{
/* prefer to use a non multitype tuner, try to keep multitype tuners free for other DVB types */
score--;
}
if (score && preferred)
{
/* make 'sure' we always prefer this frontend */
score += 100000; /* the offset has to be so ridiculously high because of the high scores which are used for DVB-S(2) */
}
return score;
}
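/* Editorial summary (added, not original code): isCompatibleWith() returns 0
 * when this frontend cannot receive the given transponder at all. Otherwise
 * the base score (2 for cable/terrestrial/ATSC, whatever m_sec->canTune()
 * reports for satellite) is reduced by one when a more capable tuner
 * (DVB-S2/T2 capable or multitype) should be kept free for transponders that
 * need it, and increased by 100000 when this slot is the user's preferred
 * frontend, so the preferred tuner always wins the selection. */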
bool eDVBFrontend::supportsDeliverySystem(const fe_delivery_system_t &sys, bool obeywhitelist)
{
std::map<fe_delivery_system_t, bool>::iterator it = m_delsys.find(sys);
if (it != m_delsys.end() && it->second)
{
if (obeywhitelist && !m_delsys_whitelist.empty())
{
it = m_delsys_whitelist.find(sys);
if (it == m_delsys_whitelist.end() || !it->second) return false;
}
return true;
}
return false;
}
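/* Added note: supportsDeliverySystem() reports a system as usable only when
 * the hardware advertises it in m_delsys and, if obeywhitelist is set and a
 * whitelist has been configured, it is also present in m_delsys_whitelist. */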
void eDVBFrontend::setDeliverySystemWhitelist(const std::vector<fe_delivery_system_t> &whitelist)
{
m_delsys_whitelist.clear();
for (unsigned int i = 0; i < whitelist.size(); i++)
{
m_delsys_whitelist[whitelist[i]] = true;
}
if (m_simulate_fe)
{
m_simulate_fe->setDeliverySystemWhitelist(whitelist);
}
}
bool eDVBFrontend::setDeliverySystem(const char *type)
{
struct dtv_property p[1];
struct dtv_properties cmdseq;
int fetype;
if (m_fd < 0)
{
eDebug("[eDVBFrontend] setDeliverySystem cannot change delivery system with closed frontend");
return false;
}
if (m_simulate)
{
return false;
}
cmdseq.props = p;
cmdseq.num = 1;
p[0].cmd = DTV_DELIVERY_SYSTEM;
p[0].u.data = SYS_UNDEFINED;
if (!strcmp(type, "DVB-S2"))
{
p[0].u.data = SYS_DVBS2;
fetype = feSatellite;
}
else if (!strcmp(type, "DVB-S"))
{
p[0].u.data = SYS_DVBS;
fetype = feSatellite;
}
else if (!strcmp(type, "DVB-T2"))
{
p[0].u.data = SYS_DVBT2;
fetype = feTerrestrial;
}
else if (!strcmp(type, "DVB-T"))
{
p[0].u.data = SYS_DVBT;
fetype = feTerrestrial;
}
else if (!strcmp(type, "DVB-C"))
{
p[0].u.data = SYS_DVBC_ANNEX_A;
fetype = feCable;
}
else if (!strcmp(type, "ATSC"))
{
p[0].u.data = SYS_ATSC;
fetype = feATSC;
}
else
{
eDebug("[eDVBFrontend] setDeliverySystem not supported delivery system type: %s", type);
return false;
}
if (ioctl(m_fd, FE_SET_PROPERTY, &cmdseq) < 0)
{
eDebug("[eDVBFrontend] setDeliverySystem FE_SET_PROPERTY failed: %m type: %s data: %d", type, p[0].u.data);
return false;
}
eDebug("[eDVBFrontend] setDeliverySystem setting type to %d from %d", fetype, m_type);
m_type = fetype;
eDebug("[eDVBFrontend] setDeliverySystem succefully changed delivery system to %s", type);
return true;
}
bool eDVBFrontend::setSlotInfo(int id, const char *descr, bool enabled, bool isDVBS2, int frontendid)
{
if (frontendid < 0 || frontendid != m_dvbid)
{
return false;
}
m_slotid = id;
m_enabled = enabled;
strncpy(m_description, descr, sizeof(m_description));
// HACK: the rotor workaround is needed for all NIMs with an LNBP21 voltage regulator...
m_need_rotor_workaround = !!strstr(m_description, "Alps BSBE1") ||
!!strstr(m_description, "Alps BSBE2") ||
!!strstr(m_description, "Alps -S") ||
!!strstr(m_description, "BCM4501");
if (isDVBS2)
{
/* HACK for legacy dvb api without DELSYS support */
m_delsys[SYS_DVBS2] = true;
}
eDebugNoSimulate("[eDVBFrontend] setSlotInfo for dvb frontend %d to slotid %d, descr %s, need rotorworkaround %s, enabled %s, DVB-S2 %s",
m_dvbid, m_slotid, m_description, m_need_rotor_workaround ? "Yes" : "No", m_enabled ? "Yes" : "No", isDVBS2 ? "Yes" : "No" );
return true;
}
bool eDVBFrontend::is_multistream()
{
#if DVB_API_VERSION > 5 || DVB_API_VERSION == 5 && DVB_API_VERSION_MINOR >= 8
return fe_info.caps & FE_CAN_MULTISTREAM;
#else //if DVB_API_VERSION < 5
return 0;
#endif
}
std::string eDVBFrontend::getCapabilities()
{
std::stringstream ss;
ss << "DVB API version: " << m_dvbversion / 256 << "." << m_dvbversion % 256 << std::endl;
ss << "Frontend: " << fe_info.name << std::endl;
ss << "Frequency:";
ss << " min " << fe_info.frequency_min;
ss << " max " << fe_info.frequency_max;
ss << " stepsize " << fe_info.frequency_stepsize;
ss << " tolerance " << fe_info.frequency_tolerance << std::endl;
ss << "Symbolrate:";
ss << " min " << fe_info.symbol_rate_min;
ss << " max " << fe_info.symbol_rate_max;
ss << " tolerance " << fe_info.symbol_rate_tolerance << std::endl;
ss << "Capabilities:";
if (fe_info.caps == FE_IS_STUPID) ss << " stupid FE";
if (fe_info.caps & FE_CAN_INVERSION_AUTO) ss << " auto inversion";
if (fe_info.caps & FE_CAN_FEC_1_2) ss << " FEC 1/2";
if (fe_info.caps & FE_CAN_FEC_2_3) ss << " FEC 2/3";
if (fe_info.caps & FE_CAN_FEC_3_4) ss << " FEC 3/4";
if (fe_info.caps & FE_CAN_FEC_4_5) ss << " FEC 4/5";
if (fe_info.caps & FE_CAN_FEC_5_6) ss << " FEC 5/6";
if (fe_info.caps & FE_CAN_FEC_6_7) ss << " FEC 6/7";
if (fe_info.caps & FE_CAN_FEC_7_8) ss << " FEC 7/8";
if (fe_info.caps & FE_CAN_FEC_8_9) ss << " FEC 8/9";
if (fe_info.caps & FE_CAN_FEC_AUTO) ss << " FEC AUTO";
if (fe_info.caps & FE_CAN_QPSK) ss << " QPSK";
if (fe_info.caps & FE_CAN_QAM_16) ss << " QAM 16";
if (fe_info.caps & FE_CAN_QAM_32) ss << " QAM 32";
if (fe_info.caps & FE_CAN_QAM_64) ss << " QAM 64";
if (fe_info.caps & FE_CAN_QAM_128) ss << " QAM 128";
if (fe_info.caps & FE_CAN_QAM_256) ss << " QAM 256";
if (fe_info.caps & FE_CAN_QAM_AUTO) ss << " QAM AUTO";
if (fe_info.caps & FE_CAN_TRANSMISSION_MODE_AUTO) ss << " auto transmission mode";
if (fe_info.caps & FE_CAN_BANDWIDTH_AUTO) ss << " auto bandwidth";
if (fe_info.caps & FE_CAN_GUARD_INTERVAL_AUTO) ss << " auto guard interval";
if (fe_info.caps & FE_CAN_HIERARCHY_AUTO) ss << " auto hierarchy";
if (fe_info.caps & FE_CAN_8VSB) ss << " FE_CAN_8VSB";
if (fe_info.caps & FE_CAN_16VSB) ss << " FE_CAN_16VSB";
if (fe_info.caps & FE_HAS_EXTENDED_CAPS) ss << " FE_HAS_EXTENDED_CAPS";
#if DVB_API_VERSION > 5 || DVB_API_VERSION == 5 && DVB_API_VERSION_MINOR >= 8
if (fe_info.caps & FE_CAN_MULTISTREAM) ss << " FE_CAN_MULTISTREAM";
#endif
if (fe_info.caps & FE_CAN_TURBO_FEC) ss << " FE_CAN_TURBO_FEC";
if (fe_info.caps & FE_CAN_2G_MODULATION) ss << " FE_CAN_2G_MODULATION";
if (fe_info.caps & FE_NEEDS_BENDING) ss << " FE_NEEDS_BENDING";
if (fe_info.caps & FE_CAN_RECOVER) ss << " FE_CAN_RECOVER";
if (fe_info.caps & FE_CAN_MUTE_TS) ss << " FE_CAN_MUTE_TS";
ss << std::endl;
ss << "Delivery Systems:";
std::map<fe_delivery_system_t, bool>::iterator it;
for (it = m_delsys.begin(); it != m_delsys.end(); it++)
{
if (!it->second) continue;
switch (it->first)
{
case SYS_ATSC: ss << " ATSC"; break;
case SYS_ATSCMH: ss << " ATSCMH"; break;
case SYS_CMMB: ss << " CMBB"; break;
case SYS_DAB: ss << " DAB"; break;
case SYS_DSS: ss << " DSS"; break;
case SYS_DVBC_ANNEX_B: ss << " DVBC_ANNEX_B"; break;
case SYS_DVBH: ss << " DVBH"; break;
case SYS_DVBS: ss << " DVBS"; break;
case SYS_DVBS2: ss << " DVBS2"; break;
case SYS_DVBT: ss << " DVBT"; break;
case SYS_ISDBC: ss << " ISDBC"; break;
case SYS_ISDBS: ss << " ISDBS"; break;
case SYS_ISDBT: ss << " ISDBT"; break;
case SYS_UNDEFINED: ss << " UNDEFINED"; break;
case SYS_DVBC_ANNEX_A: ss << " DVBC_ANNEX_A"; break;
case SYS_DVBC_ANNEX_C: ss << " DVBC_ANNEX_C"; break;
case SYS_DVBT2: ss << " DVBT2"; break;
case SYS_TURBO: ss << " TURBO"; break;
}
}
return ss.str();
}
| oostende/openblachole | lib/dvb/frontend.cpp | C++ | gpl-2.0 | 87,073 |
/*
* Copyright (C) 2005 aCaB <acab@clamav.net>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*/
/*
** spin.c
**
** 19/07/2k5 - Finally started coding something
** 21/07/2k5 - Works, started clearing the mess
** 31/07/2k5 - Porting to libclamav
*/
/*
** Unpacks pespin v1.1
**
** Funny thing to reverse
**
** [ A big fat thank to christoph for not letting me give up ]
*/
/*
** TODO ( a fat one ):
**
** OEP restore and unhijacking
** code redir handling (at least near OEP)
** passwd protection (didn't really look at it)
**
** All this stuff really needs a way better emu and a hell of unlaming
** ATM not worth the effort... and pespin v1.3 is out :@
**
*/
#if HAVE_CONFIG_H
#include "clamav-config.h"
#endif
#include <stdlib.h>
#include <string.h>
#include "cltypes.h"
#include "pe.h"
#include "rebuildpe.h"
#include "execs.h"
#include "others.h"
#include "packlibs.h"
#include "spin.h"
static char exec86(uint8_t aelle, uint8_t cielle, char *curremu, int *retval) {
int len = 0;
*retval=0;
while (len <0x24) {
uint8_t opcode = curremu[len], support;
len++;
switch (opcode) {
case 0xeb:
len++;
case 0x0a:
len++;
case 0x90:
case 0xf8:
case 0xf9:
break;
case 0x02: /* add al, cl */
aelle+=cielle;
len++;
break;
case 0x2a: /* sub al, cl */
aelle-=cielle;
len++;
break;
case 0x04: /* add al, ?? */
aelle+=curremu[len];
len++;
break;
case 0x2c: /* sub al, ?? */
aelle-=curremu[len];
len++;
break;
case 0x32: /* xor al, cl */
aelle^=cielle;
len++;
break;
case 0x34: /* xor al, ?? */
aelle^=curremu[len];
len++;
break;
case 0xfe: /* inc/dec al */
if ( curremu[len] == '\xc0' ) aelle++;
else aelle--;
len++;
break;
case 0xc0: /* ror/rol al, ?? */
support = curremu[len];
len++;
if ( support == 0xc0 ) CLI_ROL(aelle, curremu[len]);
else CLI_ROR(aelle, curremu[len]);
len++;
break;
default:
cli_dbgmsg("spin: bogus opcode %x\n", opcode);
*retval=1;
return aelle;
}
}
if ( len!=0x24 || curremu[len]!='\xaa' ) {
cli_dbgmsg("spin: bad emucode\n");
*retval=1;
}
return aelle;
}
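/* Added note: exec86() is a minimal emulator for the 0x24-byte polymorphic
 * stubs pespin generates. It only interprets the handful of opcodes those
 * stubs use (add/sub/xor of al with cl or an immediate, inc/dec al,
 * rol/ror al by an immediate, plus a few no-ops) and reports failure through
 * *retval for anything else, or for a stub that does not end with 0xaa. */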
static uint32_t summit (char *src, int size)
{
uint32_t eax=0xffffffff, ebx=0xffffffff;
int i;
while(size) {
eax ^= *src++<<8 & 0xff00;
eax = eax>>3 & 0x1fffffff;
for (i=0; i<4; i++) {
uint32_t swap;
eax ^= ebx>>8 & 0xff;
eax += 0x7801a108;
eax ^= ebx;
CLI_ROR(eax, ebx&0xff);
swap = eax;
eax = ebx;
ebx = swap;
}
size--;
}
return ebx;
}
int unspin(char *src, int ssize, struct cli_exe_section *sections, int sectcnt, uint32_t nep, int desc, cli_ctx *ctx) {
char *curr, *emu, *ep, *spinned;
char **sects;
int blobsz=0, j;
uint32_t key32, bitmap, bitman;
uint32_t len;
uint8_t key8;
cli_dbgmsg("in unspin\n");
if ((spinned = (char *) cli_malloc(sections[sectcnt].rsz)) == NULL )
return 1;
memcpy(spinned, src + sections[sectcnt].raw, sections[sectcnt].rsz);
ep = spinned + nep - sections[sectcnt].rva;
curr = ep+0xdb;
if ( *curr != '\xbb' ) {
free(spinned);
cli_dbgmsg("spin: Not spinned or bad version\n");
return 1;
}
key8 = (uint8_t)*++curr;
curr+=4;
if ( *curr != '\xb9' ) {
free(spinned);
cli_dbgmsg("spin: Not spinned or bad version\n");
return 1;
}
if ( (len = cli_readint32(curr+1)) != 0x11fe ) {
free(spinned);
cli_dbgmsg("spin: Not spinned or bad version\n");
return 1;
}
cli_dbgmsg("spin: Key8 is %x, Len is %x\n", key8, len);
if (!CLI_ISCONTAINED(spinned, sections[sectcnt].rsz, ep, len+0x1fe5-1)) {
free(spinned);
cli_dbgmsg("spin: len out of bounds, giving up\n");
return 1;
}
if ( ep[0x1e0]!='\xb8' )
cli_dbgmsg("spin: prolly not spinned, expect failure\n");
if ( (cli_readint32(ep+0x1e1) & 0x00200000) )
cli_dbgmsg("spin: password protected, expect failure\n");
curr = ep+0x1fe5+len-1;
while ( len-- ) {
*curr=(*curr)^(key8--);
curr--;
}
if (!CLI_ISCONTAINED(spinned, sections[sectcnt].rsz, ep+0x3217, 4)) {
free(spinned);
cli_dbgmsg("spin: key out of bounds, giving up\n");
return 1;
}
curr = ep+0x26eb;
key32 = cli_readint32(curr);
if ( (len = cli_readint32(curr+5)) != 0x5a0) {
free(spinned);
cli_dbgmsg("spin: Not spinned or bad version\n");
return 1;
}
curr = ep+0x2d5;
cli_dbgmsg("spin: Key is %x, Len is %x\n", key32, len);
while ( len-- ) {
if ( key32 & 1 ) {
key32 = key32>>1;
key32 ^= 0x8c328834;
} else {
key32 = key32>>1;
}
*curr = *curr ^ (key32 & 0xff);
curr++;
}
len = ssize - cli_readint32(ep+0x429); /* sub size, value */
if ( len >= (uint32_t)ssize ) {
free(spinned);
cli_dbgmsg("spin: crc out of bounds, giving up\n");
return 1;
}
key32 = cli_readint32(ep+0x3217) - summit(src,len);
memcpy(src + sections[sectcnt].raw, spinned, sections[sectcnt].rsz);
free(spinned); /* done CRC'ing - can have a dirty buffer now */
ep = src + nep + sections[sectcnt].raw - sections[sectcnt].rva; /* Fix the helper */
if (!CLI_ISCONTAINED(src, ssize, ep+0x3207, 4)) { /* this one holds all ep based checks */
cli_dbgmsg("spin: key out of bounds, giving up\n");
return 1;
}
bitmap = cli_readint32(ep+0x3207);
cli_dbgmsg("spin: Key32 is %x - XORbitmap is %x\n", key32, bitmap);
cli_dbgmsg("spin: Decrypting sects (xor)\n");
for (j=0; j<sectcnt; j++) {
if (bitmap&1) {
uint32_t size = sections[j].rsz;
char *ptr = src + sections[j].raw;
uint32_t keydup = key32;
if (!CLI_ISCONTAINED(src, ssize, ptr, size)) {
cli_dbgmsg("spin: sect %d out of file, giving up\n", j);
return 1; /* FIXME: Already checked in pe.c? */
}
while (size--) {
if (! (keydup & 1)) {
keydup = keydup>>1;
keydup ^= 0xed43af31;
} else {
keydup = keydup>>1;
}
*ptr = *ptr ^ (keydup & 0xff);
ptr++;
}
}
bitmap = bitmap >>1;
}
cli_dbgmsg("spin: done\n");
curr = ep+0x644;
if ( (len = cli_readint32(curr)) != 0x180) {
cli_dbgmsg("spin: Not spinned or bad version\n");
return 1;
}
key32 = cli_readint32(curr+0x0c);
cli_dbgmsg("spin: Key is %x, Len is %x\n", key32, len);
curr = ep+0x28d3;
if (!CLI_ISCONTAINED(src, ssize, curr, len)) { /* always true but i may decide to remove the previous check */
cli_dbgmsg("spin: key out of bounds, giving up\n");
return 1;
}
while ( len-- ) {
if ( key32 & 1 ) {
key32 = key32>>1;
key32 ^= 0xed43af32;
} else {
key32 = key32>>1;
}
*curr = *curr ^ (key32 & 0xff);
curr++;
}
curr = ep+0x28dd;
if ( (len = cli_readint32(curr)) != 0x1a1 ) {
cli_dbgmsg("spin: Not spinned or bad version\n");
return 1;
}
cli_dbgmsg("spin: POLY1 len is %x\n", len);
curr+=0xf; /* POLY1 */
emu = ep+0x6d4;
if (!CLI_ISCONTAINED(src, ssize, emu, len)) {
cli_dbgmsg("spin: poly1 out of bounds\n");
return 1;
}
while (len) {
int xcfailure=0;
*emu=exec86(*emu, len-- & 0xff, curr, &xcfailure); /* unlame POLY1 */
if (xcfailure) {
cli_dbgmsg("spin: cannot exec poly1\n");
return 1;
}
emu++;
}
bitmap = cli_readint32(ep+0x6f1);
cli_dbgmsg("spin: POLYbitmap is %x - decrypting sects (poly)\n", bitmap);
curr = ep+0x755;
for (j=0; j<sectcnt; j++) {
if (bitmap&1) {
uint32_t notthesamelen = sections[j].rsz;
emu = src + sections[j].raw;
if (!CLI_ISCONTAINED(src,ssize,curr,0x24)) { /* section bounds already checked twice now */
cli_dbgmsg("spin: poly1 emucode is out of file?\n");
return 1;
}
while (notthesamelen) {
int xcfailure=0;
*emu=exec86(*emu, notthesamelen-- & 0xff, curr, &xcfailure);
if (xcfailure) {
cli_dbgmsg("spin: cannot exec section\n");
return 1;
}
emu++;
}
}
bitmap = bitmap >>1;
}
cli_dbgmsg("spin: done\n");
bitmap = cli_readint32(ep+0x3061);
bitman = bitmap;
if(ctx->limits && ctx->limits->maxfilesize) {
unsigned long int filesize = 0;
for (j=0; j<sectcnt; j++) {
if (bitmap&1) {
if ( filesize > ctx->limits->maxfilesize || sections[j].vsz > ctx->limits->maxfilesize - filesize ) return 2;
filesize += sections[j].vsz;
}
bitmap>>=1;
}
bitmap = bitman;
}
cli_dbgmsg("spin: Compression bitmap is %x\n", bitmap);
if ( (sects= (char **) cli_malloc(sectcnt*sizeof(char *))) == NULL )
return 1;
len = 0;
for (j=0; j<sectcnt; j++) {
if (bitmap&1) {
if ( (sects[j] = (char *) cli_malloc(sections[j].vsz) ) == NULL ) {
cli_dbgmsg("spin: malloc(%d) failed\n", sections[j].vsz);
len = 1;
break;
}
blobsz+=sections[j].vsz;
memset(sects[j], 0, sections[j].vsz);
cli_dbgmsg("spin: Growing sect%d: was %x will be %x\n", j, sections[j].rsz, sections[j].vsz);
if ( cli_unfsg(src + sections[j].raw, sects[j], sections[j].rsz, sections[j].vsz, NULL, NULL) == -1 ) {
len++;
cli_dbgmsg("spin: Unpack failure\n");
}
} else {
blobsz+=sections[j].rsz;
sects[j] = src + sections[j].raw;
cli_dbgmsg("spin: Not growing sect%d\n", j);
}
bitmap>>=1;
}
cli_dbgmsg("spin: decompression complete\n");
if ( len ) {
int t;
for (t=0 ; t<j ; t++) {
if (bitman&1)
free(sects[t]);
bitman = bitman >>1 & 0x7fffffff;
}
free(sects);
return 1;
}
key32 = cli_readint32(ep+0x2fee);
if (key32) {
/* len = cli_readint32(ep+0x2fc8); -- Using vsizes instead */
for (j=0; j<sectcnt; j++) {
if (sections[j].rva <= key32 && sections[j].rva+sections[j].rsz > key32)
break;
}
if (j!=sectcnt && ((bitman & (1<<j)) == 0)) { /* FIXME: not really sure whether the res sect is lamed or just compressed, but this'll save some major headaches */
cli_dbgmsg("spin: Resources (sect%d) appear to be compressed\n\tuncompressed offset %x, len %x\n\tcompressed offset %x, len %x\n", j, sections[j].rva, key32 - sections[j].rva, key32, sections[j].vsz - (key32 - sections[j].rva));
if ( (curr=(char *)cli_malloc(sections[j].vsz)) != NULL ) {
memcpy(curr, src + sections[j].raw, key32 - sections[j].rva); /* Uncompressed part */
memset(curr + key32 - sections[j].rva, 0, sections[j].vsz - (key32 - sections[j].rva)); /* bzero */
if ( cli_unfsg(src + sections[j].raw + key32 - sections[j].rva, curr + key32 - sections[j].rva, sections[j].rsz - (key32 - sections[j].rva), sections[j].vsz - (key32 - sections[j].rva), NULL, NULL) ) {
free(curr);
cli_dbgmsg("spin: Failed to grow resources, continuing anyway\n");
blobsz+=sections[j].rsz;
} else {
sects[j]=curr;
bitman|=1<<j;
cli_dbgmsg("spin: Resources grown\n");
blobsz+=sections[j].vsz;
}
} else {
/* malloc failed but i'm too deep into this crap to quit without leaking more :( */
blobsz+=sections[j].rsz;
}
} else {
cli_dbgmsg("spin: No res?!\n");
}
}
bitmap=bitman; /* save as a free() bitmap */
if ( (ep = (char *) cli_malloc(blobsz)) != NULL ) {
struct cli_exe_section *rebhlp;
if ( (rebhlp = (struct cli_exe_section *) cli_malloc(sizeof(struct cli_exe_section)*(sectcnt))) != NULL ) {
char *to = ep;
int retval = 0;
for (j = 0; j < sectcnt; j++) {
rebhlp[j].raw = (j>0)?(rebhlp[j-1].raw + rebhlp[j-1].rsz):0;
rebhlp[j].rsz = (bitmap &1) ? sections[j].vsz : sections[j].rsz;
rebhlp[j].rva = sections[j].rva;
rebhlp[j].vsz = sections[j].vsz;
memcpy(to, sects[j], rebhlp[j].rsz);
to+=rebhlp[j].rsz;
if ( bitmap & 1 ) free(sects[j]);
bitmap = bitmap >>1;
}
if (! cli_rebuildpe(ep, rebhlp, sectcnt, 0x400000, 0x1000, 0, 0, desc)) { /* can't be bothered fixing those values: the rebuilt exe is completely broken anyway. */
cli_dbgmsg("spin: Cannot write unpacked file\n");
retval = 1;
}
free(rebhlp);
free(ep);
free(sects);
return retval;
}
free(ep);
}
cli_dbgmsg ("spin: free bitmap is %x\n", bitman);
for (j=0; j<sectcnt; j++) {
if (bitmap&1) free(sects[j]);
bitman = bitman >>1 & 0x7fffffff;
}
free(sects);
return 1; /* :( */
}
| kidmaple/CoolWall | user/clamav/libclamav/spin.c | C | gpl-2.0 | 12,993 |
package com.charlesdream.office.word.enums;
import com.charlesdream.office.BaseEnum;
import java.util.EnumSet;
import java.util.HashMap;
import java.util.Map;
/**
* Specifies how to associate a style sheet with the document.
* <p>
*
* @author Charles Cui on 3/5/2016.
* @since 1.0
*/
public enum WdStyleSheetLinkType implements BaseEnum {
/**
* Imported internal style sheet.
*
* @since 1.0
*/
wdStyleSheetLinkTypeImported(1),
/**
* Linked external style sheet.
*
* @since 1.0
*/
wdStyleSheetLinkTypeLinked(0);
private static final Map<Integer, WdStyleSheetLinkType> lookup;
static {
lookup = new HashMap<>();
for (WdStyleSheetLinkType e : EnumSet.allOf(WdStyleSheetLinkType.class)) {
lookup.put(e.value(), e);
}
}
private final int value;
WdStyleSheetLinkType(int value) {
this.value = value;
}
/**
* Find the enum type by its value.
*
* @param value The enum value.
* @return The enum type, or null if this enum value does not exist.
* @since 1.0
*/
public static WdStyleSheetLinkType find(int value) {
WdStyleSheetLinkType result = lookup.get(value);
return result;
}
/**
* Find the enum type by its value, with the default value.
*
* @param value The enum value.
* @param defaultValue The default return value if the enum value does not exist.
* @return The enum type, or the default value if this enum value does not exist.
* @since 1.0
*/
public static WdStyleSheetLinkType find(int value, WdStyleSheetLinkType defaultValue) {
WdStyleSheetLinkType result = WdStyleSheetLinkType.find(value);
if (result == null) {
result = defaultValue;
}
return result;
}
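// Usage sketch (added for illustration, not part of the original class):
//   WdStyleSheetLinkType imported = WdStyleSheetLinkType.find(1);  // wdStyleSheetLinkTypeImported
//   WdStyleSheetLinkType fallback = WdStyleSheetLinkType.find(99,
//           WdStyleSheetLinkType.wdStyleSheetLinkTypeLinked);      // 99 is unknown, the default is returned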
/**
* Get the value of an enum type.
*
* @return The value of an enum type.
* @since 1.0
*/
public int value() {
return this.value;
}
}
| Lotusun/OfficeHelper | src/main/java/com/charlesdream/office/word/enums/WdStyleSheetLinkType.java | Java | gpl-2.0 | 2,047 |
using System;
using System.Collections;
using Server.Targeting;
using Server.Network;
using Server.Mobiles;
using Server.Items;
using Server.Spells;
namespace Server.ACC.CSS.Systems.Cleric
{
public class ClericDivineFocusSpell : ClericSpell
{
private static SpellInfo m_Info = new SpellInfo(
"Divine Focus", "Divinium Cogitatus",
//SpellCircle.First,
212,
9041
);
public override SpellCircle Circle
{
get { return SpellCircle.First; }
}
public override int RequiredTithing{ get{ return 15; } }
public override double RequiredSkill{ get{ return 35.0; } }
private static Hashtable m_Table = new Hashtable();
public ClericDivineFocusSpell( Mobile caster, Item scroll ) : base( caster, scroll, m_Info )
{
}
public static double GetScalar( Mobile m )
{
double val = 1.0;
if ( m.CanBeginAction( typeof( ClericDivineFocusSpell ) ) )
val = 1.5;
return val;
}
public override bool CheckCast()
{
if ( !base.CheckCast() )
{
return false;
}
if ( !Caster.CanBeginAction( typeof( ClericDivineFocusSpell ) ) )
{
Caster.SendMessage( "This spell is already in effect" );
return false;
}
return true;
}
public override void OnCast()
{
if ( !Caster.CanBeginAction( typeof( ClericDivineFocusSpell ) ) )
{
Caster.SendMessage( "This spell is already in effect" );
return;
}
if ( CheckSequence() )
{
Caster.BeginAction( typeof( ClericDivineFocusSpell ) );
Timer t = new InternalTimer( Caster );
m_Table[Caster] = t;
t.Start();
Caster.FixedParticles( 0x375A, 1, 15, 0x480, 1, 4, EffectLayer.Waist );
}
}
private class InternalTimer : Timer
{
private Mobile m_Owner;
public InternalTimer( Mobile owner ) : base( TimeSpan.Zero, TimeSpan.FromSeconds( 1.5 ) )
{
m_Owner = owner;
}
protected override void OnTick()
{
if ( !m_Owner.CheckAlive() || m_Owner.Mana < 3 )
{
m_Owner.EndAction( typeof( ClericDivineFocusSpell ) );
m_Table.Remove( m_Owner );
m_Owner.SendMessage( "Your mind weakens and you are unable to maintain your divine focus." );
Stop();
}
else
{
m_Owner.Mana -= 3;
}
}
}
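// Added summary (not part of the original script): once cast, the focus
// effect ticks immediately and then every 1.5 seconds, draining 3 mana per
// tick; it ends itself (with a warning message) as soon as the caster dies
// or has less than 3 mana left, releasing the action lock set in OnCast().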
}
}
| alucardxlx/kaltar | Scripts/Kaltar/Complete Spell System/-=+ 03 Systems/Cleric/Spells/DivineFocusSpell.cs | C# | gpl-2.0 | 2,576 |
package org.wordpress.android.ui.notifications;
import android.app.Activity;
import android.app.Fragment;
import android.app.NotificationManager;
import android.content.Intent;
import android.os.Bundle;
import android.os.Parcelable;
import android.support.annotation.StringRes;
import android.support.v7.widget.DefaultItemAnimator;
import android.support.v7.widget.LinearLayoutManager;
import android.support.v7.widget.RecyclerView;
import android.text.TextUtils;
import android.view.LayoutInflater;
import android.view.View;
import android.view.ViewGroup;
import android.widget.Button;
import android.widget.TextView;
import com.cocosw.undobar.UndoBarController;
import com.simperium.client.Bucket;
import com.simperium.client.BucketObject;
import com.simperium.client.BucketObjectMissingException;
import org.wordpress.android.GCMIntentService;
import org.wordpress.android.R;
import org.wordpress.android.models.AccountHelper;
import org.wordpress.android.models.CommentStatus;
import org.wordpress.android.models.Note;
import org.wordpress.android.ui.ActivityLauncher;
import org.wordpress.android.ui.RequestCodes;
import org.wordpress.android.ui.comments.CommentActions;
import org.wordpress.android.ui.main.WPMainActivity;
import org.wordpress.android.ui.notifications.adapters.NotesAdapter;
import org.wordpress.android.ui.notifications.utils.SimperiumUtils;
import org.wordpress.android.util.AppLog;
import org.wordpress.android.util.StringUtils;
import org.wordpress.android.util.ToastUtils;
import javax.annotation.Nonnull;
public class NotificationsListFragment extends Fragment
implements Bucket.Listener<Note>,
WPMainActivity.OnScrollToTopListener {
public static final String NOTE_ID_EXTRA = "noteId";
public static final String NOTE_INSTANT_REPLY_EXTRA = "instantReply";
public static final String NOTE_MODERATE_ID_EXTRA = "moderateNoteId";
public static final String NOTE_MODERATE_STATUS_EXTRA = "moderateNoteStatus";
private static final String KEY_LIST_SCROLL_POSITION = "scrollPosition";
private NotesAdapter mNotesAdapter;
private LinearLayoutManager mLinearLayoutManager;
private RecyclerView mRecyclerView;
private ViewGroup mEmptyView;
private int mRestoredScrollPosition;
private Bucket<Note> mBucket;
public static NotificationsListFragment newInstance() {
return new NotificationsListFragment();
}
/**
* For responding to tapping of notes
*/
public interface OnNoteClickListener {
public void onClickNote(String noteId);
}
@Override
public View onCreateView(@Nonnull LayoutInflater inflater, ViewGroup container, Bundle savedInstanceState) {
View view = inflater.inflate(R.layout.notifications_fragment_notes_list, container, false);
mRecyclerView = (RecyclerView) view.findViewById(R.id.recycler_view_notes);
mEmptyView = (ViewGroup) view.findViewById(R.id.empty_view);
RecyclerView.ItemAnimator animator = new DefaultItemAnimator();
animator.setSupportsChangeAnimations(true);
mRecyclerView.setItemAnimator(animator);
mLinearLayoutManager = new LinearLayoutManager(getActivity());
mRecyclerView.setLayoutManager(mLinearLayoutManager);
// setup the initial notes adapter, starts listening to the bucket
mBucket = SimperiumUtils.getNotesBucket();
if (mBucket != null) {
if (mNotesAdapter == null) {
mNotesAdapter = new NotesAdapter(getActivity(), mBucket);
mNotesAdapter.setOnNoteClickListener(new OnNoteClickListener() {
@Override
public void onClickNote(String noteId) {
if (TextUtils.isEmpty(noteId)) return;
// open the latest version of this note just in case it has changed - this can
// happen if the note was tapped from the list fragment after it was updated
// by another fragment (such as NotificationCommentLikeFragment)
openNote(getActivity(), noteId, false, true);
}
});
}
mRecyclerView.setAdapter(mNotesAdapter);
} else {
if (!AccountHelper.isSignedInWordPressDotCom()) {
// let user know that notifications require a wp.com account and enable sign-in
showEmptyView(R.string.notifications_account_required, true);
} else {
// failed for some other reason
showEmptyView(R.string.error_refresh_notifications, false);
}
}
return view;
}
@Override
public void onActivityCreated(Bundle savedInstanceState) {
super.onActivityCreated(savedInstanceState);
if (savedInstanceState != null) {
setRestoredListPosition(savedInstanceState.getInt(KEY_LIST_SCROLL_POSITION, RecyclerView.NO_POSITION));
}
}
@Override
public void onResume() {
super.onResume();
refreshNotes();
// start listening to bucket change events
if (mBucket != null) {
mBucket.addListener(this);
}
// Remove notification if it is showing when we resume this activity.
NotificationManager notificationManager = (NotificationManager) getActivity().getSystemService(GCMIntentService.NOTIFICATION_SERVICE);
notificationManager.cancel(GCMIntentService.PUSH_NOTIFICATION_ID);
if (SimperiumUtils.isUserAuthorized()) {
SimperiumUtils.startBuckets();
AppLog.i(AppLog.T.NOTIFS, "Starting Simperium buckets");
}
}
@Override
public void onPause() {
// unregister the listener
if (mBucket != null) {
mBucket.removeListener(this);
}
super.onPause();
}
@Override
public void onDestroy() {
// Close Simperium cursor
if (mNotesAdapter != null) {
mNotesAdapter.closeCursor();
}
super.onDestroy();
}
/**
* Open a note fragment based on the type of note
*/
public static void openNote(Activity activity,
String noteId,
boolean shouldShowKeyboard,
boolean shouldSlideIn) {
if (noteId == null || activity == null) {
return;
}
Intent detailIntent = new Intent(activity, NotificationsDetailActivity.class);
detailIntent.putExtra(NOTE_ID_EXTRA, noteId);
detailIntent.putExtra(NOTE_INSTANT_REPLY_EXTRA, shouldShowKeyboard);
if (shouldSlideIn) {
ActivityLauncher.slideInFromRightForResult(activity, detailIntent, RequestCodes.NOTE_DETAIL);
} else {
activity.startActivityForResult(detailIntent, RequestCodes.NOTE_DETAIL);
}
}
private void setNoteIsHidden(String noteId, boolean isHidden) {
if (mNotesAdapter == null) return;
if (isHidden) {
mNotesAdapter.addHiddenNoteId(noteId);
} else {
// Scroll the row into view if it isn't visible so the animation can be seen
int notePosition = mNotesAdapter.getPositionForNote(noteId);
if (notePosition != RecyclerView.NO_POSITION &&
mLinearLayoutManager.findFirstCompletelyVisibleItemPosition() > notePosition) {
mLinearLayoutManager.scrollToPosition(notePosition);
}
mNotesAdapter.removeHiddenNoteId(noteId);
}
}
private void setNoteIsModerating(String noteId, boolean isModerating) {
if (mNotesAdapter == null) return;
if (isModerating) {
mNotesAdapter.addModeratingNoteId(noteId);
} else {
mNotesAdapter.removeModeratingNoteId(noteId);
}
}
public void updateLastSeenTime() {
// set the timestamp to now
try {
if (mNotesAdapter != null && mNotesAdapter.getCount() > 0 && SimperiumUtils.getMetaBucket() != null) {
Note newestNote = mNotesAdapter.getNote(0);
BucketObject meta = SimperiumUtils.getMetaBucket().get("meta");
if (meta != null && newestNote != null) {
meta.setProperty("last_seen", newestNote.getTimestamp());
meta.save();
}
}
} catch (BucketObjectMissingException e) {
// try again later, meta is created by wordpress.com
}
}
private void showEmptyView(@StringRes int stringResId, boolean showSignIn) {
if (isAdded() && mEmptyView != null) {
((TextView) mEmptyView.findViewById(R.id.text_empty)).setText(stringResId);
mEmptyView.setVisibility(View.VISIBLE);
Button btnSignIn = (Button) mEmptyView.findViewById(R.id.button_sign_in);
btnSignIn.setVisibility(showSignIn ? View.VISIBLE : View.GONE);
if (showSignIn) {
btnSignIn.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
ActivityLauncher.showSignInForResult(getActivity());
}
});
}
}
}
private void hideEmptyView() {
if (isAdded() && mEmptyView != null) {
mEmptyView.setVisibility(View.GONE);
}
}
void refreshNotes() {
if (!isAdded() || mNotesAdapter == null) {
return;
}
getActivity().runOnUiThread(new Runnable() {
@Override
public void run() {
mNotesAdapter.reloadNotes();
restoreListScrollPosition();
if (mNotesAdapter.getCount() > 0) {
hideEmptyView();
} else {
showEmptyView(R.string.notifications_empty_list, false);
}
}
});
}
private void restoreListScrollPosition() {
if (isAdded() && mRecyclerView != null && mRestoredScrollPosition != RecyclerView.NO_POSITION
&& mRestoredScrollPosition < mNotesAdapter.getCount()) {
// Restore scroll position in list
mLinearLayoutManager.scrollToPosition(mRestoredScrollPosition);
mRestoredScrollPosition = RecyclerView.NO_POSITION;
}
}
@Override
public void onSaveInstanceState(@Nonnull Bundle outState) {
if (outState.isEmpty()) {
outState.putBoolean("bug_19917_fix", true);
}
// Save list view scroll position
outState.putInt(KEY_LIST_SCROLL_POSITION, getScrollPosition());
super.onSaveInstanceState(outState);
}
private int getScrollPosition() {
if (!isAdded() || mRecyclerView == null) {
return RecyclerView.NO_POSITION;
}
return mLinearLayoutManager.findFirstVisibleItemPosition();
}
private void setRestoredListPosition(int listPosition) {
mRestoredScrollPosition = listPosition;
}
@Override
public void onActivityResult(int requestCode, int resultCode, Intent data) {
if (requestCode == RequestCodes.NOTE_DETAIL && resultCode == Activity.RESULT_OK && data != null) {
if (SimperiumUtils.getNotesBucket() == null) return;
try {
Note note = SimperiumUtils.getNotesBucket().get(StringUtils.notNullStr(data.getStringExtra(NOTE_MODERATE_ID_EXTRA)));
CommentStatus commentStatus = CommentStatus.fromString(data.getStringExtra(NOTE_MODERATE_STATUS_EXTRA));
moderateCommentForNote(note, commentStatus);
} catch (BucketObjectMissingException e) {
e.printStackTrace();
}
}
super.onActivityResult(requestCode, resultCode, data);
}
private void moderateCommentForNote(final Note note, final CommentStatus newStatus) {
if (!isAdded()) return;
if (newStatus == CommentStatus.APPROVED || newStatus == CommentStatus.UNAPPROVED) {
note.setLocalStatus(CommentStatus.toRESTString(newStatus));
note.save();
setNoteIsModerating(note.getId(), true);
CommentActions.moderateCommentForNote(note, newStatus,
new CommentActions.CommentActionListener() {
@Override
public void onActionResult(boolean succeeded) {
if (!isAdded()) return;
setNoteIsModerating(note.getId(), false);
if (!succeeded) {
note.setLocalStatus(null);
note.save();
ToastUtils.showToast(getActivity(),
R.string.error_moderate_comment,
ToastUtils.Duration.LONG
);
}
}
});
} else if (newStatus == CommentStatus.TRASH || newStatus == CommentStatus.SPAM) {
setNoteIsHidden(note.getId(), true);
// Show undo bar for trash or spam actions
new UndoBarController.UndoBar(getActivity())
.message(newStatus == CommentStatus.TRASH ? R.string.comment_trashed : R.string.comment_spammed)
.listener(new UndoBarController.AdvancedUndoListener() {
@Override
public void onHide(Parcelable parcelable) {
// Deleted notifications in Simperium never come back, so we won't
// make the request until the undo bar fades away
CommentActions.moderateCommentForNote(note, newStatus,
new CommentActions.CommentActionListener() {
@Override
public void onActionResult(boolean succeeded) {
if (!isAdded()) return;
if (!succeeded) {
setNoteIsHidden(note.getId(), false);
ToastUtils.showToast(getActivity(),
R.string.error_moderate_comment,
ToastUtils.Duration.LONG
);
}
}
});
}
@Override
public void onClear(@Nonnull Parcelable[] token) {
//noop
}
@Override
public void onUndo(Parcelable parcelable) {
setNoteIsHidden(note.getId(), false);
}
}).show();
}
}
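// Added summary (not in the original source): approving or unapproving a
// comment fires the moderation request immediately and stores an optimistic
// local status on the note, while trash and spam first hide the note and only
// send the request from onHide() once the undo bar has faded, so tapping undo
// simply un-hides the note without any network call being made.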
/**
* Simperium bucket listener methods
*/
@Override
public void onSaveObject(Bucket<Note> bucket, final Note object) {
refreshNotes();
}
@Override
public void onDeleteObject(Bucket<Note> bucket, final Note object) {
refreshNotes();
}
@Override
public void onNetworkChange(Bucket<Note> bucket, final Bucket.ChangeType type, final String key) {
// Reset the note's local status when a remote change is received
if (type == Bucket.ChangeType.MODIFY) {
try {
Note note = bucket.get(key);
if (note.isCommentType()) {
note.setLocalStatus(null);
note.save();
}
} catch (BucketObjectMissingException e) {
AppLog.e(AppLog.T.NOTIFS, "Could not create note after receiving change.");
}
}
refreshNotes();
}
@Override
public void onBeforeUpdateObject(Bucket<Note> noteBucket, Note note) {
//noop
}
@Override
public void onScrollToTop() {
if (isAdded() && getScrollPosition() > 0) {
mLinearLayoutManager.smoothScrollToPosition(mRecyclerView, null, 0);
}
}
}
| wangkang0627/WordPress-Android | WordPress/src/main/java/org/wordpress/android/ui/notifications/NotificationsListFragment.java | Java | gpl-2.0 | 16,506 |
<?xml version="1.0" encoding="ascii"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<title>schrodinger.trajectory.ffstructure._FFBondProperty</title>
<link rel="stylesheet" href="epydoc.css" type="text/css" />
<script type="text/javascript" src="epydoc.js"></script>
</head>
<body bgcolor="white" text="black" link="blue" vlink="#204080"
alink="#204080">
<!-- ==================== NAVIGATION BAR ==================== -->
<table class="navbar" border="0" width="100%" cellpadding="0"
bgcolor="#a0c0ff" cellspacing="0">
<tr valign="middle">
<!-- Home link -->
<th> <a
href="schrodinger-module.html">Home</a> </th>
<!-- Tree link -->
<th> <a
href="module-tree.html">Trees</a> </th>
<!-- Index link -->
<th> <a
href="identifier-index.html">Indices</a> </th>
<!-- Help link -->
<th> <a
href="help.html">Help</a> </th>
<!-- Project homepage -->
<th class="navbar" align="right" width="100%">
<table border="0" cellpadding="0" cellspacing="0">
<tr><th class="navbar" align="center"
>Suite 2012 Schrodinger Python API</th>
</tr></table></th>
</tr>
</table>
<table width="100%" cellpadding="0" cellspacing="0">
<tr valign="top">
<td width="100%">
<span class="breadcrumbs">
<a href="schrodinger-module.html">Package schrodinger</a> ::
<a href="schrodinger.trajectory-module.html">Package trajectory</a> ::
<a href="schrodinger.trajectory.ffstructure-module.html">Module ffstructure</a> ::
Class _FFBondProperty
</span>
</td>
<td>
<table cellpadding="0" cellspacing="0">
<!-- hide/show private -->
<tr><td align="right"><span class="options">[<a href="javascript:void(0);" class="privatelink"
onclick="toggle_private();">hide private</a>]</span></td></tr>
<tr><td align="right"><span class="options"
>[<a href="frames.html" target="_top">frames</a
>] | <a href="schrodinger.trajectory.ffstructure._FFBondProperty-class.html"
target="_top">no frames</a>]</span></td></tr>
</table>
</td>
</tr>
</table>
<!-- ==================== CLASS DESCRIPTION ==================== -->
<h1 class="epydoc">Class _FFBondProperty</h1><p class="nomargin-top"></p>
<pre class="base-tree">
UserDict.DictMixin --+
|
<strong class="uidshort">_FFBondProperty</strong>
</pre>
<hr />
<p>A dictionary to hold bond force field parameters.</p>
<!-- ==================== INSTANCE METHODS ==================== -->
<a name="section-InstanceMethods"></a>
<table class="summary" border="1" cellpadding="3"
cellspacing="0" width="100%" bgcolor="white">
<tr bgcolor="#70b0f0" class="table-header">
<td colspan="2" class="table-header">
<table border="0" cellpadding="0" cellspacing="0" width="100%">
<tr valign="top">
<td align="left"><span class="table-header">Instance Methods</span></td>
<td align="right" valign="top"
><span class="options">[<a href="#section-InstanceMethods"
class="privatelink" onclick="toggle_private();"
>hide private</a>]</span></td>
</tr>
</table>
</td>
</tr>
<tr>
<td width="15%" align="right" valign="top" class="summary">
<span class="summary-type"> </span>
</td><td class="summary">
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr>
<td><span class="summary-sig"><a name="__init__"></a><span class="summary-sig-name">__init__</span>(<span class="summary-sig-arg">self</span>,
<span class="summary-sig-arg">ffbond</span>,
<span class="summary-sig-arg">ffhandle</span>,
<span class="summary-sig-arg">_index</span>)</span></td>
<td align="right" valign="top">
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td width="15%" align="right" valign="top" class="summary">
<span class="summary-type"> </span>
</td><td class="summary">
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr>
<td><span class="summary-sig"><a href="schrodinger.trajectory.ffstructure._FFBondProperty-class.html#__getitem__" class="summary-sig-name">__getitem__</a>(<span class="summary-sig-arg">self</span>,
<span class="summary-sig-arg">item</span>)</span><br />
Return the given item if it is a valid force field parameter for this
bond, None if not.</td>
<td align="right" valign="top">
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td width="15%" align="right" valign="top" class="summary">
<span class="summary-type"> </span>
</td><td class="summary">
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr>
<td><span class="summary-sig"><a href="schrodinger.trajectory.ffstructure._FFBondProperty-class.html#__setitem__" class="summary-sig-name">__setitem__</a>(<span class="summary-sig-arg">self</span>,
<span class="summary-sig-arg">item</span>,
<span class="summary-sig-arg">value</span>)</span><br />
Set a force field parameter for the bond.</td>
<td align="right" valign="top">
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td width="15%" align="right" valign="top" class="summary">
<span class="summary-type"> </span>
</td><td class="summary">
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr>
<td><span class="summary-sig"><a name="keys"></a><span class="summary-sig-name">keys</span>(<span class="summary-sig-arg">self</span>)</span><br />
Returns a list of the property names for this bond.</td>
<td align="right" valign="top">
</td>
</tr>
</table>
</td>
</tr>
<tr>
<td colspan="2" class="summary">
<p class="indent-wrapped-lines"><b>Inherited from <code>UserDict.DictMixin</code></b>:
<code>__cmp__</code>,
<code>__contains__</code>,
<code>__iter__</code>,
<code>__len__</code>,
<code>__repr__</code>,
<code>clear</code>,
<code>get</code>,
<code>has_key</code>,
<code>items</code>,
<code>iteritems</code>,
<code>iterkeys</code>,
<code>itervalues</code>,
<code>pop</code>,
<code>popitem</code>,
<code>setdefault</code>,
<code>update</code>,
<code>values</code>
</p>
</td>
</tr>
</table>
<!-- ==================== METHOD DETAILS ==================== -->
<a name="section-MethodDetails"></a>
<table class="details" border="1" cellpadding="3"
cellspacing="0" width="100%" bgcolor="white">
<tr bgcolor="#70b0f0" class="table-header">
<td colspan="2" class="table-header">
<table border="0" cellpadding="0" cellspacing="0" width="100%">
<tr valign="top">
<td align="left"><span class="table-header">Method Details</span></td>
<td align="right" valign="top"
><span class="options">[<a href="#section-MethodDetails"
class="privatelink" onclick="toggle_private();"
>hide private</a>]</span></td>
</tr>
</table>
</td>
</tr>
</table>
<a name="__getitem__"></a>
<div>
<table class="details" border="1" cellpadding="3"
cellspacing="0" width="100%" bgcolor="white">
<tr><td>
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr valign="top"><td>
<h3 class="epydoc"><span class="sig"><span class="sig-name">__getitem__</span>(<span class="sig-arg">self</span>,
<span class="sig-arg">item</span>)</span>
<br /><em class="fname">(Indexing operator)</em>
</h3>
</td><td align="right" valign="top"
>
</td>
</tr></table>
<p>Return the given item if it is a valid force field parameter for this
bond, None if not.</p>
<dl class="fields">
<dt>Parameters:</dt>
<dd><ul class="nomargin-top">
<li><strong class="pname"><code>item</code></strong> (string) - The property name.</li>
</ul></dd>
</dl>
</td></tr></table>
</div>
<a name="__setitem__"></a>
<div>
<table class="details" border="1" cellpadding="3"
cellspacing="0" width="100%" bgcolor="white">
<tr><td>
<table width="100%" cellpadding="0" cellspacing="0" border="0">
<tr valign="top"><td>
<h3 class="epydoc"><span class="sig"><span class="sig-name">__setitem__</span>(<span class="sig-arg">self</span>,
<span class="sig-arg">item</span>,
<span class="sig-arg">value</span>)</span>
<br /><em class="fname">(Index assignment operator)</em>
</h3>
</td><td align="right" valign="top"
>
</td>
</tr></table>
<p>Set a force field parameter for the bond.</p>
<dl class="fields">
<dt>Parameters:</dt>
<dd><ul class="nomargin-top">
<li><strong class="pname"><code>item</code></strong> (string) - The property name.</li>
<li><strong class="pname"><code>value</code></strong> - The value to be set.</li>
</ul></dd>
</dl>
</td></tr></table>
</div>
<br />
<!-- ==================== NAVIGATION BAR ==================== -->
<table class="navbar" border="0" width="100%" cellpadding="0"
bgcolor="#a0c0ff" cellspacing="0">
<tr valign="middle">
<!-- Home link -->
<th> <a
href="schrodinger-module.html">Home</a> </th>
<!-- Tree link -->
<th> <a
href="module-tree.html">Trees</a> </th>
<!-- Index link -->
<th> <a
href="identifier-index.html">Indices</a> </th>
<!-- Help link -->
<th> <a
href="help.html">Help</a> </th>
<!-- Project homepage -->
<th class="navbar" align="right" width="100%">
<table border="0" cellpadding="0" cellspacing="0">
<tr><th class="navbar" align="center"
>Suite 2012 Schrodinger Python API</th>
</tr></table></th>
</tr>
</table>
<table border="0" cellpadding="0" cellspacing="0" width="100%%">
<tr>
<td align="left" class="footer">
Generated by Epydoc 3.0.1 on Tue Sep 25 02:23:07 2012
</td>
<td align="right" class="footer">
<a target="mainFrame" href="http://epydoc.sourceforge.net"
>http://epydoc.sourceforge.net</a>
</td>
</tr>
</table>
<script type="text/javascript">
<!--
// Private objects are initially displayed (because if
// javascript is turned off then we want them to be
// visible); but by default, we want to hide them. So hide
// them unless we have a cookie that says to show them.
checkCookie();
// -->
</script>
</body>
</html>
| platinhom/ManualHom | Schrodinger/Schrodinger_2012_docs/python_api/api/schrodinger.trajectory.ffstructure._FFBondProperty-class.html | HTML | gpl-2.0 | 11,143 |
/*
* QLogic qlge NIC HBA Driver
* Copyright (c) 2003-2008 QLogic Corporation
* See LICENSE.qlge for copyright and licensing details.
* Author: Linux qlge network device driver by
* Ron Mercer <ron.mercer@qlogic.com>
*/
#ifndef _QLGE_H_
#define _QLGE_H_
#include <linux/version.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/udp.h>
#include <linux/mii.h>
#include <asm/io.h>
/*
* General definitions...
*/
#define DRV_NAME "qlge"
#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
#define DRV_VERSION "1.00.00.25"
#define DIS_VERSION "2.6.16-2.6.18-p25"
#define REL_DATE "100706"
#define PFX "qlge: "
#define QPRINTK(qdev, nlevel, klevel, fmt, args...) \
do { \
if (!((qdev)->msg_enable & NETIF_MSG_##nlevel)) \
; \
else \
dev_printk(KERN_##klevel, &((qdev)->pdev->dev), \
"%s: " fmt, __func__, ##args); \
} while (0)
#if 0
#define QPRINTK_DBG(qdev, nlevel, klevel, fmt, args...) \
do { \
if (!((qdev)->msg_enable & NETIF_MSG_##nlevel)) \
; \
else \
dev_printk(KERN_##klevel, &((qdev)->pdev->dev), \
"%s: " fmt, __func__, ##args); \
} while (0)
#else
#define QPRINTK_DBG(qdev, nlevel, klevel, fmt, args...)
#endif
#define WQ_ADDR_ALIGN 0x3 /* 4 byte alignment */
#define QLGE_VENDOR_ID 0x1077
#define QLGE_DEVICE_ID_8012 0x8012
#define QLGE_DEVICE_ID_8000 0x8000
#define MAX_CPUS 8
#define MAX_TX_RINGS MAX_CPUS
#define MAX_RX_RINGS (MAX_CPUS + MAX_TX_RINGS)
#define NUM_TX_RING_ENTRIES 1024 /* Power of 2, range 32 to 65536 */
#define NUM_RX_RING_ENTRIES 1024 /* Power of 2, range 32 to 65536 */
#define NUM_SMALL_BUFFERS 1024 /* Power of 2, range 32 to 65536 */
#define NUM_LARGE_BUFFERS 1024 /* Power of 2, range 32 to 65536 */
#define DB_PAGE_SIZE 4096
/* Calculate the number of (4k) pages required to
* contain a buffer queue of the given length.
*/
#define MAX_DB_PAGES_PER_BQ(x) \
(((x * sizeof(u64)) / DB_PAGE_SIZE) + \
(((x * sizeof(u64)) % DB_PAGE_SIZE) ? 1 : 0))
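/* Worked example (added, not part of the driver): with NUM_SMALL_BUFFERS = 1024
 * the buffer queue holds 1024 * sizeof(u64) = 8192 bytes of descriptors, i.e.
 * 8192 / 4096 = 2 doorbell pages with no remainder, so
 * MAX_DB_PAGES_PER_BQ(1024) evaluates to 2. */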
#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
#define SMALL_BUFFER_SIZE 256 /* Per FCoE largest frame for normal MTU */
#define LARGE_BUFFER_SIZE 9600 /* Per FCoE largest frame for jumbo MTU */
#define MAX_SPLIT_SIZE 1023
#define QLGE_SB_PAD 0
#define MAX_CQ 128
#define DFLT_COALESCE_WAIT 100 /* 100 usec wait for coalescing */
#define MAX_INTER_FRAME_WAIT 10 /* 10 usec max interframe-wait for coalescing */
#define DFLT_INTER_FRAME_WAIT (MAX_INTER_FRAME_WAIT/2)
#define UDELAY_COUNT 3
#define UDELAY_DELAY 100
#define TX_DESC_PER_IOCB 8
/* The maximum number of frags we handle is based
* on PAGE_SIZE...
*/
#if (PAGE_SHIFT == 12) || (PAGE_SHIFT == 13) /* 4k & 8k pages */
#define TX_DESC_PER_OAL ((MAX_SKB_FRAGS - TX_DESC_PER_IOCB) + 2)
#else /* all other page sizes */
#define TX_DESC_PER_OAL 0
#endif
/* Word shifting for converting 64-bit
* address to a series of 16-bit words.
* This is used for some MPI firmware
* mailbox commands.
*/
#define LSW(x) ((u16)(x))
#define MSW(x) ((u16)((u32)(x) >> 16))
#define LSD(x) ((u32)((u64)(x)))
#define MSD(x) ((u32)((((u64)(x)) >> 16) >> 16))
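/* Illustrative example (added): for x = 0x1122334455667788ULL,
 * LSD(x) = 0x55667788, MSD(x) = 0x11223344,
 * LSW(LSD(x)) = 0x7788, MSW(LSD(x)) = 0x5566,
 * LSW(MSD(x)) = 0x3344, MSW(MSD(x)) = 0x1122,
 * i.e. the four 16-bit words written into the MPI mailbox registers. */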
/* MPI test register definitions. This register
* is used for determining alternate NIC function's
* PCI->func number.
*/
enum {
MPI_TEST_FUNC_PORT_CFG = 0x1002,
MPI_TEST_NIC1_FUNC_SHIFT = 1,
MPI_TEST_NIC2_FUNC_SHIFT = 5,
MPI_TEST_NIC_FUNC_MASK = 0x00000007,
};
/*
* Processor Address Register (PROC_ADDR) bit definitions.
*/
enum {
/* Misc. stuff */
MAILBOX_COUNT = 16,
PROC_ADDR_RDY = (1 << 31),
PROC_ADDR_R = (1 << 30),
PROC_ADDR_ERR = (1 << 29),
PROC_ADDR_DA = (1 << 28),
PROC_ADDR_FUNC0_MBI = 0x00001180,
PROC_ADDR_FUNC0_MBO = (PROC_ADDR_FUNC0_MBI + MAILBOX_COUNT),
PROC_ADDR_FUNC0_CTL = 0x000011a1,
PROC_ADDR_FUNC2_MBI = 0x00001280,
PROC_ADDR_FUNC2_MBO = (PROC_ADDR_FUNC2_MBI + MAILBOX_COUNT),
PROC_ADDR_FUNC2_CTL = 0x000012a1,
PROC_ADDR_MPI_RISC = 0x00000000,
PROC_ADDR_MDE = 0x00010000,
PROC_ADDR_REGBLOCK = 0x00020000,
PROC_ADDR_RISC_REG = 0x00030000,
};
/*
* System Register (SYS) bit definitions.
*/
enum {
SYS_EFE = (1 << 0),
SYS_FAE = (1 << 1),
SYS_MDC = (1 << 2),
SYS_DST = (1 << 3),
SYS_DWC = (1 << 4),
SYS_EVW = (1 << 5),
SYS_OMP_DLY_MASK = 0x3f000000,
/*
* There are no values defined as of edit #15.
*/
SYS_ODI = (1 << 14),
};
/*
* Reset/Failover Register (RST_FO) bit definitions.
*/
enum {
RST_FO_TFO = (1 << 0),
RST_FO_RR_MASK = 0x00060000,
RST_FO_RR_CQ_CAM = 0x00000000,
RST_FO_RR_DROP = 0x00000002,
RST_FO_RR_DQ = 0x00000004,
RST_FO_RR_RCV_FUNC_CQ = 0x00000006,
RST_FO_FRB = (1 << 12),
RST_FO_MOP = (1 << 13),
RST_FO_REG = (1 << 14),
RST_FO_FR = (1 << 15),
};
/*
* Function Specific Control Register (FSC) bit definitions.
*/
enum {
FSC_DBRST_MASK = 0x00070000,
FSC_DBRST_256 = 0x00000000,
FSC_DBRST_512 = 0x00000001,
FSC_DBRST_768 = 0x00000002,
FSC_DBRST_1024 = 0x00000003,
FSC_DBL_MASK = 0x00180000,
FSC_DBL_DBRST = 0x00000000,
FSC_DBL_MAX_PLD = 0x00000008,
FSC_DBL_MAX_BRST = 0x00000010,
FSC_DBL_128_BYTES = 0x00000018,
FSC_EC = (1 << 5),
FSC_EPC_MASK = 0x00c00000,
FSC_EPC_INBOUND = (1 << 6),
FSC_EPC_OUTBOUND = (1 << 7),
FSC_VM_PAGESIZE_MASK = 0x07000000,
FSC_VM_PAGE_2K = 0x00000100,
FSC_VM_PAGE_4K = 0x00000200,
FSC_VM_PAGE_8K = 0x00000300,
FSC_VM_PAGE_64K = 0x00000600,
FSC_SH = (1 << 11),
FSC_DSB = (1 << 12),
FSC_STE = (1 << 13),
FSC_FE = (1 << 15),
};
/*
* Host Command Status Register (CSR) bit definitions.
*/
enum {
CSR_ERR_STS_MASK = 0x0000003f,
/*
* There are no values defined as of edit #15.
*/
CSR_RR = (1 << 8),
CSR_HRI = (1 << 9),
CSR_RP = (1 << 10),
CSR_CMD_PARM_SHIFT = 22,
CSR_CMD_NOP = 0x00000000,
CSR_CMD_SET_RST = 0x10000000,
CSR_CMD_CLR_RST = 0x20000000,
CSR_CMD_SET_PAUSE = 0x30000000,
CSR_CMD_CLR_PAUSE = 0x40000000,
CSR_CMD_SET_H2R_INT = 0x50000000,
CSR_CMD_CLR_H2R_INT = 0x60000000,
CSR_CMD_PAR_EN = 0x70000000,
CSR_CMD_SET_BAD_PAR = 0x80000000,
CSR_CMD_CLR_BAD_PAR = 0x90000000,
CSR_CMD_CLR_R2PCI_INT = 0xa0000000,
};
/*
* Configuration Register (CFG) bit definitions.
*/
enum {
CFG_LRQ = (1 << 0),
CFG_DRQ = (1 << 1),
CFG_LR = (1 << 2),
CFG_DR = (1 << 3),
CFG_LE = (1 << 5),
CFG_LCQ = (1 << 6),
CFG_DCQ = (1 << 7),
CFG_Q_SHIFT = 8,
CFG_Q_MASK = 0x7f000000,
};
/*
* Status Register (STS) bit definitions.
*/
enum {
STS_FE = (1 << 0),
STS_PI = (1 << 1),
STS_PL0 = (1 << 2),
STS_PL1 = (1 << 3),
STS_PI0 = (1 << 4),
STS_PI1 = (1 << 5),
STS_FUNC_ID_MASK = 0x000000c0,
STS_FUNC_ID_SHIFT = 6,
STS_F0E = (1 << 8),
STS_F1E = (1 << 9),
STS_F2E = (1 << 10),
STS_F3E = (1 << 11),
STS_NFE = (1 << 12),
};
/*
* Interrupt Enable Register (INTR_EN) bit definitions.
*/
enum {
INTR_EN_INTR_MASK = 0x007f0000,
INTR_EN_TYPE_MASK = 0x03000000,
INTR_EN_TYPE_ENABLE = 0x00000100,
INTR_EN_TYPE_DISABLE = 0x00000200,
INTR_EN_TYPE_READ = 0x00000300,
INTR_EN_IHD = (1 << 13),
INTR_EN_IHD_MASK = (INTR_EN_IHD << 16),
INTR_EN_EI = (1 << 14),
INTR_EN_EN = (1 << 15),
};
/*
* Interrupt Mask Register (INTR_MASK) bit definitions.
*/
enum {
INTR_MASK_PI = (1 << 0),
INTR_MASK_HL0 = (1 << 1),
INTR_MASK_LH0 = (1 << 2),
INTR_MASK_HL1 = (1 << 3),
INTR_MASK_LH1 = (1 << 4),
INTR_MASK_SE = (1 << 5),
INTR_MASK_LSC = (1 << 6),
INTR_MASK_MC = (1 << 7),
INTR_MASK_LINK_IRQS = INTR_MASK_LSC | INTR_MASK_SE | INTR_MASK_MC,
};
/*
* Register (REV_ID) bit definitions.
*/
enum {
REV_ID_MASK = 0x0000000f,
REV_ID_NICROLL_SHIFT = 0,
REV_ID_NICREV_SHIFT = 4,
REV_ID_XGROLL_SHIFT = 8,
REV_ID_XGREV_SHIFT = 12,
REV_ID_CHIPREV_SHIFT = 28,
};
/*
* Force ECC Error Register (FRC_ECC_ERR) bit definitions.
*/
enum {
FRC_ECC_ERR_VW = (1 << 12),
FRC_ECC_ERR_VB = (1 << 13),
FRC_ECC_ERR_NI = (1 << 14),
FRC_ECC_ERR_NO = (1 << 15),
FRC_ECC_PFE_SHIFT = 16,
FRC_ECC_ERR_DO = (1 << 18),
FRC_ECC_P14 = (1 << 19),
};
/*
* Error Status Register (ERR_STS) bit definitions.
*/
enum {
ERR_STS_NOF = (1 << 0),
ERR_STS_NIF = (1 << 1),
ERR_STS_DRP = (1 << 2),
ERR_STS_XGP = (1 << 3),
ERR_STS_FOU = (1 << 4),
ERR_STS_FOC = (1 << 5),
ERR_STS_FOF = (1 << 6),
ERR_STS_FIU = (1 << 7),
ERR_STS_FIC = (1 << 8),
ERR_STS_FIF = (1 << 9),
ERR_STS_MOF = (1 << 10),
ERR_STS_TA = (1 << 11),
ERR_STS_MA = (1 << 12),
ERR_STS_MPE = (1 << 13),
ERR_STS_SCE = (1 << 14),
ERR_STS_STE = (1 << 15),
ERR_STS_FOW = (1 << 16),
ERR_STS_UE = (1 << 17),
ERR_STS_MCH = (1 << 26),
ERR_STS_LOC_SHIFT = 27,
};
/*
* RAM Debug Address Register (RAM_DBG_ADDR) bit definitions.
*/
enum {
RAM_DBG_ADDR_FW = (1 << 30),
RAM_DBG_ADDR_FR = (1 << 31),
};
/*
* Semaphore Register (SEM) bit definitions.
*/
enum {
/*
* Example:
* reg = SEM_XGMAC0_MASK | (SEM_SET << SEM_XGMAC0_SHIFT)
*/
SEM_CLEAR = 0,
SEM_SET = 1,
SEM_FORCE = 3,
SEM_XGMAC0_SHIFT = 0,
SEM_XGMAC1_SHIFT = 2,
SEM_ICB_SHIFT = 4,
SEM_MAC_ADDR_SHIFT = 6,
SEM_FLASH_SHIFT = 8,
SEM_PROBE_SHIFT = 10,
SEM_RT_IDX_SHIFT = 12,
SEM_PROC_REG_SHIFT = 14,
SEM_XGMAC0_MASK = 0x00030000,
SEM_XGMAC1_MASK = 0x000c0000,
SEM_ICB_MASK = 0x00300000,
SEM_MAC_ADDR_MASK = 0x00c00000,
SEM_FLASH_MASK = 0x03000000,
SEM_PROBE_MASK = 0x0c000000,
SEM_RT_IDX_MASK = 0x30000000,
SEM_PROC_REG_MASK = 0xc0000000,
};
/*
* 10G MAC Address Register (XGMAC_ADDR) bit definitions.
*/
enum {
XGMAC_ADDR_RDY = (1 << 31),
XGMAC_ADDR_R = (1 << 30),
XGMAC_ADDR_XME = (1 << 29),
/* XGMAC control registers */
PAUSE_SRC_LO = 0x00000100,
PAUSE_SRC_HI = 0x00000104,
GLOBAL_CFG = 0x00000108,
GLOBAL_CFG_RESET = (1 << 0),
GLOBAL_CFG_JUMBO = (1 << 6),
GLOBAL_CFG_TX_STAT_EN = (1 << 10),
GLOBAL_CFG_RX_STAT_EN = (1 << 11),
TX_CFG = 0x0000010c,
TX_CFG_RESET = (1 << 0),
TX_CFG_EN = (1 << 1),
TX_CFG_PREAM = (1 << 2),
RX_CFG = 0x00000110,
RX_CFG_RESET = (1 << 0),
RX_CFG_EN = (1 << 1),
RX_CFG_PREAM = (1 << 2),
FLOW_CTL = 0x0000011c,
PAUSE_OPCODE = 0x00000120,
PAUSE_TIMER = 0x00000124,
PAUSE_FRM_DEST_LO = 0x00000128,
PAUSE_FRM_DEST_HI = 0x0000012c,
MAC_TX_PARAMS = 0x00000134,
MAC_TX_PARAMS_JUMBO = (1 << 31),
MAC_TX_PARAMS_SIZE_SHIFT = 16,
MAC_RX_PARAMS = 0x00000138,
MAC_SYS_INT = 0x00000144,
MAC_SYS_INT_MASK = 0x00000148,
MAC_MGMT_INT = 0x0000014c,
MAC_MGMT_IN_MASK = 0x00000150,
EXT_ARB_MODE = 0x000001fc,
/* XGMAC TX statistics registers */
TX_PKTS = 0x00000200,
TX_BYTES = 0x00000208,
TX_MCAST_PKTS = 0x00000210,
TX_BCAST_PKTS = 0x00000218,
TX_UCAST_PKTS = 0x00000220,
TX_CTL_PKTS = 0x00000228,
TX_PAUSE_PKTS = 0x00000230,
TX_64_PKT = 0x00000238,
TX_65_TO_127_PKT = 0x00000240,
TX_128_TO_255_PKT = 0x00000248,
TX_256_511_PKT = 0x00000250,
TX_512_TO_1023_PKT = 0x00000258,
TX_1024_TO_1518_PKT = 0x00000260,
TX_1519_TO_MAX_PKT = 0x00000268,
TX_UNDERSIZE_PKT = 0x00000270,
TX_OVERSIZE_PKT = 0x00000278,
/* XGMAC statistics control registers */
RX_HALF_FULL_DET = 0x000002a0,
TX_HALF_FULL_DET = 0x000002a4,
RX_OVERFLOW_DET = 0x000002a8,
TX_OVERFLOW_DET = 0x000002ac,
RX_HALF_FULL_MASK = 0x000002b0,
TX_HALF_FULL_MASK = 0x000002b4,
RX_OVERFLOW_MASK = 0x000002b8,
TX_OVERFLOW_MASK = 0x000002bc,
STAT_CNT_CTL = 0x000002c0,
STAT_CNT_CTL_CLEAR_TX = (1 << 0),
STAT_CNT_CTL_CLEAR_RX = (1 << 1),
AUX_RX_HALF_FULL_DET = 0x000002d0,
AUX_TX_HALF_FULL_DET = 0x000002d4,
AUX_RX_OVERFLOW_DET = 0x000002d8,
AUX_TX_OVERFLOW_DET = 0x000002dc,
AUX_RX_HALF_FULL_MASK = 0x000002f0,
AUX_TX_HALF_FULL_MASK = 0x000002f4,
AUX_RX_OVERFLOW_MASK = 0x000002f8,
AUX_TX_OVERFLOW_MASK = 0x000002fc,
/* XGMAC RX statistics registers */
RX_BYTES = 0x00000300,
RX_BYTES_OK = 0x00000308,
RX_PKTS = 0x00000310,
RX_PKTS_OK = 0x00000318,
RX_BCAST_PKTS = 0x00000320,
RX_MCAST_PKTS = 0x00000328,
RX_UCAST_PKTS = 0x00000330,
RX_UNDERSIZE_PKTS = 0x00000338,
RX_OVERSIZE_PKTS = 0x00000340,
RX_JABBER_PKTS = 0x00000348,
RX_UNDERSIZE_FCERR_PKTS = 0x00000350,
RX_DROP_EVENTS = 0x00000358,
RX_FCERR_PKTS = 0x00000360,
RX_ALIGN_ERR = 0x00000368,
RX_SYMBOL_ERR = 0x00000370,
RX_MAC_ERR = 0x00000378,
RX_CTL_PKTS = 0x00000380,
RX_PAUSE_PKTS = 0x00000388,
RX_64_PKTS = 0x00000390,
RX_65_TO_127_PKTS = 0x00000398,
RX_128_255_PKTS = 0x000003a0,
RX_256_511_PKTS = 0x000003a8,
RX_512_TO_1023_PKTS = 0x000003b0,
RX_1024_TO_1518_PKTS = 0x000003b8,
RX_1519_TO_MAX_PKTS = 0x000003c0,
RX_LEN_ERR_PKTS = 0x000003c8,
/* XGMAC MDIO control registers */
MDIO_TX_DATA = 0x00000400,
MDIO_RX_DATA = 0x00000410,
MDIO_CMD = 0x00000420,
MDIO_PHY_ADDR = 0x00000430,
MDIO_PORT = 0x00000440,
MDIO_STATUS = 0x00000450,
XGMAC_REGISTER_END = 0x00000740,
};
/*
* Enhanced Transmission Schedule Registers (NIC_ETS,CNA_ETS) bit definitions.
*/
enum {
ETS_QUEUE_SHIFT = 29,
ETS_REF = (1 << 26),
ETS_RS = (1 << 27),
ETS_P = (1 << 28),
ETS_FC_COS_SHIFT = 23,
};
/*
* Flash Address Register (FLASH_ADDR) bit definitions.
*/
enum {
FLASH_ADDR_RDY = (1 << 31),
FLASH_ADDR_R = (1 << 30),
FLASH_ADDR_ERR = (1 << 29),
};
/*
* Stop CQ Processing Register (CQ_STOP) bit definitions.
*/
enum {
CQ_STOP_QUEUE_MASK = (0x007f0000),
CQ_STOP_TYPE_MASK = (0x03000000),
CQ_STOP_TYPE_START = 0x00000100,
CQ_STOP_TYPE_STOP = 0x00000200,
CQ_STOP_TYPE_READ = 0x00000300,
CQ_STOP_EN = (1 << 15),
};
/*
* MAC Protocol Address Index Register (MAC_ADDR_IDX) bit definitions.
*/
enum {
MAC_ADDR_IDX_SHIFT = 4,
MAC_ADDR_TYPE_SHIFT = 16,
MAC_ADDR_TYPE_MASK = 0x000f0000,
MAC_ADDR_TYPE_CAM_MAC = 0x00000000,
MAC_ADDR_TYPE_MULTI_MAC = 0x00010000,
MAC_ADDR_TYPE_VLAN = 0x00020000,
MAC_ADDR_TYPE_MULTI_FLTR = 0x00030000,
MAC_ADDR_TYPE_FC_MAC = 0x00040000,
MAC_ADDR_TYPE_MGMT_MAC = 0x00050000,
MAC_ADDR_TYPE_MGMT_VLAN = 0x00060000,
MAC_ADDR_TYPE_MGMT_V4 = 0x00070000,
MAC_ADDR_TYPE_MGMT_V6 = 0x00080000,
MAC_ADDR_TYPE_MGMT_TU_DP = 0x00090000,
MAC_ADDR_ADR = (1 << 25),
MAC_ADDR_RS = (1 << 26),
MAC_ADDR_E = (1 << 27),
MAC_ADDR_MR = (1 << 30),
MAC_ADDR_MW = (1 << 31),
MAX_MULTICAST_ENTRIES = 32,
};
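/* Illustrative use (a sketch, not a verbatim driver excerpt): a CAM
 * entry is addressed by writing something like
 *	offset | (index << MAC_ADDR_IDX_SHIFT) | MAC_ADDR_TYPE_CAM_MAC
 * to MAC_ADDR_IDX, then moving the entry words through MAC_ADDR_DATA,
 * polling MAC_ADDR_MW/MAC_ADDR_MR for readiness.
 */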
/*
* MAC Protocol Address Index Register (SPLT_HDR) bit definitions.
*/
enum {
SPLT_HDR_EP = (1 << 31),
};
/*
* FCoE Receive Configuration Register (FC_RCV_CFG) bit definitions.
*/
enum {
FC_RCV_CFG_ECT = (1 << 15),
FC_RCV_CFG_DFH = (1 << 20),
FC_RCV_CFG_DVF = (1 << 21),
FC_RCV_CFG_RCE = (1 << 27),
FC_RCV_CFG_RFE = (1 << 28),
FC_RCV_CFG_TEE = (1 << 29),
FC_RCV_CFG_TCE = (1 << 30),
FC_RCV_CFG_TFE = (1 << 31),
};
/*
* NIC Receive Configuration Register (NIC_RCV_CFG) bit definitions.
*/
enum {
NIC_RCV_CFG_PPE = (1 << 0),
NIC_RCV_CFG_VLAN_MASK = 0x00060000,
NIC_RCV_CFG_VLAN_ALL = 0x00000000,
NIC_RCV_CFG_VLAN_MATCH_ONLY = 0x00000002,
NIC_RCV_CFG_VLAN_MATCH_AND_NON = 0x00000004,
NIC_RCV_CFG_VLAN_NONE_AND_NON = 0x00000006,
NIC_RCV_CFG_RV = (1 << 3),
NIC_RCV_CFG_DFQ_MASK = (0x7f000000),
NIC_RCV_CFG_DFQ_SHIFT = 8,
NIC_RCV_CFG_DFQ = 0, /* HARDCODE default queue to 0. */
};
/*
* Mgmt Receive Configuration Register (MGMT_RCV_CFG) bit definitions.
*/
enum {
MGMT_RCV_CFG_ARP = (1 << 0),
MGMT_RCV_CFG_DHC = (1 << 1),
MGMT_RCV_CFG_DHS = (1 << 2),
MGMT_RCV_CFG_NP = (1 << 3),
MGMT_RCV_CFG_I6N = (1 << 4),
MGMT_RCV_CFG_I6R = (1 << 5),
MGMT_RCV_CFG_DH6 = (1 << 6),
MGMT_RCV_CFG_UD1 = (1 << 7),
MGMT_RCV_CFG_UD0 = (1 << 8),
MGMT_RCV_CFG_BCT = (1 << 9),
MGMT_RCV_CFG_MCT = (1 << 10),
MGMT_RCV_CFG_DM = (1 << 11),
MGMT_RCV_CFG_RM = (1 << 12),
MGMT_RCV_CFG_STL = (1 << 13),
MGMT_RCV_CFG_VLAN_MASK = 0xc0000000,
MGMT_RCV_CFG_VLAN_ALL = 0x00000000,
MGMT_RCV_CFG_VLAN_MATCH_ONLY = 0x00004000,
MGMT_RCV_CFG_VLAN_MATCH_AND_NON = 0x00008000,
MGMT_RCV_CFG_VLAN_NONE_AND_NON = 0x0000c000,
};
/*
* Routing Index Register (RT_IDX) bit definitions.
*/
enum {
RT_IDX_IDX_SHIFT = 8,
RT_IDX_TYPE_MASK = 0x000f0000,
RT_IDX_TYPE_RT = 0x00000000,
RT_IDX_TYPE_RT_INV = 0x00010000,
RT_IDX_TYPE_NICQ = 0x00020000,
RT_IDX_TYPE_NICQ_INV = 0x00030000,
RT_IDX_DST_MASK = 0x00700000,
RT_IDX_DST_RSS = 0x00000000,
RT_IDX_DST_CAM_Q = 0x00100000,
RT_IDX_DST_COS_Q = 0x00200000,
RT_IDX_DST_DFLT_Q = 0x00300000,
RT_IDX_DST_DEST_Q = 0x00400000,
RT_IDX_RS = (1 << 26),
RT_IDX_E = (1 << 27),
RT_IDX_MR = (1 << 30),
RT_IDX_MW = (1 << 31),
/* Nic Queue format - type 2 bits */
RT_IDX_BCAST = (1 << 0),
RT_IDX_MCAST = (1 << 1),
RT_IDX_MCAST_MATCH = (1 << 2),
RT_IDX_MCAST_REG_MATCH = (1 << 3),
RT_IDX_MCAST_HASH_MATCH = (1 << 4),
RT_IDX_FC_MACH = (1 << 5),
RT_IDX_ETH_FCOE = (1 << 6),
RT_IDX_CAM_HIT = (1 << 7),
RT_IDX_CAM_BIT0 = (1 << 8),
RT_IDX_CAM_BIT1 = (1 << 9),
RT_IDX_VLAN_TAG = (1 << 10),
RT_IDX_VLAN_MATCH = (1 << 11),
RT_IDX_VLAN_FILTER = (1 << 12),
RT_IDX_ETH_SKIP1 = (1 << 13),
RT_IDX_ETH_SKIP2 = (1 << 14),
RT_IDX_BCAST_MCAST_MATCH = (1 << 15),
RT_IDX_802_3 = (1 << 16),
RT_IDX_LLDP = (1 << 17),
RT_IDX_UNUSED018 = (1 << 18),
RT_IDX_UNUSED019 = (1 << 19),
RT_IDX_UNUSED20 = (1 << 20),
RT_IDX_UNUSED21 = (1 << 21),
RT_IDX_ERR = (1 << 22),
RT_IDX_VALID = (1 << 23),
RT_IDX_TU_CSUM_ERR = (1 << 24),
RT_IDX_IP_CSUM_ERR = (1 << 25),
RT_IDX_MAC_ERR = (1 << 26),
RT_IDX_RSS_TCP6 = (1 << 27),
RT_IDX_RSS_TCP4 = (1 << 28),
RT_IDX_RSS_IPV6 = (1 << 29),
RT_IDX_RSS_IPV4 = (1 << 30),
RT_IDX_RSS_MATCH = (1 << 31),
/* Hierarchy for the NIC Queue Mask */
RT_IDX_ALL_ERR_SLOT = 0,
RT_IDX_MAC_ERR_SLOT = 0,
RT_IDX_IP_CSUM_ERR_SLOT = 1,
RT_IDX_TCP_UDP_CSUM_ERR_SLOT = 2,
RT_IDX_BCAST_SLOT = 3,
RT_IDX_MCAST_MATCH_SLOT = 4,
RT_IDX_ALLMULTI_SLOT = 5,
RT_IDX_UNUSED6_SLOT = 6,
RT_IDX_UNUSED7_SLOT = 7,
RT_IDX_RSS_MATCH_SLOT = 8,
RT_IDX_RSS_IPV4_SLOT = 8,
RT_IDX_RSS_IPV6_SLOT = 9,
RT_IDX_RSS_TCP4_SLOT = 10,
RT_IDX_RSS_TCP6_SLOT = 11,
RT_IDX_CAM_HIT_SLOT = 12,
RT_IDX_UNUSED013 = 13,
RT_IDX_UNUSED014 = 14,
RT_IDX_PROMISCUOUS_SLOT = 15,
RT_IDX_MAX_SLOTS = 16,
};
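/* Illustrative routing entry (a sketch, not a verbatim driver excerpt):
 * to steer CAM hits to their per-CAM completion queue one would program
 * something like
 *	RT_IDX_DST_CAM_Q | RT_IDX_TYPE_NICQ |
 *		(RT_IDX_CAM_HIT_SLOT << RT_IDX_IDX_SHIFT)
 * into RT_IDX, with the match condition (e.g. RT_IDX_CAM_HIT) supplied
 * through RT_DATA.
 */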
/*
* Serdes Address Register (XG_SERDES_ADDR) bit definitions.
*/
enum {
XG_SERDES_ADDR_RDY = (1 << 31),
XG_SERDES_ADDR_R = (1 << 30),
};
/*
* Control Register Set Map
*/
enum {
PROC_ADDR = 0, /* Use semaphore */
PROC_DATA = 0x04, /* Use semaphore */
SYS = 0x08,
RST_FO = 0x0c,
FSC = 0x10,
CSR = 0x14,
LED = 0x18,
ICB_RID = 0x1c, /* Use semaphore */
ICB_L = 0x20, /* Use semaphore */
ICB_H = 0x24, /* Use semaphore */
CFG = 0x28,
BIOS_ADDR = 0x2c,
STS = 0x30,
INTR_EN = 0x34,
INTR_MASK = 0x38,
ISR1 = 0x3c,
ISR2 = 0x40,
ISR3 = 0x44,
ISR4 = 0x48,
REV_ID = 0x4c,
FRC_ECC_ERR = 0x50,
ERR_STS = 0x54,
RAM_DBG_ADDR = 0x58,
RAM_DBG_DATA = 0x5c,
ECC_ERR_CNT = 0x60,
SEM = 0x64,
GPIO_1 = 0x68, /* Use semaphore */
GPIO_2 = 0x6c, /* Use semaphore */
GPIO_3 = 0x70, /* Use semaphore */
RSVD2 = 0x74,
XGMAC_ADDR = 0x78, /* Use semaphore */
XGMAC_DATA = 0x7c, /* Use semaphore */
NIC_ETS = 0x80,
CNA_ETS = 0x84,
FLASH_ADDR = 0x88, /* Use semaphore */
FLASH_DATA = 0x8c, /* Use semaphore */
CQ_STOP = 0x90,
PAGE_TBL_RID = 0x94,
WQ_PAGE_TBL_LO = 0x98,
WQ_PAGE_TBL_HI = 0x9c,
CQ_PAGE_TBL_LO = 0xa0,
CQ_PAGE_TBL_HI = 0xa4,
MAC_ADDR_IDX = 0xa8, /* Use semaphore */
MAC_ADDR_DATA = 0xac, /* Use semaphore */
COS_DFLT_CQ1 = 0xb0,
COS_DFLT_CQ2 = 0xb4,
ETYPE_SKIP1 = 0xb8,
ETYPE_SKIP2 = 0xbc,
SPLT_HDR = 0xc0,
FC_PAUSE_THRES = 0xc4,
NIC_PAUSE_THRES = 0xc8,
FC_ETHERTYPE = 0xcc,
FC_RCV_CFG = 0xd0,
NIC_RCV_CFG = 0xd4,
FC_COS_TAGS = 0xd8,
NIC_COS_TAGS = 0xdc,
MGMT_RCV_CFG = 0xe0,
RT_IDX = 0xe4,
RT_DATA = 0xe8,
RSVD7 = 0xec,
XG_SERDES_ADDR = 0xf0,
XG_SERDES_DATA = 0xf4,
PRB_MX_ADDR = 0xf8, /* Use semaphore */
PRB_MX_DATA = 0xfc, /* Use semaphore */
};
/*
* CAM output format.
*/
enum {
CAM_OUT_ROUTE_FC = 0,
CAM_OUT_ROUTE_NIC = 1,
CAM_OUT_FUNC_SHIFT = 2,
CAM_OUT_RV = (1 << 4),
CAM_OUT_SH = (1 << 15),
CAM_OUT_CQ_ID_SHIFT = 5,
};
/*
* Mailbox definitions
*/
enum {
/* Asynchronous Event Notifications */
AEN_SYS_ERR = 0x00008002,
AEN_LINK_UP = 0x00008011,
AEN_LINK_DOWN = 0x00008012,
AEN_IDC_CMPLT = 0x00008100,
AEN_IDC_REQ = 0x00008101,
AEN_IDC_EXT = 0x00008102,
AEN_DCBX_CHG = 0x00008110,
AEN_AEN_LOST = 0x00008120,
AEN_AEN_SFP_IN = 0x00008130,
AEN_AEN_SFP_OUT = 0x00008131,
AEN_FW_INIT_DONE = 0x00008400,
AEN_FW_INIT_FAIL = 0x00008401,
/* Mailbox Command Opcodes. */
MB_CMD_NOP = 0x00000000,
MB_CMD_EX_FW = 0x00000002,
MB_CMD_MB_TEST = 0x00000006,
MB_CMD_CSUM_TEST = 0x00000007, /* Verify Checksum */
MB_CMD_ABOUT_FW = 0x00000008,
MB_CMD_COPY_RISC_RAM = 0x0000000a,
MB_CMD_LOAD_RISC_RAM = 0x0000000b,
MB_CMD_DUMP_RISC_RAM = 0x0000000c,
MB_CMD_WRITE_RAM = 0x0000000d,
MB_CMD_INIT_RISC_RAM = 0x0000000e,
MB_CMD_READ_RAM = 0x0000000f,
MB_CMD_STOP_FW = 0x00000014,
MB_CMD_MAKE_SYS_ERR = 0x0000002a,
MB_CMD_WRITE_SFP = 0x00000030,
MB_CMD_READ_SFP = 0x00000031,
MB_CMD_INIT_FW = 0x00000060,
MB_CMD_GET_IFCB = 0x00000061,
MB_CMD_GET_FW_STATE = 0x00000069,
MB_CMD_IDC_REQ = 0x00000100, /* Inter-Driver Communication */
MB_CMD_IDC_ACK = 0x00000101, /* Inter-Driver Communication */
MB_CMD_SET_WOL_MODE = 0x00000110, /* Wake On Lan */
MB_WOL_DISABLE = 0,
MB_WOL_MAGIC_PKT = (1 << 1),
MB_WOL_FLTR = (1 << 2),
MB_WOL_UCAST = (1 << 3),
MB_WOL_MCAST = (1 << 4),
MB_WOL_BCAST = (1 << 5),
MB_WOL_LINK_UP = (1 << 6),
MB_WOL_LINK_DOWN = (1 << 7),
MB_WOL_MODE_ON = (1 << 16), /* Wake on Lan Mode on */
MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */
MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */
MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */
MB_CMD_CLEAR_WOL_MAGIC = 0x00000114, /* Wake On Lan Magic Packet */
MB_CMD_SET_WOL_IMMED = 0x00000115,
MB_CMD_PORT_RESET = 0x00000120,
MB_CMD_SET_PORT_CFG = 0x00000122,
MB_CMD_GET_PORT_CFG = 0x00000123,
MB_CMD_GET_LINK_STS = 0x00000124,
MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */
MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */
MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */
MB_SET_MPI_TFK_STOP = (1 << 0),
MB_SET_MPI_TFK_RESUME = (1 << 1),
MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */
MB_GET_MPI_TFK_STOPPED = (1 << 0),
MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1),
/* Sub-commands for IDC request.
* This describes the reason for the
* IDC request.
* See Fcoeidcv0_5.doc
*/
MB_CMD_IOP_PREP_UPDATE_MPI = 0x0001,
MB_CMD_IOP_COMP_UPDATE_MPI = 0x0002,
MB_CMD_IOP_PREP_LINK_DOWN = 0x0010,
MB_CMD_IOP_DVR_START = 0x0100,
MB_CMD_IOP_FLASH_ACC = 0x0101,
MB_CMD_IOP_RESTART_MPI = 0x0102,
MB_CMD_IOP_CORE_DUMP_MPI = 0x0103,
/* Mailbox Command Status. */
MB_CMD_STS_GOOD = 0x00004000, /* Success. */
MB_CMD_STS_INTRMDT = 0x00001000, /* Intermediate Complete. */
MB_CMD_STS_INVLD_CMD = 0x00004001, /* Invalid. */
MB_CMD_STS_XFC_ERR = 0x00004002, /* Interface Error. */
MB_CMD_STS_CSUM_ERR = 0x00004003, /* Csum Error. */
MB_CMD_STS_ERR = 0x00004005, /* Error. */
MB_CMD_STS_PARAM_ERR = 0x00004006, /* Parameter Error. */
};
struct mbox_params {
u32 mbox_in[MAILBOX_COUNT];
u32 mbox_out[MAILBOX_COUNT];
int in_count;
int out_count;
};
struct flash_params_8012 {
u8 dev_id_str[4];
__le16 size;
__le16 csum;
__le16 ver;
__le16 sub_dev_id;
u8 mac_addr[6];
__le16 res;
};
/* 8000 device's flash is a different structure
* at a different offset in flash.
*/
#define FUNC0_FLASH_OFFSET 0x140200
#define FUNC1_FLASH_OFFSET 0x140600
/* Flash related data structures. */
struct flash_params_8000 {
u8 dev_id_str[4]; /* "8000" */
__le16 ver;
__le16 size;
__le16 csum;
__le16 reserved0;
__le16 total_size;
__le16 entry_count;
u8 data_type0;
u8 data_size0;
u8 mac_addr[6];
u8 data_type1;
u8 data_size1;
u8 mac_addr1[6];
u8 data_type2;
u8 data_size2;
__le16 vlan_id;
u8 data_type3;
u8 data_size3;
__le16 last;
u8 reserved1[464];
__le16 subsys_ven_id;
__le16 subsys_dev_id;
u8 reserved2[4];
};
union flash_params {
struct flash_params_8012 flash_params_8012;
struct flash_params_8000 flash_params_8000;
};
/*
* doorbell space for the rx ring context
*/
struct rx_doorbell_context {
u32 cnsmr_idx; /* 0x00 */
u32 valid; /* 0x04 */
u32 reserved[4]; /* 0x08-0x14 */
u32 lbq_prod_idx; /* 0x18 */
u32 sbq_prod_idx; /* 0x1c */
};
/*
* doorbell space for the tx ring context
*/
struct tx_doorbell_context {
u32 prod_idx; /* 0x00 */
u32 valid; /* 0x04 */
u32 reserved[4]; /* 0x08-0x14 */
u32 lbq_prod_idx; /* 0x18 */
u32 sbq_prod_idx; /* 0x1c */
};
/* DATA STRUCTURES SHARED WITH HARDWARE. */
struct tx_buf_desc {
__le64 addr;
__le32 len;
#define TX_DESC_LEN_MASK 0x000fffff
#define TX_DESC_C 0x40000000
#define TX_DESC_E 0x80000000
} __attribute((packed));
/*
* IOCB Definitions...
*/
#define OPCODE_OB_MAC_IOCB 0x01
#define OPCODE_OB_MAC_TSO_IOCB 0x02
#define OPCODE_IB_MAC_IOCB 0x20
#define OPCODE_IB_MPI_IOCB 0x21
#define OPCODE_IB_AE_IOCB 0x3f
struct ob_mac_iocb_req {
u8 opcode;
u8 flags1;
#define OB_MAC_IOCB_REQ_OI 0x01
#define OB_MAC_IOCB_REQ_I 0x02
#define OB_MAC_IOCB_REQ_D 0x08
#define OB_MAC_IOCB_REQ_F 0x10
u8 flags2;
u8 flags3;
#define OB_MAC_IOCB_DFP 0x02
#define OB_MAC_IOCB_V 0x04
__le32 reserved1[2];
__le16 frame_len;
#define OB_MAC_IOCB_LEN_MASK 0x3ffff
__le16 reserved2;
u32 tid;
u32 txq_idx;
__le32 reserved3;
__le16 vlan_tci;
__le16 reserved4;
struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
} __attribute((packed));
struct ob_mac_iocb_rsp {
u8 opcode; /* */
u8 flags1; /* */
#define OB_MAC_IOCB_RSP_OI 0x01 /* */
#define OB_MAC_IOCB_RSP_I 0x02 /* */
#define OB_MAC_IOCB_RSP_E 0x08 /* */
#define OB_MAC_IOCB_RSP_S 0x10 /* too Short */
#define OB_MAC_IOCB_RSP_L 0x20 /* too Large */
#define OB_MAC_IOCB_RSP_P 0x40 /* Padded */
u8 flags2; /* */
u8 flags3; /* */
#define OB_MAC_IOCB_RSP_B 0x80 /* */
u32 tid;
u32 txq_idx;
__le32 reserved[13];
} __attribute((packed));
struct ob_mac_tso_iocb_req {
u8 opcode;
u8 flags1;
#define OB_MAC_TSO_IOCB_OI 0x01
#define OB_MAC_TSO_IOCB_I 0x02
#define OB_MAC_TSO_IOCB_D 0x08
#define OB_MAC_TSO_IOCB_IP4 0x40
#define OB_MAC_TSO_IOCB_IP6 0x80
u8 flags2;
#define OB_MAC_TSO_IOCB_LSO 0x20
#define OB_MAC_TSO_IOCB_UC 0x40
#define OB_MAC_TSO_IOCB_TC 0x80
u8 flags3;
#define OB_MAC_TSO_IOCB_IC 0x01
#define OB_MAC_TSO_IOCB_DFP 0x02
#define OB_MAC_TSO_IOCB_V 0x04
__le32 reserved1[2];
__le32 frame_len;
u32 tid;
u32 txq_idx;
__le16 total_hdrs_len;
__le16 net_trans_offset;
#define OB_MAC_TRANSPORT_HDR_SHIFT 6
__le16 vlan_tci;
__le16 mss;
struct tx_buf_desc tbd[TX_DESC_PER_IOCB];
} __attribute((packed));
struct ob_mac_tso_iocb_rsp {
u8 opcode;
u8 flags1;
#define OB_MAC_TSO_IOCB_RSP_OI 0x01
#define OB_MAC_TSO_IOCB_RSP_I 0x02
#define OB_MAC_TSO_IOCB_RSP_E 0x08
#define OB_MAC_TSO_IOCB_RSP_S 0x10
#define OB_MAC_TSO_IOCB_RSP_L 0x20
#define OB_MAC_TSO_IOCB_RSP_P 0x40
u8 flags2; /* */
u8 flags3; /* */
#define OB_MAC_TSO_IOCB_RSP_B 0x8000
u32 tid;
u32 txq_idx;
__le32 reserved2[13];
} __attribute((packed));
struct ib_mac_iocb_rsp {
u8 opcode; /* 0x20 */
u8 flags1;
#define IB_MAC_IOCB_RSP_OI 0x01 /* Override intr delay */
#define IB_MAC_IOCB_RSP_I 0x02 /* Disable Intr Generation */
#define IB_MAC_CSUM_ERR_MASK 0x1c /* A mask to use for csum errs */
#define IB_MAC_IOCB_RSP_TE 0x04 /* Checksum error */
#define IB_MAC_IOCB_RSP_NU 0x08 /* No checksum rcvd */
#define IB_MAC_IOCB_RSP_IE 0x10 /* IPv4 checksum error */
#define IB_MAC_IOCB_RSP_M_MASK 0x60 /* Multicast info */
#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* Not mcast frame */
#define IB_MAC_IOCB_RSP_M_HASH 0x20 /* HASH mcast frame */
#define IB_MAC_IOCB_RSP_M_REG 0x40 /* Registered mcast frame */
#define IB_MAC_IOCB_RSP_M_PROM 0x60 /* Promiscuous mcast frame */
#define IB_MAC_IOCB_RSP_B 0x80 /* Broadcast frame */
u8 flags2;
#define IB_MAC_IOCB_RSP_P 0x01 /* Promiscuous frame */
#define IB_MAC_IOCB_RSP_V 0x02 /* Vlan tag present */
#define IB_MAC_IOCB_RSP_ERR_MASK 0x1c /* */
#define IB_MAC_IOCB_RSP_ERR_CODE_ERR 0x04
#define IB_MAC_IOCB_RSP_ERR_OVERSIZE 0x08
#define IB_MAC_IOCB_RSP_ERR_UNDERSIZE 0x10
#define IB_MAC_IOCB_RSP_ERR_PREAMBLE 0x14
#define IB_MAC_IOCB_RSP_ERR_FRAME_LEN 0x18
#define IB_MAC_IOCB_RSP_ERR_CRC 0x1c
#define IB_MAC_IOCB_RSP_U 0x20 /* UDP packet */
#define IB_MAC_IOCB_RSP_T 0x40 /* TCP packet */
#define IB_MAC_IOCB_RSP_FO 0x80 /* Failover port */
u8 flags3;
#define IB_MAC_IOCB_RSP_RSS_MASK 0x07 /* RSS mask */
#define IB_MAC_IOCB_RSP_M_NONE 0x00 /* No RSS match */
#define IB_MAC_IOCB_RSP_M_IPV4 0x04 /* IPv4 RSS match */
#define IB_MAC_IOCB_RSP_M_IPV6 0x02 /* IPv6 RSS match */
#define IB_MAC_IOCB_RSP_M_TCP_V4 0x05 /* TCP with IPv4 */
#define IB_MAC_IOCB_RSP_M_TCP_V6 0x03 /* TCP with IPv6 */
#define IB_MAC_IOCB_RSP_V4 0x08 /* IPV4 */
#define IB_MAC_IOCB_RSP_V6 0x10 /* IPV6 */
#define IB_MAC_IOCB_RSP_IH 0x20 /* Split after IP header */
#define IB_MAC_IOCB_RSP_DS 0x40 /* data is in small buffer */
#define IB_MAC_IOCB_RSP_DL 0x80 /* data is in large buffer */
__le32 data_len; /* */
__le64 data_addr; /* */
__le32 rss; /* */
__le16 vlan_id; /* 12 bits */
#define IB_MAC_IOCB_RSP_C 0x1000 /* VLAN CFI bit */
#define IB_MAC_IOCB_RSP_COS_SHIFT 12 /* class of service value */
#define IB_MAC_IOCB_RSP_VLAN_MASK 0x0ffff
__le16 reserved1;
__le32 reserved2[6];
u8 reserved3[3];
u8 flags4;
#define IB_MAC_IOCB_RSP_HV 0x20
#define IB_MAC_IOCB_RSP_HS 0x40
#define IB_MAC_IOCB_RSP_HL 0x80
__le32 hdr_len; /* */
__le64 hdr_addr; /* */
} __attribute((packed));
struct ib_ae_iocb_rsp {
u8 opcode;
u8 flags1;
#define IB_AE_IOCB_RSP_OI 0x01
#define IB_AE_IOCB_RSP_I 0x02
u8 event;
#define LINK_UP_EVENT 0x00
#define LINK_DOWN_EVENT 0x01
#define CAM_LOOKUP_ERR_EVENT 0x06
#define SOFT_ECC_ERROR_EVENT 0x07
#define MGMT_ERR_EVENT 0x08
#define TEN_GIG_MAC_EVENT 0x09
#define GPI0_H2L_EVENT 0x10
#define GPI0_L2H_EVENT 0x20
#define GPI1_H2L_EVENT 0x11
#define GPI1_L2H_EVENT 0x21
#define PCI_ERR_ANON_BUF_RD 0x40
u8 q_id;
__le32 reserved[15];
} __attribute((packed));
/*
* These three structures are for generic
* handling of ib and ob iocbs.
*/
struct ql_net_rsp_iocb {
u8 opcode;
u8 flags0;
__le16 length;
__le32 tid;
__le32 reserved[14];
} __attribute((packed));
struct net_req_iocb {
u8 opcode;
u8 flags0;
__le16 flags1;
__le32 tid;
__le32 reserved1[30];
} __attribute((packed));
/*
* tx ring initialization control block for chip.
* It is defined as:
* "Work Queue Initialization Control Block"
*/
struct wqicb {
__le16 len;
#define Q_LEN_V (1 << 4)
#define Q_LEN_CPP_CONT 0x0000
#define Q_LEN_CPP_16 0x0001
#define Q_LEN_CPP_32 0x0002
#define Q_LEN_CPP_64 0x0003
#define Q_LEN_CPP_512 0x0006
__le16 flags;
#define Q_PRI_SHIFT 1
#define Q_FLAGS_LC 0x1000
#define Q_FLAGS_LB 0x2000
#define Q_FLAGS_LI 0x4000
#define Q_FLAGS_LO 0x8000
__le16 cq_id_rss;
#define Q_CQ_ID_RSS_RV 0x8000
__le16 rid;
__le64 addr;
__le64 cnsmr_idx_addr;
} __attribute((packed));
/*
* rx ring initialization control block for chip.
* It is defined as:
* "Completion Queue Initialization Control Block"
*/
struct cqicb {
u8 msix_vect;
u8 reserved1;
u8 reserved2;
u8 flags;
#define FLAGS_LV 0x08
#define FLAGS_LS 0x10
#define FLAGS_LL 0x20
#define FLAGS_LI 0x40
#define FLAGS_LC 0x80
__le16 len;
#define LEN_V (1 << 4)
#define LEN_CPP_CONT 0x0000
#define LEN_CPP_32 0x0001
#define LEN_CPP_64 0x0002
#define LEN_CPP_128 0x0003
__le16 rid;
__le64 addr;
__le64 prod_idx_addr;
__le16 pkt_delay;
__le16 irq_delay;
__le64 lbq_addr;
__le16 lbq_buf_size;
__le16 lbq_len; /* entry count */
__le64 sbq_addr;
__le16 sbq_buf_size;
__le16 sbq_len; /* entry count */
} __attribute((packed));
struct ricb {
u8 base_cq;
#define RSS_L4K 0x80
u8 flags;
#define RSS_L6K 0x01
#define RSS_LI 0x02
#define RSS_LB 0x04
#define RSS_LM 0x08
#define RSS_RI4 0x10
#define RSS_RT4 0x20
#define RSS_RI6 0x40
#define RSS_RT6 0x80
__le16 mask;
u8 hash_cq_id[1024];
__le32 ipv6_hash_key[10];
__le32 ipv4_hash_key[4];
} __attribute((packed));
/* SOFTWARE/DRIVER DATA STRUCTURES. */
struct oal {
struct tx_buf_desc oal[TX_DESC_PER_OAL];
};
struct map_list {
DECLARE_PCI_UNMAP_ADDR(mapaddr);
DECLARE_PCI_UNMAP_LEN(maplen);
};
struct tx_ring_desc {
struct sk_buff *skb;
struct ob_mac_iocb_req *queue_entry;
u32 index;
struct oal *oal;
struct map_list *map;
int map_cnt;
struct tx_ring_desc *next;
};
struct page_chunk {
struct page *page; /* master page */
char *va; /* virt addr for this chunk */
u64 map; /* mapping for master */
unsigned int offset; /* offset for this chunk */
unsigned int last_flag; /* flag set for last chunk in page */
};
struct bq_desc {
union {
struct page_chunk pg_chunk;
struct sk_buff *skb;
} p;
__le64 *addr;
u32 index;
DECLARE_PCI_UNMAP_ADDR(mapaddr);
DECLARE_PCI_UNMAP_LEN(maplen);
};
#define QL_TXQ_IDX(qdev, skb) (smp_processor_id() % ((qdev)->tx_ring_count))
#define TXQ_CLEAN_TIME HZ
struct tx_ring {
/*
* queue info.
*/
struct wqicb wqicb; /* structure used to inform chip of new queue */
void *wq_base; /* pci_alloc:virtual addr for tx */
dma_addr_t wq_base_dma; /* pci_alloc:dma addr for tx */
__le32 *cnsmr_idx_sh_reg; /* shadow copy of consumer idx */
dma_addr_t cnsmr_idx_sh_reg_dma; /* dma-shadow copy of consumer */
u32 wq_size; /* size in bytes of queue area */
u32 wq_len; /* number of entries in queue */
void __iomem *prod_idx_db_reg; /* doorbell index reg at offset 0x00 */
void __iomem *valid_db_reg; /* doorbell valid reg at offset 0x04 */
u16 prod_idx; /* current value for prod idx */
u16 cq_id; /* completion (rx) queue for tx completions */
u8 wq_id; /* queue id for this entry */
u8 reserved1[3];
struct tx_ring_desc *q; /* descriptor list for the queue */
spinlock_t lock;
atomic_t tx_count; /* counts down for every outstanding IO */
struct work_struct tx_work;
struct ql_adapter *qdev;
struct timer_list txq_clean_timer;
};
/*
* Type of inbound queue.
*/
enum {
TX_Q = 3, /* Handles outbound completions. */
RX_Q = 4, /* Handles inbound completions. */
};
struct rx_ring {
struct cqicb cqicb; /* The chip's completion queue init control block.*/
/* Completion queue elements. */
void *cq_base;
dma_addr_t cq_base_dma;
u32 cq_size;
u32 cq_len;
u16 cq_id;
__le32 *prod_idx_sh_reg; /* Shadowed producer register. */
dma_addr_t prod_idx_sh_reg_dma;
void __iomem *cnsmr_idx_db_reg; /* PCI doorbell mem area + 0 */
u32 cnsmr_idx; /* current sw idx */
struct ql_net_rsp_iocb *curr_entry; /* next entry on queue */
void __iomem *valid_db_reg; /* PCI doorbell mem area + 0x04 */
/* Large buffer queue elements. */
u32 lbq_len; /* entry count */
u32 lbq_size; /* size in bytes of queue */
u32 lbq_buf_map_size;
void *lbq_base;
dma_addr_t lbq_base_dma;
void *lbq_base_indirect;
dma_addr_t lbq_base_indirect_dma;
struct page_chunk pg_chunk; /* current page for chunks */
struct bq_desc *lbq; /* array of control blocks */
void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */
u32 lbq_prod_idx; /* current sw prod idx */
u32 lbq_curr_idx; /* next entry we expect */
u32 lbq_clean_idx; /* beginning of new descs */
u32 lbq_free_cnt; /* free buffer desc cnt */
/* Small buffer queue elements. */
u32 sbq_len; /* entry count */
u32 sbq_size; /* size in bytes of queue */
u32 sbq_buf_size;
void *sbq_base;
dma_addr_t sbq_base_dma;
void *sbq_base_indirect;
dma_addr_t sbq_base_indirect_dma;
struct bq_desc *sbq; /* array of control blocks */
void __iomem *sbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x1c */
u32 sbq_prod_idx; /* current sw prod idx */
u32 sbq_curr_idx; /* next entry we expect */
u32 sbq_clean_idx; /* beginning of new descs */
u32 sbq_free_cnt; /* free buffer desc cnt */
/* Misc. handler elements. */
u32 type; /* Type of queue, tx, rx. */
u32 irq; /* Which vector this ring is assigned. */
u32 cpu; /* Which CPU this should run on. */
char name[IFNAMSIZ + 5];
struct work_struct rx_work;
u8 reserved;
struct ql_adapter *qdev;
struct net_device *dummy_netdev;
#ifdef NETIF_F_GRO
struct napi_struct napi;
#endif
unsigned long packets; /* total packets received */
unsigned long bytes; /* total bytes received */
};
/*
* RSS Initialization Control Block
*/
struct hash_id {
u8 value[4];
};
struct nic_stats {
/*
* These stats come from offset 200h to 278h
* in the XGMAC register.
*/
u64 tx_pkts;
u64 tx_bytes;
u64 tx_mcast_pkts;
u64 tx_bcast_pkts;
u64 tx_ucast_pkts;
u64 tx_ctl_pkts;
u64 tx_pause_pkts;
u64 tx_64_pkt;
u64 tx_65_to_127_pkt;
u64 tx_128_to_255_pkt;
u64 tx_256_511_pkt;
u64 tx_512_to_1023_pkt;
u64 tx_1024_to_1518_pkt;
u64 tx_1519_to_max_pkt;
u64 tx_undersize_pkt;
u64 tx_oversize_pkt;
/*
* These stats come from offset 300h to 3C8h
* in the XGMAC register.
*/
u64 rx_bytes;
u64 rx_bytes_ok;
u64 rx_pkts;
u64 rx_pkts_ok;
u64 rx_bcast_pkts;
u64 rx_mcast_pkts;
u64 rx_ucast_pkts;
u64 rx_undersize_pkts;
u64 rx_oversize_pkts;
u64 rx_jabber_pkts;
u64 rx_undersize_fcerr_pkts;
u64 rx_drop_events;
u64 rx_fcerr_pkts;
u64 rx_align_err;
u64 rx_symbol_err;
u64 rx_mac_err;
u64 rx_ctl_pkts;
u64 rx_pause_pkts;
u64 rx_64_pkts;
u64 rx_65_to_127_pkts;
u64 rx_128_255_pkts;
u64 rx_256_511_pkts;
u64 rx_512_to_1023_pkts;
u64 rx_1024_to_1518_pkts;
u64 rx_1519_to_max_pkts;
u64 rx_len_err_pkts;
/*
* These stats come from offset 500h to 5C8h
* in the XGMAC register.
*/
u64 tx_cbfc_pause_frames0;
u64 tx_cbfc_pause_frames1;
u64 tx_cbfc_pause_frames2;
u64 tx_cbfc_pause_frames3;
u64 tx_cbfc_pause_frames4;
u64 tx_cbfc_pause_frames5;
u64 tx_cbfc_pause_frames6;
u64 tx_cbfc_pause_frames7;
u64 rx_cbfc_pause_frames0;
u64 rx_cbfc_pause_frames1;
u64 rx_cbfc_pause_frames2;
u64 rx_cbfc_pause_frames3;
u64 rx_cbfc_pause_frames4;
u64 rx_cbfc_pause_frames5;
u64 rx_cbfc_pause_frames6;
u64 rx_cbfc_pause_frames7;
u64 rx_nic_fifo_drop;
};
/* Address/Length pairs for the coredump. */
enum {
MPI_CORE_REGS_ADDR = 0x00030000,
MPI_CORE_REGS_CNT = 127,
MPI_CORE_SH_REGS_CNT = 16,
TEST_REGS_ADDR = 0x00001000,
TEST_REGS_CNT = 23,
RMII_REGS_ADDR = 0x00001040,
RMII_REGS_CNT = 64,
FCMAC1_REGS_ADDR = 0x00001080,
FCMAC2_REGS_ADDR = 0x000010c0,
FCMAC_REGS_CNT = 64,
FC1_MBX_REGS_ADDR = 0x00001100,
FC2_MBX_REGS_ADDR = 0x00001240,
FC_MBX_REGS_CNT = 64,
IDE_REGS_ADDR = 0x00001140,
IDE_REGS_CNT = 64,
NIC1_MBX_REGS_ADDR = 0x00001180,
NIC2_MBX_REGS_ADDR = 0x00001280,
NIC_MBX_REGS_CNT = 64,
SMBUS_REGS_ADDR = 0x00001200,
SMBUS_REGS_CNT = 64,
I2C_REGS_ADDR = 0x00001fc0,
I2C_REGS_CNT = 64,
MEMC_REGS_ADDR = 0x00003000,
MEMC_REGS_CNT = 256,
PBUS_REGS_ADDR = 0x00007c00,
PBUS_REGS_CNT = 256,
MDE_REGS_ADDR = 0x00010000,
MDE_REGS_CNT = 6,
CODE_RAM_ADDR = 0x00020000,
CODE_RAM_CNT = 0x2000,
MEMC_RAM_ADDR = 0x00100000,
MEMC_RAM_CNT = 0x2000,
};
#define MPI_COREDUMP_COOKIE 0x5555aaaa
struct mpi_coredump_global_header {
u32 cookie;
u8 idString[16];
u32 timeLo;
u32 timeHi;
u32 imageSize;
u32 headerSize;
u8 info[220];
};
struct mpi_coredump_segment_header {
u32 cookie;
u32 segNum;
u32 segSize;
u32 extra;
u8 description[16];
};
/* Reg dump segment numbers. */
enum {
CORE_SEG_NUM = 1,
TEST_LOGIC_SEG_NUM = 2,
RMII_SEG_NUM = 3,
FCMAC1_SEG_NUM = 4,
FCMAC2_SEG_NUM = 5,
FC1_MBOX_SEG_NUM = 6,
IDE_SEG_NUM = 7,
NIC1_MBOX_SEG_NUM = 8,
SMBUS_SEG_NUM = 9,
FC2_MBOX_SEG_NUM = 10,
NIC2_MBOX_SEG_NUM = 11,
I2C_SEG_NUM = 12,
MEMC_SEG_NUM = 13,
PBUS_SEG_NUM = 14,
MDE_SEG_NUM = 15,
NIC1_CONTROL_SEG_NUM = 16,
NIC2_CONTROL_SEG_NUM = 17,
NIC1_XGMAC_SEG_NUM = 18,
NIC2_XGMAC_SEG_NUM = 19,
WCS_RAM_SEG_NUM = 20,
MEMC_RAM_SEG_NUM = 21,
XAUI_AN_SEG_NUM = 22,
XAUI_HSS_PCS_SEG_NUM = 23,
XFI_AN_SEG_NUM = 24,
XFI_TRAIN_SEG_NUM = 25,
XFI_HSS_PCS_SEG_NUM = 26,
XFI_HSS_TX_SEG_NUM = 27,
XFI_HSS_RX_SEG_NUM = 28,
XFI_HSS_PLL_SEG_NUM = 29,
MISC_NIC_INFO_SEG_NUM = 30,
INTR_STATES_SEG_NUM = 31,
CAM_ENTRIES_SEG_NUM = 32,
ROUTING_WORDS_SEG_NUM = 33,
ETS_SEG_NUM = 34,
PROBE_DUMP_SEG_NUM = 35,
ROUTING_INDEX_SEG_NUM = 36,
MAC_PROTOCOL_SEG_NUM = 37,
XAUI2_AN_SEG_NUM = 38,
XAUI2_HSS_PCS_SEG_NUM = 39,
XFI2_AN_SEG_NUM = 40,
XFI2_TRAIN_SEG_NUM = 41,
XFI2_HSS_PCS_SEG_NUM = 42,
XFI2_HSS_TX_SEG_NUM = 43,
XFI2_HSS_RX_SEG_NUM = 44,
XFI2_HSS_PLL_SEG_NUM = 45,
SEM_REGS_SEG_NUM = 50
};
/* Probe dump constants */
/* 64 probes, 8 bytes per probe + 4 bytes to list the probe ID */
#define PROBE_DATA_LENGTH_WORDS ((64*2) + 1)
#define NUMBER_OF_PROBES 34
#define NUMBER_ROUTING_REG_ENTRIES 48
#define WORDS_PER_ROUTING_REG_ENTRY 4
#define MAC_PROTOCOL_REGISTER_WORDS ((512 * 3) + (32 * 2) + (4096 * 1) + \
(4096 * 1) + (4 * 2) + (8 * 2) + (16 * 1) + (4 * 1) + (4 * 4) + (4 * 1))
/* Save both the address and data register */
#define WORDS_PER_MAC_PROT_ENTRY 2
#define MAX_SEMAPHORE_FUNCTIONS 5
#define WQC_WORD_SIZE 6
#define NUMBER_OF_WQCS 128
#define CQC_WORD_SIZE 13
#define NUMBER_OF_CQCS 128
#define MPI_READ 0x00000000
#define REG_BLOCK 0x00020000
#define TEST_LOGIC_FUNC_PORT_CONFIG 0x1002
#define NIC1_FUNCTION_ENABLE 0x00000001
#define NIC1_FUNCTION_MASK 0x0000000e
#define NIC1_FUNCTION_SHIFT 1
#define NIC2_FUNCTION_ENABLE 0x00000010
#define NIC2_FUNCTION_MASK 0x000000e0
#define NIC2_FUNCTION_SHIFT 5
#define FC1_FUNCTION_ENABLE 0x00000100
#define FC1_FUNCTION_MASK 0x00000e00
#define FC1_FUNCTION_SHIFT 9
#define FC2_FUNCTION_ENABLE 0x00001000
#define FC2_FUNCTION_MASK 0x0000e000
#define FC2_FUNCTION_SHIFT 13
#define FUNCTION_SHIFT 6
#define XFI1_POWERED_UP 0x00000005
#define XFI2_POWERED_UP 0x0000000A
#define XAUI_POWERED_DOWN 0x00000001
#define RISC_124 0x0003007c
#define RISC_127 0x0003007f
#define SHADOW_OFFSET 0xb0000000
#define SYS_CLOCK (0x00)
#define PCI_CLOCK (0x80)
#define FC_CLOCK (0x140)
#define XGM_CLOCK (0x180)
#define ADDRESS_REGISTER_ENABLE 0x00010000
#define UP 0x00008000
#define MAX_MUX 0x40
#define MAX_MODULES 0x1F
#define RS_AND_ADR 0x06000000
#define RS_ONLY 0x04000000
#define NUM_TYPES 10
struct ql_nic_misc {
u32 rx_ring_count;
u32 tx_ring_count;
u32 intr_count;
u32 function;
};
struct ql_mpi_coredump {
/* segment 0 */
struct mpi_coredump_global_header mpi_global_header;
/* segment 1 */
struct mpi_coredump_segment_header core_regs_seg_hdr;
u32 mpi_core_regs[MPI_CORE_REGS_CNT];
u32 mpi_core_sh_regs[MPI_CORE_SH_REGS_CNT];
/* segment 2 */
struct mpi_coredump_segment_header test_logic_regs_seg_hdr;
u32 test_logic_regs[TEST_REGS_CNT];
/* segment 3 */
struct mpi_coredump_segment_header rmii_regs_seg_hdr;
u32 rmii_regs[RMII_REGS_CNT];
/* segment 4 */
struct mpi_coredump_segment_header fcmac1_regs_seg_hdr;
u32 fcmac1_regs[FCMAC_REGS_CNT];
/* segment 5 */
struct mpi_coredump_segment_header fcmac2_regs_seg_hdr;
u32 fcmac2_regs[FCMAC_REGS_CNT];
/* segment 6 */
struct mpi_coredump_segment_header fc1_mbx_regs_seg_hdr;
u32 fc1_mbx_regs[FC_MBX_REGS_CNT];
/* segment 7 */
struct mpi_coredump_segment_header ide_regs_seg_hdr;
u32 ide_regs[IDE_REGS_CNT];
/* segment 8 */
struct mpi_coredump_segment_header nic1_mbx_regs_seg_hdr;
u32 nic1_mbx_regs[NIC_MBX_REGS_CNT];
/* segment 9 */
struct mpi_coredump_segment_header smbus_regs_seg_hdr;
u32 smbus_regs[SMBUS_REGS_CNT];
/* segment 10 */
struct mpi_coredump_segment_header fc2_mbx_regs_seg_hdr;
u32 fc2_mbx_regs[FC_MBX_REGS_CNT];
/* segment 11 */
struct mpi_coredump_segment_header nic2_mbx_regs_seg_hdr;
u32 nic2_mbx_regs[NIC_MBX_REGS_CNT];
/* segment 12 */
struct mpi_coredump_segment_header i2c_regs_seg_hdr;
u32 i2c_regs[I2C_REGS_CNT];
/* segment 13 */
struct mpi_coredump_segment_header memc_regs_seg_hdr;
u32 memc_regs[MEMC_REGS_CNT];
/* segment 14 */
struct mpi_coredump_segment_header pbus_regs_seg_hdr;
u32 pbus_regs[PBUS_REGS_CNT];
/* segment 15 */
struct mpi_coredump_segment_header mde_regs_seg_hdr;
u32 mde_regs[MDE_REGS_CNT];
/* segment 16 */
struct mpi_coredump_segment_header nic_regs_seg_hdr;
u32 nic_regs[64];
/* segment 17 */
struct mpi_coredump_segment_header nic2_regs_seg_hdr;
u32 nic2_regs[64];
/* segment 18 */
struct mpi_coredump_segment_header xgmac1_seg_hdr;
u32 xgmac1[XGMAC_REGISTER_END / 4];
/* segment 19 */
struct mpi_coredump_segment_header xgmac2_seg_hdr;
u32 xgmac2[XGMAC_REGISTER_END / 4];
/* segment 20 */
struct mpi_coredump_segment_header code_ram_seg_hdr;
u32 code_ram[CODE_RAM_CNT];
/* segment 21 */
struct mpi_coredump_segment_header memc_ram_seg_hdr;
u32 memc_ram[MEMC_RAM_CNT];
/* segment 22 */
struct mpi_coredump_segment_header xaui_an_hdr;
u32 serdes_xaui_an[14];
/* segment 23 */
struct mpi_coredump_segment_header xaui_hss_pcs_hdr;
u32 serdes_xaui_hss_pcs[33];
/* segment 24 */
struct mpi_coredump_segment_header xfi_an_hdr;
u32 serdes_xfi_an[14];
/* segment 25 */
struct mpi_coredump_segment_header xfi_train_hdr;
u32 serdes_xfi_train[12];
/* segment 26 */
struct mpi_coredump_segment_header xfi_hss_pcs_hdr;
u32 serdes_xfi_hss_pcs[15];
/* segment 27 */
struct mpi_coredump_segment_header xfi_hss_tx_hdr;
u32 serdes_xfi_hss_tx[32];
/* segment 28 */
struct mpi_coredump_segment_header xfi_hss_rx_hdr;
u32 serdes_xfi_hss_rx[32];
/* segment 29 */
struct mpi_coredump_segment_header xfi_hss_pll_hdr;
u32 serdes_xfi_hss_pll[32];
/* segment 30 */
struct mpi_coredump_segment_header misc_nic_seg_hdr;
struct ql_nic_misc misc_nic_info;
/* segment 31 */
/* one interrupt state for each CQ */
struct mpi_coredump_segment_header intr_states_seg_hdr;
u32 intr_states[MAX_RX_RINGS];
/* segment 32 */
/* 3 cam words each for 16 unicast,
* 2 cam words for each of 32 multicast.
*/
struct mpi_coredump_segment_header cam_entries_seg_hdr;
u32 cam_entries[(16 * 3) + (32 * 3)];
/* segment 33 */
struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
u32 nic_routing_words[16];
/* segment 34 */
struct mpi_coredump_segment_header ets_seg_hdr;
u32 ets[8+2];
/* segment 35 */
struct mpi_coredump_segment_header probe_dump_seg_hdr;
u32 probe_dump[PROBE_DATA_LENGTH_WORDS * NUMBER_OF_PROBES];
/* segment 36 */
struct mpi_coredump_segment_header routing_reg_seg_hdr;
u32 routing_regs[NUMBER_ROUTING_REG_ENTRIES *
WORDS_PER_ROUTING_REG_ENTRY];
/* segment 37 */
struct mpi_coredump_segment_header mac_prot_reg_seg_hdr;
u32 mac_prot_regs[MAC_PROTOCOL_REGISTER_WORDS *
WORDS_PER_MAC_PROT_ENTRY];
/* segment 38 */
struct mpi_coredump_segment_header xaui2_an_hdr;
u32 serdes2_xaui_an[14];
/* segment 39 */
struct mpi_coredump_segment_header xaui2_hss_pcs_hdr;
u32 serdes2_xaui_hss_pcs[33];
/* segment 40 */
struct mpi_coredump_segment_header xfi2_an_hdr;
u32 serdes2_xfi_an[14];
/* segment 41 */
struct mpi_coredump_segment_header xfi2_train_hdr;
u32 serdes2_xfi_train[12];
/* segment 42 */
struct mpi_coredump_segment_header xfi2_hss_pcs_hdr;
u32 serdes2_xfi_hss_pcs[15];
/* segment 43 */
struct mpi_coredump_segment_header xfi2_hss_tx_hdr;
u32 serdes2_xfi_hss_tx[32];
/* segment 44 */
struct mpi_coredump_segment_header xfi2_hss_rx_hdr;
u32 serdes2_xfi_hss_rx[32];
/* segment 45 */
struct mpi_coredump_segment_header xfi2_hss_pll_hdr;
u32 serdes2_xfi_hss_pll[32];
/* segment 50 */
/* semaphore register for all 5 functions */
struct mpi_coredump_segment_header sem_regs_seg_hdr;
u32 sem_regs[MAX_SEMAPHORE_FUNCTIONS];
struct mpi_coredump_segment_header wqc1_seg_hdr;
u32 wqc1[WQC_WORD_SIZE * NUMBER_OF_WQCS];
struct mpi_coredump_segment_header cqc1_seg_hdr;
u32 cqc1[CQC_WORD_SIZE * NUMBER_OF_CQCS];
struct mpi_coredump_segment_header wqc2_seg_hdr;
u32 wqc2[WQC_WORD_SIZE * NUMBER_OF_WQCS];
struct mpi_coredump_segment_header cqc2_seg_hdr;
u32 cqc2[CQC_WORD_SIZE * NUMBER_OF_CQCS];
};
/*
* intr_context structure is used during initialization
* to hook the interrupts. It is also used in a single
* irq environment as a context to the ISR.
*/
struct intr_context {
struct ql_adapter *qdev;
u32 intr;
u32 hooked;
u32 intr_en_mask; /* value/mask used to enable this intr */
u32 intr_dis_mask; /* value/mask used to disable this intr */
u32 intr_read_mask; /* value/mask used to read this intr */
char name[IFNAMSIZ * 2];
atomic_t irq_cnt; /* irq_cnt is used in single vector
* environment. It's incremented for each
* irq handler that is scheduled. When each
* handler finishes it decrements irq_cnt and
* enables interrupts if it's zero. */
irqreturn_t (*handler) (int, void *, struct pt_regs *);
};
/* adapter flags definitions. */
enum {
QL_ADAPTER_UP = 0, /* Adapter has been brought up. */
QL_EEH_FATAL = 1,
QL_LEGACY_ENABLED = 2,
QL_MSI_ENABLED = 3,
QL_MSIX_ENABLED = 4,
QL_DMA64 = 5,
QL_PROMISCUOUS = 6,
QL_ALLMULTI = 7,
QL_PORT_CFG = 8,
QL_CAM_RT_SET = 9,
QL_TESTING = 10,
QL_IN_FW_RST = 11,
QL_SPOOL_LOG = 12,
QL_LINK_UP = 13,
};
/* link_status bit definitions */
enum {
STS_LOOPBACK_MASK = 0x00000700,
STS_LOOPBACK_PCS = 0x00000100,
STS_LOOPBACK_HSS = 0x00000200,
STS_LOOPBACK_EXT = 0x00000300,
STS_PAUSE_MASK = 0x000000c0,
STS_PAUSE_STD = 0x00000040,
STS_PAUSE_PRI = 0x00000080,
STS_SPEED_MASK = 0x00000038,
STS_SPEED_100Mb = 0x00000000,
STS_SPEED_1Gb = 0x00000008,
STS_SPEED_10Gb = 0x00000010,
STS_LINK_TYPE_MASK = 0x00000007,
STS_LINK_TYPE_XFI = 0x00000001,
STS_LINK_TYPE_XAUI = 0x00000002,
STS_LINK_TYPE_XFI_BP = 0x00000003,
STS_LINK_TYPE_XAUI_BP = 0x00000004,
STS_LINK_TYPE_10GBASET = 0x00000005,
};
/* link_config bit definitions */
enum {
CFG_JUMBO_FRAME_SIZE = 0x00010000,
CFG_PAUSE_MASK = 0x00000060,
CFG_PAUSE_STD = 0x00000020,
CFG_PAUSE_PRI = 0x00000040,
CFG_DCBX = 0x00000010,
CFG_LOOPBACK_MASK = 0x00000007,
CFG_LOOPBACK_PCS = 0x00000002,
CFG_LOOPBACK_HSS = 0x00000004,
CFG_LOOPBACK_EXT = 0x00000006,
CFG_DEFAULT_MAX_FRAME_SIZE = 0x00002580,
};
/* LED blink bit definitions */
#define QL_LED_BLINK 0x03e803e8
struct nic_operations {
int (*get_flash) (struct ql_adapter *);
int (*port_initialize) (struct ql_adapter *);
};
/*
* The main Adapter structure definition.
* This structure has all fields relevant to the hardware.
*/
struct ql_adapter {
struct ricb ricb;
unsigned long flags;
u32 wol;
struct nic_stats nic_stats;
struct vlan_group *vlgrp;
/* PCI Configuration information for this device */
struct pci_dev *pdev;
struct net_device *ndev; /* Parent NET device */
/* Hardware information */
u32 chip_rev_id;
u32 fw_rev_id;
u32 func; /* PCI function for this adapter */
u32 alt_func; /* PCI function for alternate adapter */
u32 port; /* Port number for this adapter */
spinlock_t hw_lock;
spinlock_t stats_lock;
/* PCI Bus Relative Register Addresses */
void __iomem *reg_base;
void __iomem *doorbell_area;
u32 doorbell_area_size;
u32 msg_enable;
/* Page for Shadow Registers */
void *rx_ring_shadow_reg_area;
dma_addr_t rx_ring_shadow_reg_dma;
void *tx_ring_shadow_reg_area;
dma_addr_t tx_ring_shadow_reg_dma;
u32 mailbox_in;
u32 mailbox_out;
struct mbox_params idc_mbc;
struct mutex mpi_mutex;
int tx_ring_size;
int rx_ring_size;
u32 intr_count;
struct msix_entry *msi_x_entry;
struct intr_context intr_context[MAX_RX_RINGS];
int tx_ring_count; /* One per online CPU. */
u32 rss_ring_count; /* One per online CPU. */
/*
* rx_ring_count =
* (CPU count * outbound completion rx_ring) +
* (CPU count * inbound (RSS) completion rx_ring)
*/
int rx_ring_count;
int ring_mem_size;
void *ring_mem;
struct rx_ring rx_ring[MAX_RX_RINGS];
struct tx_ring tx_ring[MAX_TX_RINGS];
unsigned int lbq_buf_order;
int rx_csum;
u16 rx_coalesce_usecs; /* cqicb->int_delay */
u16 rx_max_coalesced_frames; /* cqicb->pkt_int_delay */
u16 tx_coalesce_usecs; /* cqicb->int_delay */
u16 tx_max_coalesced_frames; /* cqicb->pkt_int_delay */
u32 xg_sem_mask;
u32 port_link_up;
u32 port_init;
u32 link_status;
struct ql_mpi_coredump *mpi_coredump;
u32 core_is_dumped;
u32 link_config;
u32 led_config;
u32 max_frame_size;
union flash_params flash;
struct net_device_stats stats;
struct workqueue_struct *workqueue;
struct delayed_work asic_reset_work;
struct delayed_work mpi_reset_work;
struct delayed_work mpi_work;
struct delayed_work mpi_port_cfg_work;
struct delayed_work mpi_idc_work;
struct delayed_work mpi_core_to_log;
struct delayed_work link_work;
struct completion ide_completion;
struct nic_operations *nic_ops;
u16 device_id;
struct timer_list eeh_timer;
uint32_t *config_space;
/* Saving mac addr */
char current_mac_addr[6];
spinlock_t tx_lock;
u32 queue_stopped; /* Bitfield of queues that are full. */
};
/*
* Typical Register accessor for memory mapped device.
*/
static inline u32 ql_read32(const struct ql_adapter *qdev, int reg)
{
return readl(qdev->reg_base + reg);
}
/*
* Typical Register accessor for memory mapped device.
*/
static inline void ql_write32(const struct ql_adapter *qdev, int reg, u32 val)
{
writel(val, qdev->reg_base + reg);
}
/*
* Doorbell Registers:
* Doorbell registers are virtual registers in the PCI memory space.
* The space is allocated by the chip during PCI initialization. The
* device driver finds the doorbell address in BAR 3 in PCI config space.
* The registers are used to control outbound and inbound queues. For
* example, the producer index for an outbound queue. Each queue uses
* 1 4k chunk of memory. The lower half of the space is for outbound
* queues. The upper half is for inbound queues.
*/
static inline void ql_write_db_reg(u32 val, void __iomem *addr)
{
writel(val, addr);
mmiowb();
}
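/* Illustrative use: after building one or more IOCBs on a TX work queue,
 * the new producer index is typically pushed to the chip with something
 * like
 *	ql_write_db_reg(tx_ring->prod_idx, tx_ring->prod_idx_db_reg);
 */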
/*
* Shadow Registers:
* Outbound queues have a consumer index that is maintained by the chip.
* Inbound queues have a producer index that is maintained by the chip.
* For lower overhead, these registers are "shadowed" to host memory
* which allows the device driver to track the queue progress without
* PCI reads. When an entry is placed on an inbound queue, the chip will
* update the relevant index register and then copy the value to the
* shadow register in host memory.
*/
static inline u32 ql_read_sh_reg(__le32 *addr)
{
u32 reg;
reg = le32_to_cpu(*addr);
rmb();
return reg;
}
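/* Illustrative use: completion processing can poll the shadowed producer
 * index instead of issuing a PCI read, e.g.
 *	while (ql_read_sh_reg(rx_ring->prod_idx_sh_reg) !=
 *	       rx_ring->cnsmr_idx) { ...service one completion entry... }
 */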
extern char qlge_driver_name[];
extern const char qlge_driver_version[];
extern struct ethtool_ops qlge_ethtool_ops;
extern int ql_sem_spinlock(struct ql_adapter *qdev, u32 sem_mask);
extern void ql_sem_unlock(struct ql_adapter *qdev, u32 sem_mask);
extern int ql_read_xgmac_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
extern int ql_get_mac_addr_reg(struct ql_adapter *qdev, u32 type, u16 index,
u32 *value);
extern int ql_get_routing_reg(struct ql_adapter *qdev, u32 index, u32 *value);
extern int ql_write_cfg(struct ql_adapter *qdev, void *ptr, int size, u32 bit,
u16 q_id);
void ql_queue_fw_error(struct ql_adapter *qdev);
void ql_mpi_work(void *data);
void ql_mpi_reset_work(void *data);
void ql_mpi_idc_work(void *data);
void ql_mpi_core_to_log(void *data);
void ql_mpi_port_cfg_work(void *data);
int ql_wait_reg_rdy(struct ql_adapter *qdev, u32 reg, u32 bit, u32 ebit);
void ql_queue_asic_error(struct ql_adapter *qdev);
u32 ql_enable_completion_interrupt(struct ql_adapter *qdev, u32 intr);
void ql_set_ethtool_ops(struct net_device *ndev);
int ql_read_xgmac_reg64(struct ql_adapter *qdev, u32 reg, u64 *data);
int ql_cam_route_initialize(struct ql_adapter *qdev);
void ql_link_on(struct ql_adapter *qdev);
void ql_link_off(struct ql_adapter *qdev);
int ql_mb_get_fw_state(struct ql_adapter *qdev);
int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
int ql_write_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 data);
int ql_unpause_mpi_risc(struct ql_adapter *qdev);
int ql_pause_mpi_risc(struct ql_adapter *qdev);
int ql_hard_reset_mpi_risc(struct ql_adapter *qdev);
int ql_dump_risc_ram_area(struct ql_adapter *qdev, void *buf,
u32 ram_addr, int word_count);
int ql_core_dump(struct ql_adapter *qdev,
struct ql_mpi_coredump *mpi_coredump);
void ql_get_dump(struct ql_adapter *qdev, void *buff);
int ql_mb_about_fw(struct ql_adapter *qdev);
int ql_wol(struct ql_adapter *qdev);
int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
int ql_mb_get_led_cfg(struct ql_adapter *qdev);
int ql_mb_get_port_cfg(struct ql_adapter *qdev);
int ql_mb_set_port_cfg(struct ql_adapter *qdev);
int qlge_send(struct sk_buff *skb, struct net_device *ndev);
void ql_check_receive_frame(struct sk_buff *skb);
int ql_own_firmware(struct ql_adapter *qdev);
int ql_wait_fifo_empty(struct ql_adapter *);
int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *, u32);
void qlge_set_multicast_list(struct net_device *ndev);
#if 1
#define QL_ALL_DUMP
#define QL_REG_DUMP
#define QL_DEV_DUMP
#define QL_CB_DUMP
#endif
#ifdef QL_REG_DUMP
extern void ql_dump_xgmac_control_regs(struct ql_adapter *qdev);
extern void ql_dump_routing_entries(struct ql_adapter *qdev);
extern void ql_dump_regs(struct ql_adapter *qdev);
#define QL_DUMP_REGS(qdev) ql_dump_regs(qdev)
#define QL_DUMP_ROUTE(qdev) ql_dump_routing_entries(qdev)
#define QL_DUMP_XGMAC_CONTROL_REGS(qdev) ql_dump_xgmac_control_regs(qdev)
#else
#define QL_DUMP_REGS(qdev)
#define QL_DUMP_ROUTE(qdev)
#define QL_DUMP_XGMAC_CONTROL_REGS(qdev)
#endif
#ifdef QL_STAT_DUMP
extern void ql_dump_stat(struct ql_adapter *qdev);
#define QL_DUMP_STAT(qdev) ql_dump_stat(qdev)
#else
#define QL_DUMP_STAT(qdev)
#endif
#ifdef QL_DEV_DUMP
extern void ql_dump_qdev(struct ql_adapter *qdev);
#define QL_DUMP_QDEV(qdev) ql_dump_qdev(qdev)
#else
#define QL_DUMP_QDEV(qdev)
#endif
#ifdef QL_CB_DUMP
extern void ql_dump_wqicb(struct wqicb *wqicb);
extern void ql_dump_tx_ring(struct tx_ring *tx_ring);
extern void ql_dump_ricb(struct ricb *ricb);
extern void ql_dump_cqicb(struct cqicb *cqicb);
extern void ql_dump_rx_ring(struct rx_ring *rx_ring);
extern void ql_dump_hw_cb(struct ql_adapter *qdev, int size, u32 bit, u16 q_id);
#define QL_DUMP_RICB(ricb) ql_dump_ricb(ricb)
#define QL_DUMP_WQICB(wqicb) ql_dump_wqicb(wqicb)
#define QL_DUMP_TX_RING(tx_ring) ql_dump_tx_ring(tx_ring)
#define QL_DUMP_CQICB(cqicb) ql_dump_cqicb(cqicb)
#define QL_DUMP_RX_RING(rx_ring) ql_dump_rx_ring(rx_ring)
#define QL_DUMP_HW_CB(qdev, size, bit, q_id) \
ql_dump_hw_cb(qdev, size, bit, q_id)
#else
#define QL_DUMP_RICB(ricb)
#define QL_DUMP_WQICB(wqicb)
#define QL_DUMP_TX_RING(tx_ring)
#define QL_DUMP_CQICB(cqicb)
#define QL_DUMP_RX_RING(rx_ring)
#define QL_DUMP_HW_CB(qdev, size, bit, q_id)
#endif
#ifdef QL_OB_DUMP
extern void ql_dump_tx_desc(struct tx_buf_desc *tbd);
extern void ql_dump_ob_mac_iocb(struct ob_mac_iocb_req *ob_mac_iocb);
extern void ql_dump_ob_mac_rsp(struct ob_mac_iocb_rsp *ob_mac_rsp);
#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb) ql_dump_ob_mac_iocb(ob_mac_iocb)
#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp) ql_dump_ob_mac_rsp(ob_mac_rsp)
#else
#define QL_DUMP_OB_MAC_IOCB(ob_mac_iocb)
#define QL_DUMP_OB_MAC_RSP(ob_mac_rsp)
#endif
#ifdef QL_IB_DUMP
extern void ql_dump_ib_mac_rsp(struct ib_mac_iocb_rsp *ib_mac_rsp);
#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp) ql_dump_ib_mac_rsp(ib_mac_rsp)
#else
#define QL_DUMP_IB_MAC_RSP(ib_mac_rsp)
#endif
#ifdef QL_ALL_DUMP
extern void ql_dump_all(struct ql_adapter *qdev);
#define QL_DUMP_ALL(qdev) ql_dump_all(qdev)
#else
#define QL_DUMP_ALL(qdev)
#endif
#endif /* _QLGE_H_ */
|
spacex/kernel-centos5
|
drivers/net/qlge/qlge.h
|
C
|
gpl-2.0
| 59,109
|
namespace UFIDA.U9.Cust.XMQX.LaserLabBP.LaserLabScrapBP
{
using System;
using System.Collections.Generic;
using System.Text;
using UFIDA.U9.Cust.XMQX.LaserLabBE.LaserLab;
using UFSoft.UBF.AopFrame;
using UFSoft.UBF.Business;
using UFSoft.UBF.PL;
using UFSoft.UBF.Util.Context;
/// <summary>
/// ReturnLBMaster partial
/// </summary>
public partial class ReturnLBMaster
{
internal BaseStrategy Select()
{
return new ReturnLBMasterImpementStrategy();
}
}
#region implement strategy
/// <summary>
/// Impement Implement
///
/// </summary>
internal partial class ReturnLBMasterImpementStrategy : BaseStrategy
{
public ReturnLBMasterImpementStrategy() { }
        public override object Do(object obj)
        {
            // Flip a scrapped laser label back to the Master state and report
            // the outcome as a "True"/"False" string (the BP's return contract).
            ReturnLBMaster bpObj = (ReturnLBMaster)obj;
            var str = "False";
            if (!string.IsNullOrEmpty(bpObj.LB) && bpObj.CP == "Scrap")
            {
                using (ISession session = Session.Open())
                {
                    // Find the label record matching the given LB code and the
                    // Cp status value used by the original query (4).
                    LaserLab laserLab = LaserLab.Finder.Find("LB=@LB and Cp=@Cp",
                        new OqlParam[] { new OqlParam(bpObj.LB), new OqlParam(4) });
                    if (laserLab != null)
                    {
                        laserLab.Cp = LBEnum.Master;
                        session.Commit();
                        str = "True";
                    }
                }
            }
            return str;
        }
}
#endregion
}
|
amazingbow/yonyou
|
侨兴/QiaoXing_Code/LaserLabBP/BpImplement/LaserLabScrapBP/ReturnLBMasterExtend.cs
|
C#
|
gpl-2.0
| 1,519
|
/* Empty. Add your own CSS if you like */
.Gmap{
height: 300px;
border: 1px solid;
}
.descriptAcc{
height: 200px;
margin-top: 20px;
}
.Gmap-Acc{
height: 300px;
border: 1px solid;
}
.comment{
margin-top: 5px;
height: 100px;
}
.main{
background-image: url('../img/fondo.jpg');
}
.personalData{
margin-top: 50px;
}
.commentsAndLikes{
margin-top: 50px;
}
.accomodation{
margin-top: 50px;
}
.titulo{
font-weight: bold !important;
margin-top: 20px;
width: 137px;
margin: auto !important;
}
.subTitulo{
margin: 30px auto auto !important;
width: 264px;
}
.btnSearch{
margin-top: 30px;
}
.titles{
margin-top: 30px;
}
.validation{
color: #FF0000;
margin-left: 15px;
}
.title{
margin-left: 16px;
margin-top: 20px;
width: 228px;
}
.file_name{
margin-top: 30px;
}
.file_country{
margin-top: 30px;
}
.input_phone{
width: 110px !important;
float: left !important;
}
.item_Profile{
margin-top: 30px;
}
.subItem{
width: 171px;
margin: auto;
}
.description_label{
padding-left: 10px;
}
.subdiv{
margin-top: 20px;
}
.footResult{
background: #3A3A3A;
}
|
taddeiPablo/Accomodation_App
|
Accomodation_app/www/css/style.css
|
CSS
|
gpl-2.0
| 1,091
|
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#define pr_fmt(fmt) "%s: " fmt, __func__
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/major.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/msm_mdp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/sw_sync.h>
#include <linux/msm_iommu_domains.h>
#include <soc/qcom/event_timer.h>
#include <mach/msm_bus.h>
#include "mdss.h"
#include "mdss_debug.h"
#include "mdss_fb.h"
#include "mdss_mdp.h"
#include "mdss_mdp_rotator.h"
#include "mdss_quickdraw.h"
#define VSYNC_PERIOD 16
#define BORDERFILL_NDX 0x0BF000BF
#define CHECK_BOUNDS(offset, size, max_size) \
(((size) > (max_size)) || ((offset) > ((max_size) - (size))))
#define IS_RIGHT_MIXER_OV(flags, dst_x, left_lm_w) \
((flags & MDSS_MDP_RIGHT_MIXER) || (dst_x >= left_lm_w))
#define PP_CLK_CFG_OFF 0
#define PP_CLK_CFG_ON 1
#define OVERLAY_MAX 10
static atomic_t ov_active_panels = ATOMIC_INIT(0);
static int mdss_mdp_overlay_free_fb_pipe(struct msm_fb_data_type *mfd);
static int mdss_mdp_overlay_fb_parse_dt(struct msm_fb_data_type *mfd);
static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd);
static void __overlay_kickoff_requeue(struct msm_fb_data_type *mfd);
static void __vsync_retire_signal(struct msm_fb_data_type *mfd, int val);
static int __vsync_set_vsync_handler(struct msm_fb_data_type *mfd);
static inline bool is_ov_right_blend(struct mdp_rect *left_blend,
struct mdp_rect *right_blend, u32 left_lm_w)
{
return (((left_blend->x + left_blend->w) == right_blend->x) &&
((left_blend->x + left_blend->w) != left_lm_w) &&
(left_blend->x != right_blend->x) &&
(left_blend->y == right_blend->y) &&
(left_blend->h == right_blend->h));
}
static inline u32 left_lm_w_from_mfd(struct msm_fb_data_type *mfd)
{
struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
return ctl->mixer_left->width;
}
/**
* __is_more_decimation_doable() - check whether more vertical decimation can be applied
* @pipe: pointer to pipe data structure
*
* if per pipe BW exceeds the limit and user
* has not requested decimation then return
* -E2BIG error back to user else try more
* decimation based on following table config.
*
* ----------------------------------------------------------
* error | split mode | src_split | v_deci | action |
* ------|------------|-----------|--------|----------------|
* | | | 00 | return error |
* | | enabled |--------|----------------|
* | | | >1 | more decmation |
* | yes |-----------|--------|----------------|
* | | | 00 | return error |
* | | disabled |--------|----------------|
* | | | >1 | return error |
* E2BIG |------------|-----------|--------|----------------|
* | | | 00 | return error |
* | | enabled |--------|----------------|
* | | | >1 | more decmation |
* | no |-----------|--------|----------------|
* | | | 00 | return error |
* | | disabled |--------|----------------|
* | | | >1 | more decmation |
* ----------------------------------------------------------
*/
static inline bool __is_more_decimation_doable(struct mdss_mdp_pipe *pipe)
{
struct mdss_data_type *mdata = pipe->mixer_left->ctl->mdata;
struct msm_fb_data_type *mfd = pipe->mixer_left->ctl->mfd;
if (!mfd->split_display && !pipe->vert_deci)
return false;
else if (mfd->split_display && (!mdata->has_src_split ||
(mdata->has_src_split && !pipe->vert_deci)))
return false;
else
return true;
}
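/*
* __overlay_find_pipe() - look up a pipe already attached to this fb
*
* Walks the fb's pipes_used list under list_lock and returns the pipe
* whose index mask matches @ndx, or NULL if no such pipe is in use.
*/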
static struct mdss_mdp_pipe *__overlay_find_pipe(
struct msm_fb_data_type *mfd, u32 ndx)
{
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
struct mdss_mdp_pipe *tmp, *pipe = NULL;
mutex_lock(&mdp5_data->list_lock);
list_for_each_entry(tmp, &mdp5_data->pipes_used, list) {
if (tmp->ndx == ndx) {
pipe = tmp;
break;
}
}
mutex_unlock(&mdp5_data->list_lock);
return pipe;
}
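/*
* mdss_mdp_overlay_get() - return the stored request for an existing overlay
*
* Copies the mdp_overlay parameters last programmed for the pipe identified
* by req->id back into @req for the caller.
*/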
static int mdss_mdp_overlay_get(struct msm_fb_data_type *mfd,
struct mdp_overlay *req)
{
struct mdss_mdp_pipe *pipe;
pipe = __overlay_find_pipe(mfd, req->id);
if (!pipe) {
pr_err("invalid pipe ndx=%x\n", req->id);
return -ENODEV;
}
*req = pipe->req_data;
return 0;
}
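/*
* mdss_mdp_ov_xres_check() - validate the horizontal destination geometry
*
* Normalizes dst_rect.x and the MDSS_MDP_RIGHT_MIXER flag for source-split
* and non-source-split targets, then verifies that the destination rectangle
* fits within the width of the mixer(s) it will be staged on.
*/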
static int mdss_mdp_ov_xres_check(struct msm_fb_data_type *mfd,
struct mdp_overlay *req)
{
u32 xres = 0;
u32 left_lm_w = left_lm_w_from_mfd(mfd);
struct mdss_data_type *mdata = mfd_to_mdata(mfd);
struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
if (IS_RIGHT_MIXER_OV(req->flags, req->dst_rect.x, left_lm_w)) {
if (mdata->has_src_split) {
xres = left_lm_w;
if (req->flags & MDSS_MDP_RIGHT_MIXER) {
pr_warn("invalid use of RIGHT_MIXER flag.\n");
/*
* If the chipset is capable of source split then
* all layers which are only on the right LM should
* have their x offset relative to the left LM's
* left-top corner, i.e. relative to the panel width.
* By modifying dst_x below, we assume that the
* client is running in legacy mode on a chipset
* capable of source split.
*/
if (req->dst_rect.x < left_lm_w)
req->dst_rect.x += left_lm_w;
req->flags &= ~MDSS_MDP_RIGHT_MIXER;
}
} else if (req->dst_rect.x >= left_lm_w) {
/*
* this is a step towards removing a reliance on
* MDSS_MDP_RIGHT_MIXER flags. With the new src split
* code, some clients of non-src-split chipsets have
* stopped sending MDSS_MDP_RIGHT_MIXER flag and
* modified their xres relative to full panel
* dimensions. In such cases, we need to deduct the left
* layer mixer width before we program this HW.
*/
req->dst_rect.x -= left_lm_w;
req->flags |= MDSS_MDP_RIGHT_MIXER;
}
if (ctl->mixer_right) {
xres += ctl->mixer_right->width;
} else {
pr_err("ov cannot be placed on right mixer\n");
return -EPERM;
}
} else {
if (ctl->mixer_left) {
xres = ctl->mixer_left->width;
} else {
pr_err("ov cannot be placed on left mixer\n");
return -EPERM;
}
if (mdata->has_src_split && ctl->mixer_right)
xres += ctl->mixer_right->width;
}
if (CHECK_BOUNDS(req->dst_rect.x, req->dst_rect.w, xres)) {
pr_err("dst_xres is invalid. dst_x:%d, dst_w:%d, xres:%d\n",
req->dst_rect.x, req->dst_rect.w, xres);
return -EOVERFLOW;
}
return 0;
}
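/*
* mdss_mdp_overlay_req_check() - sanity check an overlay request
*
* Rejects requests that violate hardware limits: secure content on a
* non-secure writeback session, out-of-range z-order, undersized or
* out-of-bounds source/destination rectangles, unsupported decimation/BWC/
* tile combinations, excessive up- or down-scaling, and odd YUV geometry.
*/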
int mdss_mdp_overlay_req_check(struct msm_fb_data_type *mfd,
struct mdp_overlay *req,
struct mdss_mdp_format_params *fmt)
{
u32 yres;
u32 min_src_size, min_dst_size;
int content_secure;
struct mdss_data_type *mdata = mfd_to_mdata(mfd);
struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
yres = mfd->fbi->var.yres;
content_secure = (req->flags & MDP_SECURE_OVERLAY_SESSION);
if (!ctl->is_secure && content_secure &&
(mfd->panel.type == WRITEBACK_PANEL)) {
pr_debug("return due to security concerns\n");
return -EPERM;
}
if (mdata->mdp_rev >= MDSS_MDP_HW_REV_102) {
min_src_size = fmt->is_yuv ? 2 : 1;
min_dst_size = 1;
} else {
min_src_size = fmt->is_yuv ? 10 : 5;
min_dst_size = 2;
}
if (req->z_order >= MDSS_MDP_MAX_STAGE) {
pr_err("zorder %d out of range\n", req->z_order);
return -ERANGE;
}
if (req->src.width > MAX_IMG_WIDTH ||
req->src.height > MAX_IMG_HEIGHT ||
req->src_rect.w < min_src_size || req->src_rect.h < min_src_size ||
CHECK_BOUNDS(req->src_rect.x, req->src_rect.w, req->src.width) ||
CHECK_BOUNDS(req->src_rect.y, req->src_rect.h, req->src.height)) {
pr_err("invalid source image img wh=%dx%d rect=%d,%d,%d,%d\n",
req->src.width, req->src.height,
req->src_rect.x, req->src_rect.y,
req->src_rect.w, req->src_rect.h);
return -EOVERFLOW;
}
if (req->dst_rect.w < min_dst_size || req->dst_rect.h < min_dst_size) {
pr_err("invalid destination resolution (%dx%d)",
req->dst_rect.w, req->dst_rect.h);
return -EOVERFLOW;
}
if (req->horz_deci || req->vert_deci) {
if (!mdata->has_decimation) {
pr_err("No Decimation in MDP V=%x\n", mdata->mdp_rev);
return -EINVAL;
} else if ((req->horz_deci > MAX_DECIMATION) ||
(req->vert_deci > MAX_DECIMATION)) {
pr_err("Invalid decimation factors horz=%d vert=%d\n",
req->horz_deci, req->vert_deci);
return -EINVAL;
} else if (req->flags & MDP_BWC_EN) {
pr_err("Decimation can't be enabled with BWC\n");
return -EINVAL;
} else if (fmt->tile) {
pr_err("Decimation can't be enabled with MacroTile format\n");
return -EINVAL;
}
}
if (!(req->flags & MDSS_MDP_ROT_ONLY)) {
u32 src_w, src_h, dst_w, dst_h;
if (CHECK_BOUNDS(req->dst_rect.y, req->dst_rect.h, yres)) {
pr_err("invalid vertical destination: y=%d, h=%d\n",
req->dst_rect.y, req->dst_rect.h);
return -EOVERFLOW;
}
if (req->flags & MDP_ROT_90) {
dst_h = req->dst_rect.w;
dst_w = req->dst_rect.h;
} else {
dst_w = req->dst_rect.w;
dst_h = req->dst_rect.h;
}
src_w = req->src_rect.w >> req->horz_deci;
src_h = req->src_rect.h >> req->vert_deci;
if (src_w > MAX_MIXER_WIDTH) {
pr_err("invalid source width=%d HDec=%d\n",
req->src_rect.w, req->horz_deci);
return -EINVAL;
}
if ((src_w * MAX_UPSCALE_RATIO) < dst_w) {
pr_err("too much upscaling Width %d->%d\n",
req->src_rect.w, req->dst_rect.w);
return -EINVAL;
}
if ((src_h * MAX_UPSCALE_RATIO) < dst_h) {
pr_err("too much upscaling. Height %d->%d\n",
req->src_rect.h, req->dst_rect.h);
return -EINVAL;
}
if (src_w > (dst_w * MAX_DOWNSCALE_RATIO)) {
pr_err("too much downscaling. Width %d->%d H Dec=%d\n",
src_w, req->dst_rect.w, req->horz_deci);
return -EINVAL;
}
if (src_h > (dst_h * MAX_DOWNSCALE_RATIO)) {
pr_err("too much downscaling. Height %d->%d V Dec=%d\n",
src_h, req->dst_rect.h, req->vert_deci);
return -EINVAL;
}
if (req->flags & MDP_BWC_EN) {
if ((req->src.width != req->src_rect.w) ||
(req->src.height != req->src_rect.h)) {
pr_err("BWC: unequal src img and rect w,h\n");
return -EINVAL;
}
if (req->flags & MDP_DECIMATION_EN) {
pr_err("Can't enable BWC decode && decimate\n");
return -EINVAL;
}
}
if (req->flags & MDP_DEINTERLACE) {
if (req->flags & MDP_SOURCE_ROTATED_90) {
if ((req->src_rect.w % 4) != 0) {
pr_err("interlaced rect not h/4\n");
return -EINVAL;
}
} else if ((req->src_rect.h % 4) != 0) {
pr_err("interlaced rect not h/4\n");
return -EINVAL;
}
}
} else {
if (req->flags & MDP_DEINTERLACE) {
if ((req->src_rect.h % 4) != 0) {
pr_err("interlaced rect h not multiple of 4\n");
return -EINVAL;
}
}
}
if (fmt->is_yuv) {
if ((req->src_rect.x & 0x1) || (req->src_rect.y & 0x1) ||
(req->src_rect.w & 0x1) || (req->src_rect.h & 0x1)) {
pr_err("invalid odd src resolution or coordinates\n");
return -EINVAL;
}
}
return 0;
}
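/*
* __mdp_pipe_tune_perf() - fit the pipe within clock and bandwidth budgets
*
* Recalculates the pipe's performance needs in a loop; whenever the MDP
* clock rate or the per-pipe bandwidth limit is exceeded, vertical
* decimation is increased (when BWC, tile formats and pixel extension
* allow it), otherwise -E2BIG is returned to the caller.
*/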
static int __mdp_pipe_tune_perf(struct mdss_mdp_pipe *pipe,
bool is_single_layer)
{
struct mdss_data_type *mdata = pipe->mixer_left->ctl->mdata;
struct mdss_mdp_perf_params perf;
int rc;
for (;;) {
rc = mdss_mdp_perf_calc_pipe(pipe, &perf, NULL, true,
is_single_layer);
if (!rc && (perf.mdp_clk_rate <= mdata->max_mdp_clk_rate)) {
rc = mdss_mdp_perf_bw_check_pipe(&perf, pipe);
if (!rc) {
break;
} else if (rc == -E2BIG &&
!__is_more_decimation_doable(pipe)) {
pr_debug("pipe%d exceeded per pipe BW\n",
pipe->num);
return rc;
}
}
/*
* if decimation is available try to reduce minimum clock rate
* requirement by applying vertical decimation and reduce
* mdp clock requirement
*/
if (mdata->has_decimation && (pipe->vert_deci < MAX_DECIMATION)
&& !pipe->bwc_mode && !pipe->src_fmt->tile &&
!pipe->scale.enable_pxl_ext)
pipe->vert_deci++;
else
return -E2BIG;
}
return 0;
}
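/*
* __mdss_mdp_overlay_setup_scaling() - derive the scaler phase steps
*
* Unless userspace supplied pixel-extension data, clears the scale config
* and computes horizontal and vertical phase steps from the decimated
* source and destination sizes, tolerating overflow where the hardware
* (Qseed2 on VIG pipes) can absorb it.
*/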
static int __mdss_mdp_overlay_setup_scaling(struct mdss_mdp_pipe *pipe)
{
u32 src;
int rc;
src = pipe->src.w >> pipe->horz_deci;
if (pipe->scale.enable_pxl_ext)
return 0;
memset(&pipe->scale, 0, sizeof(struct mdp_scale_data));
rc = mdss_mdp_calc_phase_step(src, pipe->dst.w,
&pipe->scale.phase_step_x[0]);
if (rc == -EOVERFLOW) {
/* overflow on horizontal direction is acceptable */
rc = 0;
} else if (rc) {
pr_err("Horizontal scaling calculation failed=%d! %d->%d\n",
rc, src, pipe->dst.w);
return rc;
}
src = pipe->src.h >> pipe->vert_deci;
rc = mdss_mdp_calc_phase_step(src, pipe->dst.h,
&pipe->scale.phase_step_y[0]);
if ((rc == -EOVERFLOW) && (pipe->type == MDSS_MDP_PIPE_TYPE_VIG)) {
/* overflow on Qseed2 scaler is acceptable */
rc = 0;
} else if (rc) {
pr_err("Vertical scaling calculation failed=%d! %d->%d\n",
rc, src, pipe->dst.h);
return rc;
}
return rc;
}
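/*
* __mdss_mdp_overlay_set_chroma_sample() - set chroma subsampling flags
*
* Derives the horizontal/vertical chroma subsampling flags from the source
* format and clears them again in any direction where decimation is used.
*/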
static inline void __mdss_mdp_overlay_set_chroma_sample(
struct mdss_mdp_pipe *pipe)
{
pipe->chroma_sample_v = pipe->chroma_sample_h = 0;
switch (pipe->src_fmt->chroma_sample) {
case MDSS_MDP_CHROMA_H1V2:
pipe->chroma_sample_v = 1;
break;
case MDSS_MDP_CHROMA_H2V1:
pipe->chroma_sample_h = 1;
break;
case MDSS_MDP_CHROMA_420:
pipe->chroma_sample_v = 1;
pipe->chroma_sample_h = 1;
break;
}
if (pipe->horz_deci)
pipe->chroma_sample_h = 0;
if (pipe->vert_deci)
pipe->chroma_sample_v = 0;
}
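/*
* mdss_mdp_overlay_pipe_setup() - allocate and configure a pipe for a request
*
* Validates the request, selects the target mixer, allocates a new pipe (or
* re-uses the one named by req->id) and programs its source/destination
* rectangles, blend settings, post-processing configuration, decimation and
* scaling before reserving SMP blocks. On failure the pipe is destroyed if
* it was never played and pending reservations on this fb are released.
*/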
int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
struct mdp_overlay *req, struct mdss_mdp_pipe **ppipe,
struct mdss_mdp_pipe *left_blend_pipe, bool is_single_layer)
{
struct mdss_mdp_format_params *fmt;
struct mdss_mdp_pipe *pipe;
struct mdss_mdp_mixer *mixer = NULL;
u32 pipe_type, mixer_mux, len;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
struct mdss_data_type *mdata = mfd_to_mdata(mfd);
struct mdp_histogram_start_req hist;
int ret;
u32 bwc_enabled;
u32 rot90;
bool is_vig_needed = false;
u32 left_lm_w = left_lm_w_from_mfd(mfd);
if (mdp5_data->ctl == NULL)
return -ENODEV;
if (req->flags & MDP_ROT_90) {
pr_err("unsupported inline rotation\n");
return -EOPNOTSUPP;
}
if ((req->dst_rect.w > MAX_DST_W) || (req->dst_rect.h > MAX_DST_H)) {
pr_err("exceeded max mixer supported resolution %dx%d\n",
req->dst_rect.w, req->dst_rect.h);
return -EOVERFLOW;
}
if (IS_RIGHT_MIXER_OV(req->flags, req->dst_rect.x, left_lm_w))
mixer_mux = MDSS_MDP_MIXER_MUX_RIGHT;
else
mixer_mux = MDSS_MDP_MIXER_MUX_LEFT;
pr_debug("ctl=%u req id=%x mux=%d z_order=%d flags=0x%x dst_x:%d\n",
mdp5_data->ctl->num, req->id, mixer_mux, req->z_order,
req->flags, req->dst_rect.x);
fmt = mdss_mdp_get_format_params(req->src.format);
if (!fmt) {
pr_err("invalid pipe format %d\n", req->src.format);
return -EINVAL;
}
bwc_enabled = req->flags & MDP_BWC_EN;
rot90 = req->flags & MDP_SOURCE_ROTATED_90;
/*
* Always set yuv rotator output to pseudo planar.
*/
if (bwc_enabled || rot90) {
req->src.format =
mdss_mdp_get_rotator_dst_format(req->src.format, rot90,
bwc_enabled);
fmt = mdss_mdp_get_format_params(req->src.format);
if (!fmt) {
pr_err("invalid pipe format %d\n", req->src.format);
return -EINVAL;
}
}
ret = mdss_mdp_ov_xres_check(mfd, req);
if (ret)
return ret;
ret = mdss_mdp_overlay_req_check(mfd, req, fmt);
if (ret)
return ret;
pipe = mdss_mdp_get_staged_pipe(mdp5_data->ctl, mixer_mux,
req->z_order, left_blend_pipe != NULL);
if (pipe && pipe->ndx != req->id) {
pr_debug("replacing pnum=%d at stage=%d mux=%d id:0x%x %s\n",
pipe->num, req->z_order, mixer_mux, req->id,
left_blend_pipe ? "right blend" : "left blend");
mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
}
mixer = mdss_mdp_mixer_get(mdp5_data->ctl, mixer_mux);
if (!mixer) {
pr_err("unable to get mixer\n");
return -ENODEV;
}
if ((mdata->has_non_scalar_rgb) &&
((req->src_rect.w != req->dst_rect.w) ||
(req->src_rect.h != req->dst_rect.h)))
is_vig_needed = true;
if (req->id == MSMFB_NEW_REQUEST) {
if (req->flags & MDP_OV_PIPE_FORCE_DMA)
pipe_type = MDSS_MDP_PIPE_TYPE_DMA;
else if (fmt->is_yuv || (req->flags & MDP_OV_PIPE_SHARE) ||
is_vig_needed)
pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
else
pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
pipe = mdss_mdp_pipe_alloc(mixer, pipe_type, left_blend_pipe);
/* RGB pipes can be used instead of DMA */
if (!pipe && (pipe_type == MDSS_MDP_PIPE_TYPE_DMA)) {
pr_debug("giving RGB pipe for fb%d. flags:0x%x\n",
mfd->index, req->flags);
pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
pipe = mdss_mdp_pipe_alloc(mixer, pipe_type,
left_blend_pipe);
}
/* VIG pipes can also support RGB format */
if (!pipe && pipe_type == MDSS_MDP_PIPE_TYPE_RGB) {
pr_debug("giving ViG pipe for fb%d. flags:0x%x\n",
mfd->index, req->flags);
pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
pipe = mdss_mdp_pipe_alloc(mixer, pipe_type,
left_blend_pipe);
}
if (pipe == NULL) {
pr_err("error allocating pipe. flags=0x%x\n",
req->flags);
return -ENODEV;
}
ret = mdss_mdp_pipe_map(pipe);
if (ret) {
pr_err("unable to map pipe=%d\n", pipe->num);
return ret;
}
mutex_lock(&mdp5_data->list_lock);
list_add(&pipe->list, &mdp5_data->pipes_used);
mutex_unlock(&mdp5_data->list_lock);
pipe->mixer_left = mixer;
pipe->mfd = mfd;
pipe->pid = current->tgid;
pipe->play_cnt = 0;
} else {
pipe = __overlay_find_pipe(mfd, req->id);
if (!pipe) {
pr_err("invalid pipe ndx=%x\n", req->id);
return -ENODEV;
}
ret = mdss_mdp_pipe_map(pipe);
if (IS_ERR_VALUE(ret)) {
pr_err("Unable to map used pipe%d ndx=%x\n",
pipe->num, pipe->ndx);
return ret;
}
if (is_vig_needed && (pipe->type != MDSS_MDP_PIPE_TYPE_VIG)) {
pr_err("pipe is non-scalar ndx=%x\n", req->id);
ret = -EINVAL;
goto exit_fail;
}
if (pipe->mixer_left != mixer) {
if (!mixer->ctl || (mixer->ctl->mfd != mfd)) {
pr_err("Can't switch mixer %d->%d pnum %d!\n",
pipe->mixer_left->num, mixer->num,
pipe->num);
ret = -EINVAL;
goto exit_fail;
}
pr_debug("switching pipe%d mixer %d->%d stage%d\n",
pipe->num,
pipe->mixer_left ? pipe->mixer_left->num : -1,
mixer->num, req->z_order);
mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
pipe->mixer_left = mixer;
}
}
if (left_blend_pipe) {
if (pipe->priority <= left_blend_pipe->priority) {
pr_debug("priority limitation. left:%d right%d\n",
left_blend_pipe->priority, pipe->priority);
ret = -EPERM;
goto exit_fail;
} else {
pr_debug("pipe%d is a right_pipe\n", pipe->num);
pipe->is_right_blend = true;
}
} else if (pipe->is_right_blend) {
/*
* pipe used to be a right blend; update the mixer
* configuration to remove it as a right blend
*/
mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
pipe->is_right_blend = false;
}
pipe->flags = req->flags;
if (bwc_enabled && !mdp5_data->mdata->has_bwc) {
pr_err("BWC is not supported in MDP version %x\n",
mdp5_data->mdata->mdp_rev);
pipe->bwc_mode = 0;
} else {
pipe->bwc_mode = pipe->mixer_left->rotator_mode ?
0 : (bwc_enabled ? 1 : 0);
}
pipe->img_width = req->src.width & 0x3fff;
pipe->img_height = req->src.height & 0x3fff;
pipe->src.x = req->src_rect.x;
pipe->src.y = req->src_rect.y;
pipe->src.w = req->src_rect.w;
pipe->src.h = req->src_rect.h;
pipe->dst.x = req->dst_rect.x;
pipe->dst.y = req->dst_rect.y;
pipe->dst.w = req->dst_rect.w;
pipe->dst.h = req->dst_rect.h;
pipe->horz_deci = req->horz_deci;
pipe->vert_deci = req->vert_deci;
/*
* check if the overlay spans across two mixers and if source split is
* available. If yes, enable the src_split_req flag so that during mixer
* staging, the same pipe will be staged on both layer mixers.
*/
if (mdata->has_src_split) {
if ((mixer_mux == MDSS_MDP_MIXER_MUX_LEFT) &&
((req->dst_rect.x + req->dst_rect.w) > mixer->width)) {
if (req->dst_rect.x >= mixer->width) {
pr_err("%pS: err dst_x can't lie in right half",
__builtin_return_address(0));
pr_cont(" flags:0x%x dst x:%d w:%d lm_w:%d\n",
req->flags, req->dst_rect.x,
req->dst_rect.w, mixer->width);
ret = -EINVAL;
goto exit_fail;
} else {
pipe->src_split_req = true;
}
} else {
if (pipe->src_split_req) {
mdss_mdp_mixer_pipe_unstage(pipe,
pipe->mixer_right);
pipe->mixer_right = NULL;
}
pipe->src_split_req = false;
}
}
memcpy(&pipe->scale, &req->scale, sizeof(struct mdp_scale_data));
pipe->src_fmt = fmt;
__mdss_mdp_overlay_set_chroma_sample(pipe);
pipe->mixer_stage = req->z_order;
pipe->is_fg = req->is_fg;
pipe->alpha = req->alpha;
pipe->transp = req->transp_mask;
pipe->blend_op = req->blend_op;
if (pipe->blend_op == BLEND_OP_NOT_DEFINED)
pipe->blend_op = fmt->alpha_enable ?
BLEND_OP_PREMULTIPLIED :
BLEND_OP_OPAQUE;
if (!fmt->alpha_enable && (pipe->blend_op != BLEND_OP_OPAQUE))
pr_debug("Unintended blend_op %d on layer with no alpha plane\n",
pipe->blend_op);
if (fmt->is_yuv && !(pipe->flags & MDP_SOURCE_ROTATED_90) &&
!pipe->scale.enable_pxl_ext) {
pipe->overfetch_disable = OVERFETCH_DISABLE_BOTTOM;
if (!(pipe->flags & MDSS_MDP_DUAL_PIPE) ||
IS_RIGHT_MIXER_OV(pipe->flags, pipe->dst.x, left_lm_w))
pipe->overfetch_disable |= OVERFETCH_DISABLE_RIGHT;
pr_debug("overfetch flags=%x\n", pipe->overfetch_disable);
} else {
pipe->overfetch_disable = 0;
}
pipe->bg_color = req->bg_color;
req->id = pipe->ndx;
req->priority = pipe->priority;
pipe->req_data = *req;
if (pipe->flags & MDP_OVERLAY_PP_CFG_EN) {
memcpy(&pipe->pp_cfg, &req->overlay_pp_cfg,
sizeof(struct mdp_overlay_pp_params));
len = pipe->pp_cfg.igc_cfg.len;
if ((pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_IGC_CFG) &&
(len == IGC_LUT_ENTRIES)) {
ret = copy_from_user(pipe->pp_res.igc_c0_c1,
pipe->pp_cfg.igc_cfg.c0_c1_data,
sizeof(uint32_t) * len);
if (ret) {
ret = -ENOMEM;
goto exit_fail;
}
ret = copy_from_user(pipe->pp_res.igc_c2,
pipe->pp_cfg.igc_cfg.c2_data,
sizeof(uint32_t) * len);
if (ret) {
ret = -ENOMEM;
goto exit_fail;
}
pipe->pp_cfg.igc_cfg.c0_c1_data =
pipe->pp_res.igc_c0_c1;
pipe->pp_cfg.igc_cfg.c2_data = pipe->pp_res.igc_c2;
}
if (pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_HIST_CFG) {
if (pipe->pp_cfg.hist_cfg.ops & MDP_PP_OPS_ENABLE) {
hist.block = pipe->pp_cfg.hist_cfg.block;
hist.frame_cnt =
pipe->pp_cfg.hist_cfg.frame_cnt;
hist.bit_mask = pipe->pp_cfg.hist_cfg.bit_mask;
hist.num_bins = pipe->pp_cfg.hist_cfg.num_bins;
mdss_mdp_hist_start(&hist);
} else if (pipe->pp_cfg.hist_cfg.ops &
MDP_PP_OPS_DISABLE) {
mdss_mdp_hist_stop(pipe->pp_cfg.hist_cfg.block);
}
}
len = pipe->pp_cfg.hist_lut_cfg.len;
if ((pipe->pp_cfg.config_ops & MDP_OVERLAY_PP_HIST_LUT_CFG) &&
(len == ENHIST_LUT_ENTRIES)) {
ret = copy_from_user(pipe->pp_res.hist_lut,
pipe->pp_cfg.hist_lut_cfg.data,
sizeof(uint32_t) * len);
if (ret) {
ret = -ENOMEM;
goto exit_fail;
}
pipe->pp_cfg.hist_lut_cfg.data = pipe->pp_res.hist_lut;
}
}
/*
* When scaling is enabled, the src crop and image
* width and height are modified by the user
*/
if ((pipe->flags & MDP_DEINTERLACE)) {
if (pipe->flags & MDP_SOURCE_ROTATED_90) {
pipe->src.x = DIV_ROUND_UP(pipe->src.x, 2);
pipe->src.x &= ~1;
if (!pipe->scale.enable_pxl_ext) {
pipe->src.w /= 2;
pipe->img_width /= 2;
}
} else {
if (!pipe->scale.enable_pxl_ext)
pipe->src.h /= 2;
pipe->src.y = DIV_ROUND_UP(pipe->src.y, 2);
pipe->src.y &= ~1;
}
}
ret = __mdp_pipe_tune_perf(pipe, is_single_layer);
if (ret) {
pr_debug("unable to satisfy performance. ret=%d\n", ret);
goto exit_fail;
}
ret = __mdss_mdp_overlay_setup_scaling(pipe);
if (ret)
goto exit_fail;
if ((mixer->type == MDSS_MDP_MIXER_TYPE_WRITEBACK) &&
(mdp5_data->mdata->wfd_mode == MDSS_MDP_WFD_SHARED))
mdss_mdp_smp_release(pipe);
ret = mdss_mdp_smp_reserve(pipe);
if (ret) {
pr_debug("mdss_mdp_smp_reserve failed. pnum:%d ret=%d\n",
pipe->num, ret);
goto exit_fail;
}
pipe->params_changed++;
pipe->has_buf = 0;
req->vert_deci = pipe->vert_deci;
*ppipe = pipe;
mdss_mdp_pipe_unmap(pipe);
return ret;
exit_fail:
mdss_mdp_pipe_unmap(pipe);
mutex_lock(&mdp5_data->list_lock);
if (pipe->play_cnt == 0) {
pr_debug("failed for pipe %d\n", pipe->num);
if (!list_empty(&pipe->list))
list_del_init(&pipe->list);
mdss_mdp_pipe_destroy(pipe);
}
/* invalidate any overlays in this framebuffer after failure */
list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
pr_debug("freeing allocations for pipe %d\n", pipe->num);
mdss_mdp_smp_unreserve(pipe);
pipe->params_changed = 0;
}
mutex_unlock(&mdp5_data->list_lock);
return ret;
}
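/*
* mdss_mdp_overlay_set() - entry point for overlay set requests
*
* Called from the fb layer (typically for the MSMFB_OVERLAY_SET ioctl):
* rotator-only requests go to the rotator, border-fill requests are given a
* fixed index, and everything else is set up as a display pipe with the
* user's z-order offset by the base stage.
*/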
int mdss_mdp_overlay_set(struct msm_fb_data_type *mfd,
struct mdp_overlay *req)
{
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
int ret;
ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
if (ret)
return ret;
if (!mfd->panel_power_on) {
mutex_unlock(&mdp5_data->ov_lock);
return -EPERM;
}
if (req->flags & MDSS_MDP_ROT_ONLY) {
ret = mdss_mdp_rotator_setup(mfd, req);
} else if (req->src.format == MDP_RGB_BORDERFILL) {
req->id = BORDERFILL_NDX;
} else {
struct mdss_mdp_pipe *pipe;
/* userspace z-order starts at stage 0 */
req->z_order += MDSS_MDP_STAGE_0;
ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe, NULL, false);
req->z_order -= MDSS_MDP_STAGE_0;
}
mutex_unlock(&mdp5_data->ov_lock);
return ret;
}
/**
* __mdss_mdp_overlay_free_list_purge() - clear free list of buffers
* @mfd: Msm frame buffer data structure for the associated fb
*
* Frees memory and clears current list of buffers which are pending free
*/
static void __mdss_mdp_overlay_free_list_purge(struct msm_fb_data_type *mfd)
{
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
int i;
pr_debug("purging fb%d free list\n", mfd->index);
for (i = 0; i < mdp5_data->free_list_size; i++)
mdss_mdp_data_free(&mdp5_data->free_list[i]);
mdp5_data->free_list_size = 0;
}
/**
* __mdss_mdp_overlay_free_list_add() - add a buffer to free list
* @mfd: Msm frame buffer data structure for the associated fb
*/
static void __mdss_mdp_overlay_free_list_add(struct msm_fb_data_type *mfd,
struct mdss_mdp_data *buf)
{
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
int i;
/* if holding too many buffers free current list */
if (mdp5_data->free_list_size >= MAX_FREE_LIST_SIZE) {
pr_warn("max free list size for fb%d, purging\n", mfd->index);
__mdss_mdp_overlay_free_list_purge(mfd);
}
BUG_ON(mdp5_data->free_list_size >= MAX_FREE_LIST_SIZE);
i = mdp5_data->free_list_size++;
mdp5_data->free_list[i] = *buf;
memset(buf, 0, sizeof(*buf));
}
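/*
* mdss_mdp_overlay_cleanup() - post-commit cleanup of overlay pipes
*
* Halts fetch on pipes queued for cleanup (falling back to a recovery
* re-kickoff when a pipe refuses to halt), purges the free list, promotes
* back buffers to front buffers on active pipes, and finally frees and
* destroys the pipes removed in this frame.
*/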
void mdss_mdp_overlay_cleanup(struct msm_fb_data_type *mfd)
{
struct mdss_mdp_pipe *pipe, *tmp;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
bool recovery_mode = false;
LIST_HEAD(destroy_pipes);
mutex_lock(&mdp5_data->list_lock);
list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_cleanup, list) {
list_move(&pipe->list, &destroy_pipes);
/* make sure pipe fetch has been halted before freeing buffer */
if (mdss_mdp_pipe_fetch_halt(pipe)) {
/*
* If the pipe is not able to halt, enter recovery mode
* by un-staging any pipes that are attached to a mixer,
* so that any freed pipes that are not able to halt
* can be staged in solid fill mode and be reset
* with the next vsync
*/
if (!recovery_mode) {
recovery_mode = true;
mdss_mdp_mixer_unstage_all(ctl->mixer_left);
mdss_mdp_mixer_unstage_all(ctl->mixer_right);
}
pipe->params_changed++;
mdss_mdp_pipe_queue_data(pipe, NULL);
}
}
if (recovery_mode) {
pr_warn("performing recovery sequence for fb%d\n", mfd->index);
__overlay_kickoff_requeue(mfd);
}
__mdss_mdp_overlay_free_list_purge(mfd);
list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
if (pipe->back_buf.num_planes) {
/* make back buffer active */
__mdss_mdp_overlay_free_list_add(mfd, &pipe->front_buf);
swap(pipe->back_buf, pipe->front_buf);
}
}
list_for_each_entry_safe(pipe, tmp, &destroy_pipes, list) {
/*
* in case of secure UI, the buffer needs to be released as
* soon as session is closed.
*/
if (pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)
mdss_mdp_data_free(&pipe->front_buf);
else
__mdss_mdp_overlay_free_list_add(mfd, &pipe->front_buf);
mdss_mdp_data_free(&pipe->back_buf);
list_del_init(&pipe->list);
mdss_mdp_pipe_destroy(pipe);
}
mutex_unlock(&mdp5_data->list_lock);
}
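/*
* mdss_mdp_handoff_cleanup_pipes() - release pipes handed off at boot
*
* For every pipe of @type still marked as handed off (e.g. from the
* continuous splash configuration), unstage it and queue it for cleanup on
* the next commit.
*/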
void mdss_mdp_handoff_cleanup_pipes(struct msm_fb_data_type *mfd,
u32 type)
{
u32 i, npipes;
struct mdss_mdp_pipe *pipes;
struct mdss_mdp_pipe *pipe;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
struct mdss_data_type *mdata = mfd_to_mdata(mfd);
switch (type) {
case MDSS_MDP_PIPE_TYPE_VIG:
pipes = mdata->vig_pipes;
npipes = mdata->nvig_pipes;
break;
case MDSS_MDP_PIPE_TYPE_RGB:
pipes = mdata->rgb_pipes;
npipes = mdata->nrgb_pipes;
break;
case MDSS_MDP_PIPE_TYPE_DMA:
pipes = mdata->dma_pipes;
npipes = mdata->ndma_pipes;
break;
default:
return;
}
for (i = 0; i < npipes; i++) {
pipe = &pipes[i];
if (pipe->is_handed_off) {
pr_debug("Unmapping handed off pipe %d\n", pipe->num);
list_add(&pipe->list, &mdp5_data->pipes_cleanup);
mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
pipe->is_handed_off = false;
}
}
}
/**
* mdss_mdp_overlay_start() - Programs the MDP control data path to hardware
* @mfd: Msm frame buffer structure associated with fb device.
*
* Program the MDP hardware with the control settings for the framebuffer
* device. In addition, this function handles the transition
* from the splash screen to the Android boot animation when the
* continuous splash screen feature is enabled.
*/
int mdss_mdp_overlay_start(struct msm_fb_data_type *mfd)
{
int rc;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
struct mdss_mdp_ctl *ctl = mdp5_data->ctl;
if (ctl->power_on) {
if (mdp5_data->mdata->idle_pc) {
rc = mdss_mdp_footswitch_ctrl_idle_pc(1,
&mfd->pdev->dev);
if (rc) {
pr_err("footswtich control power on failed rc=%d\n",
rc);
goto end;
}
mdss_mdp_ctl_restore(ctl);
}
if (!mdp5_data->mdata->batfet)
mdss_mdp_batfet_ctrl(mdp5_data->mdata, true);
mdss_mdp_release_splash_pipe(mfd);
return 0;
} else if (mfd->panel_info->cont_splash_enabled) {
mutex_lock(&mdp5_data->list_lock);
rc = list_empty(&mdp5_data->pipes_used);
mutex_unlock(&mdp5_data->list_lock);
if (rc) {
pr_debug("empty kickoff on fb%d during cont splash\n",
mfd->index);
return 0;
}
}
pr_debug("starting fb%d overlay\n", mfd->index);
/*
* We need to do hw init before any hw programming.
* Also, hw init involves programming the VBIF registers which
* should be done only after attaching IOMMU which in turn would call
* in to TZ to restore security configs on the VBIF registers.
* This is not needed when continuous splash screen is enabled since
* we would have called in to TZ to restore security configs from LK.
*/
if (!is_mdss_iommu_attached()) {
if (!mfd->panel_info->cont_splash_enabled) {
rc = mdss_iommu_ctrl(1);
if (IS_ERR_VALUE(rc)) {
pr_err("iommu attach failed rc=%d\n", rc);
return rc;
}
mdss_hw_init(mdss_res);
mdss_iommu_ctrl(0);
}
}
rc = mdss_mdp_ctl_start(ctl, false);
if (rc == 0) {
atomic_inc(&ov_active_panels);
mdss_mdp_ctl_notifier_register(mdp5_data->ctl,
&mfd->mdp_sync_pt_data.notifier);
} else {
pr_err("mdp ctl start failed.\n");
goto ctl_error;
}
rc = mdss_mdp_splash_cleanup(mfd, true);
if (!rc)
goto end;
ctl_error:
mdss_mdp_ctl_destroy(ctl);
mdp5_data->ctl = NULL;
end:
return rc;
}
static void mdss_mdp_overlay_update_pm(struct mdss_overlay_private *mdp5_data)
{
ktime_t wakeup_time;
if (!mdp5_data->cpu_pm_hdl)
return;
if (mdss_mdp_display_wakeup_time(mdp5_data->ctl, &wakeup_time))
return;
activate_event_timer(mdp5_data->cpu_pm_hdl, wakeup_time);
}
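/*
* __overlay_queue_pipes() - program every staged pipe before kickoff
*
* Skips non-secure pipes while a secure display session is active,
* re-programs DMA pipes and switches them back to the line control path in
* shared writeback (WFD) mode, then queues each pipe's back buffer (or its
* front buffer, or solid fill when no buffer is attached), unstaging any
* pipe whose data cannot be queued.
*/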
static int __overlay_queue_pipes(struct msm_fb_data_type *mfd)
{
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
struct mdss_mdp_pipe *pipe;
struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
struct mdss_mdp_ctl *tmp;
int ret = 0;
list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
struct mdss_mdp_data *buf;
/*
* When secure display is enabled, if there is a non-secure
* display pipe, skip it
*/
if (mdss_get_sd_client_cnt() &&
!(pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)) {
pr_warn("Non secure pipe during secure display: %u: %08X, skip\n",
pipe->num, pipe->flags);
continue;
}
/*
* When external is connected and no dedicated wfd is present,
* reprogram DMA pipe before kickoff to clear out any previous
* block mode configuration.
*/
if ((pipe->type == MDSS_MDP_PIPE_TYPE_DMA) &&
(ctl->shared_lock &&
(ctl->mdata->wfd_mode == MDSS_MDP_WFD_SHARED))) {
if (ctl->mdata->mixer_switched) {
ret = mdss_mdp_overlay_pipe_setup(mfd,
&pipe->req_data, &pipe, NULL, false);
pr_debug("reseting DMA pipe for ctl=%d",
ctl->num);
}
if (ret) {
pr_err("can't reset DMA pipe ret=%d ctl=%d\n",
ret, ctl->num);
return ret;
}
tmp = mdss_mdp_ctl_mixer_switch(ctl,
MDSS_MDP_WB_CTL_TYPE_LINE);
if (!tmp)
return -EINVAL;
pipe->mixer_left = mdss_mdp_mixer_get(tmp,
MDSS_MDP_MIXER_MUX_DEFAULT);
}
/* ensure pipes are always reconfigured after power off/on */
if (ctl->play_cnt == 0)
pipe->params_changed++;
if (pipe->back_buf.num_planes) {
buf = &pipe->back_buf;
ret = mdss_mdp_data_map(buf);
} else if (!pipe->params_changed) {
continue;
} else if (pipe->front_buf.num_planes) {
buf = &pipe->front_buf;
} else {
pr_debug("no buf detected pnum=%d use solid fill\n",
pipe->num);
buf = NULL;
}
if (!IS_ERR_VALUE(ret))
ret = mdss_mdp_pipe_queue_data(pipe, buf);
if (IS_ERR_VALUE(ret)) {
pr_warn("Unable to queue data for pnum=%d\n",
pipe->num);
mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
}
}
return 0;
}
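/*
* __overlay_kickoff_requeue() - recovery commit after a failed pipe halt
*
* Commits the current state and waits for it to complete, reprograms all
* staged pipes and commits once more so that pipes forced into solid fill
* are picked up by the hardware.
*/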
static void __overlay_kickoff_requeue(struct msm_fb_data_type *mfd)
{
struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
mdss_mdp_display_commit(ctl, NULL, NULL);
mdss_mdp_display_wait4comp(ctl);
ATRACE_BEGIN("sspp_programming");
__overlay_queue_pipes(mfd);
ATRACE_END("sspp_programming");
mdss_mdp_display_commit(ctl, NULL, NULL);
mdss_mdp_display_wait4comp(ctl);
}
static int mdss_mdp_commit_cb(enum mdp_commit_stage_type commit_stage,
void *data)
{
int ret = 0;
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
struct mdss_mdp_ctl *ctl;
switch (commit_stage) {
case MDP_COMMIT_STAGE_WAIT_FOR_PINGPONG:
ctl = mfd_to_ctl(mfd);
mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_START);
mutex_unlock(&mdp5_data->ov_lock);
break;
case MDP_COMMIT_STAGE_PINGPONG_DONE:
mutex_lock(&mdp5_data->ov_lock);
break;
default:
pr_err("Invalid commit stage %x", commit_stage);
break;
}
return ret;
}
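/*
* mdss_mdp_overlay_kickoff() - commit the current overlay state to hardware
*
* Starts the overlay path if necessary, handles secure display session
* entry/exit, programs the ROI and all staged pipes, issues the display
* commit (writeback or panel), waits for completion and then cleans up any
* pipes that were removed in this frame.
*/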
int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
struct mdp_display_commit *data)
{
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
struct mdss_mdp_pipe *pipe;
struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
struct mdp_display_commit temp_data;
int ret = 0;
int sd_in_pipe = 0;
bool need_cleanup = false;
struct mdss_mdp_commit_cb commit_cb;
ATRACE_BEGIN(__func__);
if (!ctl) {
pr_warn("kickoff on fb=%d without a ctl attched\n", mfd->index);
return ret;
}
if (ctl->shared_lock)
mutex_lock(ctl->shared_lock);
mutex_lock(&mdp5_data->ov_lock);
ret = mdss_mdp_overlay_start(mfd);
if (ret) {
pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
mutex_unlock(&mdp5_data->ov_lock);
if (ctl->shared_lock)
mutex_unlock(ctl->shared_lock);
return ret;
}
ret = mdss_iommu_ctrl(1);
if (IS_ERR_VALUE(ret)) {
pr_err("iommu attach failed rc=%d\n", ret);
mutex_unlock(&mdp5_data->ov_lock);
if (ctl->shared_lock)
mutex_unlock(ctl->shared_lock);
return ret;
}
mutex_lock(&mdp5_data->list_lock);
/*
* check if there is a secure display session
*/
list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
if (pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION) {
sd_in_pipe = 1;
pr_debug("Secure pipe: %u : %08X\n",
pipe->num, pipe->flags);
}
}
/*
* If there is no secure display session and sd_enabled, disable the
* secure display session
*/
if (!sd_in_pipe && mdp5_data->sd_enabled) {
/* disable the secure display on last client */
if (mdss_get_sd_client_cnt() == 1)
ret = mdss_mdp_secure_display_ctrl(0);
if (!ret) {
mdss_update_sd_client(mdp5_data->mdata, false);
mdp5_data->sd_enabled = 0;
}
}
mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_BEGIN);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
__vsync_set_vsync_handler(mfd);
if (data) {
mdss_mdp_set_roi(ctl, data);
} else {
temp_data.l_roi = (struct mdp_rect){0, 0,
ctl->mixer_left->width, ctl->mixer_left->height};
if (ctl->mixer_right) {
temp_data.r_roi = (struct mdp_rect) {0, 0,
ctl->mixer_right->width, ctl->mixer_right->height};
}
mdss_mdp_set_roi(ctl, &temp_data);
}
/*
* Setup pipe in solid fill before unstaging,
* to ensure no fetches are happening after detach or reattach.
*/
list_for_each_entry(pipe, &mdp5_data->pipes_cleanup, list) {
mdss_mdp_pipe_queue_data(pipe, NULL);
mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_left);
mdss_mdp_mixer_pipe_unstage(pipe, pipe->mixer_right);
need_cleanup = true;
}
ATRACE_BEGIN("sspp_programming");
ret = __overlay_queue_pipes(mfd);
ATRACE_END("sspp_programming");
mutex_unlock(&mdp5_data->list_lock);
if (mfd->panel.type == WRITEBACK_PANEL) {
ATRACE_BEGIN("wb_kickoff");
ret = mdss_mdp_wb_kickoff(mfd);
ATRACE_END("wb_kickoff");
} else if (!need_cleanup) {
commit_cb.commit_cb_fnc = mdss_mdp_commit_cb;
commit_cb.data = mfd;
ATRACE_BEGIN("display_commit");
ret = mdss_mdp_display_commit(mdp5_data->ctl, NULL,
&commit_cb);
ATRACE_END("display_commit");
} else {
ATRACE_BEGIN("display_commit");
ret = mdss_mdp_display_commit(mdp5_data->ctl, NULL,
NULL);
ATRACE_END("display_commit");
}
/* MDP_NOTIFY_FRAME_START is sent in cb for command panel */
if ((!need_cleanup) && (!mdp5_data->ctl->wait_pingpong))
mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_START);
if (IS_ERR_VALUE(ret))
goto commit_fail;
mutex_unlock(&mdp5_data->ov_lock);
mdss_mdp_overlay_update_pm(mdp5_data);
ATRACE_BEGIN("display_wait4comp");
ret = mdss_mdp_display_wait4comp(mdp5_data->ctl);
ATRACE_END("display_wait4comp");
mutex_lock(&mdp5_data->ov_lock);
if (ret == 0) {
if (!mdp5_data->sd_enabled && sd_in_pipe) {
if (!mdss_get_sd_client_cnt())
ret = mdss_mdp_secure_display_ctrl(1);
if (!ret) {
mdp5_data->sd_enabled = 1;
mdss_update_sd_client(mdp5_data->mdata, true);
}
}
}
mdss_fb_update_notify_update(mfd);
commit_fail:
ATRACE_BEGIN("overlay_cleanup");
mdss_mdp_overlay_cleanup(mfd);
ATRACE_END("overlay_cleanup");
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_FLUSHED);
if (need_cleanup)
mdss_mdp_ctl_notify(ctl, MDP_NOTIFY_FRAME_START);
mutex_unlock(&mdp5_data->ov_lock);
if (ctl->shared_lock)
mutex_unlock(ctl->shared_lock);
mdss_iommu_ctrl(0);
ATRACE_END(__func__);
return ret;
}
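/*
* mdss_mdp_overlay_release() - drop the overlays selected by an index mask
*
* Removes every pipe whose bit is set in @ndx from the fb's used list,
* destroying it immediately if it was never played or moving it to the
* cleanup list for the next commit; the _sub variant can additionally
* switch the pipe to solid fill and unstage it right away.
*/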
int mdss_mdp_overlay_release(struct msm_fb_data_type *mfd, int ndx)
{
return mdss_mdp_overlay_release_sub(mfd, ndx, false);
}
int mdss_mdp_overlay_release_sub(struct msm_fb_data_type *mfd, int ndx,
bool unstage)
{
struct mdss_mdp_pipe *pipe, *tmp;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
u32 unset_ndx = 0;
int destroy_pipe;
mutex_lock(&mdp5_data->list_lock);
list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
if (pipe->ndx & ndx) {
if (mdss_mdp_pipe_map(pipe)) {
pr_err("Unable to map used pipe%d ndx=%x\n",
pipe->num, pipe->ndx);
continue;
}
unset_ndx |= pipe->ndx;
pipe->pid = 0;
destroy_pipe = pipe->play_cnt == 0;
if (destroy_pipe)
list_del_init(&pipe->list);
else
list_move(&pipe->list,
&mdp5_data->pipes_cleanup);
if (unstage) {
mdss_mdp_pipe_queue_data(pipe, NULL);
mdss_mdp_mixer_pipe_unstage(pipe,
pipe->mixer_left);
mdss_mdp_mixer_pipe_unstage(pipe,
pipe->mixer_right);
}
mdss_mdp_pipe_unmap(pipe);
if (destroy_pipe)
mdss_mdp_pipe_destroy(pipe);
if (unset_ndx == ndx)
break;
}
}
mutex_unlock(&mdp5_data->list_lock);
if (unset_ndx != ndx) {
pr_warn("Unable to unset pipe(s) ndx=0x%x unset=0x%x\n",
ndx, unset_ndx);
return -ENOENT;
}
return 0;
}
int mdss_mdp_overlay_unset(struct msm_fb_data_type *mfd, int ndx)
{
int ret = 0;
struct mdss_overlay_private *mdp5_data;
if (!mfd)
return -ENODEV;
mdp5_data = mfd_to_mdp5_data(mfd);
if (!mdp5_data || !mdp5_data->ctl)
return -ENODEV;
ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
if (ret)
return ret;
if (ndx == BORDERFILL_NDX) {
pr_debug("borderfill disable\n");
mdp5_data->borderfill_enable = false;
ret = 0;
goto done;
}
if (!mfd->panel_power_on) {
ret = -EPERM;
goto done;
}
pr_debug("unset ndx=%x\n", ndx);
if (ndx & MDSS_MDP_ROT_SESSION_MASK) {
ret = mdss_mdp_rotator_unset(ndx);
} else {
ret = mdss_mdp_overlay_release(mfd, ndx);
}
done:
mutex_unlock(&mdp5_data->ov_lock);
return ret;
}
/**
* mdss_mdp_overlay_release_all() - release any overlays associated with fb dev
* @mfd: Msm frame buffer structure associated with fb device
* @release_all: ignore pid and release all the pipes
*
* Release any resources allocated by calling process, this can be called
* on fb_release to release any overlays/rotator sessions left open.
*/
static int __mdss_mdp_overlay_release_all(struct msm_fb_data_type *mfd,
bool release_all, uint32_t pid)
{
struct mdss_mdp_pipe *pipe;
struct mdss_mdp_rotator_session *rot, *tmp;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
u32 unset_ndx = 0;
int cnt = 0;
pr_debug("releasing all resources for fb%d pid=%d\n", mfd->index, pid);
mutex_lock(&mdp5_data->ov_lock);
mutex_lock(&mdp5_data->list_lock);
list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
if (release_all || (pipe->pid == pid)) {
unset_ndx |= pipe->ndx;
cnt++;
}
}
if (cnt == 0 && !list_empty(&mdp5_data->pipes_cleanup)) {
pr_debug("overlay release on fb%d called without commit!",
mfd->index);
cnt++;
}
pr_debug("release_all=%d mfd->ref_cnt=%d unset_ndx=0x%x cnt=%d\n",
release_all, mfd->ref_cnt, unset_ndx, cnt);
mutex_unlock(&mdp5_data->list_lock);
if (unset_ndx) {
pr_debug("%d pipes need cleanup (%x)\n", cnt, unset_ndx);
mdss_mdp_overlay_release(mfd, unset_ndx);
}
mutex_unlock(&mdp5_data->ov_lock);
if (cnt)
mfd->mdp.kickoff_fnc(mfd, NULL);
list_for_each_entry_safe(rot, tmp, &mdp5_data->rot_proc_list, list) {
if (rot->pid == pid) {
if (!list_empty(&rot->list))
list_del_init(&rot->list);
mdss_mdp_rotator_release(rot);
}
}
return 0;
}
static int mdss_mdp_overlay_play_wait(struct msm_fb_data_type *mfd,
struct msmfb_overlay_data *req)
{
int ret = 0;
if (!mfd)
return -ENODEV;
ret = mfd->mdp.kickoff_fnc(mfd, NULL);
if (ret)
pr_err("error displaying\n");
return ret;
}
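/*
* mdss_mdp_overlay_queue() - attach a userspace buffer to an overlay pipe
*
* Maps the memory described by @req into the pipe's back buffer so the next
* kickoff can display it; a buffer still pending from a previous queue is
* dropped and freed first.
*/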
static int mdss_mdp_overlay_queue(struct msm_fb_data_type *mfd,
struct msmfb_overlay_data *req)
{
struct mdss_mdp_pipe *pipe;
struct mdss_mdp_data *src_data;
int ret;
u32 flags;
pipe = __overlay_find_pipe(mfd, req->id);
if (!pipe) {
pr_err("pipe ndx=%x doesn't exist\n", req->id);
return -ENODEV;
}
ret = mdss_mdp_pipe_map(pipe);
if (IS_ERR_VALUE(ret)) {
pr_err("Unable to map used pipe%d ndx=%x\n",
pipe->num, pipe->ndx);
return ret;
}
pr_debug("ov queue pnum=%d\n", pipe->num);
if (pipe->flags & MDP_SOLID_FILL)
pr_warn("Unexpected buffer queue to a solid fill pipe\n");
flags = (pipe->flags & MDP_SECURE_OVERLAY_SESSION);
src_data = &pipe->back_buf;
if (src_data->num_planes) {
pr_warn("dropped buffer pnum=%d play=%d addr=0x%pa\n",
pipe->num, pipe->play_cnt, &src_data->p[0].addr);
mdss_mdp_data_free(src_data);
}
ret = mdss_mdp_data_get(src_data, &req->data, 1, flags);
if (IS_ERR_VALUE(ret))
pr_err("src_data pmem error\n");
pipe->has_buf = !ret;
mdss_mdp_pipe_unmap(pipe);
return ret;
}
int mdss_mdp_overlay_play(struct msm_fb_data_type *mfd,
struct msmfb_overlay_data *req)
{
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
int ret = 0;
pr_debug("play req id=%x\n", req->id);
ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
if (ret)
return ret;
if (!mfd->panel_power_on) {
ret = -EPERM;
goto done;
}
if (req->id & MDSS_MDP_ROT_SESSION_MASK) {
ret = mdss_mdp_rotator_play(mfd, req);
} else if (req->id == BORDERFILL_NDX) {
pr_debug("borderfill enable\n");
mdp5_data->borderfill_enable = true;
ret = mdss_mdp_overlay_free_fb_pipe(mfd);
} else {
ret = mdss_mdp_overlay_queue(mfd, req);
}
done:
mutex_unlock(&mdp5_data->ov_lock);
return ret;
}
static int mdss_mdp_overlay_free_fb_pipe(struct msm_fb_data_type *mfd)
{
struct mdss_mdp_pipe *pipe;
u32 fb_ndx = 0;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
pipe = mdss_mdp_get_staged_pipe(mdp5_data->ctl,
MDSS_MDP_MIXER_MUX_LEFT, MDSS_MDP_STAGE_BASE, false);
if (pipe)
fb_ndx |= pipe->ndx;
pipe = mdss_mdp_get_staged_pipe(mdp5_data->ctl,
MDSS_MDP_MIXER_MUX_RIGHT, MDSS_MDP_STAGE_BASE, false);
if (pipe)
fb_ndx |= pipe->ndx;
if (fb_ndx) {
pr_debug("unstaging framebuffer pipes %x\n", fb_ndx);
mdss_mdp_overlay_release(mfd, fb_ndx);
}
return 0;
}
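/*
* mdss_mdp_overlay_get_fb_pipe() - find or create the base framebuffer pipe
*
* Returns the pipe staged at the base stage of the requested mixer; when
* none exists, a request is built from the fb_info geometry (splitting the
* source across mixers on dual-LM panels) and run through the normal pipe
* setup path.
*/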
static int mdss_mdp_overlay_get_fb_pipe(struct msm_fb_data_type *mfd,
struct mdss_mdp_pipe **ppipe,
int mixer_mux)
{
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
struct mdss_mdp_pipe *pipe;
int ret;
pipe = mdss_mdp_get_staged_pipe(mdp5_data->ctl, mixer_mux,
MDSS_MDP_STAGE_BASE, false);
if (pipe == NULL) {
struct mdp_overlay req;
struct fb_info *fbi = mfd->fbi;
struct mdss_mdp_mixer *mixer;
int bpp;
mixer = mdss_mdp_mixer_get(mdp5_data->ctl,
MDSS_MDP_MIXER_MUX_LEFT);
if (!mixer) {
pr_err("unable to retrieve mixer\n");
return -ENODEV;
}
memset(&req, 0, sizeof(req));
bpp = fbi->var.bits_per_pixel / 8;
req.id = MSMFB_NEW_REQUEST;
req.src.format = mfd->fb_imgType;
req.src.height = fbi->var.yres;
req.src.width = fbi->fix.line_length / bpp;
if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT) {
if (req.src.width <= mixer->width) {
pr_warn("right fb pipe not needed\n");
return -EINVAL;
}
}
if ((mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT) !=
(mdp5_data->fb_rot_180 != 0)) {
req.src_rect.x = mixer->width;
req.src_rect.w = fbi->var.xres - mixer->width;
} else {
req.src_rect.x = 0;
req.src_rect.w = MIN(fbi->var.xres,
mixer->width);
}
req.src_rect.y = 0;
req.src_rect.h = req.src.height;
if (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT) {
req.dst_rect.x = mixer->width;
req.dst_rect.w = fbi->var.xres - mixer->width;
} else {
req.dst_rect.x = 0;
req.dst_rect.w = MIN(fbi->var.xres,
mixer->width);
}
req.dst_rect.y = req.src_rect.y;
req.dst_rect.h = req.src_rect.h;
req.z_order = MDSS_MDP_STAGE_BASE;
if (mdp5_data->fb_rot_180)
req.flags |= MDP_ROT_180;
pr_debug("allocating base pipe mux=%d\n", mixer_mux);
ret = mdss_mdp_overlay_pipe_setup(mfd, &req, &pipe, NULL,
false);
if (ret)
return ret;
}
pr_debug("ctl=%d pnum=%d\n", mdp5_data->ctl->num, pipe->num);
*ppipe = pipe;
return 0;
}
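/*
* mdss_mdp_overlay_pan_display() - fb pan/display path
*
* Points the base pipe(s) at the framebuffer memory at the current pan
* offset and kicks off a frame, holding the IOMMU and clock references for
* the duration of the update.
*/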
static void mdss_mdp_overlay_pan_display(struct msm_fb_data_type *mfd)
{
struct mdss_mdp_data *buf;
struct mdss_mdp_pipe *pipe;
struct fb_info *fbi;
struct mdss_overlay_private *mdp5_data;
u32 offset;
int bpp, ret;
if (!mfd)
return;
fbi = mfd->fbi;
mdp5_data = mfd_to_mdp5_data(mfd);
if (!mdp5_data || !mdp5_data->ctl)
return;
if (!fbi->fix.smem_start || fbi->fix.smem_len == 0 ||
mdp5_data->borderfill_enable) {
mfd->mdp.kickoff_fnc(mfd, NULL);
return;
}
if (mutex_lock_interruptible(&mdp5_data->ov_lock))
return;
/* allow pan-display for CMD panels in DCM state at panel-off */
if ((!mfd->panel_power_on) && !((mfd->dcm_state == DCM_ENTER) &&
(mfd->panel.type == MIPI_CMD_PANEL))) {
mutex_unlock(&mdp5_data->ov_lock);
return;
}
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
ret = mdss_iommu_ctrl(1);
if (IS_ERR_VALUE(ret)) {
pr_err("IOMMU attach failed\n");
goto pan_display_error;
}
bpp = fbi->var.bits_per_pixel / 8;
offset = fbi->var.xoffset * bpp +
fbi->var.yoffset * fbi->fix.line_length;
if (offset > fbi->fix.smem_len) {
pr_err("invalid fb offset=%u total length=%u\n",
offset, fbi->fix.smem_len);
goto pan_display_error;
}
ret = mdss_mdp_overlay_start(mfd);
if (ret) {
pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
goto pan_display_error;
}
ret = mdss_mdp_overlay_get_fb_pipe(mfd, &pipe,
MDSS_MDP_MIXER_MUX_LEFT);
if (ret) {
pr_err("unable to allocate base pipe\n");
goto pan_display_error;
}
if (mdss_mdp_pipe_map(pipe)) {
pr_err("unable to map base pipe\n");
goto pan_display_error;
}
buf = &pipe->back_buf;
if (is_mdss_iommu_attached()) {
if (!mfd->iova) {
pr_err("mfd iova is zero\n");
mdss_mdp_pipe_unmap(pipe);
goto pan_display_error;
}
buf->p[0].addr = mfd->iova;
} else {
buf->p[0].addr = fbi->fix.smem_start;
}
buf->p[0].addr += offset;
buf->p[0].len = fbi->fix.smem_len - offset;
buf->num_planes = 1;
pipe->has_buf = 1;
mdss_mdp_pipe_unmap(pipe);
if (fbi->var.xres > MAX_MIXER_WIDTH || mfd->split_display) {
ret = mdss_mdp_overlay_get_fb_pipe(mfd, &pipe,
MDSS_MDP_MIXER_MUX_RIGHT);
if (ret) {
pr_err("unable to allocate right base pipe\n");
goto pan_display_error;
}
if (mdss_mdp_pipe_map(pipe)) {
pr_err("unable to map right base pipe\n");
goto pan_display_error;
}
pipe->back_buf = *buf;
pipe->has_buf = 1;
mdss_mdp_pipe_unmap(pipe);
}
mutex_unlock(&mdp5_data->ov_lock);
if ((fbi->var.activate & FB_ACTIVATE_VBL) ||
(fbi->var.activate & FB_ACTIVATE_FORCE))
mfd->mdp.kickoff_fnc(mfd, NULL);
mdss_iommu_ctrl(0);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
return;
pan_display_error:
mdss_iommu_ctrl(0);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
mutex_unlock(&mdp5_data->ov_lock);
}
/* function is called in irq context should have minimum processing */
static void mdss_mdp_overlay_handle_vsync(struct mdss_mdp_ctl *ctl,
ktime_t t)
{
struct msm_fb_data_type *mfd = NULL;
struct mdss_overlay_private *mdp5_data = NULL;
if (!ctl) {
pr_err("ctl is NULL\n");
return;
}
mfd = ctl->mfd;
if (!mfd || !mfd->mdp.private1) {
pr_warn("Invalid handle for vsync\n");
return;
}
mdp5_data = mfd_to_mdp5_data(mfd);
if (!mdp5_data) {
pr_err("mdp5_data is NULL\n");
return;
}
pr_debug("vsync on fb%d play_cnt=%d\n", mfd->index, ctl->play_cnt);
mdp5_data->vsync_time = t;
sysfs_notify_dirent(mdp5_data->vsync_event_sd);
}
int mdss_mdp_overlay_vsync_ctrl(struct msm_fb_data_type *mfd, int en)
{
struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
int rc;
if (!ctl)
return -ENODEV;
if (!ctl->add_vsync_handler || !ctl->remove_vsync_handler)
return -EOPNOTSUPP;
if (!ctl->panel_data->panel_info.cont_splash_enabled
&& !ctl->power_on) {
pr_debug("fb%d vsync pending first update en=%d\n",
mfd->index, en);
return -EPERM;
}
pr_debug("fb%d vsync en=%d\n", mfd->index, en);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
if (en)
rc = ctl->add_vsync_handler(ctl, &ctl->vsync_handler);
else
rc = ctl->remove_vsync_handler(ctl, &ctl->vsync_handler);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
return rc;
}
static ssize_t dynamic_fps_sysfs_rda_dfps(struct device *dev,
struct device_attribute *attr, char *buf)
{
ssize_t ret;
struct mdss_panel_data *pdata;
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
if (!mdp5_data->ctl || !mdp5_data->ctl->power_on)
return 0;
pdata = dev_get_platdata(&mfd->pdev->dev);
if (!pdata) {
pr_err("no panel connected for fb%d\n", mfd->index);
return -ENODEV;
}
ret = snprintf(buf, PAGE_SIZE, "%d\n",
pdata->panel_info.mipi.frame_rate);
pr_debug("%s: '%d'\n", __func__,
pdata->panel_info.mipi.frame_rate);
return ret;
} /* dynamic_fps_sysfs_rda_dfps */
static ssize_t dynamic_fps_sysfs_wta_dfps(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
int dfps, rc = 0;
struct mdss_panel_data *pdata;
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
rc = kstrtoint(buf, 10, &dfps);
if (rc) {
pr_err("%s: kstrtoint failed. rc=%d\n", __func__, rc);
return rc;
}
if (!mdp5_data->ctl || !mdp5_data->ctl->power_on)
return 0;
pdata = dev_get_platdata(&mfd->pdev->dev);
if (!pdata) {
pr_err("no panel connected for fb%d\n", mfd->index);
return -ENODEV;
}
if (dfps == pdata->panel_info.mipi.frame_rate) {
pr_debug("%s: FPS is already %d\n",
__func__, dfps);
return count;
}
if (dfps < 30) {
pr_err("Unsupported FPS. Configuring to min_fps = 30\n");
dfps = 30;
rc = mdss_mdp_ctl_update_fps(mdp5_data->ctl, dfps);
} else if (dfps > 60) {
pr_err("Unsupported FPS. Configuring to max_fps = 60\n");
dfps = 60;
rc = mdss_mdp_ctl_update_fps(mdp5_data->ctl, dfps);
} else {
rc = mdss_mdp_ctl_update_fps(mdp5_data->ctl, dfps);
}
if (!rc) {
pr_info("%s: configured to '%d' FPS\n", __func__, dfps);
} else {
pr_err("Failed to configure '%d' FPS. rc = %d\n",
dfps, rc);
return rc;
}
pdata->panel_info.new_fps = dfps;
return count;
} /* dynamic_fps_sysfs_wta_dfps */
static DEVICE_ATTR(dynamic_fps, S_IRUGO | S_IWUSR, dynamic_fps_sysfs_rda_dfps,
dynamic_fps_sysfs_wta_dfps);
static struct attribute *dynamic_fps_fs_attrs[] = {
&dev_attr_dynamic_fps.attr,
NULL,
};
static struct attribute_group dynamic_fps_fs_attrs_group = {
.attrs = dynamic_fps_fs_attrs,
};
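/*
* Illustrative usage of the dynamic_fps attribute (the sysfs path below is
* an assumption and may differ per target; the attribute is created on the
* fb device node this group is registered against):
*   cat /sys/class/graphics/fb0/dynamic_fps        - read the panel frame rate
*   echo 45 > /sys/class/graphics/fb0/dynamic_fps  - request 45 fps (clamped to 30..60)
*/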
static ssize_t frame_counter_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
struct mdss_mdp_mixer *mixer;
u32 reg;
if (!ctl) {
pr_warn("there is no ctl attached to fb\n");
return -ENODEV;
}
mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_DEFAULT);
if (!mixer) {
pr_warn("there is no mixer\n");
return -ENODEV;
}
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
reg = mdss_mdp_pingpong_read(mixer,
MDSS_MDP_REG_PP_INT_COUNT_VAL) >> 16;
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
return snprintf(buf, PAGE_SIZE, "%d\n", reg);
}
static DEVICE_ATTR(frame_counter, S_IRUSR | S_IRGRP, frame_counter_show, NULL);
static int te_status = -1;
static ssize_t te_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
struct mdss_mdp_pp_tear_check *te;
if (!ctl || !ctl->panel_data) {
pr_warn("%s: there is no ctl or panel_data\n", __func__);
return -ENODEV;
}
te = &ctl->panel_data->panel_info.te;
if (!te) {
pr_warn("%s: there is no te information\n", __func__);
return -ENODEV;
}
if (te_status < 0)
te_status = te->tear_check_en;
return snprintf(buf, PAGE_SIZE, "%d\n", te_status);
}
static ssize_t te_enable_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
struct mdss_mdp_mixer *mixer;
static int prev_height;
int enable;
int r = 0;
int i, mux;
if (!ctl || !mdp5_data) {
pr_warn("there is no ctl or mdp5_data attached to fb\n");
r = -ENODEV;
goto end;
}
if (!mfd->panel_power_on) {
pr_warn("panel is not powered\n");
r = -EPERM;
goto end;
}
r = kstrtoint(buf, 0, &enable);
if ((r) || ((enable != 0) && (enable != 1))) {
pr_err("invalid TE enable value = %d\n",
enable);
r = -EINVAL;
goto end;
}
mutex_lock(&mdp5_data->ov_lock);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
if (te_status == enable) {
pr_info("te_status is not changed. Do nothing\n");
goto locked_end;
}
for (i = 0; i < 2; i++) {
if (i == 0)
mux = MDSS_MDP_MIXER_MUX_LEFT;
else if (i == 1)
mux = MDSS_MDP_MIXER_MUX_RIGHT;
mixer = mdss_mdp_mixer_get(ctl, mux);
if (!mixer) {
pr_warn("There is no mixer for mux = %d\n", i);
continue;
}
/* The TE max height in MDP is being set to a max value of
* 0xFFF0. Since this is such a large number, when TE is
* disabled from the panel, we'll start to get constant timeout
* errors and get 1 FPS. To prevent this from happening, set
* the height to display height * 2. This will just cause our
* FPS to drop to 30 FPS, and prevent timeout errors. */
if (!enable) {
prev_height = mdss_mdp_pingpong_read(mixer,
MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
mdss_mdp_pingpong_write(mixer,
MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT,
mfd->fbi->var.yres * 2);
} else if (enable && prev_height) {
mdss_mdp_pingpong_write(mixer,
MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT,
prev_height);
}
r = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_ENABLE_TE,
(void *) enable);
if (r) {
pr_err("Failed sending TE command, r=%d\n", r);
r = -EFAULT;
goto locked_end;
} else
te_status = enable;
}
locked_end:
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
mutex_unlock(&mdp5_data->ov_lock);
end:
return r ? r : count;
}
static DEVICE_ATTR(te_enable, S_IRUSR | S_IWUSR | S_IRGRP | S_IWGRP,
te_enable_show, te_enable_store);
static struct attribute *factory_te_attrs[] = {
&dev_attr_frame_counter.attr,
&dev_attr_te_enable.attr,
NULL,
};
static struct attribute_group factory_te_attrs_group = {
.attrs = factory_te_attrs,
};
static ssize_t hbm_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
if (!ctl) {
pr_warning("there is no ctl attached to fb\n");
return -ENODEV;
}
return snprintf(buf, PAGE_SIZE, "%d\n",
ctl->panel_data->panel_info.hbm_state);
}
static ssize_t hbm_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
int enable;
int r;
if (!ctl) {
pr_warning("there is no ctl attached to fb\n");
r = -ENODEV;
goto end;
}
r = kstrtoint(buf, 0, &enable);
if ((r) || ((enable != 0) && (enable != 1))) {
pr_err("invalid HBM value = %d\n",
enable);
r = -EINVAL;
goto end;
}
mutex_lock(&ctl->offlock);
if (!mfd->panel_power_on) {
pr_warning("panel is not powered\n");
r = -EPERM;
goto unlock;
}
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
r = mdss_mdp_ctl_intf_event(ctl, MDSS_EVENT_ENABLE_HBM,
(void *) enable);
if (r) {
pr_err("Failed sending HBM command, r = %d\n", r);
r = -EFAULT;
} else
pr_info("HBM state changed by sysfs, state = %d\n", enable);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
unlock:
mutex_unlock(&ctl->offlock);
end:
return r ? r : count;
}
static DEVICE_ATTR(hbm, S_IWUSR | S_IWGRP | S_IRUSR | S_IRGRP,
hbm_show, hbm_store);
static struct attribute *hbm_attrs[] = {
&dev_attr_hbm.attr,
NULL,
};
static struct attribute_group hbm_attrs_group = {
.attrs = hbm_attrs,
};
static ssize_t mdss_mdp_vsync_show_event(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)fbi->par;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
u64 vsync_ticks;
int ret;
if (!mdp5_data->ctl ||
(!mdp5_data->ctl->panel_data->panel_info.cont_splash_enabled
&& !mdp5_data->ctl->power_on))
return -EAGAIN;
vsync_ticks = ktime_to_ns(mdp5_data->vsync_time);
pr_debug("fb%d vsync=%llu", mfd->index, vsync_ticks);
ret = scnprintf(buf, PAGE_SIZE, "VSYNC=%llu\n", vsync_ticks);
return ret;
}
static inline int mdss_mdp_ad_is_supported(struct msm_fb_data_type *mfd)
{
struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
struct mdss_mdp_mixer *mixer;
if (!ctl) {
pr_debug("there is no ctl attached to fb\n");
return 0;
}
mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
if (mixer && (mixer->num > ctl->mdata->nad_cfgs)) {
if (!mixer)
pr_warn("there is no mixer attached to fb\n");
else
pr_debug("mixer attached (%d) doesnt support ad\n",
mixer->num);
return 0;
}
mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
if (mixer && (mixer->num > ctl->mdata->nad_cfgs))
return 0;
return 1;
}
static ssize_t mdss_mdp_ad_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = fbi->par;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
int ret, state;
state = mdss_mdp_ad_is_supported(mfd) ? mdp5_data->ad_state : -1;
ret = scnprintf(buf, PAGE_SIZE, "%d", state);
return ret;
}
static ssize_t mdss_mdp_ad_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct fb_info *fbi = dev_get_drvdata(dev);
struct msm_fb_data_type *mfd = fbi->par;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
int ret, ad;
ret = kstrtoint(buf, 10, &ad);
if (ret) {
pr_err("Invalid input for ad\n");
return -EINVAL;
}
mdp5_data->ad_state = ad;
sysfs_notify(&dev->kobj, NULL, "ad");
return count;
}
static DEVICE_ATTR(vsync_event, S_IRUGO, mdss_mdp_vsync_show_event, NULL);
static DEVICE_ATTR(ad, S_IRUGO | S_IWUSR | S_IWGRP, mdss_mdp_ad_show,
mdss_mdp_ad_store);
static struct attribute *mdp_overlay_sysfs_attrs[] = {
&dev_attr_vsync_event.attr,
&dev_attr_ad.attr,
NULL,
};
static struct attribute_group mdp_overlay_sysfs_group = {
.attrs = mdp_overlay_sysfs_attrs,
};
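/*
* mdss_mdp_hw_cursor_update() - program the layer mixer hardware cursor
*
* Allocates and IOMMU-maps the cursor buffer on first use, validates the
* image size and hotspot against the panel resolution, copies the ARGB
* image from userspace and writes the cursor position, size, stride and
* blend configuration registers.
*/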
static int mdss_mdp_hw_cursor_update(struct msm_fb_data_type *mfd,
struct fb_cursor *cursor)
{
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
struct mdss_mdp_mixer *mixer;
struct fb_image *img = &cursor->image;
u32 blendcfg;
int ret = 0;
u32 xres = mfd->fbi->var.xres;
u32 yres = mfd->fbi->var.yres;
u32 start_x = img->dx;
u32 start_y = img->dy;
u32 roi_x = 0;
u32 roi_y = 0;
int roi_w = 0;
int roi_h = 0;
int roi_size = 0;
mixer = mdss_mdp_mixer_get(mdp5_data->ctl, MDSS_MDP_MIXER_MUX_DEFAULT);
if (!mixer)
return -ENODEV;
if (!mfd->cursor_buf && (cursor->set & FB_CUR_SETIMAGE)) {
mfd->cursor_buf = dma_alloc_coherent(NULL, MDSS_MDP_CURSOR_SIZE,
(dma_addr_t *) &mfd->cursor_buf_phys,
GFP_KERNEL);
if (!mfd->cursor_buf) {
pr_err("can't allocate cursor buffer\n");
return -ENOMEM;
}
ret = msm_iommu_map_contig_buffer(mfd->cursor_buf_phys,
mdss_get_iommu_domain(MDSS_IOMMU_DOMAIN_UNSECURE),
0, MDSS_MDP_CURSOR_SIZE, SZ_4K, 0,
&(mfd->cursor_buf_iova));
if (IS_ERR_VALUE(ret)) {
dma_free_coherent(NULL, MDSS_MDP_CURSOR_SIZE,
mfd->cursor_buf,
(dma_addr_t) mfd->cursor_buf_phys);
pr_err("unable to map cursor buffer to iommu(%d)\n",
ret);
return ret;
}
mixer->cursor_hotx = 0;
mixer->cursor_hoty = 0;
}
if ((img->width > MDSS_MDP_CURSOR_WIDTH) ||
(img->height > MDSS_MDP_CURSOR_HEIGHT) ||
(img->depth != 32) || (start_x >= xres) || (start_y >= yres))
return -EINVAL;
pr_debug("mixer=%d enable=%x set=%x\n", mixer->num, cursor->enable,
cursor->set);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
blendcfg = mdp_mixer_read(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG);
if (cursor->set & FB_CUR_SETHOT) {
if ((cursor->hot.x < img->width) &&
(cursor->hot.y < img->height)) {
mixer->cursor_hotx = cursor->hot.x;
mixer->cursor_hoty = cursor->hot.y;
/* Update cursor position */
cursor->set |= FB_CUR_SETPOS;
} else {
pr_err("Invalid cursor hotspot coordinates\n");
/* release the clock vote taken earlier in this function */
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
return -EINVAL;
}
}
if (start_x > mixer->cursor_hotx) {
start_x -= mixer->cursor_hotx;
} else {
roi_x = mixer->cursor_hotx - start_x;
start_x = 0;
}
if (start_y > mixer->cursor_hoty) {
start_y -= mixer->cursor_hoty;
} else {
roi_y = mixer->cursor_hoty - start_y;
start_y = 0;
}
roi_w = min(xres - start_x, img->width - roi_x);
roi_h = min(yres - start_y, img->height - roi_y);
roi_size = (roi_h << 16) | roi_w;
if (cursor->set & FB_CUR_SETPOS) {
mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_XY,
(roi_y << 16) | roi_x);
mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_START_XY,
(start_y << 16) | start_x);
mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_SIZE, roi_size);
}
if (cursor->set & FB_CUR_SETIMAGE) {
int calpha_en, transp_en, alpha, size;
u32 cursor_addr;
ret = copy_from_user(mfd->cursor_buf, img->data,
img->width * img->height * 4);
if (ret) {
pr_err("copy_from_user error. rc=%d\n", ret);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
return ret;
}
if (is_mdss_iommu_attached()) {
cursor_addr = mfd->cursor_buf_iova;
} else {
if (MDSS_LPAE_CHECK(mfd->cursor_buf_phys)) {
pr_err("can't access phy mem >4GB w/o iommu\n");
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
return -ERANGE;
}
cursor_addr = mfd->cursor_buf_phys;
}
if (img->bg_color == 0xffffffff)
transp_en = 0;
else
transp_en = 1;
alpha = (img->fg_color & 0xff000000) >> 24;
if (alpha)
calpha_en = 0x0; /* xrgb */
else
calpha_en = 0x2; /* argb */
size = (img->height << 16) | img->width;
mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_IMG_SIZE, size);
mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_SIZE, roi_size);
mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_STRIDE,
img->width * 4);
mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BASE_ADDR,
cursor_addr);
wmb();
blendcfg &= ~0x1;
blendcfg |= (transp_en << 3) | (calpha_en << 1);
mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG,
blendcfg);
if (calpha_en)
mdp_mixer_write(mixer,
MDSS_MDP_REG_LM_CURSOR_BLEND_PARAM,
alpha);
if (transp_en) {
mdp_mixer_write(mixer,
MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_LOW0,
((img->bg_color & 0xff00) << 8) |
(img->bg_color & 0xff));
mdp_mixer_write(mixer,
MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_LOW1,
((img->bg_color & 0xff0000) >> 16));
mdp_mixer_write(mixer,
MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_HIGH0,
((img->bg_color & 0xff00) << 8) |
(img->bg_color & 0xff));
mdp_mixer_write(mixer,
MDSS_MDP_REG_LM_CURSOR_BLEND_TRANSP_HIGH1,
((img->bg_color & 0xff0000) >> 16));
}
mixer->cursor_hotx = 0;
mixer->cursor_hoty = 0;
}
if (!cursor->enable != !(blendcfg & 0x1)) {
if (cursor->enable) {
pr_debug("enable hw cursor on mixer=%d\n", mixer->num);
blendcfg |= 0x1;
} else {
pr_debug("disable hw cursor on mixer=%d\n", mixer->num);
blendcfg &= ~0x1;
}
mdp_mixer_write(mixer, MDSS_MDP_REG_LM_CURSOR_BLEND_CONFIG,
blendcfg);
mixer->cursor_enabled = cursor->enable;
mixer->params_changed++;
}
mixer->ctl->flush_bits |= BIT(6) << mixer->num;
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
return 0;
}
static int mdss_bl_scale_config(struct msm_fb_data_type *mfd,
struct mdp_bl_scale_data *data)
{
int ret = 0;
int curr_bl;
mutex_lock(&mfd->bl_lock);
curr_bl = mfd->bl_level;
mfd->bl_scale = data->scale;
mfd->bl_min_lvl = data->min_lvl;
pr_debug("update scale = %d, min_lvl = %d\n", mfd->bl_scale,
mfd->bl_min_lvl);
/* update current backlight to use new scaling */
mdss_fb_set_backlight(mfd, curr_bl);
mutex_unlock(&mfd->bl_lock);
return ret;
}
static int mdss_mdp_pp_ioctl(struct msm_fb_data_type *mfd,
void __user *argp)
{
int ret;
struct msmfb_mdp_pp mdp_pp;
u32 copyback = 0;
u32 copy_from_kernel = 0;
if (mfd->panel_info->partial_update_enabled) {
pr_err("Partical update feature is enabled.");
return -EPERM;
}
ret = copy_from_user(&mdp_pp, argp, sizeof(mdp_pp));
if (ret)
return ret;
/* Support only MDP register read/write and
exit_dcm in DCM state */
if (mfd->dcm_state == DCM_ENTER &&
(mdp_pp.op != mdp_op_calib_buffer &&
mdp_pp.op != mdp_op_calib_dcm_state))
return -EPERM;
switch (mdp_pp.op) {
case mdp_op_pa_cfg:
ret = mdss_mdp_pa_config(&mdp_pp.data.pa_cfg_data,
&copyback);
break;
case mdp_op_pa_v2_cfg:
ret = mdss_mdp_pa_v2_config(&mdp_pp.data.pa_v2_cfg_data,
&copyback);
break;
case mdp_op_pcc_cfg:
ret = mdss_mdp_pcc_config(&mdp_pp.data.pcc_cfg_data,
&copyback);
break;
case mdp_op_lut_cfg:
switch (mdp_pp.data.lut_cfg_data.lut_type) {
case mdp_lut_igc:
ret = mdss_mdp_igc_lut_config(
(struct mdp_igc_lut_data *)
&mdp_pp.data.lut_cfg_data.data,
&copyback, copy_from_kernel);
break;
case mdp_lut_pgc:
ret = mdss_mdp_argc_config(
&mdp_pp.data.lut_cfg_data.data.pgc_lut_data,
&copyback);
break;
case mdp_lut_hist:
ret = mdss_mdp_hist_lut_config(
(struct mdp_hist_lut_data *)
&mdp_pp.data.lut_cfg_data.data, ©back);
break;
default:
ret = -ENOTSUPP;
break;
}
break;
case mdp_op_dither_cfg:
ret = mdss_mdp_dither_config(
&mdp_pp.data.dither_cfg_data,
&copyback);
break;
case mdp_op_gamut_cfg:
ret = mdss_mdp_gamut_config(
&mdp_pp.data.gamut_cfg_data,
&copyback);
break;
case mdp_bl_scale_cfg:
ret = mdss_bl_scale_config(mfd, (struct mdp_bl_scale_data *)
&mdp_pp.data.bl_scale_data);
break;
case mdp_op_ad_cfg:
ret = mdss_mdp_ad_config(mfd, &mdp_pp.data.ad_init_cfg);
break;
case mdp_op_ad_input:
ret = mdss_mdp_ad_input(mfd, &mdp_pp.data.ad_input, 1);
if (ret > 0) {
ret = 0;
copyback = 1;
}
break;
case mdp_op_calib_cfg:
ret = mdss_mdp_calib_config((struct mdp_calib_config_data *)
&mdp_pp.data.calib_cfg, ©back);
break;
case mdp_op_calib_mode:
ret = mdss_mdp_calib_mode(mfd, &mdp_pp.data.mdss_calib_cfg);
break;
case mdp_op_calib_buffer:
ret = mdss_mdp_calib_config_buffer(
(struct mdp_calib_config_buffer *)
&mdp_pp.data.calib_buffer, ©back);
break;
case mdp_op_calib_dcm_state:
ret = mdss_fb_dcm(mfd, mdp_pp.data.calib_dcm.dcm_state);
break;
default:
pr_err("Unsupported request to MDP_PP IOCTL. %d = op\n",
mdp_pp.op);
ret = -EINVAL;
break;
}
if ((ret == 0) && copyback)
ret = copy_to_user(argp, &mdp_pp, sizeof(struct msmfb_mdp_pp));
return ret;
}
static int mdss_mdp_histo_ioctl(struct msm_fb_data_type *mfd, u32 cmd,
void __user *argp)
{
int ret = -ENOSYS;
struct mdp_histogram_data hist;
struct mdp_histogram_start_req hist_req;
u32 block;
u32 pp_bus_handle;
static int req = -1;
if (mfd->panel_info->partial_update_enabled) {
pr_err("Partical update feature is enabled.");
return -EPERM;
}
switch (cmd) {
case MSMFB_HISTOGRAM_START:
if (!mfd->panel_power_on)
return -EPERM;
pp_bus_handle = mdss_mdp_get_mdata()->pp_bus_hdl;
req = msm_bus_scale_client_update_request(pp_bus_handle,
PP_CLK_CFG_ON);
if (req)
pr_err("Updated pp_bus_scale failed, ret = %d", req);
ret = copy_from_user(&hist_req, argp, sizeof(hist_req));
if (ret)
return ret;
ret = mdss_mdp_hist_start(&hist_req);
break;
case MSMFB_HISTOGRAM_STOP:
ret = copy_from_user(&block, argp, sizeof(int));
if (ret)
return ret;
ret = mdss_mdp_hist_stop(block);
if (ret)
return ret;
if (!req) {
pp_bus_handle = mdss_mdp_get_mdata()->pp_bus_hdl;
req = msm_bus_scale_client_update_request(pp_bus_handle,
PP_CLK_CFG_OFF);
if (req)
pr_err("Updated pp_bus_scale failed, ret = %d",
req);
}
break;
case MSMFB_HISTOGRAM:
if (!mfd->panel_power_on)
return -EPERM;
ret = copy_from_user(&hist, argp, sizeof(hist));
if (ret)
return ret;
ret = mdss_mdp_hist_collect(&hist);
if (!ret)
ret = copy_to_user(argp, &hist, sizeof(hist));
break;
default:
break;
}
return ret;
}
static int mdss_fb_set_metadata(struct msm_fb_data_type *mfd,
struct msmfb_metadata *metadata)
{
struct mdss_data_type *mdata = mfd_to_mdata(mfd);
struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
int ret = 0;
if (!ctl)
return -EPERM;
switch (metadata->op) {
case metadata_op_vic:
if (mfd->panel_info)
mfd->panel_info->vic =
metadata->data.video_info_code;
else
ret = -EINVAL;
break;
case metadata_op_crc:
if (!mfd->panel_power_on)
return -EPERM;
ret = mdss_misr_set(mdata, &metadata->data.misr_request, ctl);
break;
case metadata_op_wb_format:
ret = mdss_mdp_wb_set_format(mfd,
metadata->data.mixer_cfg.writeback_format);
break;
case metadata_op_wb_secure:
ret = mdss_mdp_wb_set_secure(mfd, metadata->data.secure_en);
break;
default:
pr_warn("unsupported request to MDP META IOCTL\n");
ret = -EINVAL;
break;
}
return ret;
}
static int mdss_fb_get_hw_caps(struct msm_fb_data_type *mfd,
struct mdss_hw_caps *caps)
{
struct mdss_data_type *mdata = mfd_to_mdata(mfd);
caps->mdp_rev = mdata->mdp_rev;
caps->vig_pipes = mdata->nvig_pipes;
caps->rgb_pipes = mdata->nrgb_pipes;
caps->dma_pipes = mdata->ndma_pipes;
if (mdata->has_bwc)
caps->features |= MDP_BWC_EN;
if (mdata->has_decimation)
caps->features |= MDP_DECIMATION_EN;
caps->max_smp_cnt = mdss_res->smp_mb_cnt;
caps->smp_per_pipe = mdata->smp_mb_per_pipe;
return 0;
}
static int mdss_fb_get_metadata(struct msm_fb_data_type *mfd,
struct msmfb_metadata *metadata)
{
struct mdss_data_type *mdata = mfd_to_mdata(mfd);
struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
int ret = 0;
if (!ctl)
return -EPERM;
switch (metadata->op) {
case metadata_op_frame_rate:
metadata->data.panel_frame_rate =
mdss_panel_get_framerate(mfd->panel_info);
break;
case metadata_op_get_caps:
ret = mdss_fb_get_hw_caps(mfd, &metadata->data.caps);
break;
case metadata_op_crc:
if (!mfd->panel_power_on)
return -EPERM;
ret = mdss_misr_get(mdata, &metadata->data.misr_request, ctl);
break;
case metadata_op_wb_format:
ret = mdss_mdp_wb_get_format(mfd, &metadata->data.mixer_cfg);
break;
case metadata_op_wb_secure:
ret = mdss_mdp_wb_get_secure(mfd, &metadata->data.secure_en);
break;
default:
pr_warn("Unsupported request to MDP META IOCTL.\n");
ret = -EINVAL;
break;
}
return ret;
}
/*
* This routine serves two purposes.
* 1. Propagate the overlay_id returned for the sorted list back to the
* original list that is handed back to user-space.
* 2. In case of error processing sorted list, map the error overlay's
* index to original list because user-space is not aware of the sorted list.
*/
static int __mdss_overlay_map(struct mdp_overlay *ovs,
struct mdp_overlay *op_ovs, int num_ovs, int num_ovs_processed)
{
int i = num_ovs_processed, j, k;
for (j = 0; j < num_ovs; j++) {
for (k = 0; k < num_ovs; k++) {
if ((ovs[j].dst_rect.x == op_ovs[k].dst_rect.x) &&
(ovs[j].z_order == op_ovs[k].z_order)) {
op_ovs[k].id = ovs[j].id;
op_ovs[k].priority = ovs[j].priority;
break;
}
}
if ((i != num_ovs) && (i != j) &&
(ovs[j].dst_rect.x == op_ovs[k].dst_rect.x) &&
(ovs[i].z_order == op_ovs[k].z_order)) {
pr_debug("mapped %d->%d\n", i, j);
i = j;
}
}
return i;
}
static inline void __overlay_swap_func(void *a, void *b, int size)
{
swap(*(struct mdp_overlay *)a, *(struct mdp_overlay *)b);
}
static inline int __zorder_dstx_cmp_func(const void *a, const void *b)
{
int rc = 0;
const struct mdp_overlay *ov1 = a;
const struct mdp_overlay *ov2 = b;
if (ov1->z_order < ov2->z_order)
rc = -1;
else if ((ov1->z_order == ov2->z_order) &&
(ov1->dst_rect.x < ov2->dst_rect.x))
rc = -1;
return rc;
}
/*
* first sort list of overlays based on z_order and then within
* same z_order sort them on dst_x.
*/
static int __mdss_overlay_src_split_sort(struct msm_fb_data_type *mfd,
struct mdp_overlay *ovs, int num_ovs)
{
int i;
int left_lm_zo_cnt[MDSS_MDP_MAX_STAGE] = {0};
int right_lm_zo_cnt[MDSS_MDP_MAX_STAGE] = {0};
u32 left_lm_w = left_lm_w_from_mfd(mfd);
sort(ovs, num_ovs, sizeof(struct mdp_overlay), __zorder_dstx_cmp_func,
__overlay_swap_func);
for (i = 0; i < num_ovs; i++) {
if (ovs[i].dst_rect.x < left_lm_w) {
if (left_lm_zo_cnt[ovs[i].z_order] == 2) {
pr_err("more than 2 ov @ stage%d on left lm\n",
ovs[i].z_order);
return -EINVAL;
}
left_lm_zo_cnt[ovs[i].z_order]++;
} else {
if (right_lm_zo_cnt[ovs[i].z_order] == 2) {
pr_err("more than 2 ov @ stage%d on right lm\n",
ovs[i].z_order);
return -EINVAL;
}
right_lm_zo_cnt[ovs[i].z_order]++;
}
}
return 0;
}
static int __handle_overlay_prepare(struct msm_fb_data_type *mfd,
struct mdp_overlay_list *ovlist, struct mdp_overlay *ip_ovs)
{
int ret, i;
int new_reqs = 0, left_cnt = 0, right_cnt = 0;
int num_ovs = ovlist->num_overlays;
u32 left_lm_w = left_lm_w_from_mfd(mfd);
u32 left_lm_ovs = 0, right_lm_ovs = 0;
bool is_single_layer = false;
struct mdss_data_type *mdata = mfd_to_mdata(mfd);
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
struct mdp_overlay *sorted_ovs = NULL;
struct mdp_overlay *req, *prev_req;
struct mdss_mdp_pipe *pipe, *left_blend_pipe;
struct mdss_mdp_pipe *right_plist[MAX_PIPES_PER_LM] = { 0 };
struct mdss_mdp_pipe *left_plist[MAX_PIPES_PER_LM] = { 0 };
bool sort_needed = mdata->has_src_split && (num_ovs > 1);
ret = mutex_lock_interruptible(&mdp5_data->ov_lock);
if (ret)
return ret;
if (!mfd->panel_power_on) {
mutex_unlock(&mdp5_data->ov_lock);
return -EPERM;
}
if (sort_needed) {
sorted_ovs = kzalloc(num_ovs * sizeof(*ip_ovs), GFP_KERNEL);
if (!sorted_ovs) {
pr_err("error allocating ovlist mem\n");
mutex_unlock(&mdp5_data->ov_lock);
return -ENOMEM;
}
memcpy(sorted_ovs, ip_ovs, num_ovs * sizeof(*ip_ovs));
ret = __mdss_overlay_src_split_sort(mfd, sorted_ovs, num_ovs);
if (ret) {
pr_err("src_split_sort failed. ret=%d\n", ret);
kfree(sorted_ovs);
return ret;
}
}
pr_debug("prepare fb%d num_ovs=%d\n", mfd->index, num_ovs);
for (i = 0; i < num_ovs; i++) {
if (IS_RIGHT_MIXER_OV(ip_ovs[i].flags, ip_ovs[i].dst_rect.x,
left_lm_w))
right_lm_ovs++;
else
left_lm_ovs++;
if ((left_lm_ovs > 1) && (right_lm_ovs > 1))
break;
}
for (i = 0; i < num_ovs; i++) {
left_blend_pipe = NULL;
if (sort_needed) {
req = &sorted_ovs[i];
prev_req = (i > 0) ? &sorted_ovs[i - 1] : NULL;
/*
* check if current overlay is at same z_order as
* previous one and qualifies as a right blend. If yes,
* pass a pointer to the pipe representing previous
* overlay or in other terms left blend overlay.
*/
if (prev_req && (prev_req->z_order == req->z_order) &&
is_ov_right_blend(&prev_req->dst_rect,
&req->dst_rect, left_lm_w)) {
left_blend_pipe = pipe;
}
} else {
req = &ip_ovs[i];
}
if (IS_RIGHT_MIXER_OV(ip_ovs[i].flags, ip_ovs[i].dst_rect.x,
left_lm_w))
is_single_layer = (right_lm_ovs == 1);
else
is_single_layer = (left_lm_ovs == 1);
req->z_order += MDSS_MDP_STAGE_0;
ret = mdss_mdp_overlay_pipe_setup(mfd, req, &pipe,
left_blend_pipe, is_single_layer);
req->z_order -= MDSS_MDP_STAGE_0;
if (IS_ERR_VALUE(ret))
goto validate_exit;
pr_debug("pnum:%d id:0x%x flags:0x%x dst_x:%d l_blend_pnum%d\n",
pipe->num, req->id, req->flags, req->dst_rect.x,
left_blend_pipe ? left_blend_pipe->num : -1);
/* keep track of the new overlays to unset in case of errors */
if (pipe->play_cnt == 0)
new_reqs |= pipe->ndx;
if (IS_RIGHT_MIXER_OV(pipe->flags, pipe->dst.x, left_lm_w)) {
if (right_cnt >= MAX_PIPES_PER_LM) {
pr_err("too many pipes on right mixer\n");
ret = -EINVAL;
goto validate_exit;
}
right_plist[right_cnt] = pipe;
right_cnt++;
} else {
if (left_cnt >= MAX_PIPES_PER_LM) {
pr_err("too many pipes on left mixer\n");
ret = -EINVAL;
goto validate_exit;
}
left_plist[left_cnt] = pipe;
left_cnt++;
}
}
ret = mdss_mdp_perf_bw_check(mdp5_data->ctl, left_plist, left_cnt,
right_plist, right_cnt);
validate_exit:
if (sort_needed)
ovlist->processed_overlays =
__mdss_overlay_map(sorted_ovs, ip_ovs, num_ovs, i);
else
ovlist->processed_overlays = i;
if (IS_ERR_VALUE(ret)) {
pr_debug("err=%d total_ovs:%d processed:%d left:%d right:%d\n",
ret, num_ovs, ovlist->processed_overlays, left_lm_ovs,
right_lm_ovs);
mdss_mdp_overlay_release(mfd, new_reqs);
}
mutex_unlock(&mdp5_data->ov_lock);
kfree(sorted_ovs);
return ret;
}
static int __handle_ioctl_overlay_prepare(struct msm_fb_data_type *mfd,
void __user *argp)
{
struct mdp_overlay_list ovlist;
struct mdp_overlay *req_list[OVERLAY_MAX];
struct mdp_overlay *overlays;
int i, ret;
if (copy_from_user(&ovlist, argp, sizeof(ovlist)))
return -EFAULT;
if (ovlist.num_overlays >= OVERLAY_MAX) {
pr_err("Number of overlays exceeds max\n");
return -EINVAL;
}
overlays = kmalloc(ovlist.num_overlays * sizeof(*overlays), GFP_KERNEL);
if (!overlays) {
pr_err("Unable to allocate memory for overlays\n");
return -ENOMEM;
}
if (copy_from_user(req_list, ovlist.overlay_list,
sizeof(struct mdp_overlay*) * ovlist.num_overlays)) {
ret = -EFAULT;
goto validate_exit;
}
for (i = 0; i < ovlist.num_overlays; i++) {
if (copy_from_user(overlays + i, req_list[i],
sizeof(struct mdp_overlay))) {
ret = -EFAULT;
goto validate_exit;
}
}
ret = __handle_overlay_prepare(mfd, &ovlist, overlays);
if (!IS_ERR_VALUE(ret)) {
for (i = 0; i < ovlist.num_overlays; i++) {
if (copy_to_user(req_list[i], overlays + i,
sizeof(struct mdp_overlay))) {
ret = -EFAULT;
goto validate_exit;
}
}
}
if (copy_to_user(argp, &ovlist, sizeof(ovlist)))
ret = -EFAULT;
validate_exit:
kfree(overlays);
return ret;
}
static int mdss_mdp_overlay_ioctl_handler(struct msm_fb_data_type *mfd,
u32 cmd, void __user *argp)
{
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
struct mdp_overlay *req = NULL;
int val, ret = -ENOSYS;
struct msmfb_metadata metadata;
struct mdss_panel_data *pdata;
switch (cmd) {
case MSMFB_MDP_PP:
ret = mdss_mdp_pp_ioctl(mfd, argp);
break;
case MSMFB_HISTOGRAM_START:
case MSMFB_HISTOGRAM_STOP:
case MSMFB_HISTOGRAM:
ret = mdss_mdp_histo_ioctl(mfd, cmd, argp);
break;
case MSMFB_OVERLAY_GET:
req = kmalloc(sizeof(struct mdp_overlay), GFP_KERNEL);
if (!req)
return -ENOMEM;
ret = copy_from_user(req, argp, sizeof(*req));
if (!ret) {
ret = mdss_mdp_overlay_get(mfd, req);
if (!IS_ERR_VALUE(ret))
ret = copy_to_user(argp, req, sizeof(*req));
}
if (ret)
pr_debug("OVERLAY_GET failed (%d)\n", ret);
break;
case MSMFB_OVERLAY_SET:
req = kmalloc(sizeof(struct mdp_overlay), GFP_KERNEL);
if (!req)
return -ENOMEM;
ret = copy_from_user(req, argp, sizeof(*req));
if (!ret) {
ret = mdss_mdp_overlay_set(mfd, req);
if (!IS_ERR_VALUE(ret))
ret = copy_to_user(argp, req, sizeof(*req));
}
if (ret)
pr_debug("OVERLAY_SET failed (%d)\n", ret);
break;
case MSMFB_OVERLAY_UNSET:
if (!copy_from_user(&val, argp, sizeof(val)))
ret = mdss_mdp_overlay_unset(mfd, val);
break;
case MSMFB_OVERLAY_PLAY_ENABLE:
if (!copy_from_user(&val, argp, sizeof(val))) {
mdp5_data->overlay_play_enable = val;
ret = 0;
} else {
pr_err("OVERLAY_PLAY_ENABLE failed (%d)\n", ret);
ret = -EFAULT;
}
break;
case MSMFB_OVERLAY_PLAY:
if (mdp5_data->overlay_play_enable) {
struct msmfb_overlay_data data;
ret = copy_from_user(&data, argp, sizeof(data));
if (!ret)
ret = mdss_mdp_overlay_play(mfd, &data);
if (ret)
pr_debug("OVERLAY_PLAY failed (%d)\n", ret);
} else {
ret = 0;
}
break;
case MSMFB_OVERLAY_PLAY_WAIT:
if (mdp5_data->overlay_play_enable) {
struct msmfb_overlay_data data;
ret = copy_from_user(&data, argp, sizeof(data));
if (!ret)
ret = mdss_mdp_overlay_play_wait(mfd, &data);
if (ret)
pr_err("OVERLAY_PLAY_WAIT failed (%d)\n", ret);
} else {
ret = 0;
}
break;
case MSMFB_VSYNC_CTRL:
case MSMFB_OVERLAY_VSYNC_CTRL:
if (!copy_from_user(&val, argp, sizeof(val))) {
ret = mdss_mdp_overlay_vsync_ctrl(mfd, val);
} else {
pr_err("MSMFB_OVERLAY_VSYNC_CTRL failed (%d)\n", ret);
ret = -EFAULT;
}
break;
case MSMFB_OVERLAY_COMMIT:
mdss_fb_wait_for_fence(&(mfd->mdp_sync_pt_data));
ret = mfd->mdp.kickoff_fnc(mfd, NULL);
break;
case MSMFB_METADATA_SET:
ret = copy_from_user(&metadata, argp, sizeof(metadata));
if (ret)
return ret;
ret = mdss_fb_set_metadata(mfd, &metadata);
break;
case MSMFB_METADATA_GET:
ret = copy_from_user(&metadata, argp, sizeof(metadata));
if (ret)
return ret;
ret = mdss_fb_get_metadata(mfd, &metadata);
if (!ret)
ret = copy_to_user(argp, &metadata, sizeof(metadata));
break;
case MSMFB_OVERLAY_PREPARE:
ret = __handle_ioctl_overlay_prepare(mfd, argp);
break;
default:
if (mfd->panel.type == WRITEBACK_PANEL)
ret = mdss_mdp_wb_ioctl_handler(mfd, cmd, argp);
else if (mfd->panel.type == MIPI_VIDEO_PANEL ||
mfd->panel.type == MIPI_CMD_PANEL) {
pdata = dev_get_platdata(&mfd->pdev->dev);
if (!pdata)
return -EFAULT;
mutex_lock(&mdp5_data->ov_lock);
ret = mdss_dsi_ioctl_handler(pdata, cmd, argp);
mutex_unlock(&mdp5_data->ov_lock);
}
break;
}
kfree(req);
return ret;
}
/**
* __mdss_mdp_overlay_ctl_init - Helper function to initialize control structure
* @mfd: msm frame buffer data structure associated with the fb device.
*
* Helper function that allocates and initializes the mdp control structure
* for a frame buffer device. Whenever applicable, this function will also set up
* the control for the split display path as well.
*
* Return: pointer to the newly allocated control structure.
*/
static struct mdss_mdp_ctl *__mdss_mdp_overlay_ctl_init(
struct msm_fb_data_type *mfd)
{
int rc = 0;
struct mdss_mdp_ctl *ctl;
struct mdss_panel_data *pdata;
if (!mfd)
return ERR_PTR(-EINVAL);
pdata = dev_get_platdata(&mfd->pdev->dev);
if (!pdata) {
pr_err("no panel connected for fb%d\n", mfd->index);
rc = -ENODEV;
goto error;
}
ctl = mdss_mdp_ctl_init(pdata, mfd);
if (IS_ERR_OR_NULL(ctl)) {
pr_err("Unable to initialize ctl for fb%d\n",
mfd->index);
rc = PTR_ERR(ctl);
goto error;
}
ctl->vsync_handler.vsync_handler =
mdss_mdp_overlay_handle_vsync;
ctl->vsync_handler.cmd_post_flush = false;
if (mfd->split_display && pdata->next) {
/* enable split display */
rc = mdss_mdp_ctl_split_display_setup(ctl, pdata->next);
if (rc) {
mdss_mdp_ctl_destroy(ctl);
goto error;
}
}
error:
if (rc)
return ERR_PTR(rc);
else
return ctl;
}
static int mdss_mdp_overlay_on(struct msm_fb_data_type *mfd)
{
int rc;
struct mdss_overlay_private *mdp5_data;
struct mdss_data_type *mdata = mfd_to_mdata(mfd);
struct mdss_mdp_ctl *ctl = NULL;
if (!mfd)
return -ENODEV;
if (mfd->key != MFD_KEY)
return -EINVAL;
mdp5_data = mfd_to_mdp5_data(mfd);
if (!mdp5_data)
return -EINVAL;
if (!mdp5_data->ctl) {
ctl = __mdss_mdp_overlay_ctl_init(mfd);
if (IS_ERR_OR_NULL(ctl))
return PTR_ERR(ctl);
mdp5_data->ctl = ctl;
}
mdss_mdp_footswitch_ctrl(mdata, true);
if (!mfd->panel_info->cont_splash_enabled &&
(mfd->panel_info->type != DTV_PANEL)) {
rc = mdss_mdp_overlay_start(mfd);
if (rc)
goto error_pm;
if (mfd->panel_info->type != WRITEBACK_PANEL) {
if (mfd->quickdraw_in_progress) {
/* In quickdraw, only turn the panel on, don't
kickoff so that we preserve the panel's
contents */
ctl = mfd_to_ctl(mfd);
if (ctl->panel_on_locked) {
mutex_lock(&ctl->lock);
ctl->panel_on_locked(ctl);
mutex_unlock(&ctl->lock);
}
} else
rc = mdss_mdp_overlay_kickoff(mfd, NULL);
}
} else {
rc = mdss_mdp_ctl_setup(mdp5_data->ctl);
if (rc)
goto error_pm;
}
if (IS_ERR_VALUE(rc)) {
pr_err("Failed to turn on fb%d\n", mfd->index);
mdss_mdp_overlay_off(mfd);
goto end;
}
error_pm:
if (rc)
mdss_mdp_footswitch_ctrl(mdata, false);
end:
return rc;
}
static int mdss_mdp_overlay_off(struct msm_fb_data_type *mfd)
{
int rc;
struct mdss_overlay_private *mdp5_data;
struct mdss_mdp_mixer *mixer;
struct mdss_data_type *mdata;
int need_cleanup;
if (!mfd)
return -ENODEV;
if (mfd->key != MFD_KEY)
return -EINVAL;
mdp5_data = mfd_to_mdp5_data(mfd);
mdata = mfd_to_mdata(mfd);
if (!mdp5_data || !mdp5_data->ctl) {
pr_err("ctl not initialized\n");
return -ENODEV;
}
if (!mdp5_data->ctl->power_on)
return 0;
if (mdp5_data->mdata->idle_pc) {
mdss_mdp_footswitch_ctrl_idle_pc(1, &mfd->pdev->dev);
mdss_mdp_ctl_restore(mdp5_data->ctl);
}
mutex_lock(&mdp5_data->ov_lock);
mdss_mdp_overlay_free_fb_pipe(mfd);
mixer = mdss_mdp_mixer_get(mdp5_data->ctl, MDSS_MDP_MIXER_MUX_LEFT);
if (mixer)
mixer->cursor_enabled = 0;
mixer = mdss_mdp_mixer_get(mdp5_data->ctl, MDSS_MDP_MIXER_MUX_RIGHT);
if (mixer)
mixer->cursor_enabled = 0;
mutex_lock(&mdp5_data->list_lock);
need_cleanup = !list_empty(&mdp5_data->pipes_cleanup);
mutex_unlock(&mdp5_data->list_lock);
mutex_unlock(&mdp5_data->ov_lock);
if (need_cleanup) {
pr_debug("cleaning up pipes on fb%d\n", mfd->index);
mdss_mdp_overlay_kickoff(mfd, NULL);
}
/*
* If retire fences are still active, wait one vsync period for the
* retire fence to be updated.
* As a last resort, signal the timeline if the vsync doesn't arrive.
*/
if (mdp5_data->retire_cnt) {
u32 fps = mdss_panel_get_framerate(mfd->panel_info);
u32 vsync_time = 1000 / (fps ? : DEFAULT_FRAME_RATE);
msleep(vsync_time);
__vsync_retire_signal(mfd, mdp5_data->retire_cnt);
}
mutex_lock(&mdp5_data->ov_lock);
rc = mdss_mdp_ctl_stop(mdp5_data->ctl);
if (rc == 0) {
mutex_lock(&mdp5_data->list_lock);
__mdss_mdp_overlay_free_list_purge(mfd);
mutex_unlock(&mdp5_data->list_lock);
mdss_mdp_ctl_notifier_unregister(mdp5_data->ctl,
&mfd->mdp_sync_pt_data.notifier);
if (!mfd->ref_cnt) {
mdp5_data->borderfill_enable = false;
mdss_mdp_ctl_destroy(mdp5_data->ctl);
mdp5_data->ctl = NULL;
}
if (atomic_dec_return(&ov_active_panels) == 0)
mdss_mdp_rotator_release_all();
mdss_mdp_footswitch_ctrl(mdata, false);
}
mutex_unlock(&mdp5_data->ov_lock);
return rc;
}
int mdss_panel_register_done(struct mdss_panel_data *pdata)
{
if (pdata->panel_info.cont_splash_enabled)
mdss_mdp_footswitch_ctrl_splash(1);
return 0;
}
static int __mdss_mdp_ctl_handoff(struct mdss_mdp_ctl *ctl,
struct mdss_data_type *mdata)
{
int rc = 0;
int i, j;
u32 mixercfg;
struct mdss_mdp_pipe *pipe = NULL;
if (!ctl || !mdata)
return -EINVAL;
for (i = 0; i < mdata->nmixers_intf; i++) {
mixercfg = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_LAYER(i));
pr_debug("for lm%d mixercfg = 0x%09x\n", i, mixercfg);
j = MDSS_MDP_SSPP_VIG0;
for (; j < MDSS_MDP_MAX_SSPP && mixercfg; j++) {
u32 cfg = j * 3;
if ((j == MDSS_MDP_SSPP_VIG3) ||
(j == MDSS_MDP_SSPP_RGB3)) {
/* Add 2 to account for Cursor & Border bits */
cfg += 2;
}
if (mixercfg & (0x7 << cfg)) {
pr_debug("Pipe %d staged\n", j);
pipe = mdss_mdp_pipe_search(mdata, BIT(j));
if (!pipe) {
pr_warn("Invalid pipe %d staged\n", j);
continue;
}
rc = mdss_mdp_pipe_handoff(pipe);
if (rc) {
pr_err("Failed to handoff pipe%d\n",
pipe->num);
goto exit;
}
rc = mdss_mdp_mixer_handoff(ctl, i, pipe);
if (rc) {
pr_err("failed to handoff mix%d\n", i);
goto exit;
}
}
}
}
exit:
return rc;
}
/**
* mdss_mdp_overlay_handoff() - Read MDP registers to handoff an active ctl path
* @mfd: Msm frame buffer structure associated with the fb device.
*
* This function populates the MDP software structures with the current state of
* the MDP hardware to handoff any active control path for the framebuffer
* device. This is needed to identify any ctl, mixers and pipes being set up by
* the bootloader to display the splash screen when the continuous splash screen
* feature is enabled in kernel.
*/
static int mdss_mdp_overlay_handoff(struct msm_fb_data_type *mfd)
{
int rc = 0;
struct mdss_data_type *mdata = mfd_to_mdata(mfd);
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
struct mdss_mdp_ctl *ctl = NULL;
struct mdss_mdp_ctl *sctl = NULL;
if (!mdp5_data->ctl) {
ctl = __mdss_mdp_overlay_ctl_init(mfd);
if (IS_ERR_OR_NULL(ctl)) {
rc = PTR_ERR(ctl);
goto error;
}
mdp5_data->ctl = ctl;
}
/*
* The vsync interrupt needs to be on during continuous splash; this
* is to initialize the necessary ctl members here.
*/
rc = mdss_mdp_ctl_start(ctl, true);
if (rc) {
pr_err("Failed to initialize ctl\n");
goto error;
}
ctl->clk_rate = mdss_mdp_get_clk_rate(MDSS_CLK_MDP_SRC);
pr_debug("Set the ctl clock rate to %d Hz\n", ctl->clk_rate);
rc = __mdss_mdp_ctl_handoff(ctl, mdata);
if (rc) {
pr_err("primary ctl handoff failed. rc=%d\n", rc);
goto error;
}
if (mfd->split_display) {
sctl = mdss_mdp_get_split_ctl(ctl);
if (!sctl) {
pr_err("cannot get secondary ctl. fail the handoff\n");
rc = -EPERM;
goto error;
}
rc = __mdss_mdp_ctl_handoff(sctl, mdata);
if (rc) {
pr_err("secondary ctl handoff failed. rc=%d\n", rc);
goto error;
}
}
rc = mdss_mdp_smp_handoff(mdata);
if (rc)
pr_err("Failed to handoff smps\n");
mdp5_data->handoff = true;
error:
if (rc && ctl) {
mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_RGB);
mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_VIG);
mdss_mdp_handoff_cleanup_pipes(mfd, MDSS_MDP_PIPE_TYPE_DMA);
mdss_mdp_ctl_destroy(ctl);
mdp5_data->ctl = NULL;
mdp5_data->handoff = false;
}
return rc;
}
static void __vsync_retire_handle_vsync(struct mdss_mdp_ctl *ctl, ktime_t t)
{
struct msm_fb_data_type *mfd = ctl->mfd;
struct mdss_overlay_private *mdp5_data;
if (!mfd || !mfd->mdp.private1) {
pr_warn("Invalid handle for vsync\n");
return;
}
mdp5_data = mfd_to_mdp5_data(mfd);
schedule_work(&mdp5_data->retire_work);
}
static void __vsync_retire_work_handler(struct work_struct *work)
{
struct mdss_overlay_private *mdp5_data =
container_of(work, typeof(*mdp5_data), retire_work);
if (!mdp5_data->ctl || !mdp5_data->ctl->mfd)
return;
if (!mdp5_data->ctl->remove_vsync_handler)
return;
__vsync_retire_signal(mdp5_data->ctl->mfd, 1);
}
static void __vsync_retire_signal(struct msm_fb_data_type *mfd, int val)
{
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
if (mdp5_data->retire_cnt > 0) {
sw_sync_timeline_inc(mdp5_data->vsync_timeline, val);
mdp5_data->retire_cnt -= min(val, mdp5_data->retire_cnt);
if (mdp5_data->retire_cnt == 0) {
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false);
mdp5_data->ctl->remove_vsync_handler(mdp5_data->ctl,
&mdp5_data->vsync_retire_handler);
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false);
}
}
mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
}
static struct sync_fence *
__vsync_retire_get_fence(struct msm_sync_pt_data *sync_pt_data)
{
struct msm_fb_data_type *mfd;
struct mdss_overlay_private *mdp5_data;
struct mdss_mdp_ctl *ctl;
int value;
mfd = container_of(sync_pt_data, typeof(*mfd), mdp_sync_pt_data);
mdp5_data = mfd_to_mdp5_data(mfd);
if (!mdp5_data || !mdp5_data->ctl)
return ERR_PTR(-ENODEV);
ctl = mdp5_data->ctl;
if (!ctl->add_vsync_handler)
return ERR_PTR(-EOPNOTSUPP);
if (!ctl->power_on) {
pr_debug("fb%d vsync pending first update\n", mfd->index);
return ERR_PTR(-EPERM);
}
value = mdp5_data->vsync_timeline->value + 1 + mdp5_data->retire_cnt;
mdp5_data->retire_cnt++;
return mdss_fb_sync_get_fence(mdp5_data->vsync_timeline,
"mdp-retire", value);
}
static int __vsync_set_vsync_handler(struct msm_fb_data_type *mfd)
{
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
struct mdss_mdp_ctl *ctl;
int rc;
ctl = mdp5_data->ctl;
if (!mdp5_data->retire_cnt ||
mdp5_data->vsync_retire_handler.enabled)
return 0;
if (!ctl->add_vsync_handler)
return -EOPNOTSUPP;
if (!ctl->power_on) {
pr_debug("fb%d vsync pending first update\n", mfd->index);
return -EPERM;
}
rc = ctl->add_vsync_handler(ctl,
&mdp5_data->vsync_retire_handler);
return rc;
}
static int __vsync_retire_setup(struct msm_fb_data_type *mfd)
{
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
char name[24];
snprintf(name, sizeof(name), "mdss_fb%d_retire", mfd->index);
mdp5_data->vsync_timeline = sw_sync_timeline_create(name);
if (mdp5_data->vsync_timeline == NULL) {
pr_err("cannot vsync create time line");
return -ENOMEM;
}
mfd->mdp_sync_pt_data.get_retire_fence = __vsync_retire_get_fence;
mdp5_data->vsync_retire_handler.vsync_handler =
__vsync_retire_handle_vsync;
mdp5_data->vsync_retire_handler.cmd_post_flush = false;
INIT_WORK(&mdp5_data->retire_work, __vsync_retire_work_handler);
return 0;
}
int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd)
{
struct device *dev = mfd->fbi->dev;
struct msm_mdp_interface *mdp5_interface = &mfd->mdp;
struct mdss_overlay_private *mdp5_data = NULL;
int rc;
mdp5_interface->on_fnc = mdss_mdp_overlay_on;
mdp5_interface->off_fnc = mdss_mdp_overlay_off;
mdp5_interface->release_fnc = __mdss_mdp_overlay_release_all;
mdp5_interface->do_histogram = NULL;
mdp5_interface->cursor_update = mdss_mdp_hw_cursor_update;
mdp5_interface->dma_fnc = mdss_mdp_overlay_pan_display;
mdp5_interface->ioctl_handler = mdss_mdp_overlay_ioctl_handler;
mdp5_interface->panel_register_done = mdss_panel_register_done;
mdp5_interface->kickoff_fnc = mdss_mdp_overlay_kickoff;
mdp5_interface->get_sync_fnc = mdss_mdp_rotator_sync_pt_get;
mdp5_interface->splash_init_fnc = mdss_mdp_splash_init;
mdp5_data = kzalloc(sizeof(struct mdss_overlay_private), GFP_KERNEL);
if (!mdp5_data) {
pr_err("fail to allocate mdp5 private data structure");
return -ENOMEM;
}
INIT_LIST_HEAD(&mdp5_data->pipes_used);
INIT_LIST_HEAD(&mdp5_data->pipes_cleanup);
INIT_LIST_HEAD(&mdp5_data->rot_proc_list);
mutex_init(&mdp5_data->list_lock);
mutex_init(&mdp5_data->ov_lock);
mdp5_data->hw_refresh = true;
mdp5_data->overlay_play_enable = true;
mdp5_data->mdata = dev_get_drvdata(mfd->pdev->dev.parent);
if (!mdp5_data->mdata) {
pr_err("unable to initialize overlay for fb%d\n", mfd->index);
rc = -ENODEV;
goto init_fail;
}
mfd->mdp.private1 = mdp5_data;
mfd->wait_for_kickoff = true;
if (mfd->panel_info->partial_update_enabled && mfd->split_display)
mdp5_data->mdata->has_src_split = false;
rc = mdss_mdp_overlay_fb_parse_dt(mfd);
if (rc)
return rc;
rc = sysfs_create_group(&dev->kobj, &mdp_overlay_sysfs_group);
if (rc) {
pr_err("vsync sysfs group creation failed, ret=%d\n", rc);
goto init_fail;
}
mdp5_data->vsync_event_sd = sysfs_get_dirent(dev->kobj.sd, NULL,
"vsync_event");
if (!mdp5_data->vsync_event_sd) {
pr_err("vsync_event sysfs lookup failed\n");
rc = -ENODEV;
goto init_fail;
}
rc = sysfs_create_link_nowarn(&dev->kobj,
&mdp5_data->mdata->pdev->dev.kobj, "mdp");
if (rc)
pr_warn("problem creating link to mdp sysfs\n");
rc = sysfs_create_link_nowarn(&dev->kobj,
&mfd->pdev->dev.kobj, "mdss_fb");
if (rc)
pr_warn("problem creating link to mdss_fb sysfs\n");
if (mfd->panel_info->type == MIPI_VIDEO_PANEL) {
rc = sysfs_create_group(&dev->kobj,
&dynamic_fps_fs_attrs_group);
if (rc) {
pr_err("Error dfps sysfs creation ret=%d\n", rc);
goto init_fail;
}
} else if (mfd->panel_info->type == MIPI_CMD_PANEL) {
rc = __vsync_retire_setup(mfd);
if (IS_ERR_VALUE(rc)) {
pr_err("unable to create vsync timeline\n");
goto init_fail;
}
}
if (mfd->panel_info->type == MIPI_CMD_PANEL) {
rc = sysfs_create_group(&dev->kobj,
&factory_te_attrs_group);
if (rc) {
pr_err("Error factory te sysfs creation ret=%d\n", rc);
goto init_fail;
}
}
if (mfd->panel_info->hbm_feature_enabled) {
rc = sysfs_create_group(&dev->kobj,
&hbm_attrs_group);
if (rc) {
pr_err("Error for HBM sysfs creation ret = %d\n", rc);
goto init_fail;
}
}
mfd->mdp_sync_pt_data.async_wait_fences = true;
kobject_uevent(&dev->kobj, KOBJ_ADD);
pr_debug("vsync kobject_uevent(KOBJ_ADD)\n");
mdp5_data->cpu_pm_hdl = add_event_timer(NULL, (void *)mdp5_data);
if (!mdp5_data->cpu_pm_hdl)
pr_warn("%s: unable to add event timer\n", __func__);
if (mfd->panel_info->cont_splash_enabled) {
rc = mdss_mdp_overlay_handoff(mfd);
if (rc) {
/*
* Even though handoff failed, it is not fatal.
* MDP can continue, just that we would have a longer
* delay in transitioning from splash screen to boot
* animation
*/
pr_warn("Overlay handoff failed for fb%d. rc=%d\n",
mfd->index, rc);
rc = 0;
}
}
if (mfd->index == 0 && mfd->panel_info->quickdraw_enabled)
mdss_quickdraw_register(mfd);
return rc;
init_fail:
kfree(mdp5_data);
return rc;
}
static int mdss_mdp_overlay_fb_parse_dt(struct msm_fb_data_type *mfd)
{
int rc = 0;
struct platform_device *pdev = mfd->pdev;
struct mdss_overlay_private *mdp5_mdata = mfd_to_mdp5_data(mfd);
mdp5_mdata->mixer_swap = of_property_read_bool(pdev->dev.of_node,
"qcom,mdss-mixer-swap");
if (mdp5_mdata->mixer_swap) {
pr_info("mixer swap is enabled for fb device=%s\n",
pdev->name);
}
mdp5_mdata->fb_rot_180 = of_property_read_bool(pdev->dev.of_node,
"qcom,mdss-fb-rot-180");
if (mdp5_mdata->fb_rot_180) {
pr_info("180 degree rotation is enabled for fb device=%s\n",
pdev->name);
}
return rc;
}
| W4TCH0UT/zz_quark | drivers/video/msm/mdss/mdss_mdp_overlay.c | C | gpl-2.0 | 103,452 |
<?php
namespace T3Monitor\T3monitoring\ViewHelpers\Format;
/*
* This file is part of the t3monitoring extension for TYPO3 CMS.
*
* For the full copyright and license information, please read the
* LICENSE.txt file that was distributed with this source code.
*/
use T3Monitor\T3monitoring\Domain\Model\Extension;
use TYPO3Fluid\Fluid\Core\Rendering\RenderingContextInterface;
use TYPO3Fluid\Fluid\Core\ViewHelper\AbstractViewHelper;
/**
* Class ExtensionStateViewHelper
*/
class ExtensionStateViewHelper extends AbstractViewHelper
{
public function initializeArguments()
{
parent::initializeArguments();
$this->registerArgument('state', 'int', 'state', false, 0);
}
public static function renderStatic(array $arguments, \Closure $renderChildrenClosure, RenderingContextInterface $renderingContext)
{
$state = $arguments['state'] ?: $renderChildrenClosure();
$stateString = '';
if (isset(Extension::$defaultStates[$state])) {
$stateString = Extension::$defaultStates[$state];
}
return $stateString;
}
}
| georgringer/t3monitoring | Classes/ViewHelpers/Format/ExtensionStateViewHelper.php | PHP | gpl-2.0 | 1,105 |
/*
* To change this template, choose Tools | Templates
* and open the template in the editor.
*/
package zprojectsupdater.models;
import org.json.JSONObject;
/**
*
* @author m4tuu
*/
public class Archivo {
private int id;
private String url, ruta;
public int getId() {
return id;
}
public void setId(int id) {
this.id = id;
}
public String getRuta() {
return ruta;
}
public void setRuta(String ruta) {
this.ruta = ruta;
}
public String getUrl() {
return url;
}
public void setUrl(String url) {
this.url = url;
}
public Archivo(int id, String url, String ruta) {
this.id = id;
this.url = url;
this.ruta = ruta;
}
public Archivo() {
}
public Archivo(JSONObject json) {
try {
this.id = json.getInt("id");
this.ruta = json.getString("ruta");
this.url = json.getString("url");
} catch (Exception e) {
}
}
}
| infoINGenieria/zprojectsUpdater | ZProjectsUpdater/src/zprojectsupdater/models/Archivo.java | Java | gpl-2.0 | 1,030 |
# Makefile for the Linux video drivers.
# 5 Aug 1999, James Simmons, <mailto:jsimmons@users.sf.net>
# Rewritten to use lists instead of if-statements.
# Each configuration option enables a list of files.
obj-$(CONFIG_VGASTATE) += vgastate.o
obj-y += fb_notify.o
obj-$(CONFIG_FB) += fb.o
fb-y := fbmem.o fbmon.o fbcmap.o fbsysfs.o \
modedb.o fbcvt.o
fb-objs := $(fb-y)
obj-$(CONFIG_VT) += console/
obj-$(CONFIG_LOGO) += logo/
obj-y += backlight/ display/
obj-$(CONFIG_FB_CFB_FILLRECT) += cfbfillrect.o
obj-$(CONFIG_FB_CFB_COPYAREA) += cfbcopyarea.o
obj-$(CONFIG_FB_CFB_IMAGEBLIT) += cfbimgblt.o
obj-$(CONFIG_FB_SYS_FILLRECT) += sysfillrect.o
obj-$(CONFIG_FB_SYS_COPYAREA) += syscopyarea.o
obj-$(CONFIG_FB_SYS_IMAGEBLIT) += sysimgblt.o
obj-$(CONFIG_FB_SYS_FOPS) += fb_sys_fops.o
obj-$(CONFIG_FB_SVGALIB) += svgalib.o
obj-$(CONFIG_FB_MACMODES) += macmodes.o
obj-$(CONFIG_FB_DDC) += fb_ddc.o
obj-$(CONFIG_FB_DEFERRED_IO) += fb_defio.o
obj-$(CONFIG_FB_WMT_GE_ROPS) += wmt_ge_rops.o
# Hardware specific drivers go first
obj-$(CONFIG_FB_LOONGSON1) += ls1xfb.o
obj-$(CONFIG_FB_LS1X_I2C) += ls1xfb-i2c.o
obj-$(CONFIG_FB_AMIGA) += amifb.o c2p_planar.o
obj-$(CONFIG_FB_ARC) += arcfb.o
obj-$(CONFIG_FB_SSD1305) += ssd1305fb.o
obj-$(CONFIG_FB_ST7565) += st7565fb.o
obj-$(CONFIG_FB_ST7920) += st7920fb.o
obj-$(CONFIG_FB_CLPS711X) += clps711xfb.o
obj-$(CONFIG_FB_CYBER2000) += cyber2000fb.o
obj-$(CONFIG_FB_PM2) += pm2fb.o
obj-$(CONFIG_FB_PM3) += pm3fb.o
obj-$(CONFIG_FB_MATROX) += matrox/
obj-$(CONFIG_FB_RIVA) += riva/
obj-$(CONFIG_FB_NVIDIA) += nvidia/
obj-$(CONFIG_FB_ATY) += aty/ macmodes.o
obj-$(CONFIG_FB_ATY128) += aty/ macmodes.o
obj-$(CONFIG_FB_RADEON) += aty/
obj-$(CONFIG_FB_SIS) += sis/
obj-$(CONFIG_FB_VIA) += via/
obj-$(CONFIG_FB_KYRO) += kyro/
obj-$(CONFIG_FB_SAVAGE) += savage/
obj-$(CONFIG_FB_GEODE) += geode/
obj-$(CONFIG_FB_MBX) += mbx/
obj-$(CONFIG_FB_NEOMAGIC) += neofb.o
obj-$(CONFIG_FB_3DFX) += tdfxfb.o
obj-$(CONFIG_FB_CONTROL) += controlfb.o
obj-$(CONFIG_FB_PLATINUM) += platinumfb.o
obj-$(CONFIG_FB_VALKYRIE) += valkyriefb.o
obj-$(CONFIG_FB_CT65550) += chipsfb.o
obj-$(CONFIG_FB_IMSTT) += imsttfb.o
obj-$(CONFIG_FB_FM2) += fm2fb.o
obj-$(CONFIG_FB_VT8623) += vt8623fb.o
obj-$(CONFIG_FB_TRIDENT) += tridentfb.o
obj-$(CONFIG_FB_LE80578) += vermilion/
obj-$(CONFIG_FB_S3) += s3fb.o
obj-$(CONFIG_FB_ARK) += arkfb.o
obj-$(CONFIG_FB_STI) += stifb.o
obj-$(CONFIG_FB_FFB) += ffb.o sbuslib.o
obj-$(CONFIG_FB_CG6) += cg6.o sbuslib.o
obj-$(CONFIG_FB_CG3) += cg3.o sbuslib.o
obj-$(CONFIG_FB_BW2) += bw2.o sbuslib.o
obj-$(CONFIG_FB_CG14) += cg14.o sbuslib.o
obj-$(CONFIG_FB_P9100) += p9100.o sbuslib.o
obj-$(CONFIG_FB_TCX) += tcx.o sbuslib.o
obj-$(CONFIG_FB_LEO) += leo.o sbuslib.o
obj-$(CONFIG_FB_SGIVW) += sgivwfb.o
obj-$(CONFIG_FB_ACORN) += acornfb.o
obj-$(CONFIG_FB_ATARI) += atafb.o c2p_iplan2.o atafb_mfb.o \
atafb_iplan2p2.o atafb_iplan2p4.o atafb_iplan2p8.o
obj-$(CONFIG_FB_MAC) += macfb.o
obj-$(CONFIG_FB_HECUBA) += hecubafb.o
obj-$(CONFIG_FB_N411) += n411.o
obj-$(CONFIG_FB_HGA) += hgafb.o
obj-$(CONFIG_FB_XVR500) += sunxvr500.o
obj-$(CONFIG_FB_XVR2500) += sunxvr2500.o
obj-$(CONFIG_FB_XVR1000) += sunxvr1000.o
obj-$(CONFIG_FB_IGA) += igafb.o
obj-$(CONFIG_FB_APOLLO) += dnfb.o
obj-$(CONFIG_FB_Q40) += q40fb.o
obj-$(CONFIG_FB_TGA) += tgafb.o
obj-$(CONFIG_FB_HP300) += hpfb.o
obj-$(CONFIG_FB_G364) += g364fb.o
obj-$(CONFIG_FB_EP93XX) += ep93xx-fb.o
obj-$(CONFIG_FB_SA1100) += sa1100fb.o
obj-$(CONFIG_FB_HIT) += hitfb.o
obj-$(CONFIG_FB_EPSON1355) += epson1355fb.o
obj-$(CONFIG_FB_ATMEL) += atmel_lcdfb.o
obj-$(CONFIG_FB_PVR2) += pvr2fb.o
obj-$(CONFIG_FB_VOODOO1) += sstfb.o
obj-$(CONFIG_FB_ARMCLCD) += amba-clcd.o
obj-$(CONFIG_FB_68328) += 68328fb.o
obj-$(CONFIG_FB_GBE) += gbefb.o
obj-$(CONFIG_FB_CIRRUS) += cirrusfb.o
obj-$(CONFIG_FB_ASILIANT) += asiliantfb.o
obj-$(CONFIG_FB_PXA) += pxafb.o
obj-$(CONFIG_FB_PXA168) += pxa168fb.o
obj-$(CONFIG_PXA3XX_GCU) += pxa3xx-gcu.o
obj-$(CONFIG_FB_W100) += w100fb.o
obj-$(CONFIG_FB_TMIO) += tmiofb.o
obj-$(CONFIG_FB_AU1100) += au1100fb.o
obj-$(CONFIG_FB_AU1200) += au1200fb.o
obj-$(CONFIG_FB_VT8500) += vt8500lcdfb.o
obj-$(CONFIG_FB_WM8505) += wm8505fb.o
obj-$(CONFIG_FB_PMAG_AA) += pmag-aa-fb.o
obj-$(CONFIG_FB_PMAG_BA) += pmag-ba-fb.o
obj-$(CONFIG_FB_PMAGB_B) += pmagb-b-fb.o
obj-$(CONFIG_FB_MAXINE) += maxinefb.o
obj-$(CONFIG_FB_METRONOME) += metronomefb.o
obj-$(CONFIG_FB_BROADSHEET) += broadsheetfb.o
obj-$(CONFIG_FB_S1D13XXX) += s1d13xxxfb.o
obj-$(CONFIG_FB_SH7760) += sh7760fb.o
obj-$(CONFIG_FB_IMX) += imxfb.o
obj-$(CONFIG_FB_S3C) += s3c-fb.o
obj-$(CONFIG_FB_S3C2410) += s3c2410fb.o
obj-$(CONFIG_FB_FSL_DIU) += fsl-diu-fb.o
obj-$(CONFIG_FB_COBALT) += cobalt_lcdfb.o
obj-$(CONFIG_FB_PNX4008_DUM) += pnx4008/
obj-$(CONFIG_FB_PNX4008_DUM_RGB) += pnx4008/
obj-$(CONFIG_FB_IBM_GXT4500) += gxt4500.o
obj-$(CONFIG_FB_PS3) += ps3fb.o
obj-$(CONFIG_FB_SM501) += sm501fb.o
obj-$(CONFIG_FB_UDL) += udlfb.o
obj-$(CONFIG_FB_XILINX) += xilinxfb.o
obj-$(CONFIG_SH_MIPI_DSI) += sh_mipi_dsi.o
obj-$(CONFIG_FB_SH_MOBILE_HDMI) += sh_mobile_hdmi.o
obj-$(CONFIG_FB_SH_MOBILE_MERAM) += sh_mobile_meram.o
obj-$(CONFIG_FB_SH_MOBILE_LCDC) += sh_mobile_lcdcfb.o
obj-$(CONFIG_FB_OMAP) += omap/
obj-y += omap2/
obj-$(CONFIG_XEN_FBDEV_FRONTEND) += xen-fbfront.o
obj-$(CONFIG_FB_CARMINE) += carminefb.o
obj-$(CONFIG_FB_MB862XX) += mb862xx/
obj-$(CONFIG_FB_MSM) += msm/
obj-$(CONFIG_FB_NUC900) += nuc900fb.o
obj-$(CONFIG_FB_JZ4740) += jz4740_fb.o
obj-$(CONFIG_FB_PUV3_UNIGFX) += fb-puv3.o
# Platform or fallback drivers go here
obj-$(CONFIG_FB_UVESA) += uvesafb.o
obj-$(CONFIG_FB_VESA) += vesafb.o
obj-$(CONFIG_FB_EFI) += efifb.o
obj-$(CONFIG_FB_VGA16) += vga16fb.o
obj-$(CONFIG_FB_OF) += offb.o
obj-$(CONFIG_FB_BF537_LQ035) += bf537-lq035.o
obj-$(CONFIG_FB_BF54X_LQ043) += bf54x-lq043fb.o
obj-$(CONFIG_FB_BFIN_LQ035Q1) += bfin-lq035q1-fb.o
obj-$(CONFIG_FB_BFIN_T350MCQB) += bfin-t350mcqb-fb.o
obj-$(CONFIG_FB_BFIN_7393) += bfin_adv7393fb.o
obj-$(CONFIG_FB_MX3) += mx3fb.o
obj-$(CONFIG_FB_DA8XX) += da8xx-fb.o
obj-$(CONFIG_FB_MXS) += mxsfb.o
# the test framebuffer is last
obj-$(CONFIG_FB_VIRTUAL) += vfb.o
#video output switch sysfs driver
obj-$(CONFIG_VIDEO_OUTPUT_CONTROL) += output.o
| lshw/loongson1-linux-3.0 | drivers/video/Makefile | Makefile | gpl-2.0 | 7,298 |
/*
* Copyright (C) 2009 Red Hat, Inc.
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*/
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/mmu_notifier.h>
#include <linux/rmap.h>
#include <linux/swap.h>
#include <linux/mm_inline.h>
#include <linux/kthread.h>
#include <linux/khugepaged.h>
#include <linux/freezer.h>
#include <linux/mman.h>
#include <asm/tlb.h>
#include <asm/pgalloc.h>
#include "internal.h"
/*
* By default transparent hugepage support is enabled for all mappings
* and khugepaged scans all mappings. Defrag is only invoked by
* khugepaged hugepage allocations and by page faults inside
* MADV_HUGEPAGE regions to avoid the risk of slowing down short lived
* allocations.
*/
unsigned long transparent_hugepage_flags __read_mostly =
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS
(1<<TRANSPARENT_HUGEPAGE_FLAG)|
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE_MADVISE
(1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)|
#endif
(1<<TRANSPARENT_HUGEPAGE_DEFRAG_FLAG)|
(1<<TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
/* default scan 8*512 pte (or vmas) every 30 seconds */
static unsigned int khugepaged_pages_to_scan __read_mostly = HPAGE_PMD_NR*8;
static unsigned int khugepaged_pages_collapsed;
static unsigned int khugepaged_full_scans;
static unsigned int khugepaged_scan_sleep_millisecs __read_mostly = 10000;
/* during fragmentation poll the hugepage allocator once every minute */
static unsigned int khugepaged_alloc_sleep_millisecs __read_mostly = 60000;
static struct task_struct *khugepaged_thread __read_mostly;
static DEFINE_MUTEX(khugepaged_mutex);
static DEFINE_SPINLOCK(khugepaged_mm_lock);
static DECLARE_WAIT_QUEUE_HEAD(khugepaged_wait);
/*
* default collapse hugepages if there is at least one pte mapped like
* it would have happened if the vma was large enough during page
* fault.
*/
static unsigned int khugepaged_max_ptes_none __read_mostly = HPAGE_PMD_NR-1;
static int khugepaged(void *none);
static int mm_slots_hash_init(void);
static int khugepaged_slab_init(void);
static void khugepaged_slab_free(void);
#define MM_SLOTS_HASH_HEADS 1024
static struct hlist_head *mm_slots_hash __read_mostly;
static struct kmem_cache *mm_slot_cache __read_mostly;
/**
* struct mm_slot - hash lookup from mm to mm_slot
* @hash: hash collision list
* @mm_node: khugepaged scan list headed in khugepaged_scan.mm_head
* @mm: the mm that this information is valid for
*/
struct mm_slot {
struct hlist_node hash;
struct list_head mm_node;
struct mm_struct *mm;
};
/**
* struct khugepaged_scan - cursor for scanning
* @mm_head: the head of the mm list to scan
* @mm_slot: the current mm_slot we are scanning
* @address: the next address inside that to be scanned
*
* There is only the one khugepaged_scan instance of this cursor structure.
*/
struct khugepaged_scan {
struct list_head mm_head;
struct mm_slot *mm_slot;
unsigned long address;
} khugepaged_scan = {
.mm_head = LIST_HEAD_INIT(khugepaged_scan.mm_head),
};
static int set_recommended_min_free_kbytes(void)
{
struct zone *zone;
int nr_zones = 0;
unsigned long recommended_min;
extern int min_free_kbytes;
if (!test_bit(TRANSPARENT_HUGEPAGE_FLAG,
&transparent_hugepage_flags) &&
!test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
&transparent_hugepage_flags))
return 0;
for_each_populated_zone(zone)
nr_zones++;
/* Make sure at least 2 hugepages are free for MIGRATE_RESERVE */
recommended_min = pageblock_nr_pages * nr_zones * 2;
/*
* Make sure that on average at least two pageblocks are almost free
* of another type, one for a migratetype to fall back to and a
* second to avoid subsequent fallbacks of other types. There are 3
* MIGRATE_TYPES we care about.
*/
recommended_min += pageblock_nr_pages * nr_zones *
MIGRATE_PCPTYPES * MIGRATE_PCPTYPES;
/* don't ever allow to reserve more than 5% of the lowmem */
recommended_min = min(recommended_min,
(unsigned long) nr_free_buffer_pages() / 20);
recommended_min <<= (PAGE_SHIFT-10);
if (recommended_min > min_free_kbytes)
min_free_kbytes = recommended_min;
setup_per_zone_wmarks();
return 0;
}
late_initcall(set_recommended_min_free_kbytes);
static int start_khugepaged(void)
{
int err = 0;
if (khugepaged_enabled()) {
int wakeup;
if (unlikely(!mm_slot_cache || !mm_slots_hash)) {
err = -ENOMEM;
goto out;
}
mutex_lock(&khugepaged_mutex);
if (!khugepaged_thread)
khugepaged_thread = kthread_run(khugepaged, NULL,
"khugepaged");
if (unlikely(IS_ERR(khugepaged_thread))) {
printk(KERN_ERR
"khugepaged: kthread_run(khugepaged) failed\n");
err = PTR_ERR(khugepaged_thread);
khugepaged_thread = NULL;
}
wakeup = !list_empty(&khugepaged_scan.mm_head);
mutex_unlock(&khugepaged_mutex);
if (wakeup)
wake_up_interruptible(&khugepaged_wait);
set_recommended_min_free_kbytes();
} else
/* wakeup to exit */
wake_up_interruptible(&khugepaged_wait);
out:
return err;
}
#ifdef CONFIG_SYSFS
static ssize_t double_flag_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf,
enum transparent_hugepage_flag enabled,
enum transparent_hugepage_flag req_madv)
{
if (test_bit(enabled, &transparent_hugepage_flags)) {
VM_BUG_ON(test_bit(req_madv, &transparent_hugepage_flags));
return sprintf(buf, "[always] madvise never\n");
} else if (test_bit(req_madv, &transparent_hugepage_flags))
return sprintf(buf, "always [madvise] never\n");
else
return sprintf(buf, "always madvise [never]\n");
}
static ssize_t double_flag_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count,
enum transparent_hugepage_flag enabled,
enum transparent_hugepage_flag req_madv)
{
if (!memcmp("always", buf,
min(sizeof("always")-1, count))) {
set_bit(enabled, &transparent_hugepage_flags);
clear_bit(req_madv, &transparent_hugepage_flags);
} else if (!memcmp("madvise", buf,
min(sizeof("madvise")-1, count))) {
clear_bit(enabled, &transparent_hugepage_flags);
set_bit(req_madv, &transparent_hugepage_flags);
} else if (!memcmp("never", buf,
min(sizeof("never")-1, count))) {
clear_bit(enabled, &transparent_hugepage_flags);
clear_bit(req_madv, &transparent_hugepage_flags);
} else
return -EINVAL;
return count;
}
static ssize_t enabled_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return double_flag_show(kobj, attr, buf,
TRANSPARENT_HUGEPAGE_FLAG,
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
}
static ssize_t enabled_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
ssize_t ret;
ret = double_flag_store(kobj, attr, buf, count,
TRANSPARENT_HUGEPAGE_FLAG,
TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG);
if (ret > 0) {
int err = start_khugepaged();
if (err)
ret = err;
}
if (ret > 0 &&
(test_bit(TRANSPARENT_HUGEPAGE_FLAG,
&transparent_hugepage_flags) ||
test_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
&transparent_hugepage_flags)))
set_recommended_min_free_kbytes();
return ret;
}
static struct kobj_attribute enabled_attr =
__ATTR(enabled, 0644, enabled_show, enabled_store);
static ssize_t single_flag_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf,
enum transparent_hugepage_flag flag)
{
return sprintf(buf, "%d\n",
!!test_bit(flag, &transparent_hugepage_flags));
}
static ssize_t single_flag_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count,
enum transparent_hugepage_flag flag)
{
unsigned long value;
int ret;
ret = kstrtoul(buf, 10, &value);
if (ret < 0)
return ret;
if (value > 1)
return -EINVAL;
if (value)
set_bit(flag, &transparent_hugepage_flags);
else
clear_bit(flag, &transparent_hugepage_flags);
return count;
}
/*
* Currently defrag only disables __GFP_NOWAIT for allocation. A blind
* __GFP_REPEAT is too aggressive, it's never worth swapping tons of
* memory just to allocate one more hugepage.
*/
static ssize_t defrag_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return double_flag_show(kobj, attr, buf,
TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static ssize_t defrag_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
return double_flag_store(kobj, attr, buf, count,
TRANSPARENT_HUGEPAGE_DEFRAG_FLAG,
TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG);
}
static struct kobj_attribute defrag_attr =
__ATTR(defrag, 0644, defrag_show, defrag_store);
#ifdef CONFIG_DEBUG_VM
static ssize_t debug_cow_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return single_flag_show(kobj, attr, buf,
TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static ssize_t debug_cow_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
return single_flag_store(kobj, attr, buf, count,
TRANSPARENT_HUGEPAGE_DEBUG_COW_FLAG);
}
static struct kobj_attribute debug_cow_attr =
__ATTR(debug_cow, 0644, debug_cow_show, debug_cow_store);
#endif /* CONFIG_DEBUG_VM */
static struct attribute *hugepage_attr[] = {
&enabled_attr.attr,
&defrag_attr.attr,
#ifdef CONFIG_DEBUG_VM
&debug_cow_attr.attr,
#endif
NULL,
};
static struct attribute_group hugepage_attr_group = {
.attrs = hugepage_attr,
};
static ssize_t scan_sleep_millisecs_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%u\n", khugepaged_scan_sleep_millisecs);
}
static ssize_t scan_sleep_millisecs_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
unsigned long msecs;
int err;
err = strict_strtoul(buf, 10, &msecs);
if (err || msecs > UINT_MAX)
return -EINVAL;
khugepaged_scan_sleep_millisecs = msecs;
wake_up_interruptible(&khugepaged_wait);
return count;
}
static struct kobj_attribute scan_sleep_millisecs_attr =
__ATTR(scan_sleep_millisecs, 0644, scan_sleep_millisecs_show,
scan_sleep_millisecs_store);
static ssize_t alloc_sleep_millisecs_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%u\n", khugepaged_alloc_sleep_millisecs);
}
static ssize_t alloc_sleep_millisecs_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
unsigned long msecs;
int err;
err = strict_strtoul(buf, 10, &msecs);
if (err || msecs > UINT_MAX)
return -EINVAL;
khugepaged_alloc_sleep_millisecs = msecs;
wake_up_interruptible(&khugepaged_wait);
return count;
}
static struct kobj_attribute alloc_sleep_millisecs_attr =
__ATTR(alloc_sleep_millisecs, 0644, alloc_sleep_millisecs_show,
alloc_sleep_millisecs_store);
static ssize_t pages_to_scan_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%u\n", khugepaged_pages_to_scan);
}
static ssize_t pages_to_scan_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
int err;
unsigned long pages;
err = strict_strtoul(buf, 10, &pages);
if (err || !pages || pages > UINT_MAX)
return -EINVAL;
khugepaged_pages_to_scan = pages;
return count;
}
static struct kobj_attribute pages_to_scan_attr =
__ATTR(pages_to_scan, 0644, pages_to_scan_show,
pages_to_scan_store);
static ssize_t pages_collapsed_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%u\n", khugepaged_pages_collapsed);
}
static struct kobj_attribute pages_collapsed_attr =
__ATTR_RO(pages_collapsed);
static ssize_t full_scans_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%u\n", khugepaged_full_scans);
}
static struct kobj_attribute full_scans_attr =
__ATTR_RO(full_scans);
static ssize_t khugepaged_defrag_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
return single_flag_show(kobj, attr, buf,
TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static ssize_t khugepaged_defrag_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
return single_flag_store(kobj, attr, buf, count,
TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG);
}
static struct kobj_attribute khugepaged_defrag_attr =
__ATTR(defrag, 0644, khugepaged_defrag_show,
khugepaged_defrag_store);
/*
 * max_ptes_none controls whether khugepaged should collapse hugepages
 * over unmapped ptes, in turn potentially increasing the memory
 * footprint of the vmas. When max_ptes_none is 0, khugepaged will not
* reduce the available free memory in the system as it
* runs. Increasing max_ptes_none will instead potentially reduce the
* free memory in the system during the khugepaged scan.
*/
static ssize_t khugepaged_max_ptes_none_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
return sprintf(buf, "%u\n", khugepaged_max_ptes_none);
}
static ssize_t khugepaged_max_ptes_none_store(struct kobject *kobj,
struct kobj_attribute *attr,
const char *buf, size_t count)
{
int err;
unsigned long max_ptes_none;
err = strict_strtoul(buf, 10, &max_ptes_none);
if (err || max_ptes_none > HPAGE_PMD_NR-1)
return -EINVAL;
khugepaged_max_ptes_none = max_ptes_none;
return count;
}
static struct kobj_attribute khugepaged_max_ptes_none_attr =
__ATTR(max_ptes_none, 0644, khugepaged_max_ptes_none_show,
khugepaged_max_ptes_none_store);
static struct attribute *khugepaged_attr[] = {
&khugepaged_defrag_attr.attr,
&khugepaged_max_ptes_none_attr.attr,
&pages_to_scan_attr.attr,
&pages_collapsed_attr.attr,
&full_scans_attr.attr,
&scan_sleep_millisecs_attr.attr,
&alloc_sleep_millisecs_attr.attr,
NULL,
};
static struct attribute_group khugepaged_attr_group = {
.attrs = khugepaged_attr,
.name = "khugepaged",
};
#endif /* CONFIG_SYSFS */
static int __init hugepage_init(void)
{
int err;
#ifdef CONFIG_SYSFS
static struct kobject *hugepage_kobj;
#endif
err = -EINVAL;
if (!has_transparent_hugepage()) {
transparent_hugepage_flags = 0;
goto out;
}
#ifdef CONFIG_SYSFS
err = -ENOMEM;
hugepage_kobj = kobject_create_and_add("transparent_hugepage", mm_kobj);
if (unlikely(!hugepage_kobj)) {
printk(KERN_ERR "hugepage: failed kobject create\n");
goto out;
}
err = sysfs_create_group(hugepage_kobj, &hugepage_attr_group);
if (err) {
printk(KERN_ERR "hugepage: failed register hugeage group\n");
goto out;
}
err = sysfs_create_group(hugepage_kobj, &khugepaged_attr_group);
if (err) {
printk(KERN_ERR "hugepage: failed register hugeage group\n");
goto out;
}
#endif
err = khugepaged_slab_init();
if (err)
goto out;
err = mm_slots_hash_init();
if (err) {
khugepaged_slab_free();
goto out;
}
/*
* By default disable transparent hugepages on smaller systems,
* where the extra memory used could hurt more than TLB overhead
* is likely to save. The admin can still enable it through /sys.
*/
if (totalram_pages < (512 << (20 - PAGE_SHIFT)))
transparent_hugepage_flags = 0;
start_khugepaged();
set_recommended_min_free_kbytes();
out:
return err;
}
module_init(hugepage_init)
static int __init setup_transparent_hugepage(char *str)
{
int ret = 0;
if (!str)
goto out;
if (!strcmp(str, "always")) {
set_bit(TRANSPARENT_HUGEPAGE_FLAG,
&transparent_hugepage_flags);
clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
&transparent_hugepage_flags);
ret = 1;
} else if (!strcmp(str, "madvise")) {
clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
&transparent_hugepage_flags);
set_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
&transparent_hugepage_flags);
ret = 1;
} else if (!strcmp(str, "never")) {
clear_bit(TRANSPARENT_HUGEPAGE_FLAG,
&transparent_hugepage_flags);
clear_bit(TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
&transparent_hugepage_flags);
ret = 1;
}
out:
if (!ret)
printk(KERN_WARNING
"transparent_hugepage= cannot parse, ignored\n");
return ret;
}
__setup("transparent_hugepage=", setup_transparent_hugepage);
static void prepare_pmd_huge_pte(pgtable_t pgtable,
struct mm_struct *mm)
{
assert_spin_locked(&mm->page_table_lock);
/* FIFO */
if (!mm->pmd_huge_pte)
INIT_LIST_HEAD(&pgtable->lru);
else
list_add(&pgtable->lru, &mm->pmd_huge_pte->lru);
mm->pmd_huge_pte = pgtable;
}
static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
{
if (likely(vma->vm_flags & VM_WRITE))
pmd = pmd_mkwrite(pmd);
return pmd;
}
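/*
 * Map a freshly allocated, already charged huge page at @haddr:
 * pre-allocate the pagetable a later split would need, zero the huge
 * page, and install the huge pmd under page_table_lock, backing out
 * (uncharging and dropping the page) if another thread populated the
 * pmd in the meantime.
 */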
static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long haddr, pmd_t *pmd,
struct page *page)
{
int ret = 0;
pgtable_t pgtable;
VM_BUG_ON(!PageCompound(page));
pgtable = pte_alloc_one(mm, haddr);
if (unlikely(!pgtable)) {
mem_cgroup_uncharge_page(page);
put_page(page);
return VM_FAULT_OOM;
}
clear_huge_page(page, haddr, HPAGE_PMD_NR);
__SetPageUptodate(page);
spin_lock(&mm->page_table_lock);
if (unlikely(!pmd_none(*pmd))) {
spin_unlock(&mm->page_table_lock);
mem_cgroup_uncharge_page(page);
put_page(page);
pte_free(mm, pgtable);
} else {
pmd_t entry;
entry = mk_pmd(page, vma->vm_page_prot);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
entry = pmd_mkhuge(entry);
/*
* The spinlocking to take the lru_lock inside
* page_add_new_anon_rmap() acts as a full memory
* barrier to be sure clear_huge_page writes become
* visible after the set_pmd_at() write.
*/
page_add_new_anon_rmap(page, vma, haddr);
set_pmd_at(mm, haddr, pmd, entry);
prepare_pmd_huge_pte(pgtable, mm);
add_mm_counter(mm, MM_ANONPAGES, HPAGE_PMD_NR);
mm->nr_ptes++;
spin_unlock(&mm->page_table_lock);
}
return ret;
}
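/*
 * Start from GFP_TRANSHUGE and clear __GFP_WAIT when defrag is
 * disabled, so the huge page allocation fails fast instead of entering
 * reclaim/compaction and the fault can fall back to regular pages.
 */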
static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
{
return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_WAIT)) | extra_gfp;
}
static inline struct page *alloc_hugepage_vma(int defrag,
struct vm_area_struct *vma,
unsigned long haddr, int nd,
gfp_t extra_gfp)
{
return alloc_pages_vma(alloc_hugepage_gfpmask(defrag, extra_gfp),
HPAGE_PMD_ORDER, vma, haddr, nd);
}
#ifndef CONFIG_NUMA
static inline struct page *alloc_hugepage(int defrag)
{
return alloc_pages(alloc_hugepage_gfpmask(defrag, 0),
HPAGE_PMD_ORDER);
}
#endif
int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd,
unsigned int flags)
{
struct page *page;
unsigned long haddr = address & HPAGE_PMD_MASK;
pte_t *pte;
if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
if (unlikely(anon_vma_prepare(vma)))
return VM_FAULT_OOM;
if (unlikely(khugepaged_enter(vma)))
return VM_FAULT_OOM;
page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
vma, haddr, numa_node_id(), 0);
if (unlikely(!page)) {
count_vm_event(THP_FAULT_FALLBACK);
goto out;
}
count_vm_event(THP_FAULT_ALLOC);
if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
put_page(page);
goto out;
}
return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
}
out:
/*
* Use __pte_alloc instead of pte_alloc_map, because we can't
	 * run pte_offset_map on the pmd, as a huge pmd could
* materialize from under us from a different thread.
*/
if (unlikely(__pte_alloc(mm, vma, pmd, address)))
return VM_FAULT_OOM;
	/* if a huge pmd materialized from under us just retry later */
if (unlikely(pmd_trans_huge(*pmd)))
return 0;
/*
* A regular pmd is established and it can't morph into a huge pmd
* from under us anymore at this point because we hold the mmap_sem
* read mode and khugepaged takes it in write mode. So now it's
* safe to run pte_offset_map().
*/
pte = pte_offset_map(pmd, address);
return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
struct vm_area_struct *vma)
{
struct page *src_page;
pmd_t pmd;
pgtable_t pgtable;
int ret;
ret = -ENOMEM;
pgtable = pte_alloc_one(dst_mm, addr);
if (unlikely(!pgtable))
goto out;
spin_lock(&dst_mm->page_table_lock);
spin_lock_nested(&src_mm->page_table_lock, SINGLE_DEPTH_NESTING);
ret = -EAGAIN;
pmd = *src_pmd;
if (unlikely(!pmd_trans_huge(pmd))) {
pte_free(dst_mm, pgtable);
goto out_unlock;
}
if (unlikely(pmd_trans_splitting(pmd))) {
/* split huge page running from under us */
spin_unlock(&src_mm->page_table_lock);
spin_unlock(&dst_mm->page_table_lock);
pte_free(dst_mm, pgtable);
wait_split_huge_page(vma->anon_vma, src_pmd); /* src_vma */
goto out;
}
src_page = pmd_page(pmd);
VM_BUG_ON(!PageHead(src_page));
get_page(src_page);
page_dup_rmap(src_page);
add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR);
pmdp_set_wrprotect(src_mm, addr, src_pmd);
pmd = pmd_mkold(pmd_wrprotect(pmd));
set_pmd_at(dst_mm, addr, dst_pmd, pmd);
prepare_pmd_huge_pte(pgtable, dst_mm);
dst_mm->nr_ptes++;
ret = 0;
out_unlock:
spin_unlock(&src_mm->page_table_lock);
spin_unlock(&dst_mm->page_table_lock);
out:
return ret;
}
/* no "address" argument so destroys page coloring of some arch */
pgtable_t get_pmd_huge_pte(struct mm_struct *mm)
{
pgtable_t pgtable;
assert_spin_locked(&mm->page_table_lock);
/* FIFO */
pgtable = mm->pmd_huge_pte;
if (list_empty(&pgtable->lru))
mm->pmd_huge_pte = NULL;
else {
mm->pmd_huge_pte = list_entry(pgtable->lru.next,
struct page, lru);
list_del(&pgtable->lru);
}
return pgtable;
}
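/*
 * COW fallback used by do_huge_pmd_wp_page() when a replacement huge
 * page cannot be allocated: allocate HPAGE_PMD_NR small pages, copy
 * the old contents into them, and rewrite the huge pmd as a regular
 * pagetable mapping the copies, reusing the pagetable deposited when
 * the huge pmd was created.
 */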
static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long address,
pmd_t *pmd, pmd_t orig_pmd,
struct page *page,
unsigned long haddr)
{
pgtable_t pgtable;
pmd_t _pmd;
int ret = 0, i;
struct page **pages;
pages = kmalloc(sizeof(struct page *) * HPAGE_PMD_NR,
GFP_KERNEL);
if (unlikely(!pages)) {
ret |= VM_FAULT_OOM;
goto out;
}
for (i = 0; i < HPAGE_PMD_NR; i++) {
pages[i] = alloc_page_vma_node(GFP_HIGHUSER_MOVABLE |
__GFP_OTHER_NODE,
vma, address, page_to_nid(page));
if (unlikely(!pages[i] ||
mem_cgroup_newpage_charge(pages[i], mm,
GFP_KERNEL))) {
if (pages[i])
put_page(pages[i]);
mem_cgroup_uncharge_start();
while (--i >= 0) {
mem_cgroup_uncharge_page(pages[i]);
put_page(pages[i]);
}
mem_cgroup_uncharge_end();
kfree(pages);
ret |= VM_FAULT_OOM;
goto out;
}
}
for (i = 0; i < HPAGE_PMD_NR; i++) {
copy_user_highpage(pages[i], page + i,
				    haddr + PAGE_SIZE*i, vma);
__SetPageUptodate(pages[i]);
cond_resched();
}
spin_lock(&mm->page_table_lock);
if (unlikely(!pmd_same(*pmd, orig_pmd)))
goto out_free_pages;
VM_BUG_ON(!PageHead(page));
pmdp_clear_flush_notify(vma, haddr, pmd);
/* leave pmd empty until pte is filled */
pgtable = get_pmd_huge_pte(mm);
pmd_populate(mm, &_pmd, pgtable);
for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
pte_t *pte, entry;
entry = mk_pte(pages[i], vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
page_add_new_anon_rmap(pages[i], vma, haddr);
pte = pte_offset_map(&_pmd, haddr);
VM_BUG_ON(!pte_none(*pte));
set_pte_at(mm, haddr, pte, entry);
pte_unmap(pte);
}
kfree(pages);
smp_wmb(); /* make pte visible before pmd */
pmd_populate(mm, pmd, pgtable);
page_remove_rmap(page);
spin_unlock(&mm->page_table_lock);
ret |= VM_FAULT_WRITE;
put_page(page);
out:
return ret;
out_free_pages:
spin_unlock(&mm->page_table_lock);
mem_cgroup_uncharge_start();
for (i = 0; i < HPAGE_PMD_NR; i++) {
mem_cgroup_uncharge_page(pages[i]);
put_page(pages[i]);
}
mem_cgroup_uncharge_end();
kfree(pages);
goto out;
}
int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
unsigned long address, pmd_t *pmd, pmd_t orig_pmd)
{
int ret = 0;
struct page *page, *new_page;
unsigned long haddr;
VM_BUG_ON(!vma->anon_vma);
spin_lock(&mm->page_table_lock);
if (unlikely(!pmd_same(*pmd, orig_pmd)))
goto out_unlock;
page = pmd_page(orig_pmd);
VM_BUG_ON(!PageCompound(page) || !PageHead(page));
haddr = address & HPAGE_PMD_MASK;
if (page_mapcount(page) == 1) {
pmd_t entry;
entry = pmd_mkyoung(orig_pmd);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
if (pmdp_set_access_flags(vma, haddr, pmd, entry, 1))
update_mmu_cache(vma, address, entry);
ret |= VM_FAULT_WRITE;
goto out_unlock;
}
get_page(page);
spin_unlock(&mm->page_table_lock);
if (transparent_hugepage_enabled(vma) &&
!transparent_hugepage_debug_cow())
new_page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
vma, haddr, numa_node_id(), 0);
else
new_page = NULL;
if (unlikely(!new_page)) {
count_vm_event(THP_FAULT_FALLBACK);
ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
pmd, orig_pmd, page, haddr);
put_page(page);
goto out;
}
count_vm_event(THP_FAULT_ALLOC);
if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
put_page(new_page);
put_page(page);
ret |= VM_FAULT_OOM;
goto out;
}
copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
__SetPageUptodate(new_page);
spin_lock(&mm->page_table_lock);
put_page(page);
if (unlikely(!pmd_same(*pmd, orig_pmd))) {
mem_cgroup_uncharge_page(new_page);
put_page(new_page);
} else {
pmd_t entry;
VM_BUG_ON(!PageHead(page));
entry = mk_pmd(new_page, vma->vm_page_prot);
entry = maybe_pmd_mkwrite(pmd_mkdirty(entry), vma);
entry = pmd_mkhuge(entry);
pmdp_clear_flush_notify(vma, haddr, pmd);
page_add_new_anon_rmap(new_page, vma, haddr);
set_pmd_at(mm, haddr, pmd, entry);
update_mmu_cache(vma, address, entry);
page_remove_rmap(page);
put_page(page);
ret |= VM_FAULT_WRITE;
}
out_unlock:
spin_unlock(&mm->page_table_lock);
out:
return ret;
}
struct page *follow_trans_huge_pmd(struct mm_struct *mm,
unsigned long addr,
pmd_t *pmd,
unsigned int flags)
{
struct page *page = NULL;
assert_spin_locked(&mm->page_table_lock);
if (flags & FOLL_WRITE && !pmd_write(*pmd))
goto out;
page = pmd_page(*pmd);
VM_BUG_ON(!PageHead(page));
if (flags & FOLL_TOUCH) {
pmd_t _pmd;
/*
* We should set the dirty bit only for FOLL_WRITE but
* for now the dirty bit in the pmd is meaningless.
		 * And if the dirty bit ever becomes meaningful and
		 * we only set it with FOLL_WRITE, an atomic
* set_bit will be required on the pmd to set the
* young bit, instead of the current set_pmd_at.
*/
_pmd = pmd_mkyoung(pmd_mkdirty(*pmd));
set_pmd_at(mm, addr & HPAGE_PMD_MASK, pmd, _pmd);
}
page += (addr & ~HPAGE_PMD_MASK) >> PAGE_SHIFT;
VM_BUG_ON(!PageCompound(page));
if (flags & FOLL_GET)
get_page_foll(page);
out:
return page;
}
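/*
 * Tear down a huge pmd during unmap: under page_table_lock clear the
 * pmd, drop the rmap and the anon page counters, free the deposited
 * pagetable and hand the huge page to the mmu_gather; if the pmd is
 * being split, wait for the split to finish instead.
 */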
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
pmd_t *pmd)
{
int ret = 0;
spin_lock(&tlb->mm->page_table_lock);
if (likely(pmd_trans_huge(*pmd))) {
if (unlikely(pmd_trans_splitting(*pmd))) {
spin_unlock(&tlb->mm->page_table_lock);
wait_split_huge_page(vma->anon_vma,
pmd);
} else {
struct page *page;
pgtable_t pgtable;
pgtable = get_pmd_huge_pte(tlb->mm);
page = pmd_page(*pmd);
pmd_clear(pmd);
page_remove_rmap(page);
VM_BUG_ON(page_mapcount(page) < 0);
add_mm_counter(tlb->mm, MM_ANONPAGES, -HPAGE_PMD_NR);
VM_BUG_ON(!PageHead(page));
tlb->mm->nr_ptes--;
spin_unlock(&tlb->mm->page_table_lock);
tlb_remove_page(tlb, page);
pte_free(tlb->mm, pgtable);
ret = 1;
}
} else
spin_unlock(&tlb->mm->page_table_lock);
return ret;
}
int mincore_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, unsigned long end,
unsigned char *vec)
{
int ret = 0;
spin_lock(&vma->vm_mm->page_table_lock);
if (likely(pmd_trans_huge(*pmd))) {
ret = !pmd_trans_splitting(*pmd);
spin_unlock(&vma->vm_mm->page_table_lock);
if (unlikely(!ret))
wait_split_huge_page(vma->anon_vma, pmd);
else {
/*
* All logical pages in the range are present
* if backed by a huge page.
*/
memset(vec, 1, (end - addr) >> PAGE_SHIFT);
}
} else
spin_unlock(&vma->vm_mm->page_table_lock);
return ret;
}
int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot)
{
struct mm_struct *mm = vma->vm_mm;
int ret = 0;
spin_lock(&mm->page_table_lock);
if (likely(pmd_trans_huge(*pmd))) {
if (unlikely(pmd_trans_splitting(*pmd))) {
spin_unlock(&mm->page_table_lock);
wait_split_huge_page(vma->anon_vma, pmd);
} else {
pmd_t entry;
entry = pmdp_get_and_clear(mm, addr, pmd);
entry = pmd_modify(entry, newprot);
set_pmd_at(mm, addr, pmd, entry);
spin_unlock(&vma->vm_mm->page_table_lock);
flush_tlb_range(vma, addr, addr + HPAGE_PMD_SIZE);
ret = 1;
}
} else
spin_unlock(&vma->vm_mm->page_table_lock);
return ret;
}
pmd_t *page_check_address_pmd(struct page *page,
struct mm_struct *mm,
unsigned long address,
enum page_check_address_pmd_flag flag)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd, *ret = NULL;
if (address & ~HPAGE_PMD_MASK)
goto out;
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
goto out;
pud = pud_offset(pgd, address);
if (!pud_present(*pud))
goto out;
pmd = pmd_offset(pud, address);
if (pmd_none(*pmd))
goto out;
if (pmd_page(*pmd) != page)
goto out;
/*
* split_vma() may create temporary aliased mappings. There is
* no risk as long as all huge pmd are found and have their
* splitting bit set before __split_huge_page_refcount
* runs. Finding the same huge pmd more than once during the
* same rmap walk is not a problem.
*/
if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
pmd_trans_splitting(*pmd))
goto out;
if (pmd_trans_huge(*pmd)) {
VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
!pmd_trans_splitting(*pmd));
ret = pmd;
}
out:
return ret;
}
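/*
 * split_huge_page() works in three phases, all serialized by the
 * anon_vma lock: __split_huge_page_splitting() marks every pmd mapping
 * the page as splitting so no new huge references can appear,
 * __split_huge_page_refcount() distributes refcounts, mapcounts and
 * page flags from the head page to the tail pages, and
 * __split_huge_page_map() rewrites each huge pmd as a regular
 * pagetable pointing at the now-independent small pages.
 */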
static int __split_huge_page_splitting(struct page *page,
struct vm_area_struct *vma,
unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
pmd_t *pmd;
int ret = 0;
spin_lock(&mm->page_table_lock);
pmd = page_check_address_pmd(page, mm, address,
PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG);
if (pmd) {
/*
* We can't temporarily set the pmd to null in order
* to split it, the pmd must remain marked huge at all
* times or the VM won't take the pmd_trans_huge paths
* and it won't wait on the anon_vma->root->mutex to
* serialize against split_huge_page*.
*/
pmdp_splitting_flush_notify(vma, address, pmd);
ret = 1;
}
spin_unlock(&mm->page_table_lock);
return ret;
}
static void __split_huge_page_refcount(struct page *page)
{
int i;
unsigned long head_index = page->index;
struct zone *zone = page_zone(page);
int zonestat;
int tail_count = 0;
/* prevent PageLRU to go away from under us, and freeze lru stats */
spin_lock_irq(&zone->lru_lock);
compound_lock(page);
for (i = 1; i < HPAGE_PMD_NR; i++) {
struct page *page_tail = page + i;
/* tail_page->_mapcount cannot change */
BUG_ON(page_mapcount(page_tail) < 0);
tail_count += page_mapcount(page_tail);
/* check for overflow */
BUG_ON(tail_count < 0);
BUG_ON(atomic_read(&page_tail->_count) != 0);
/*
* tail_page->_count is zero and not changing from
* under us. But get_page_unless_zero() may be running
* from under us on the tail_page. If we used
* atomic_set() below instead of atomic_add(), we
* would then run atomic_set() concurrently with
* get_page_unless_zero(), and atomic_set() is
* implemented in C not using locked ops. spin_unlock
		 * on x86 sometimes uses locked ops because of PPro
* errata 66, 92, so unless somebody can guarantee
* atomic_set() here would be safe on all archs (and
* not only on x86), it's safer to use atomic_add().
*/
atomic_add(page_mapcount(page) + page_mapcount(page_tail) + 1,
&page_tail->_count);
/* after clearing PageTail the gup refcount can be released */
smp_mb();
/*
* retain hwpoison flag of the poisoned tail page:
* fix for the unsuitable process killed on Guest Machine(KVM)
* by the memory-failure.
*/
page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
page_tail->flags |= (page->flags &
((1L << PG_referenced) |
(1L << PG_swapbacked) |
(1L << PG_mlocked) |
(1L << PG_uptodate)));
page_tail->flags |= (1L << PG_dirty);
/* clear PageTail before overwriting first_page */
smp_wmb();
/*
* __split_huge_page_splitting() already set the
* splitting bit in all pmd that could map this
* hugepage, that will ensure no CPU can alter the
* mapcount on the head page. The mapcount is only
* accounted in the head page and it has to be
* transferred to all tail pages in the below code. So
		 * for this code to be safe, the mapcount can't change
		 * during the split. But that doesn't mean userland can't
* keep changing and reading the page contents while
* we transfer the mapcount, so the pmd splitting
* status is achieved setting a reserved bit in the
* pmd, not by clearing the present bit.
*/
page_tail->_mapcount = page->_mapcount;
BUG_ON(page_tail->mapping);
page_tail->mapping = page->mapping;
page_tail->index = ++head_index;
BUG_ON(!PageAnon(page_tail));
BUG_ON(!PageUptodate(page_tail));
BUG_ON(!PageDirty(page_tail));
BUG_ON(!PageSwapBacked(page_tail));
mem_cgroup_split_huge_fixup(page, page_tail);
lru_add_page_tail(zone, page, page_tail);
}
atomic_sub(tail_count, &page->_count);
BUG_ON(atomic_read(&page->_count) <= 0);
__dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
__mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
/*
* A hugepage counts for HPAGE_PMD_NR pages on the LRU statistics,
* so adjust those appropriately if this page is on the LRU.
*/
if (PageLRU(page)) {
zonestat = NR_LRU_BASE + page_lru(page);
__mod_zone_page_state(zone, zonestat, -(HPAGE_PMD_NR-1));
}
ClearPageCompound(page);
compound_unlock(page);
spin_unlock_irq(&zone->lru_lock);
for (i = 1; i < HPAGE_PMD_NR; i++) {
struct page *page_tail = page + i;
BUG_ON(page_count(page_tail) <= 0);
/*
* Tail pages may be freed if there wasn't any mapping
* like if add_to_swap() is running on a lru page that
* had its mapping zapped. And freeing these pages
* requires taking the lru_lock so we do the put_page
* of the tail pages after the split is complete.
*/
put_page(page_tail);
}
/*
* Only the head page (now become a regular page) is required
* to be pinned by the caller.
*/
BUG_ON(page_count(page) <= 0);
}
static int __split_huge_page_map(struct page *page,
struct vm_area_struct *vma,
unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
pmd_t *pmd, _pmd;
int ret = 0, i;
pgtable_t pgtable;
unsigned long haddr;
spin_lock(&mm->page_table_lock);
pmd = page_check_address_pmd(page, mm, address,
PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG);
if (pmd) {
pgtable = get_pmd_huge_pte(mm);
pmd_populate(mm, &_pmd, pgtable);
for (i = 0, haddr = address; i < HPAGE_PMD_NR;
i++, haddr += PAGE_SIZE) {
pte_t *pte, entry;
BUG_ON(PageCompound(page+i));
entry = mk_pte(page + i, vma->vm_page_prot);
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
if (!pmd_write(*pmd))
entry = pte_wrprotect(entry);
else
BUG_ON(page_mapcount(page) != 1);
if (!pmd_young(*pmd))
entry = pte_mkold(entry);
pte = pte_offset_map(&_pmd, haddr);
BUG_ON(!pte_none(*pte));
set_pte_at(mm, haddr, pte, entry);
pte_unmap(pte);
}
smp_wmb(); /* make pte visible before pmd */
/*
* Up to this point the pmd is present and huge and
		 * userland has full access to the hugepage
* during the split (which happens in place). If we
* overwrite the pmd with the not-huge version
* pointing to the pte here (which of course we could
* if all CPUs were bug free), userland could trigger
* a small page size TLB miss on the small sized TLB
* while the hugepage TLB entry is still established
		 * in the huge TLB. Some CPUs don't like that. See
		 * http://support.amd.com/us/Processor_TechDocs/41322.pdf,
		 * Erratum 383 on page 93. Intel should be safe but it
		 * also warns that it's only safe if the permission
		 * and cache attributes of the two entries loaded in
		 * the two TLBs are identical (which should be the case
		 * here). But it is generally safer to never allow
* small and huge TLB entries for the same virtual
* address to be loaded simultaneously. So instead of
* doing "pmd_populate(); flush_tlb_range();" we first
* mark the current pmd notpresent (atomically because
* here the pmd_trans_huge and pmd_trans_splitting
* must remain set at all times on the pmd until the
* split is complete for this pmd), then we flush the
* SMP TLB and finally we write the non-huge version
* of the pmd entry with pmd_populate.
*/
set_pmd_at(mm, address, pmd, pmd_mknotpresent(*pmd));
flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
pmd_populate(mm, pmd, pgtable);
ret = 1;
}
spin_unlock(&mm->page_table_lock);
return ret;
}
/* must be called with anon_vma->root->mutex held */
static void __split_huge_page(struct page *page,
struct anon_vma *anon_vma)
{
int mapcount, mapcount2;
struct anon_vma_chain *avc;
BUG_ON(!PageHead(page));
BUG_ON(PageTail(page));
mapcount = 0;
list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
struct vm_area_struct *vma = avc->vma;
unsigned long addr = vma_address(page, vma);
BUG_ON(is_vma_temporary_stack(vma));
if (addr == -EFAULT)
continue;
mapcount += __split_huge_page_splitting(page, vma, addr);
}
/*
* It is critical that new vmas are added to the tail of the
	 * anon_vma list. This guarantees that if copy_huge_pmd() runs
* and establishes a child pmd before
* __split_huge_page_splitting() freezes the parent pmd (so if
* we fail to prevent copy_huge_pmd() from running until the
* whole __split_huge_page() is complete), we will still see
* the newly established pmd of the child later during the
* walk, to be able to set it as pmd_trans_splitting too.
*/
if (mapcount != page_mapcount(page))
printk(KERN_ERR "mapcount %d page_mapcount %d\n",
mapcount, page_mapcount(page));
BUG_ON(mapcount != page_mapcount(page));
__split_huge_page_refcount(page);
mapcount2 = 0;
list_for_each_entry(avc, &anon_vma->head, same_anon_vma) {
struct vm_area_struct *vma = avc->vma;
unsigned long addr = vma_address(page, vma);
BUG_ON(is_vma_temporary_stack(vma));
if (addr == -EFAULT)
continue;
mapcount2 += __split_huge_page_map(page, vma, addr);
}
if (mapcount != mapcount2)
printk(KERN_ERR "mapcount %d mapcount2 %d page_mapcount %d\n",
mapcount, mapcount2, page_mapcount(page));
BUG_ON(mapcount != mapcount2);
}
int split_huge_page(struct page *page)
{
struct anon_vma *anon_vma;
int ret = 1;
BUG_ON(!PageAnon(page));
anon_vma = page_lock_anon_vma(page);
if (!anon_vma)
goto out;
ret = 0;
if (!PageCompound(page))
goto out_unlock;
BUG_ON(!PageSwapBacked(page));
__split_huge_page(page, anon_vma);
count_vm_event(THP_SPLIT);
BUG_ON(PageCompound(page));
out_unlock:
page_unlock_anon_vma(anon_vma);
out:
return ret;
}
#define VM_NO_THP (VM_SPECIAL|VM_INSERTPAGE|VM_MIXEDMAP|VM_SAO| \
VM_HUGETLB|VM_SHARED|VM_MAYSHARE)
int hugepage_madvise(struct vm_area_struct *vma,
unsigned long *vm_flags, int advice)
{
switch (advice) {
case MADV_HUGEPAGE:
/*
* Be somewhat over-protective like KSM for now!
*/
if (*vm_flags & (VM_HUGEPAGE | VM_NO_THP))
return -EINVAL;
*vm_flags &= ~VM_NOHUGEPAGE;
*vm_flags |= VM_HUGEPAGE;
/*
* If the vma become good for khugepaged to scan,
* register it here without waiting a page fault that
* may not happen any time soon.
*/
if (unlikely(khugepaged_enter_vma_merge(vma)))
return -ENOMEM;
break;
case MADV_NOHUGEPAGE:
/*
* Be somewhat over-protective like KSM for now!
*/
if (*vm_flags & (VM_NOHUGEPAGE | VM_NO_THP))
return -EINVAL;
*vm_flags &= ~VM_HUGEPAGE;
*vm_flags |= VM_NOHUGEPAGE;
/*
* Setting VM_NOHUGEPAGE will prevent khugepaged from scanning
* this vma even if we leave the mm registered in khugepaged if
* it got registered before VM_NOHUGEPAGE was set.
*/
break;
}
return 0;
}
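/*
 * hugepage_madvise() above is reached through madvise(2). Illustrative
 * userspace usage (not part of this file): align the region to
 * HPAGE_PMD_SIZE (2MB on x86_64), then opt it in or out:
 *
 *	posix_memalign(&p, 2UL << 20, len);
 *	madvise(p, len, MADV_HUGEPAGE);
 *	madvise(p, len, MADV_NOHUGEPAGE);
 */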
static int __init khugepaged_slab_init(void)
{
mm_slot_cache = kmem_cache_create("khugepaged_mm_slot",
sizeof(struct mm_slot),
__alignof__(struct mm_slot), 0, NULL);
if (!mm_slot_cache)
return -ENOMEM;
return 0;
}
static void __init khugepaged_slab_free(void)
{
kmem_cache_destroy(mm_slot_cache);
mm_slot_cache = NULL;
}
static inline struct mm_slot *alloc_mm_slot(void)
{
if (!mm_slot_cache) /* initialization failed */
return NULL;
return kmem_cache_zalloc(mm_slot_cache, GFP_KERNEL);
}
static inline void free_mm_slot(struct mm_slot *mm_slot)
{
kmem_cache_free(mm_slot_cache, mm_slot);
}
static int __init mm_slots_hash_init(void)
{
mm_slots_hash = kzalloc(MM_SLOTS_HASH_HEADS * sizeof(struct hlist_head),
GFP_KERNEL);
if (!mm_slots_hash)
return -ENOMEM;
return 0;
}
#if 0
static void __init mm_slots_hash_free(void)
{
kfree(mm_slots_hash);
mm_slots_hash = NULL;
}
#endif
static struct mm_slot *get_mm_slot(struct mm_struct *mm)
{
struct mm_slot *mm_slot;
struct hlist_head *bucket;
struct hlist_node *node;
bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
% MM_SLOTS_HASH_HEADS];
hlist_for_each_entry(mm_slot, node, bucket, hash) {
if (mm == mm_slot->mm)
return mm_slot;
}
return NULL;
}
static void insert_to_mm_slots_hash(struct mm_struct *mm,
struct mm_slot *mm_slot)
{
struct hlist_head *bucket;
bucket = &mm_slots_hash[((unsigned long)mm / sizeof(struct mm_struct))
% MM_SLOTS_HASH_HEADS];
mm_slot->mm = mm;
hlist_add_head(&mm_slot->hash, bucket);
}
static inline int khugepaged_test_exit(struct mm_struct *mm)
{
return atomic_read(&mm->mm_users) == 0;
}
int __khugepaged_enter(struct mm_struct *mm)
{
struct mm_slot *mm_slot;
int wakeup;
mm_slot = alloc_mm_slot();
if (!mm_slot)
return -ENOMEM;
/* __khugepaged_exit() must not run from under us */
VM_BUG_ON(khugepaged_test_exit(mm));
if (unlikely(test_and_set_bit(MMF_VM_HUGEPAGE, &mm->flags))) {
free_mm_slot(mm_slot);
return 0;
}
spin_lock(&khugepaged_mm_lock);
insert_to_mm_slots_hash(mm, mm_slot);
/*
* Insert just behind the scanning cursor, to let the area settle
* down a little.
*/
wakeup = list_empty(&khugepaged_scan.mm_head);
list_add_tail(&mm_slot->mm_node, &khugepaged_scan.mm_head);
spin_unlock(&khugepaged_mm_lock);
atomic_inc(&mm->mm_count);
if (wakeup)
wake_up_interruptible(&khugepaged_wait);
return 0;
}
int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
{
unsigned long hstart, hend;
if (!vma->anon_vma)
/*
* Not yet faulted in so we will register later in the
* page fault if needed.
*/
return 0;
if (vma->vm_ops)
/* khugepaged not yet working on file or special mappings */
return 0;
/*
	 * If is_pfn_mapping() is true, is_linear_pfn_mapping() must be
* true too, verify it here.
*/
VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
hend = vma->vm_end & HPAGE_PMD_MASK;
if (hstart < hend)
return khugepaged_enter(vma);
return 0;
}
void __khugepaged_exit(struct mm_struct *mm)
{
struct mm_slot *mm_slot;
int free = 0;
spin_lock(&khugepaged_mm_lock);
mm_slot = get_mm_slot(mm);
if (mm_slot && khugepaged_scan.mm_slot != mm_slot) {
hlist_del(&mm_slot->hash);
list_del(&mm_slot->mm_node);
free = 1;
}
if (free) {
spin_unlock(&khugepaged_mm_lock);
clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
free_mm_slot(mm_slot);
mmdrop(mm);
} else if (mm_slot) {
spin_unlock(&khugepaged_mm_lock);
/*
* This is required to serialize against
* khugepaged_test_exit() (which is guaranteed to run
* under mmap sem read mode). Stop here (after we
* return all pagetables will be destroyed) until
* khugepaged has finished working on the pagetables
* under the mmap_sem.
*/
down_write(&mm->mmap_sem);
up_write(&mm->mmap_sem);
} else
spin_unlock(&khugepaged_mm_lock);
}
static void release_pte_page(struct page *page)
{
/* 0 stands for page_is_file_cache(page) == false */
dec_zone_page_state(page, NR_ISOLATED_ANON + 0);
unlock_page(page);
putback_lru_page(page);
}
static void release_pte_pages(pte_t *pte, pte_t *_pte)
{
while (--_pte >= pte) {
pte_t pteval = *_pte;
if (!pte_none(pteval))
release_pte_page(pte_page(pteval));
}
}
static void release_all_pte_pages(pte_t *pte)
{
release_pte_pages(pte, pte + HPAGE_PMD_NR);
}
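/*
 * Walk the HPAGE_PMD_NR ptes of the candidate range: every present pte
 * must map a writable, anonymous, unpinned page, and each page is
 * locked and isolated from the LRU. Only report success if at least
 * one pte or page was recently referenced; otherwise put everything
 * back.
 */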
static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
unsigned long address,
pte_t *pte)
{
struct page *page;
pte_t *_pte;
int referenced = 0, isolated = 0, none = 0;
for (_pte = pte; _pte < pte+HPAGE_PMD_NR;
_pte++, address += PAGE_SIZE) {
pte_t pteval = *_pte;
if (pte_none(pteval)) {
if (++none <= khugepaged_max_ptes_none)
continue;
else {
release_pte_pages(pte, _pte);
goto out;
}
}
if (!pte_present(pteval) || !pte_write(pteval)) {
release_pte_pages(pte, _pte);
goto out;
}
page = vm_normal_page(vma, address, pteval);
if (unlikely(!page)) {
release_pte_pages(pte, _pte);
goto out;
}
VM_BUG_ON(PageCompound(page));
BUG_ON(!PageAnon(page));
VM_BUG_ON(!PageSwapBacked(page));
/* cannot use mapcount: can't collapse if there's a gup pin */
if (page_count(page) != 1) {
release_pte_pages(pte, _pte);
goto out;
}
/*
* We can do it before isolate_lru_page because the
* page can't be freed from under us. NOTE: PG_lock
* is needed to serialize against split_huge_page
* when invoked from the VM.
*/
if (!trylock_page(page)) {
release_pte_pages(pte, _pte);
goto out;
}
/*
		 * Isolate the page to avoid collapsing a hugepage
* currently in use by the VM.
*/
if (isolate_lru_page(page)) {
unlock_page(page);
release_pte_pages(pte, _pte);
goto out;
}
/* 0 stands for page_is_file_cache(page) == false */
inc_zone_page_state(page, NR_ISOLATED_ANON + 0);
VM_BUG_ON(!PageLocked(page));
VM_BUG_ON(PageLRU(page));
/* If there is no mapped pte young don't collapse the page */
if (pte_young(pteval) || PageReferenced(page) ||
mmu_notifier_test_young(vma->vm_mm, address))
referenced = 1;
}
if (unlikely(!referenced))
release_all_pte_pages(pte);
else
isolated = 1;
out:
return isolated;
}
static void __collapse_huge_page_copy(pte_t *pte, struct page *page,
struct vm_area_struct *vma,
unsigned long address,
spinlock_t *ptl)
{
pte_t *_pte;
for (_pte = pte; _pte < pte+HPAGE_PMD_NR; _pte++) {
pte_t pteval = *_pte;
struct page *src_page;
if (pte_none(pteval)) {
clear_user_highpage(page, address);
add_mm_counter(vma->vm_mm, MM_ANONPAGES, 1);
} else {
src_page = pte_page(pteval);
copy_user_highpage(page, src_page, address, vma);
VM_BUG_ON(page_mapcount(src_page) != 1);
VM_BUG_ON(page_count(src_page) != 2);
release_pte_page(src_page);
/*
* ptl mostly unnecessary, but preempt has to
* be disabled to update the per-cpu stats
* inside page_remove_rmap().
*/
spin_lock(ptl);
/*
* paravirt calls inside pte_clear here are
* superfluous.
*/
pte_clear(vma->vm_mm, address, _pte);
page_remove_rmap(src_page);
spin_unlock(ptl);
free_page_and_swap_cache(src_page);
}
address += PAGE_SIZE;
page++;
}
}
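/*
 * Do the actual collapse for one aligned range: allocate (or reuse)
 * the huge page, retake mmap_sem in write mode and revalidate the vma,
 * clear and flush the pmd so gup_fast cannot race, isolate and copy
 * the small pages into the huge page, and finally install the huge pmd
 * together with its deposited pagetable.
 */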
static void collapse_huge_page(struct mm_struct *mm,
unsigned long address,
struct page **hpage,
struct vm_area_struct *vma,
int node)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd, _pmd;
pte_t *pte;
pgtable_t pgtable;
struct page *new_page;
spinlock_t *ptl;
int isolated;
unsigned long hstart, hend;
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
#ifndef CONFIG_NUMA
up_read(&mm->mmap_sem);
VM_BUG_ON(!*hpage);
new_page = *hpage;
#else
VM_BUG_ON(*hpage);
/*
* Allocate the page while the vma is still valid and under
* the mmap_sem read mode so there is no memory allocation
* later when we take the mmap_sem in write mode. This is more
* friendly behavior (OTOH it may actually hide bugs) to
* filesystems in userland with daemons allocating memory in
* the userland I/O paths. Allocating memory with the
	 * mmap_sem in read mode is a good idea also to allow greater
* scalability.
*/
new_page = alloc_hugepage_vma(khugepaged_defrag(), vma, address,
node, __GFP_OTHER_NODE);
/*
* After allocating the hugepage, release the mmap_sem read lock in
* preparation for taking it in write mode.
*/
up_read(&mm->mmap_sem);
if (unlikely(!new_page)) {
count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
*hpage = ERR_PTR(-ENOMEM);
return;
}
#endif
count_vm_event(THP_COLLAPSE_ALLOC);
if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
#ifdef CONFIG_NUMA
put_page(new_page);
#endif
return;
}
/*
* Prevent all access to pagetables with the exception of
	 * gup_fast later handled by the ptep_clear_flush and the VM
* handled by the anon_vma lock + PG_lock.
*/
down_write(&mm->mmap_sem);
if (unlikely(khugepaged_test_exit(mm)))
goto out;
vma = find_vma(mm, address);
hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
hend = vma->vm_end & HPAGE_PMD_MASK;
if (address < hstart || address + HPAGE_PMD_SIZE > hend)
goto out;
if ((!(vma->vm_flags & VM_HUGEPAGE) && !khugepaged_always()) ||
(vma->vm_flags & VM_NOHUGEPAGE))
goto out;
if (!vma->anon_vma || vma->vm_ops)
goto out;
if (is_vma_temporary_stack(vma))
goto out;
/*
	 * If is_pfn_mapping() is true, is_linear_pfn_mapping() must be
* true too, verify it here.
*/
VM_BUG_ON(is_linear_pfn_mapping(vma) || vma->vm_flags & VM_NO_THP);
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
goto out;
pud = pud_offset(pgd, address);
if (!pud_present(*pud))
goto out;
pmd = pmd_offset(pud, address);
/* pmd can't go away or become huge under us */
if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
goto out;
anon_vma_lock(vma->anon_vma);
pte = pte_offset_map(pmd, address);
ptl = pte_lockptr(mm, pmd);
spin_lock(&mm->page_table_lock); /* probably unnecessary */
/*
* After this gup_fast can't run anymore. This also removes
* any huge TLB entry from the CPU so we won't allow
* huge and small TLB entries for the same virtual address
* to avoid the risk of CPU bugs in that area.
*/
_pmd = pmdp_clear_flush_notify(vma, address, pmd);
spin_unlock(&mm->page_table_lock);
spin_lock(ptl);
isolated = __collapse_huge_page_isolate(vma, address, pte);
spin_unlock(ptl);
if (unlikely(!isolated)) {
pte_unmap(pte);
spin_lock(&mm->page_table_lock);
BUG_ON(!pmd_none(*pmd));
/*
* We can only use set_pmd_at when establishing
* hugepmds and never for establishing regular pmds that
* points to regular pagetables. Use pmd_populate for that
*/
pmd_populate(mm, pmd, pmd_pgtable(_pmd));
spin_unlock(&mm->page_table_lock);
anon_vma_unlock(vma->anon_vma);
goto out;
}
/*
* All pages are isolated and locked so anon_vma rmap
* can't run anymore.
*/
anon_vma_unlock(vma->anon_vma);
__collapse_huge_page_copy(pte, new_page, vma, address, ptl);
pte_unmap(pte);
__SetPageUptodate(new_page);
pgtable = pmd_pgtable(_pmd);
VM_BUG_ON(page_count(pgtable) != 1);
VM_BUG_ON(page_mapcount(pgtable) != 0);
_pmd = mk_pmd(new_page, vma->vm_page_prot);
_pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
_pmd = pmd_mkhuge(_pmd);
/*
* spin_lock() below is not the equivalent of smp_wmb(), so
* this is needed to avoid the copy_huge_page writes to become
* visible after the set_pmd_at() write.
*/
smp_wmb();
spin_lock(&mm->page_table_lock);
BUG_ON(!pmd_none(*pmd));
page_add_new_anon_rmap(new_page, vma, address);
set_pmd_at(mm, address, pmd, _pmd);
	update_mmu_cache(vma, address, _pmd);
prepare_pmd_huge_pte(pgtable, mm);
spin_unlock(&mm->page_table_lock);
#ifndef CONFIG_NUMA
*hpage = NULL;
#endif
khugepaged_pages_collapsed++;
out_up_write:
up_write(&mm->mmap_sem);
return;
out:
mem_cgroup_uncharge_page(new_page);
#ifdef CONFIG_NUMA
put_page(new_page);
#endif
goto out_up_write;
}
static int khugepaged_scan_pmd(struct mm_struct *mm,
struct vm_area_struct *vma,
unsigned long address,
struct page **hpage)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte, *_pte;
int ret = 0, referenced = 0, none = 0;
struct page *page;
unsigned long _address;
spinlock_t *ptl;
int node = -1;
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
goto out;
pud = pud_offset(pgd, address);
if (!pud_present(*pud))
goto out;
pmd = pmd_offset(pud, address);
if (!pmd_present(*pmd) || pmd_trans_huge(*pmd))
goto out;
pte = pte_offset_map_lock(mm, pmd, address, &ptl);
for (_address = address, _pte = pte; _pte < pte+HPAGE_PMD_NR;
_pte++, _address += PAGE_SIZE) {
pte_t pteval = *_pte;
if (pte_none(pteval)) {
if (++none <= khugepaged_max_ptes_none)
continue;
else
goto out_unmap;
}
if (!pte_present(pteval) || !pte_write(pteval))
goto out_unmap;
page = vm_normal_page(vma, _address, pteval);
if (unlikely(!page))
goto out_unmap;
/*
		 * Choose the node of the first page. This could
* be more sophisticated and look at more pages,
* but isn't for now.
*/
if (node == -1)
node = page_to_nid(page);
VM_BUG_ON(PageCompound(page));
if (!PageLRU(page) || PageLocked(page) || !PageAnon(page))
goto out_unmap;
/* cannot use mapcount: can't collapse if there's a gup pin */
if (page_count(page) != 1)
goto out_unmap;
if (pte_young(pteval) || PageReferenced(page) ||
mmu_notifier_test_young(vma->vm_mm, address))
referenced = 1;
}
if (referenced)
ret = 1;
out_unmap:
pte_unmap_unlock(pte, ptl);
if (ret)
/* collapse_huge_page will return with the mmap_sem released */
collapse_huge_page(mm, address, hpage, vma, node);
out:
return ret;
}
static void collect_mm_slot(struct mm_slot *mm_slot)
{
struct mm_struct *mm = mm_slot->mm;
VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
if (khugepaged_test_exit(mm)) {
/* free mm_slot */
hlist_del(&mm_slot->hash);
list_del(&mm_slot->mm_node);
/*
* Not strictly needed because the mm exited already.
*
* clear_bit(MMF_VM_HUGEPAGE, &mm->flags);
*/
/* khugepaged_mm_lock actually not necessary for the below */
free_mm_slot(mm_slot);
mmdrop(mm);
}
}
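/*
 * Resume from the persistent scan cursor (khugepaged_scan.mm_slot and
 * .address), walk suitable vmas of that mm calling khugepaged_scan_pmd()
 * on each hugepage-aligned range until roughly @pages ptes have been
 * scanned, and advance the cursor to the next mm (collecting the slot)
 * once this mm is exhausted or exiting. Returns the progress made.
 */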
static unsigned int khugepaged_scan_mm_slot(unsigned int pages,
struct page **hpage)
{
struct mm_slot *mm_slot;
struct mm_struct *mm;
struct vm_area_struct *vma;
int progress = 0;
VM_BUG_ON(!pages);
VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&khugepaged_mm_lock));
if (khugepaged_scan.mm_slot)
mm_slot = khugepaged_scan.mm_slot;
else {
mm_slot = list_entry(khugepaged_scan.mm_head.next,
struct mm_slot, mm_node);
khugepaged_scan.address = 0;
khugepaged_scan.mm_slot = mm_slot;
}
spin_unlock(&khugepaged_mm_lock);
mm = mm_slot->mm;
down_read(&mm->mmap_sem);
if (unlikely(khugepaged_test_exit(mm)))
vma = NULL;
else
vma = find_vma(mm, khugepaged_scan.address);
progress++;
for (; vma; vma = vma->vm_next) {
unsigned long hstart, hend;
cond_resched();
if (unlikely(khugepaged_test_exit(mm))) {
progress++;
break;
}
if ((!(vma->vm_flags & VM_HUGEPAGE) &&
!khugepaged_always()) ||
(vma->vm_flags & VM_NOHUGEPAGE)) {
skip:
progress++;
continue;
}
if (!vma->anon_vma || vma->vm_ops)
goto skip;
if (is_vma_temporary_stack(vma))
goto skip;
/*
		 * If is_pfn_mapping() is true, is_linear_pfn_mapping()
		 * must be true too; verify it here.
*/
VM_BUG_ON(is_linear_pfn_mapping(vma) ||
vma->vm_flags & VM_NO_THP);
hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
hend = vma->vm_end & HPAGE_PMD_MASK;
if (hstart >= hend)
goto skip;
if (khugepaged_scan.address > hend)
goto skip;
if (khugepaged_scan.address < hstart)
khugepaged_scan.address = hstart;
VM_BUG_ON(khugepaged_scan.address & ~HPAGE_PMD_MASK);
while (khugepaged_scan.address < hend) {
int ret;
cond_resched();
if (unlikely(khugepaged_test_exit(mm)))
goto breakouterloop;
VM_BUG_ON(khugepaged_scan.address < hstart ||
khugepaged_scan.address + HPAGE_PMD_SIZE >
hend);
ret = khugepaged_scan_pmd(mm, vma,
khugepaged_scan.address,
hpage);
/* move to next address */
khugepaged_scan.address += HPAGE_PMD_SIZE;
progress += HPAGE_PMD_NR;
if (ret)
/* we released mmap_sem so break loop */
goto breakouterloop_mmap_sem;
if (progress >= pages)
goto breakouterloop;
}
}
breakouterloop:
up_read(&mm->mmap_sem); /* exit_mmap will destroy ptes after this */
breakouterloop_mmap_sem:
spin_lock(&khugepaged_mm_lock);
VM_BUG_ON(khugepaged_scan.mm_slot != mm_slot);
/*
* Release the current mm_slot if this mm is about to die, or
* if we scanned all vmas of this mm.
*/
if (khugepaged_test_exit(mm) || !vma) {
/*
* Make sure that if mm_users is reaching zero while
* khugepaged runs here, khugepaged_exit will find
* mm_slot not pointing to the exiting mm.
*/
if (mm_slot->mm_node.next != &khugepaged_scan.mm_head) {
khugepaged_scan.mm_slot = list_entry(
mm_slot->mm_node.next,
struct mm_slot, mm_node);
khugepaged_scan.address = 0;
} else {
khugepaged_scan.mm_slot = NULL;
khugepaged_full_scans++;
}
collect_mm_slot(mm_slot);
}
return progress;
}
static int khugepaged_has_work(void)
{
return !list_empty(&khugepaged_scan.mm_head) &&
khugepaged_enabled();
}
static int khugepaged_wait_event(void)
{
return !list_empty(&khugepaged_scan.mm_head) ||
!khugepaged_enabled();
}
static void khugepaged_do_scan(struct page **hpage)
{
unsigned int progress = 0, pass_through_head = 0;
unsigned int pages = khugepaged_pages_to_scan;
barrier(); /* write khugepaged_pages_to_scan to local stack */
while (progress < pages) {
cond_resched();
#ifndef CONFIG_NUMA
if (!*hpage) {
*hpage = alloc_hugepage(khugepaged_defrag());
if (unlikely(!*hpage)) {
count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
break;
}
count_vm_event(THP_COLLAPSE_ALLOC);
}
#else
if (IS_ERR(*hpage))
break;
#endif
if (unlikely(kthread_should_stop() || freezing(current)))
break;
spin_lock(&khugepaged_mm_lock);
if (!khugepaged_scan.mm_slot)
pass_through_head++;
if (khugepaged_has_work() &&
pass_through_head < 2)
progress += khugepaged_scan_mm_slot(pages - progress,
hpage);
else
progress = pages;
spin_unlock(&khugepaged_mm_lock);
}
}
static void khugepaged_alloc_sleep(void)
{
DEFINE_WAIT(wait);
add_wait_queue(&khugepaged_wait, &wait);
schedule_timeout_interruptible(
msecs_to_jiffies(
khugepaged_alloc_sleep_millisecs));
remove_wait_queue(&khugepaged_wait, &wait);
}
#ifndef CONFIG_NUMA
static struct page *khugepaged_alloc_hugepage(void)
{
struct page *hpage;
do {
hpage = alloc_hugepage(khugepaged_defrag());
if (!hpage) {
count_vm_event(THP_COLLAPSE_ALLOC_FAILED);
khugepaged_alloc_sleep();
} else
count_vm_event(THP_COLLAPSE_ALLOC);
} while (unlikely(!hpage) &&
likely(khugepaged_enabled()));
return hpage;
}
#endif
static void khugepaged_loop(void)
{
struct page *hpage;
#ifdef CONFIG_NUMA
hpage = NULL;
#endif
while (likely(khugepaged_enabled())) {
#ifndef CONFIG_NUMA
hpage = khugepaged_alloc_hugepage();
if (unlikely(!hpage))
break;
#else
if (IS_ERR(hpage)) {
khugepaged_alloc_sleep();
hpage = NULL;
}
#endif
khugepaged_do_scan(&hpage);
#ifndef CONFIG_NUMA
if (hpage)
put_page(hpage);
#endif
try_to_freeze();
if (unlikely(kthread_should_stop()))
break;
if (khugepaged_has_work()) {
DEFINE_WAIT(wait);
if (!khugepaged_scan_sleep_millisecs)
continue;
add_wait_queue(&khugepaged_wait, &wait);
schedule_timeout_interruptible(
msecs_to_jiffies(
khugepaged_scan_sleep_millisecs));
remove_wait_queue(&khugepaged_wait, &wait);
} else if (khugepaged_enabled())
wait_event_freezable(khugepaged_wait,
khugepaged_wait_event());
}
}
static int khugepaged(void *none)
{
struct mm_slot *mm_slot;
set_freezable();
set_user_nice(current, 19);
/* serialize with start_khugepaged() */
mutex_lock(&khugepaged_mutex);
for (;;) {
mutex_unlock(&khugepaged_mutex);
VM_BUG_ON(khugepaged_thread != current);
khugepaged_loop();
VM_BUG_ON(khugepaged_thread != current);
mutex_lock(&khugepaged_mutex);
if (!khugepaged_enabled())
break;
if (unlikely(kthread_should_stop()))
break;
}
spin_lock(&khugepaged_mm_lock);
mm_slot = khugepaged_scan.mm_slot;
khugepaged_scan.mm_slot = NULL;
if (mm_slot)
collect_mm_slot(mm_slot);
spin_unlock(&khugepaged_mm_lock);
khugepaged_thread = NULL;
mutex_unlock(&khugepaged_mutex);
return 0;
}
void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
{
struct page *page;
spin_lock(&mm->page_table_lock);
if (unlikely(!pmd_trans_huge(*pmd))) {
spin_unlock(&mm->page_table_lock);
return;
}
page = pmd_page(*pmd);
VM_BUG_ON(!page_count(page));
get_page(page);
spin_unlock(&mm->page_table_lock);
split_huge_page(page);
put_page(page);
BUG_ON(pmd_trans_huge(*pmd));
}
static void split_huge_page_address(struct mm_struct *mm,
unsigned long address)
{
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
pgd = pgd_offset(mm, address);
if (!pgd_present(*pgd))
return;
pud = pud_offset(pgd, address);
if (!pud_present(*pud))
return;
pmd = pmd_offset(pud, address);
if (!pmd_present(*pmd))
return;
/*
	 * Caller holds the mmap_sem in write mode, so a huge pmd cannot
* materialize from under us.
*/
split_huge_page_pmd(mm, pmd);
}
void __vma_adjust_trans_huge(struct vm_area_struct *vma,
unsigned long start,
unsigned long end,
long adjust_next)
{
/*
* If the new start address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
*/
if (start & ~HPAGE_PMD_MASK &&
(start & HPAGE_PMD_MASK) >= vma->vm_start &&
(start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
split_huge_page_address(vma->vm_mm, start);
/*
* If the new end address isn't hpage aligned and it could
	 * previously contain a hugepage: check if we need to split
	 * a huge pmd.
*/
if (end & ~HPAGE_PMD_MASK &&
(end & HPAGE_PMD_MASK) >= vma->vm_start &&
(end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
split_huge_page_address(vma->vm_mm, end);
/*
* If we're also updating the vma->vm_next->vm_start, if the new
* vm_next->vm_start isn't page aligned and it could previously
	 * contain a hugepage: check if we need to split a huge pmd.
*/
if (adjust_next > 0) {
struct vm_area_struct *next = vma->vm_next;
unsigned long nstart = next->vm_start;
nstart += adjust_next << PAGE_SHIFT;
if (nstart & ~HPAGE_PMD_MASK &&
(nstart & HPAGE_PMD_MASK) >= next->vm_start &&
(nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
split_huge_page_address(next->vm_mm, nstart);
}
}
| tweakos/HD-Kernel | mm/huge_memory.c | C | gpl-2.0 | 63,956 |
/*
* USE - UML based specification environment
* Copyright (C) 1999-2004 Mark Richters, University of Bremen
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/* $ProjectHeader: use 2-1-0-release.1 Sun, 09 May 2004 13:57:11 +0200 mr $ */
package org.tzi.use.uml.ocl.expr;
import org.tzi.use.uml.mm.MAssociationEnd;
import org.tzi.use.uml.sys.MObject;
import org.tzi.use.uml.sys.MObjectState;
import org.tzi.use.uml.sys.MSystemState;
import org.tzi.use.uml.ocl.value.Value;
import org.tzi.use.uml.ocl.value.UndefinedValue;
import org.tzi.use.uml.ocl.value.ObjectValue;
import org.tzi.use.uml.ocl.value.SetValue;
import org.tzi.use.uml.ocl.value.SequenceValue;
import org.tzi.use.uml.ocl.type.Type;
import org.tzi.use.uml.ocl.type.TypeFactory;
import org.tzi.use.uml.ocl.type.ObjectType;
import org.tzi.use.uml.ocl.type.SetType;
import org.tzi.use.uml.ocl.type.SequenceType;
import java.util.List;
import java.util.Iterator;
/**
* Navigation expression from one class to another.
*
* @version $ProjectVersion: 2-1-0-release.1 $
* @author Mark Richters
*/
public final class ExpNavigation extends Expression {
private MAssociationEnd fSrcEnd;
private MAssociationEnd fDstEnd;
private Expression fObjExp;
public ExpNavigation(Expression objExp,
MAssociationEnd srcEnd,
MAssociationEnd dstEnd)
throws ExpInvalidException
{
// set result type later
super(null);
// let c be the class at dstEnd, then the result type is:
// (1) c if the multiplicity is max. one
// (2) Set(c) if the multiplicity is greater than 1
// (3) Sequence(c) if the association end is marked as {ordered}
Type t = TypeFactory.mkObjectType(dstEnd.cls());
if ( dstEnd.multiplicity().isCollection() ) {
if ( dstEnd.isOrdered() )
t = TypeFactory.mkSequence(t);
else
t = TypeFactory.mkSet(t);
}
setResultType(t);
fSrcEnd = srcEnd;
fDstEnd = dstEnd;
fObjExp = objExp;
if ( ! objExp.type().isObjectType() )
throw new ExpInvalidException(
"Target expression of navigation operation must have " +
"object type, found `" + objExp.type() + "'.");
}
/**
* Evaluates expression and returns result value.
*/
public Value eval(EvalContext ctx) {
ctx.enter(this);
Value res = new UndefinedValue(type());
Value val = fObjExp.eval(ctx);
// if we don't have an object we can't navigate
if ( ! val.isUndefined() ) {
// get the object
ObjectValue objVal = (ObjectValue) val;
MObject obj = objVal.value();
MSystemState state = isPre() ? ctx.preState() : ctx.postState();
MObjectState objState = obj.state(state);
// get objects at association end
List objList = obj.getLinkedObjects(state, fSrcEnd, fDstEnd);
Type resultType = type();
if ( resultType.isObjectType() ) {
if ( objList.size() > 1 )
throw new RuntimeException("expected link set size 1 at " +
"association end `" + fDstEnd +
"', found: " +
objList.size());
if ( objList.size() == 1 ) {
obj = (MObject) objList.get(0);
if ( obj.exists(state) )
res = new ObjectValue((ObjectType) type(), obj);
}
} else if ( resultType.isSet() ) {
res = new SetValue(((SetType) resultType).elemType(),
oidsToObjectValues(state, objList));
} else if ( resultType.isSequence() ) {
res = new SequenceValue(((SequenceType) resultType).elemType(),
oidsToObjectValues(state, objList));
} else
throw new RuntimeException("Unexpected association end type `" +
resultType + "'");
}
ctx.exit(this, res);
return res;
}
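    /**
     * Converts the linked objects into ObjectValues for the given
     * system state. Note: the result array is sized to the full list,
     * but the index only advances for objects that exist in the state,
     * so trailing slots may remain null.
     */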
private Value[] oidsToObjectValues(MSystemState state, List objList) {
Value[] res = new ObjectValue[objList.size()];
Iterator it = objList.iterator();
int i = 0;
while ( it.hasNext() ) {
MObject obj = (MObject) it.next();
MObjectState objState = obj.state(state);
if ( objState != null )
res[i++] = new ObjectValue(obj.type(), obj);
}
return res;
}
public String toString() {
return fObjExp + "." + fDstEnd.name() + atPre();
}
}
| stormymauldin/stuff | src/main/org/tzi/use/uml/ocl/expr/ExpNavigation.java | Java | gpl-2.0 | 4,724 |
<?php
/*
Template Name: Archive 2
*/
?>
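<?php
// Overview: after the page's own content, this template lists the 30 most
// recent standard-format posts via a secondary WP_Query, followed by a
// month-by-month archive list built with wp_get_archives().
?>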
<?php
get_header();
?>
<div id="primary" class="site-content">
<div id="content" role="main">
<div class="readable-content">
<?php
if ( have_posts() ) :
while ( have_posts() ) : the_post();
?>
<article id="post-<?php the_ID(); ?>" <?php post_class( 'page hentry clearfix' ); ?> style="padding-top: 0;">
<header class="entry-header">
<h1 class="entry-title"><?php the_title(); ?></h1>
</header>
<!-- end .entry-header -->
<div class="entry-content">
<?php
the_content();
?>
<?php
wp_link_pages( array( 'before' => '<div class="page-links">' . __( 'Pages:', 'read' ), 'after' => '</div>' ) );
?>
<div class="post-list archives-list last-30-posts">
<h2><?php echo __( 'Last 30 posts', 'read' ); ?></h2>
<ul>
<?php
$args_homepage = array( 'post_type' => 'post', 'posts_per_page' => 30 );
$loop_homepage = new WP_Query( $args_homepage );
if ( $loop_homepage->have_posts() ) :
while ( $loop_homepage->have_posts() ) : $loop_homepage->the_post();
$format = get_post_format();
if ( $format == false )
{
?>
<li>
<h3><a href="<?php the_permalink(); ?>"><?php the_title(); ?></a></h3>
<span class="date"><?php echo get_the_date(); ?></span>
</li>
<?php
}
// end if
endwhile;
endif;
wp_reset_postdata(); // restore the main query's post data after the secondary WP_Query loop
?>
</ul>
</div>
<!-- end Last 30 posts -->
<div class="post-list archives-list archives-tag archives-by-month">
<h2><?php echo __( 'Archives by month', 'read' ); ?></h2>
<ul>
<?php
$args = array( 'format' => 'custom',
'before' => '<li>',
'after' => '</li>' );
wp_get_archives( $args );
?>
</ul>
</div>
<!-- end Archives by month -->
</div>
<!-- end .entry-content -->
</article>
<!-- end .hentry -->
<?php
endwhile;
endif;
wp_reset_query();
?>
<?php
comments_template( "", true );
?>
</div>
<!-- end .readable-content -->
</div>
<!-- end #content -->
</div>
<!-- end #primary -->
<?php
get_footer();
?>
| octavian-nita/cadeauxsucres2 | wp-content/themes/read-v3-9-1/page-archive-2.php | PHP | gpl-2.0 | 2,650 |
<?php
/**
* @package reason
* @subpackage function_libraries
*/
/**
* Include the reason header
*/
include_once( 'reason_header.php' );
/**
* @param string $entity_a The unique name of the type on the "A" side of the relationship
* @param string $entity_b The unique name of the type on the "B" side of the relationship
* @return mixed false if not found or if multiple rels available; else space delimited string data (a_to_b the_rel_name or b_to_a the_rel_name)
* @deprecated since Reason 4.2
*/
function find_relationship_name( $entity_a, $entity_b )
{
$total_results = 0;
$found = false;
$a_b_relationship_name = $entity_a . '_to_' . $entity_b;
$b_a_relationship_name = $entity_b . '_to_' . $entity_a;
$query = 'SELECT id FROM allowable_relationship WHERE name="' . $a_b_relationship_name . '"';
$a_b_results = db_query( $query );
$num_results = mysql_num_rows( $a_b_results );
if( $num_results > 1 )
{
return false;
}
elseif( $num_results == 1 )
{
return 'a_to_b ' . $a_b_relationship_name;
}
$query = 'SELECT id FROM allowable_relationship WHERE name="' . $b_a_relationship_name . '"';
$b_a_results = db_query( $query );
$num_results = mysql_num_rows( $b_a_results );
if( $num_results > 1 )
{
return false;
}
elseif( $num_results == 1 )
{
return 'b_to_a ' . $b_a_relationship_name;
}
else
{
return false;
}
}
/**
* Relationship Finder
*
* A function to find the id of a relationship given two entities' unique names.
* relationship_finder will return false if zero, or multiple relationships are found.
*
* For example:
*
* echo relationship_finder( 'site', 'minisite_page', 'owns' ) . '<br />';
*
* gives you:
* 78
*
* @param mixed $entity_a The unique name, id, or entity of the type on the "A" side of the relationship
* @param mixed $entity_b The unique name, id, or entity of the type on the "B" side of the relationship
* @param string $name the name of the relationship
 * @return mixed The ID of the allowable relationship, or false if not found
* @deprecated since Reason 4.2 just use relationship_id_of with the name
*/
function relationship_finder( $entity_a, $entity_b, $name = 'owns' )
{
if(is_object($entity_a))
$a_id = $entity_a->id();
elseif(is_numeric($entity_a))
$a_id = (integer) $entity_a;
else
$a_id = id_of($entity_a);
if(is_object($entity_b))
$b_id = $entity_b->id();
elseif(is_numeric($entity_b))
$b_id = (integer) $entity_b;
else
$b_id = id_of($entity_b);
$name = (string) $name;
// if the name string passed in is simply "owns" or "borrows" and relationship uses unique relationship names, update the name we look for and trigger an error
if ( ($name == 'owns' || $name == 'borrows') && reason_relationship_names_are_unique())
{
$a = new entity($a_id);
$b = new entity($b_id);
		$name = $a->get_value('unique_name') . '_' . $name . '_' . $b->get_value('unique_name'); // this assumes unique names, which is not ideal
trigger_error('The function relationship_finder was called to discover an owns or borrows relationship. The strings "owns" and "borrows"
are no longer used as relationship names. Calling this method is no longer necessary to find the relationship_id of owns
or borrows relationships. Use get_owns_relationship_id or get_borrows_relationship_id instead.');
}
if( empty($a_id))
{
trigger_error( '$entity_a ('.$entity_a.') is not a valid unique name');
return;
}
if( empty($b_id))
{
trigger_error( '$entity_b ('.$entity_b.') is not a valid unique name');
return;
}
if(empty($name))
{
trigger_error( 'A relationship name must be provided for relationship_finder to work');
return;
}
$query = 'SELECT id FROM allowable_relationship WHERE ' .
'relationship_a="' . $a_id . '" ' .
'AND relationship_b="' . $b_id . '" ' .
'AND name="' . addslashes($name) . '"';
$results = db_query( $query );
$num = mysql_num_rows( $results );
if( $num < 1 )
{
//Relationship finder returned zero results.
return false;
}
elseif( $num > 1 )
{
//Relationship finder returned too many results!
if(is_object($entity_a))
$a_name = $entity_a->get_value('name');
else
$a_name = $entity_a;
if(is_object($entity_b))
$b_name = $entity_b->get_value('name');
else
$b_name = $entity_b;
trigger_error('Multiple relationships exist for "'.$a_name.'" to "'.$b_name.'" under name "'.$name.'"; returning only first result.');
}
$results = mysql_fetch_array( $results );
return (integer) $results['id'];
}
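/*
 * Migration sketch (illustrative only, not part of the original library code): per the
 * @deprecated notes above, the id can be looked up directly by relationship name,
 * e.g. $rel_id = relationship_id_of('site_owns_minisite_page');
 * where 'site_owns_minisite_page' is an assumed example name following the
 * unique-name pattern built in the function above.
 */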
?>
|
mischler/reason_package
|
reason_4.0/lib/core/function_libraries/relationship_finder.php
|
PHP
|
gpl-2.0
| 4,713
|
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen 1.8.6"/>
<title>Platinum UPnP SDK: PLT_MediaCache< T, U > Class Template Reference</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
<tbody>
<tr style="height: 56px;">
<td style="padding-left: 0.5em;">
<div id="projectname">Platinum UPnP SDK
 <span id="projectnumber">1.0.5.13</span>
</div>
</td>
</tr>
</tbody>
</table>
</div>
<!-- end header part -->
<!-- Generated by Doxygen 1.8.6 -->
<div id="navrow1" class="tabs">
<ul class="tablist">
<li><a href="index.html"><span>Main Page</span></a></li>
<li class="current"><a href="annotated.html"><span>Classes</span></a></li>
<li><a href="files.html"><span>Files</span></a></li>
</ul>
</div>
<div id="navrow2" class="tabs2">
<ul class="tablist">
<li><a href="annotated.html"><span>Class List</span></a></li>
<li><a href="classes.html"><span>Class Index</span></a></li>
<li><a href="hierarchy.html"><span>Class Hierarchy</span></a></li>
<li><a href="functions.html"><span>Class Members</span></a></li>
</ul>
</div>
</div><!-- top -->
<div class="header">
<div class="summary">
<a href="classPLT__MediaCache-members.html">List of all members</a> </div>
<div class="headertitle">
<div class="title">PLT_MediaCache< T, U > Class Template Reference</div> </div>
</div><!--header-->
<div class="contents">
<p>The <a class="el" href="classPLT__MediaCache.html" title="The PLT_MediaCache template provides a way to hold references to objects in memory. ">PLT_MediaCache</a> template provides a way to hold references to objects in memory.
<a href="classPLT__MediaCache.html#details">More...</a></p>
<p><code>#include <<a class="el" href="PltMediaCache_8h_source.html">PltMediaCache.h</a>></code></p>
<a name="details" id="details"></a><h2 class="groupheader">Detailed Description</h2>
<div class="textblock"><h3>template<typename T, typename U><br/>
class PLT_MediaCache< T, U ></h3>
<p>The <a class="el" href="classPLT__MediaCache.html" title="The PLT_MediaCache template provides a way to hold references to objects in memory. ">PLT_MediaCache</a> template provides a way to hold references to objects in memory. </p>
</div><hr/>The documentation for this class was generated from the following file:<ul>
<li><a class="el" href="PltMediaCache_8h_source.html">PltMediaCache.h</a></li>
</ul>
</div><!-- contents -->
</body>
</html>
|
widora/openwrt_widora
|
package/multimedia/platinum/src/Platinum/Docs/Doxygen/html/classPLT__MediaCache.html
|
HTML
|
gpl-2.0
| 3,100
|
package com.myJava.file.metadata.posix.jni.wrapper;
import com.myJava.object.ToStringHelper;
/**
* <BR>
* @author Olivier PETRUCCI
* <BR>
*
*/
/*
Copyright 2005-2014, Olivier PETRUCCI.
This file is part of Areca.
Areca is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
Areca is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Areca; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
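// Result of the native "set file owner" JNI call: returnCode, errorNumber and
// transcodedErrorNumber are inherited from AbstractMethodResult; this class only
// adds a readable toString().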
public class SetFileOwnerResult extends AbstractMethodResult {
public String toString() {
StringBuffer sb = ToStringHelper.init(this);
ToStringHelper.append("returnCode", returnCode, sb);
ToStringHelper.append("errorNumber", errorNumber, sb);
ToStringHelper.append("transcodedErrorNumber", transcodedErrorNumber, sb);
return ToStringHelper.close(sb);
}
}
|
wintonBy/areca-backup-release-mirror
|
src/com/myJava/file/metadata/posix/jni/wrapper/SetFileOwnerResult.java
|
Java
|
gpl-2.0
| 1,297
|
/* -*- mode: c; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4; c-file-style: "stroustrup"; -*-
*
* $Id: edsam.c,v 1.31.2.1 2008/12/18 15:24:30 carsten Exp $
*
* This source code is part of
*
* G R O M A C S
*
* GROningen MAchine for Chemical Simulations
*
* VERSION 3.2.0
* Written by David van der Spoel, Erik Lindahl, Berk Hess, and others.
* Copyright (c) 1991-2000, University of Groningen, The Netherlands.
* Copyright (c) 2001-2004, The GROMACS development team,
* check out http://www.gromacs.org for more information.
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* If you want to redistribute modifications, please consider that
* scientific software is very special. Version control is crucial -
* bugs must be traceable. We will be happy to consider code for
* inclusion in the official distribution, but derived work must not
* be called official GROMACS. Details are found in the README & COPYING
* files - if they are missing, get the official version at www.gromacs.org.
*
* To help us fund GROMACS development, we humbly ask that you cite
* the papers on the package - you can find them in the top README file.
*
* For more info, check our website at http://www.gromacs.org
*
* And Hey:
* GROwing Monsters And Cloning Shrimps
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif
#include <stdio.h>
#include <time.h>
#include "typedefs.h"
#include "string2.h"
#include "smalloc.h"
#include "names.h"
#include "confio.h"
#include "mvdata.h"
#include "txtdump.h"
#include "vec.h"
#include "time.h"
#include "nrnb.h"
#include "mshift.h"
#include "mdrun.h"
#include "update.h"
#include "physics.h"
#include "rmpbc.h"
#include "nrjac.h"
#include "mtop_util.h"
#include "edsam.h"
#include "mpelogging.h"
#include "gmxfio.h"
/* We use the same defines as in mvdata.c here */
#define block_bc(cr, d) gmx_bcast( sizeof(d), &(d),(cr))
#define nblock_bc(cr,nr,d) gmx_bcast((nr)*sizeof((d)[0]), (d),(cr))
#define snew_bc(cr,d,nr) { if (!MASTER(cr)) snew((d),(nr)); }
/* enum to identify the type of ED: none, normal ED, flooding */
enum {eEDnone, eEDedsam, eEDflood, eEDnr};
/* enum to identify operations on reference, average, origin, target structures */
enum {eedREF, eedAV, eedORI, eedTAR, eedNR};
typedef struct
{
int neig; /* nr of eigenvectors */
int *ieig; /* index nrs of eigenvectors */
real *stpsz; /* stepsizes (per eigenvector) */
rvec **vec; /* eigenvector components */
real *xproj; /* instantaneous x projections */
real *fproj; /* instantaneous f projections */
real *refproj; /* starting or target projections */
real radius; /* instantaneous radius */
} t_eigvec;
typedef struct
{
t_eigvec mon; /* only monitored, no constraints */
t_eigvec linfix; /* fixed linear constraints */
t_eigvec linacc; /* acceptance linear constraints */
t_eigvec radfix; /* fixed radial constraints (exp) */
t_eigvec radacc; /* acceptance radial constraints (exp) */
t_eigvec radcon; /* acceptance rad. contraction constr. */
} t_edvecs;
typedef struct
{
real deltaF0;
bool bHarmonic;
real tau;
real deltaF;
real Efl;
real kT;
real Vfl;
real dt;
real constEfl;
real alpha2;
int flood_id;
rvec *forces_cartesian;
t_eigvec vecs; /* use flooding for these */
} t_edflood;
/* This type is for the average, reference, target, and origin structure */
typedef struct gmx_edx
{
int nr; /* number of atoms this structure contains */
int nr_loc; /* number of atoms on local node */
int *anrs; /* atom index numbers */
int *anrs_loc; /* local atom index numbers */
int *c_ind; /* at which position of the whole anrs
* array is a local atom?, i.e.
* c_ind[0...nr_loc-1] gives the atom index
* with respect to the collective
* anrs[0...nr-1] array */
rvec *x; /* coordinates for this structure */
rvec *x_old; /* used to keep track of the shift vectors
such that the ED molecule can always be
made whole in the parallel case */
real *m; /* masses */
real mtot; /* total mass (only used in sref) */
real *sqrtm; /* sqrt of the masses used for mass-
* weighting of analysis (only used in sav) */
} t_gmx_edx;
typedef struct edpar
{
int nini; /* total Nr of atoms */
bool fitmas; /* true if trans fit with cm */
bool pcamas; /* true if mass-weighted PCA */
int presteps; /* number of steps to run without any
* perturbations ... just monitoring */
int outfrq; /* freq (in steps) of writing to edo */
int maxedsteps; /* max nr of steps per cycle */
/* all gmx_edx datasets are copied to all nodes in the parallel case */
struct gmx_edx sref; /* reference positions, to these fitting
* will be done */
bool bRefEqAv; /* If true, reference & average indices
* are the same. Used for optimization */
struct gmx_edx sav; /* average positions */
struct gmx_edx star; /* target positions */
struct gmx_edx sori; /* origin positions */
t_edvecs vecs; /* eigenvectors */
real slope; /* minimal slope in acceptance radexp */
bool bNeedDoEdsam; /* if any of the options mon, linfix, ...
* is used (i.e. apart from flooding) */
t_edflood flood; /* parameters especially for flooding */
struct t_ed_buffer *buf; /* handle to local buffers */
struct edpar *next_edi; /* Pointer to another ed dataset */
} t_edpar;
typedef struct gmx_edsam
{
int eEDtype; /* Type of ED: see enums above */
char *edinam; /* name of ED sampling input file */
char *edonam; /* output */
FILE *edo; /* output file pointer */
t_edpar *edpar;
} t_gmx_edsam;
struct t_do_edsam
{
matrix old_rotmat;
real oldrad;
rvec old_transvec,older_transvec,transvec_compact;
rvec *xcoll; /* Coordinates from all nodes, this is the collective set of coords we work on.
* These are the coordinates of atoms with average structure indices */
rvec *xc_ref; /* same but with reference structure indices */
ivec *shifts_xcoll; /* Shifts for xcoll */
ivec *extra_shifts_xcoll; /* xcoll shift changes since last NS step */
ivec *shifts_xc_ref; /* Shifts for xc_ref */
ivec *extra_shifts_xc_ref; /* xc_ref shift changes since last NS step */
bool bUpdateShifts; /* TRUE in NS steps to indicate that the ED shifts
* for this ED dataset need to be updated */
};
/* definition of ED buffer structure */
struct t_ed_buffer
{
struct t_fitit * fitit;
struct t_do_edfit * do_edfit;
struct t_remove_pbc_effect * remove_pbc_effect;
struct t_do_edsam * do_edsam;
struct t_do_radcon * do_radcon;
};
/* Function declarations */
static void fit_to_reference(rvec *xcoll,rvec transvec,matrix rotmat,t_edpar *edi);
static void get_coordinates(t_commrec *cr,rvec *xc,ivec *shifts_xc,ivec *extra_shifts_xc,bool bNeedShiftsUpdate,
rvec *x_loc,struct gmx_edx *s, matrix box,char title[]);
static void translate_and_rotate(rvec *x,int nat,rvec transvec,matrix rotmat);
/* End function declarations */
/* Does not subtract average positions, projection on single eigenvector is returned
* used by: do_linfix, do_linacc, do_radfix, do_radacc, do_radcon
* Average position is subtracted in ed_apply_constraints prior to calling projectx
*/
static real projectx(t_edpar *edi, rvec *xcoll, rvec *vec)
{
int i;
real proj=0.0;
for (i=0; i<edi->sav.nr; i++)
proj += edi->sav.sqrtm[i]*iprod(vec[i], xcoll[i]);
return proj;
}
/* Specialized: projection is stored in vec->refproj
* -> used for radacc, radfix, radcon and center of flooding potential
* subtracts average positions, projects vector x */
static void rad_project(t_edpar *edi, rvec *x, t_eigvec *vec, t_commrec *cr)
{
int i;
real rad=0.0;
/* Subtract average positions */
for (i = 0; i < edi->sav.nr; i++)
rvec_dec(x[i], edi->sav.x[i]);
for (i = 0; i < vec->neig; i++)
{
vec->refproj[i] = projectx(edi,x,vec->vec[i]);
rad += pow((vec->refproj[i]-vec->xproj[i]),2);
}
vec->radius=sqrt(rad);
/* Add average positions */
for (i = 0; i < edi->sav.nr; i++)
rvec_inc(x[i], edi->sav.x[i]);
}
/* Project vector x, subtract average positions prior to projection and add
* them afterwards to retain the unchanged vector. Store in xproj. Mass-weighting
* is applied. */
static void project_to_eigvectors(rvec *x, /* The coordinates to project to an eigenvector */
t_eigvec *vec, /* The eigenvectors */
t_edpar *edi)
{
int i;
if (!vec->neig) return;
/* Subtract average positions */
for (i=0; i<edi->sav.nr; i++)
rvec_dec(x[i], edi->sav.x[i]);
for (i=0; i<vec->neig; i++)
vec->xproj[i] = projectx(edi, x, vec->vec[i]);
/* Add average positions */
for (i=0; i<edi->sav.nr; i++)
rvec_inc(x[i], edi->sav.x[i]);
}
/* Project vector x onto all edi->vecs (mon, linfix,...) */
static void project(rvec *x, /* coordinates to project */
t_edpar *edi) /* edi data set */
{
/* It is not more work to subtract the average position in every
 * subroutine again, because these routines are rarely used simultaneously */
project_to_eigvectors(x, &edi->vecs.mon , edi);
project_to_eigvectors(x, &edi->vecs.linfix, edi);
project_to_eigvectors(x, &edi->vecs.linacc, edi);
project_to_eigvectors(x, &edi->vecs.radfix, edi);
project_to_eigvectors(x, &edi->vecs.radacc, edi);
project_to_eigvectors(x, &edi->vecs.radcon, edi);
}
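/* Return the radius, i.e. the norm of the difference between the reference and the current projections over all eigenvectors in vec */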
static real calc_radius(t_eigvec *vec)
{
int i;
real rad=0.0;
for (i=0; i<vec->neig; i++)
rad += pow((vec->refproj[i]-vec->xproj[i]),2);
return sqrt(rad);
}
/* Debug helper */
static void dump_xcoll(t_edpar *edi, struct t_do_edsam *buf, t_commrec *cr, int step)
{
int i;
static FILE *fp = NULL;
rvec *xcoll;
ivec *shifts, *eshifts;
if (!MASTER(cr))
return;
xcoll = buf->xcoll;
shifts = buf->shifts_xcoll;
eshifts = buf->extra_shifts_xcoll;
if (!fp)
fp = gmx_fio_fopen("xcolldump.txt", "w");
fprintf(fp, "Step %d\n", step);
for (i=0; i<edi->sav.nr; i++)
fprintf(fp, "%d %9.5f %9.5f %9.5f %d %d %d %d %d %d\n", edi->sav.anrs[i]+1,
xcoll[i][XX], xcoll[i][YY], xcoll[i][ZZ],
shifts[i][XX], shifts[i][YY], shifts[i][ZZ],
eshifts[i][XX], eshifts[i][YY], eshifts[i][ZZ]);
fflush(fp);
}
/* Debug helper */
static void dump_edi_positions(FILE *out, struct gmx_edx *s, char name[])
{
int i;
fprintf(out, "#%s coordinates:\n%d\n", name, s->nr);
if (s->nr == 0)
return;
fprintf(out, "#index, x, y, z");
if (s->sqrtm)
fprintf(out, ", sqrt(m)");
for (i=0; i<s->nr; i++)
{
fprintf(out, "\n%6d %11.6f %11.6f %11.6f",s->anrs[i], s->x[i][XX], s->x[i][YY], s->x[i][ZZ]);
if (s->sqrtm)
fprintf(out,"%9.3f",s->sqrtm[i]);
}
fprintf(out, "\n");
}
/* Debug helper */
static void dump_edi_eigenvecs(FILE *out, t_eigvec *ev, char name[], int length)
{
int i,j;
fprintf(out, "#%s eigenvectors:\n%d\n", name, ev->neig);
/* Dump the data for every eigenvector: */
for (i=0; i<ev->neig; i++)
{
fprintf(out, "EV %4d\ncomponents %d\nstepsize %f\nxproj %f\nfproj %f\nrefproj %f\nradius %f\nComponents:\n",
ev->ieig[i], length, ev->stpsz[i], ev->xproj[i], ev->fproj[i], ev->refproj[i], ev->radius);
for (j=0; j<length; j++)
fprintf(out, "%11.6f %11.6f %11.6f\n", ev->vec[i][j][XX], ev->vec[i][j][YY], ev->vec[i][j][ZZ]);
}
}
/* Debug helper */
static void dump_edi(t_edpar *edpars, t_commrec *cr, int nr_edi)
{
static FILE *out;
static bool bFirst=TRUE;
char fname[255];
if (bFirst)
{
sprintf(fname, "EDdump%.2d", cr->nodeid);
out = fopen(fname, "w");
bFirst=FALSE;
}
fprintf(out,"=== ED dataset #%d ===\n", nr_edi);
fprintf(out,"#NINI\n %d\n#SELMAS\n %d\n#ANALYSIS_MAS\n %d\n",
edpars->nini,edpars->fitmas,edpars->pcamas);
fprintf(out,"#OUTFRQ\n %d\n#MAXLEN\n %d\n#SLOPECRIT\n %f\n",
edpars->outfrq,edpars->maxedsteps,edpars->slope);
fprintf(out,"#PRESTEPS\n %d\n#DELTA_F0\n %f\n#TAU\n %f\n#EFL_NULL\n %f\n#ALPHA2\n %f\n",
edpars->presteps,edpars->flood.deltaF0,edpars->flood.tau,edpars->flood.constEfl,edpars->flood.alpha2);
/* Dump reference, average, target, origin positions */
dump_edi_positions(out, &edpars->sref, "REFERENCE");
dump_edi_positions(out, &edpars->sav , "AVERAGE" );
dump_edi_positions(out, &edpars->star, "TARGET" );
dump_edi_positions(out, &edpars->sori, "ORIGIN" );
/* Dump eigenvectors */
dump_edi_eigenvecs(out, &edpars->vecs.mon , "MONITORED", edpars->sav.nr);
dump_edi_eigenvecs(out, &edpars->vecs.linfix, "LINFIX" , edpars->sav.nr);
dump_edi_eigenvecs(out, &edpars->vecs.linacc, "LINACC" , edpars->sav.nr);
dump_edi_eigenvecs(out, &edpars->vecs.radfix, "RADFIX" , edpars->sav.nr);
dump_edi_eigenvecs(out, &edpars->vecs.radacc, "RADACC" , edpars->sav.nr);
dump_edi_eigenvecs(out, &edpars->vecs.radcon, "RADCON" , edpars->sav.nr);
/* Dump flooding eigenvectors */
dump_edi_eigenvecs(out, &edpars->flood.vecs, "FLOODING" , edpars->sav.nr);
/* Dump ed local buffer */
fprintf(out, "buf->fitit =%p\n", edpars->buf->fitit );
fprintf(out, "buf->do_edfit =%p\n", edpars->buf->do_edfit );
fprintf(out, "buf->remove_pbc_effect=%p\n", edpars->buf->remove_pbc_effect);
fprintf(out, "buf->do_edsam =%p\n", edpars->buf->do_edsam );
fprintf(out, "buf->do_radcon =%p\n", edpars->buf->do_radcon );
fprintf(out, "\n");
fflush(out);
}
/* Debug helper */
static void dump_rotmat(FILE* out,matrix rotmat)
{
fprintf(out,"ROTMAT: %12.8f %12.8f %12.8f\n",rotmat[XX][XX],rotmat[XX][YY],rotmat[XX][ZZ]);
fprintf(out,"ROTMAT: %12.8f %12.8f %12.8f\n",rotmat[YY][XX],rotmat[YY][YY],rotmat[YY][ZZ]);
fprintf(out,"ROTMAT: %12.8f %12.8f %12.8f\n",rotmat[ZZ][XX],rotmat[ZZ][YY],rotmat[ZZ][ZZ]);
}
/* Debug helper */
static void dump_rvec(FILE *out, int dim, rvec *x)
{
int i;
for (i=0; i<dim; i++)
fprintf(out,"%4d %f %f %f\n",i,x[i][XX],x[i][YY],x[i][ZZ]);
}
/* Debug helper */
static void dump_mat(FILE* out, int dim, double** mat)
{
int i,j;
fprintf(out,"MATRIX:\n");
for (i=0;i<dim;i++)
{
for (j=0;j<dim;j++)
fprintf(out,"%f ",mat[i][j]);
fprintf(out,"\n");
}
}
struct t_do_edfit {
double **omega;
double **om;
};
static void do_edfit(int natoms,rvec *xp,rvec *x,matrix R,t_edpar *edi)
{
/* this is a copy of do_fit with some modifications */
int c,r,n,j,i,irot;
double d[6],xnr,xpc;
matrix vh,vk,u;
int index;
real max_d;
struct t_do_edfit *loc;
bool bFirst;
if(edi->buf->do_edfit != NULL)
bFirst = FALSE;
else
{
bFirst = TRUE;
snew(edi->buf->do_edfit,1);
}
loc = edi->buf->do_edfit;
if (bFirst)
{
snew(loc->omega,2*DIM);
snew(loc->om,2*DIM);
for(i=0; i<2*DIM; i++)
{
snew(loc->omega[i],2*DIM);
snew(loc->om[i],2*DIM);
}
}
for(i=0;(i<6);i++)
{
d[i]=0;
for(j=0;(j<6);j++)
{
loc->omega[i][j]=0;
loc->om[i][j]=0;
}
}
/* calculate the matrix U */
clear_mat(u);
for(n=0;(n<natoms);n++)
{
for(c=0; (c<DIM); c++)
{
xpc=xp[n][c];
for(r=0; (r<DIM); r++)
{
xnr=x[n][r];
u[c][r]+=xnr*xpc;
}
}
}
/* construct loc->omega */
/* loc->omega is symmetric -> loc->omega==loc->omega' */
for(r=0;(r<6);r++)
for(c=0;(c<=r);c++)
if ((r>=3) && (c<3))
{
loc->omega[r][c]=u[r-3][c];
loc->omega[c][r]=u[r-3][c];
}
else
{
loc->omega[r][c]=0;
loc->omega[c][r]=0;
}
/* determine h and k */
#ifdef DEBUG
{
int i;
dump_mat(stderr,2*DIM,loc->omega);
for (i=0; i<6; i++)
fprintf(stderr,"d[%d] = %f\n",i,d[i]);
}
#endif
jacobi(loc->omega,6,d,loc->om,&irot);
if (irot==0)
fprintf(stderr,"IROT=0\n");
index=0; /* For the compiler only */
for(j=0;(j<3);j++)
{
max_d=-1000;
for(i=0;(i<6);i++)
if (d[i]>max_d)
{
max_d=d[i];
index=i;
}
d[index]=-10000;
for(i=0;(i<3);i++)
{
vh[j][i]=M_SQRT2*loc->om[i][index];
vk[j][i]=M_SQRT2*loc->om[i+DIM][index];
}
}
/* determine R */
for(c=0;(c<3);c++)
for(r=0;(r<3);r++)
R[c][r]=vk[0][r]*vh[0][c]+
vk[1][r]*vh[1][c]+
vk[2][r]*vh[2][c];
if (det(R) < 0)
for(c=0;(c<3);c++)
for(r=0;(r<3);r++)
R[c][r]=vk[0][r]*vh[0][c]+
vk[1][r]*vh[1][c]-
vk[2][r]*vh[2][c];
}
static void rotate_x(int nr,rvec *x,matrix rmat)
{
int i,j,k;
rvec x_old;
/* Apply the rotation matrix */
for(i=0;(i<nr);i++)
{
for(j=0;(j<3);j++)
x_old[j]=x[i][j];
for(j=0;(j<3);j++)
{
x[i][j]=0;
for(k=0;(k<3);k++)
x[i][j]+=rmat[k][j]*x_old[k];
}
}
}
static void rmrotfit(int nat, rvec *xcoll, matrix rotmat)
{
int i,j,k;
rvec xdum;
/* invert the rotation matrix and apply */
for (i=0; i<nat; i++)
{
for (j=0; j<3; j++)
xdum[j]=xcoll[i][j];
for (j=0; j<3; j++)
{
xcoll[i][j]=0;
for (k=0; k<3; k++)
xcoll[i][j] += rotmat[j][k]*xdum[k];
}
}
}
static void rmtransfit(int nat, rvec *xcoll, rvec transvec)
{
int i;
/* subtract the translation vector */
for(i=0; i<nat; i++)
rvec_dec(xcoll[i], transvec);
}
static void rmfit(int nat, rvec *xcoll, rvec transvec, matrix rotmat)
{
rmrotfit(nat, xcoll, rotmat);
rmtransfit(nat, xcoll, transvec);
}
/**********************************************************************************
******************** FLOODING ****************************************************
**********************************************************************************
The flooding ability was added to edsam later; much of the edsam functionality could be reused for this purpose.
The flooding covariance matrix, i.e. the selected eigenvectors and their corresponding eigenvalues, is
read as the 7th component group. The eigenvalues are coded into the stepsize parameter (as used by -linfix or -linacc).
Right at the beginning, do_md calls init_edsam, which reads the edi file, saves all the necessary information in
the edi structure and calls init_flood to initialise some extra fields in the edi->flood structure.
Since flooding acts on forces, do_flood is called from the function force() (force.c), while the other
edsam functionality is hooked into md via the update() (update.c) function, acting as a constraint on positions.
do_flood makes a copy of the positions, fits them, projects them, and computes the flooding energy and
flooding forces. The forces are computed in the space of the eigenvectors, then blown up to the full
cartesian space and rotated back to remove the fit. Then do_flood adds these forces to the forcefield forces
(given as a parameter) and updates the adaptive flooding parameters Efl and deltaF.
To center the flooding potential at a different location one can use the -ori option of make_edi. The ori
structure is projected onto the system of eigenvectors, and this position in the subspace is then used as the
center of the flooding potential. If the option is not used, the center will be zero in the subspace,
i.e. the average structure as given in the make_edi file.
To use the flooding potential as a restraint, make_edi has the option -restrain, which inverts the
signs of alpha2 and Efl, such that the sign in the exponential of Vfl is not inverted but the sign of
Vfl is inverted: Vfl = Efl * exp (- .../Efl/alpha2*x^2...). With tau>0 the negative Efl will grow slowly,
so that the restraint is switched off slowly. Once Efl==0 is reached with inverted flooding switched on,
no further adaption is applied and Efl stays constant at zero.
To use restraints with harmonic potentials, switch on both -restrain and -harmonic. Then the eigenvalues are
used as spring constants for the harmonic potential.
Note that eq. 3 in the flooding paper (J. Comp. Chem. 2006, 27, 1693-1702) defines the parameter lambda
as the inverse of the spring constant, whereas the implementation uses lambda as the spring constant.
To use more than one flooding matrix, just concatenate several .edi files (cat flood1.edi flood2.edi > flood_all.edi);
the routine read_edi_file reads all of these flooding files.
The edi data sets are organized as a linked list, and the function do_flood cycles through the list,
calling the do_single_flood() routine for every entry. Since all state variables are kept within one
edi entry, there is no interdependence whatsoever; the forces are simply added together.
To write energies into the .edr file, call the function
get_flood_enx_names(char**, int *nnames) to get the header (Vfl1 Vfl2 ... Vfln)
and call
get_flood_energies(real Vfl[], int nnames);
TODO:
- one could program the whole thing such that Efl, Vfl and deltaF are written to the .edr file -- I don't know how to do that yet.
- maybe one should give a range of atoms for which motion is to be removed, so that motion can be removed with
two edsam files from two peptide chains
*/
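/* Write the current flooding state (step, Efl, Vfl, deltaF) and the flooding forces in eigenvector space to the .edo output file */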
static void write_edo_flood(t_edpar *edi, FILE *fp, int step)
{
int i;
fprintf(fp,"%d.th FL: %d %g %g %g\n",edi->flood.flood_id,step, edi->flood.Efl, edi->flood.Vfl, edi->flood.deltaF);
fprintf(fp,"FL_FORCES: ");
for (i=0; i<edi->flood.vecs.neig; i++)
fprintf(fp," %f",edi->flood.vecs.fproj[i]);
fprintf(fp,"\n");
fflush(fp);
}
/* From flood.xproj compute the Vfl(x) at this point */
static real flood_energy(t_edpar *edi)
{
/* compute flooding energy Vfl
Vfl = Efl * exp( - \frac {kT} {2Efl alpha^2} * sum_i { \lambda_i c_i^2 } )
\lambda_i is the reciprocal eigenvalue 1/\sigma_i
it is already computed by make_edi and stored in stpsz[i]
bHarmonic:
Vfl = - Efl * 1/2(sum _i {\frac 1{\lambda_i} c_i^2})
*/
real summe;
real Vfl;
int i;
summe=0.0;
/* Compute sum which will be the exponent of the exponential */
for (i=0; i<edi->flood.vecs.neig; i++)
summe += edi->flood.vecs.stpsz[i]*(edi->flood.vecs.xproj[i]-edi->flood.vecs.refproj[i])*(edi->flood.vecs.xproj[i]-edi->flood.vecs.refproj[i]);
/* Compute the Gauss function*/
if (edi->flood.bHarmonic)
Vfl = -0.5*edi->flood.Efl*summe; /* minus sign because Efl is negative if the restraint is on */
else
Vfl = edi->flood.Efl!=0 ? edi->flood.Efl*exp(-edi->flood.kT/2/edi->flood.Efl/edi->flood.alpha2*summe) :0;
return Vfl;
}
/* From the position and from Vfl compute forces in subspace -> store in edi->vec.flood.fproj */
static void flood_forces(t_edpar *edi)
{
/* compute the forces in the subspace of the flooding eigenvectors
* by the formula F_i= V_{fl}(c) * ( \frac {kT} {E_{fl}} \lambda_i c_i */
int i;
real energy=edi->flood.Vfl;
if (edi->flood.bHarmonic)
for (i=0; i<edi->flood.vecs.neig; i++)
{
edi->flood.vecs.fproj[i] = edi->flood.Efl* edi->flood.vecs.stpsz[i]*(edi->flood.vecs.xproj[i]-edi->flood.vecs.refproj[i]);
}
else
for (i=0; i<edi->flood.vecs.neig; i++)
{
/* if Efl is zero the forces are zero; if not, use the formula */
edi->flood.vecs.fproj[i] = edi->flood.Efl!=0 ? edi->flood.kT/edi->flood.Efl/edi->flood.alpha2*energy*edi->flood.vecs.stpsz[i]*(edi->flood.vecs.xproj[i]-edi->flood.vecs.refproj[i]) : 0;
}
}
/* Raise forces from subspace into cartesian space */
static void flood_blowup(t_edpar *edi, rvec *forces_cart)
{
/* This function lifts the forces from the subspace to the cartesian space:
all values not contained in the subspace are assumed to be zero, and a
coordinate transformation from eigenvector to cartesian vectors is performed.
The nonexistent values don't have to be set to zero explicitly; they would occur
as zero-valued summands, hence we simply skip that part of the sum.
For every atom we add all the contributions to this atom from all the different eigenvectors.
NOTE: one could add directly to the forcefield forces, which would mean we wouldn't have to clear
the forces_cart field prior to the computation, but for now we want to compute the forces separately
to have them accessible for diagnostics
*/
int j,eig;
rvec dum;
real *forces_sub;
forces_sub = edi->flood.vecs.fproj;
/* Calculate the cartesian forces for the local atoms */
/* Clear forces first */
for (j=0; j<edi->sav.nr_loc; j++)
clear_rvec(forces_cart[j]);
/* Now compute atomwise */
for (j=0; j<edi->sav.nr_loc; j++)
{
/* Compute forces_cart[edi->sav.anrs[j]] */
for (eig=0; eig<edi->flood.vecs.neig; eig++)
{
/* Force vector is force * eigenvector (compute only atom j) */
svmul(forces_sub[eig],edi->flood.vecs.vec[eig][edi->sav.c_ind[j]],dum);
/* Add this vector to the cartesian forces */
rvec_inc(forces_cart[j],dum);
}
}
}
/* Update the values of Efl, deltaF depending on tau and Vfl */
static void update_adaption(t_edpar *edi)
{
/* This function updates the parameters Efl and deltaF according to the rules given in
* 'Predicting unimolecular chemical reactions: chemical flooding', M. Mueller et al.,
* J. Chem. Phys. */
if ((edi->flood.tau < 0 ? -edi->flood.tau : edi->flood.tau ) > 0.00000001)
{
edi->flood.Efl = edi->flood.Efl+edi->flood.dt/edi->flood.tau*(edi->flood.deltaF0-edi->flood.deltaF);
/* check if restrain (inverted flooding) -> don't let EFL become positive */
if (edi->flood.alpha2<0 && edi->flood.Efl>-0.00000001)
edi->flood.Efl = 0;
edi->flood.deltaF = (1-edi->flood.dt/edi->flood.tau)*edi->flood.deltaF+edi->flood.dt/edi->flood.tau*edi->flood.Vfl;
}
}
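/* Perform one flooding step for a single ED dataset: assemble the collective coordinates, fit them to the
* reference, project onto the flooding eigenvectors, compute Vfl and the flooding forces, rotate the forces
* back and add them to the main force array */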
static void do_single_flood(FILE *edo,
rvec x[],
rvec force[],
t_edpar *edi,
int step,
matrix box,
t_commrec *cr)
{
int i;
matrix rotmat; /* rotation matrix */
rvec transvec; /* translation vector */
struct t_do_edsam *buf;
buf=edi->buf->do_edsam;
/* Broadcast the coordinates of the average structure such that they are known on
* every processor. Each node contributes its local coordinates x and stores them in
* the collective ED array buf->xcoll */
get_coordinates(cr, buf->xcoll, buf->shifts_xcoll, buf->extra_shifts_xcoll, buf->bUpdateShifts, x, &edi->sav, box, "XC_AVERAGE (FLOODING)");
/* Only assemble reference coordinates if their indices differ from the average ones */
if (!edi->bRefEqAv)
get_coordinates(cr, buf->xc_ref, buf->shifts_xc_ref, buf->extra_shifts_xc_ref, buf->bUpdateShifts, x, &edi->sref, box, "XC_REFERENCE (FLOODING)");
/* If bUpdateShifts was TRUE, the shifts have just been updated in get_coordinates.
* We do not need to update the shifts until the next NS step */
buf->bUpdateShifts = FALSE;
/* Now all nodes have all of the ED/flooding coordinates in edi->sav->xcoll,
* as well as the indices in edi->sav.anrs */
/* Fit the reference indices to the reference structure */
if (edi->bRefEqAv)
fit_to_reference(buf->xcoll , transvec, rotmat, edi);
else
fit_to_reference(buf->xc_ref, transvec, rotmat, edi);
/* Now apply the translation and rotation to the ED structure */
translate_and_rotate(buf->xcoll, edi->sav.nr, transvec, rotmat);
/* Project fitted structure onto subspace -> store in edi->flood.vecs.xproj */
project_to_eigvectors(buf->xcoll,&edi->flood.vecs,edi);
/* Compute Vfl(x) from flood.xproj */
edi->flood.Vfl = flood_energy(edi);
update_adaption(edi);
/* Compute the flooding forces */
flood_forces(edi);
/* Translate them into cartesian coordinates */
flood_blowup(edi, edi->flood.forces_cartesian);
/* Rotate forces back so that they correspond to the given structure and not to the fitted one */
/* Each node rotates back its local forces */
rmrotfit(edi->sav.nr_loc, edi->flood.forces_cartesian, rotmat);
/* Finally add forces to the main force variable */
for (i=0; i<edi->sav.nr_loc; i++)
rvec_inc(force[edi->sav.anrs_loc[i]],edi->flood.forces_cartesian[i]);
/* Output is written by the master process */
if (do_per_step(step,edi->outfrq) && MASTER(cr))
write_edo_flood(edi,edo,step);
}
/* Main flooding routine, called from do_force */
extern void do_flood(FILE *log, /* md.log file */
t_commrec *cr, /* Communication record */
rvec x[], /* Coordinates on the local processor */
rvec force[], /* forcefield forces, to these the flooding forces are added */
gmx_edsam_t ed, /* ed data structure contains all ED and flooding datasets */
matrix box, /* the box */
int step) /* The time step */
{
t_edpar *edi;
if (ed->eEDtype != eEDflood)
return;
edi = ed->edpar;
while (edi)
{
/* Call flooding for one matrix */
if (edi->flood.vecs.neig)
do_single_flood(ed->edo,x,force,edi,step,box,cr);
edi = edi->next_edi;
}
}
/* Called by init_edi, configure some flooding related variables and structures,
* print headers to output files */
static void init_flood(t_edpar *edi, gmx_edsam_t ed, real dt, t_commrec *cr)
{
edi->flood.Efl = edi->flood.constEfl;
edi->flood.Vfl = 0;
edi->flood.dt = dt;
if (edi->flood.vecs.neig)
{
/* If in any of the datasets we find a flooding vector, flooding is turned on */
ed->eEDtype = eEDflood;
fprintf(ed->edo,"FL_HEADER: Flooding of matrix %d is switched on! The flooding output will have the following format:\n",
edi->flood.flood_id);
fprintf(stderr,"ED: Flooding of matrix %d is switched on.\n", edi->flood.flood_id);
if (edi->flood.flood_id<1)
fprintf(ed->edo,"FL_HEADER: Step Efl Vfl deltaF\n");
}
}
/*********** Energy book keeping ******/
static void get_flood_enx_names(t_edpar *edi, char** names, int *nnames) /* get header of energies */
{
t_edpar *actual;
int count;
char buf[STRLEN];
actual=edi;
count = 1;
while (actual)
{
srenew(names,count);
sprintf(buf,"Vfl_%d",count);
names[count-1]=strdup(buf);
actual=actual->next_edi;
count++;
}
*nnames=count-1;
}
static void get_flood_energies(t_edpar *edi, real Vfl[],int nnames)
{
/* Vfl has to be big enough to hold nnames entries */
t_edpar *actual;
int count;
actual=edi;
count = 1;
while (actual)
{
Vfl[count-1]=actual->flood.Vfl;
actual=actual->next_edi;
count++;
}
if (nnames!=count-1)
gmx_fatal(FARGS,"Number of energies is not consistent with t_edi structure");
}
/************* END of FLOODING IMPLEMENTATION ****************************/
static void get_COM(int nat, /* number of atoms in the coordinate buffer */
rvec *x, /* coordinate buffer */
real *m, /* buffer for the masses */
real tmass, /* total mass */
rvec com) /* the center of mass */
{
int i;
rvec xm, dum_com = {.0, .0, .0};
/* calculate COM */
for (i=0; i<nat; i++)
{
svmul(m[i], x[i], xm);
rvec_inc(dum_com, xm);
}
svmul(1.0/tmass, dum_com, dum_com);
com[XX] = dum_com[XX];
com[YY] = dum_com[YY];
com[ZZ] = dum_com[ZZ];
}
/* Put current coordinates into origin */
static void subtract_COM(int nat, /* number of atoms in the coordinate buffer */
rvec *x, /* coordinate buffer */
rvec com) /* the center of mass */
{
int i;
/* subtract COM */
for (i=0; i<nat; i++)
rvec_dec(x[i], com);
}
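/* Allocate the ED data structure and, on the master node, open the .edi input and .edo output files */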
gmx_edsam_t ed_open(int nfile,t_filenm fnm[],t_commrec *cr)
{
gmx_edsam_t ed;
/* Allocate space for the ED data structure */
snew(ed, 1);
/* We want to perform ED (this switch might later be upgraded to eEDflood) */
ed->eEDtype = eEDedsam;
if (MASTER(cr))
{
/* Open .edi input file: */
ed->edinam=ftp2fn(efEDI,nfile,fnm);
/* The master opens the .edo output file */
fprintf(stderr,"ED sampling will be performed!\n");
ed->edonam = ftp2fn(efEDO,nfile,fnm);
ed->edo = gmx_fio_fopen(ed->edonam,"w");
}
return ed;
}
/* Broadcasts the structure data */
static void bc_ed_positions(t_commrec *cr, struct gmx_edx *s, int stype)
{
snew_bc(cr, s->anrs, s->nr ); /* Index numbers */
snew_bc(cr, s->x , s->nr ); /* Positions */
nblock_bc(cr, s->nr, s->anrs );
nblock_bc(cr, s->nr, s->x );
/* For the average & reference structures we need an array for the collective indices,
* and we need to broadcast the masses as well */
if (stype == eedAV || stype == eedREF)
{
/* We need these additional variables in the parallel case: */
snew(s->c_ind , s->nr ); /* Collective indices */
snew(s->anrs_loc , s->nr ); /* Local atom indices */
snew_bc(cr, s->x_old, s->nr); /* To be able to always make the ED molecule whole, ... */
nblock_bc(cr, s->nr, s->x_old); /* ... keep track of shift changes with the help of old coords */
}
/* broadcast masses for the reference structure (for mass-weighted fitting) */
if (stype == eedREF)
{
snew_bc(cr, s->m, s->nr);
nblock_bc(cr, s->nr, s->m);
}
/* For the average structure we might need the masses for mass-weighting */
if (stype == eedAV)
{
snew_bc(cr, s->sqrtm, s->nr);
nblock_bc(cr, s->nr, s->sqrtm);
snew_bc(cr, s->m, s->nr);
nblock_bc(cr, s->nr, s->m);
}
}
/* Broadcasts the eigenvector data */
static void bc_ed_vecs(t_commrec *cr, t_eigvec *ev, int length)
{
int i;
snew_bc(cr, ev->ieig , ev->neig); /* index numbers of eigenvector */
snew_bc(cr, ev->stpsz , ev->neig); /* stepsizes per eigenvector */
snew_bc(cr, ev->xproj , ev->neig); /* instantaneous x projection */
snew_bc(cr, ev->fproj , ev->neig); /* instantaneous f projection */
snew_bc(cr, ev->refproj, ev->neig); /* starting or target projection */
nblock_bc(cr, ev->neig, ev->ieig );
nblock_bc(cr, ev->neig, ev->stpsz );
nblock_bc(cr, ev->neig, ev->xproj );
nblock_bc(cr, ev->neig, ev->fproj );
nblock_bc(cr, ev->neig, ev->refproj);
snew_bc(cr, ev->vec, ev->neig); /* Eigenvector components */
for (i=0; i<ev->neig; i++)
{
snew_bc(cr, ev->vec[i], length);
nblock_bc(cr, length, ev->vec[i]);
}
}
/* Broadcasts the ED / flooding data to other nodes
* and allocates memory where needed */
static void broadcast_ed_data(t_commrec *cr, gmx_edsam_t ed, int numedis)
{
int nr;
t_edpar *edi;
/* Master lets the other nodes know if its ED only or also flooding */
gmx_bcast(sizeof(ed->eEDtype), &(ed->eEDtype), cr);
snew_bc(cr, ed->edpar,1);
/* Now transfer the ED data set(s) */
edi = ed->edpar;
for (nr=0; nr<numedis; nr++)
{
/* Broadcast a single ED data set */
block_bc(cr, *edi);
/* Broadcast positions */
bc_ed_positions(cr, &(edi->sref), eedREF); /* reference positions (don't broadcast masses) */
bc_ed_positions(cr, &(edi->sav ), eedAV ); /* average positions (do broadcast masses as well) */
bc_ed_positions(cr, &(edi->star), eedTAR); /* target positions */
bc_ed_positions(cr, &(edi->sori), eedORI); /* origin positions */
/* Broadcast eigenvectors */
bc_ed_vecs(cr, &edi->vecs.mon , edi->sav.nr);
bc_ed_vecs(cr, &edi->vecs.linfix, edi->sav.nr);
bc_ed_vecs(cr, &edi->vecs.linacc, edi->sav.nr);
bc_ed_vecs(cr, &edi->vecs.radfix, edi->sav.nr);
bc_ed_vecs(cr, &edi->vecs.radacc, edi->sav.nr);
bc_ed_vecs(cr, &edi->vecs.radcon, edi->sav.nr);
/* Broadcast flooding eigenvectors */
bc_ed_vecs(cr, &edi->flood.vecs, edi->sav.nr);
/* Set the pointer to the next ED dataset */
if (edi->next_edi)
{
snew_bc(cr, edi->next_edi, 1);
edi = edi->next_edi;
}
}
}
/* init-routine called for every *.edi-cycle, initialises t_edpar structure */
static void init_edi(gmx_mtop_t *mtop,t_inputrec *ir,
t_commrec *cr,gmx_edsam_t ed,t_edpar *edi)
{
int i;
real totalmass = 0.0;
rvec com;
t_atom *atom;
/* NOTE Init_edi is executed on the master process only
* The initialized data sets are then transmitted to the
* other nodes in broadcast_ed_data */
edi->bNeedDoEdsam = edi->vecs.mon.neig
|| edi->vecs.linfix.neig
|| edi->vecs.linacc.neig
|| edi->vecs.radfix.neig
|| edi->vecs.radacc.neig
|| edi->vecs.radcon.neig;
/* evaluate masses (reference structure) */
snew(edi->sref.m, edi->sref.nr);
for (i = 0; i < edi->sref.nr; i++)
{
if (edi->fitmas)
{
gmx_mtop_atomnr_to_atom(mtop,edi->sref.anrs[i],&atom);
edi->sref.m[i] = atom->m;
}
else
{
edi->sref.m[i] = 1.0;
}
totalmass += edi->sref.m[i];
}
edi->sref.mtot = totalmass;
/* Masses m and sqrt(m) for the average structure. Note that m
* is needed if forces have to be evaluated in do_edsam */
snew(edi->sav.sqrtm, edi->sav.nr );
snew(edi->sav.m , edi->sav.nr );
for (i = 0; i < edi->sav.nr; i++)
{
gmx_mtop_atomnr_to_atom(mtop,edi->sav.anrs[i],&atom);
edi->sav.m[i] = atom->m;
if (edi->pcamas)
{
edi->sav.sqrtm[i] = sqrt(atom->m);
}
else
{
edi->sav.sqrtm[i] = 1.0;
}
}
/* put reference structure in origin */
get_COM(edi->sref.nr, edi->sref.x, edi->sref.m, edi->sref.mtot, com);
subtract_COM(edi->sref.nr, edi->sref.x, com);
/* Init ED buffer */
snew(edi->buf, 1);
}
static void check(char *line, char *label)
{
if (!strstr(line,label))
gmx_fatal(FARGS,"Could not find input parameter %s at expected position in edsam input-file (.edi)\nline read instead is %s",label,line);
}
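/* Read an integer from the .edi file; the preceding line must contain the expected label */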
static int read_checked_edint(FILE *file,char *label)
{
char line[STRLEN+1];
int idum;
fgets2 (line,STRLEN,file);
check(line,label);
fgets2 (line,STRLEN,file);
sscanf (line,"%d",&idum);
return idum;
}
static int read_edint(FILE *file,bool *bEOF)
{
char line[STRLEN+1];
int idum;
char *eof;
eof=fgets2 (line,STRLEN,file);
if (eof==NULL)
{
*bEOF = TRUE;
return -1;
}
eof=fgets2 (line,STRLEN,file);
if (eof==NULL)
{
*bEOF = TRUE;
return -1;
}
sscanf (line,"%d",&idum);
*bEOF = FALSE;
return idum;
}
static real read_checked_edreal(FILE *file,char *label)
{
char line[STRLEN+1];
double rdum;
fgets2 (line,STRLEN,file);
check(line,label);
fgets2 (line,STRLEN,file);
sscanf (line,"%lf",&rdum);
return (real) rdum; /* always read as double and convert to single */
}
static void read_edx(FILE *file,int number,int *anrs,rvec *x)
{
int i,j;
char line[STRLEN+1];
double d[3];
for(i=0; i<number; i++)
{
fgets2 (line,STRLEN,file);
sscanf (line,"%d%lf%lf%lf",&anrs[i],&d[0],&d[1],&d[2]);
anrs[i]--; /* we are reading FORTRAN indices */
for(j=0; j<3; j++)
x[i][j]=d[j]; /* always read as double and convert to single */
}
}
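/* Read nr eigenvector components (one x y z triple per line) from the .edi file */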
static void scan_edvec(FILE *in,int nr,rvec *vec)
{
char line[STRLEN+1];
int i;
double x,y,z;
for(i=0; (i < nr); i++)
{
fgets2 (line,STRLEN,in);
sscanf (line,"%le%le%le",&x,&y,&z);
vec[i][XX]=x;
vec[i][YY]=y;
vec[i][ZZ]=z;
}
}
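/* Read one eigenvector group from the .edi file: the number of vectors, their index numbers and stepsizes, followed by the vector components */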
static void read_edvec(FILE *in,int nr,t_eigvec *tvec)
{
int i,idum;
double rdum;
char line[STRLEN+1];
tvec->neig=read_checked_edint(in,"NUMBER OF EIGENVECTORS");
if (tvec->neig >0)
{
snew(tvec->ieig,tvec->neig);
snew(tvec->stpsz,tvec->neig);
snew(tvec->vec,tvec->neig);
snew(tvec->xproj,tvec->neig);
snew(tvec->fproj,tvec->neig);
snew(tvec->refproj,tvec->neig);
for(i=0; (i < tvec->neig); i++)
{
fgets2 (line,STRLEN,in);
sscanf (line,"%d%lf",&idum,&rdum);
tvec->ieig[i]=idum;
tvec->stpsz[i]=rdum;
}
for(i=0; (i < tvec->neig); i++)
{
snew(tvec->vec[i],nr);
scan_edvec(in,nr,tvec->vec[i]);
}
}
}
/* calls read_edvec for the vector groups, only for flooding there is an extra call */
static void read_edvecs(FILE *in,int nr,t_edvecs *vecs)
{
read_edvec(in,nr,&vecs->mon );
read_edvec(in,nr,&vecs->linfix);
read_edvec(in,nr,&vecs->linacc);
read_edvec(in,nr,&vecs->radfix);
read_edvec(in,nr,&vecs->radacc);
read_edvec(in,nr,&vecs->radcon);
}
/* Check if the same atom indices are used for reference and average positions */
static bool check_if_same(struct gmx_edx sref, struct gmx_edx sav)
{
int i;
/* If the number of atoms differs between the two structures,
* they cannot be identical */
if (sref.nr != sav.nr)
return FALSE;
/* Now that we know that both structures have the same number of atoms,
* check if also the indices are identical */
for (i=0; i < sav.nr; i++)
{
if (sref.anrs[i] != sav.anrs[i])
return FALSE;
}
fprintf(stderr, "ED: Note: Reference and average structure are composed of the same atom indices.\n");
return TRUE;
}
static int read_edi(FILE* in, gmx_edsam_t ed,t_edpar *edi,int nr_mdatoms, int edi_nr, t_commrec *cr)
{
int readmagic;
static const int magic=669;
bool bEOF;
/* the edi file is not free format, so expect problems if the input is corrupt. */
/* check the magic number */
readmagic=read_edint(in,&bEOF);
/* Check whether we have reached the end of the input file */
if (bEOF)
return 0;
if (readmagic != magic)
{
if (readmagic==666 || readmagic==667 || readmagic==668)
gmx_fatal(FARGS,"wrong magic number: Use newest version of make_edi to produce edi file");
else
gmx_fatal(FARGS,"Wrong magic number %d in %s",readmagic,ed->edinam);
}
/* check the number of atoms */
edi->nini=read_edint(in,&bEOF);
if (edi->nini != nr_mdatoms)
gmx_fatal(FARGS,"Nr of atoms in %s (%d) does not match nr of md atoms (%d)",
ed->edinam,edi->nini,nr_mdatoms);
/* Done checking. For the rest we blindly trust the input */
edi->fitmas = read_checked_edint(in,"FITMAS");
edi->pcamas = read_checked_edint(in,"ANALYSIS_MAS");
edi->outfrq = read_checked_edint(in,"OUTFRQ");
edi->maxedsteps = read_checked_edint(in,"MAXLEN");
edi->slope = read_checked_edreal(in,"SLOPECRIT");
edi->presteps = read_checked_edint(in,"PRESTEPS");
edi->flood.deltaF0 = read_checked_edreal(in,"DELTA_F0");
edi->flood.deltaF = read_checked_edreal(in,"INIT_DELTA_F");
edi->flood.tau = read_checked_edreal(in,"TAU");
edi->flood.constEfl = read_checked_edreal(in,"EFL_NULL");
edi->flood.alpha2 = read_checked_edreal(in,"ALPHA2");
edi->flood.kT = read_checked_edreal(in,"KT");
edi->flood.bHarmonic = read_checked_edint(in,"HARMONIC");
edi->flood.flood_id = edi_nr;
edi->sref.nr = read_checked_edint(in,"NREF");
/* allocate space for reference positions and read them */
snew(edi->sref.anrs,edi->sref.nr);
snew(edi->sref.x ,edi->sref.nr);
if (PAR(cr))
snew(edi->sref.x_old,edi->sref.nr);
edi->sref.sqrtm =NULL;
read_edx(in,edi->sref.nr,edi->sref.anrs,edi->sref.x);
/* average positions. they define which atoms will be used for ED sampling */
edi->sav.nr=read_checked_edint(in,"NAV");
snew(edi->sav.anrs,edi->sav.nr);
snew(edi->sav.x ,edi->sav.nr);
if (PAR(cr))
snew(edi->sav.x_old,edi->sav.nr);
read_edx(in,edi->sav.nr,edi->sav.anrs,edi->sav.x);
/* Check if the same atom indices are used for reference and average positions */
edi->bRefEqAv = check_if_same(edi->sref, edi->sav);
/* eigenvectors */
read_edvecs(in,edi->sav.nr,&edi->vecs);
read_edvec(in,edi->sav.nr,&edi->flood.vecs);
/* target positions */
edi->star.nr=read_edint(in,&bEOF);
if (edi->star.nr > 0)
{
snew(edi->star.anrs,edi->star.nr);
snew(edi->star.x ,edi->star.nr);
edi->star.sqrtm =NULL;
read_edx(in,edi->star.nr,edi->star.anrs,edi->star.x);
}
/* positions defining origin of expansion circle */
edi->sori.nr=read_edint(in,&bEOF);
if (edi->sori.nr > 0)
{
snew(edi->sori.anrs,edi->sori.nr);
snew(edi->sori.x ,edi->sori.nr);
edi->sori.sqrtm =NULL;
read_edx(in,edi->sori.nr,edi->sori.anrs,edi->sori.x);
}
/* all done */
return 1;
}
/* Read in the edi input file. Note that it may contain several ED data sets which were
* achieved by concatenating multiple edi files. The standard case would be a single ED
* data set, though. */
static void read_edi_file(gmx_edsam_t ed, t_edpar *edi, int nr_mdatoms, t_commrec *cr)
{
FILE *in;
t_edpar *curr_edi,*last_edi;
t_edpar *edi_read;
int edi_nr = 0;
/* This routine is executed on the master only */
/* Open the .edi parameter input file */
in = gmx_fio_fopen(ed->edinam,"r");
fprintf(stderr, "ED: Reading edi file %s\n", ed->edinam);
/* Now read a sequence of ED input parameter sets from the edi file */
curr_edi=edi;
last_edi=edi;
while( read_edi(in, ed, curr_edi, nr_mdatoms, edi_nr, cr) )
{
edi_nr++;
/* Make sure that the number of atoms in each dataset is the same as in the tpr file */
if (edi->nini != nr_mdatoms)
gmx_fatal(FARGS,"edi file %s (dataset #%d) was made for %d atoms, but the simulation contains %d atoms.",
ed->edinam, edi_nr, edi->nini, nr_mdatoms);
/* Since we arrived within this while loop we know that there is still another data set to be read in */
/* We need to allocate space for the data: */
snew(edi_read,1);
/* Point the 'next_edi' entry to the next edi: */
curr_edi->next_edi=edi_read;
/* Keep the curr_edi pointer for the case that the next dataset is empty: */
last_edi = curr_edi;
/* Let's prepare to read in the next edi data set: */
curr_edi = edi_read;
}
if (edi_nr == 0)
gmx_fatal(FARGS, "No complete ED data set found in edi file %s.", ed->edinam);
/* Terminate the edi dataset list with a NULL pointer: */
last_edi->next_edi = NULL;
fprintf(stderr, "ED: Found %d ED dataset%s.\n", edi_nr, edi_nr>1? "s" : "");
/* Close the .edi file again */
gmx_fio_fclose(in);
}
/* Fit the current coordinates to the reference coordinates
* Do not actually do the fit, just return rotation and translation.
* Note that the COM of the reference structure was already put into
* the origin by init_edi. New version of fitit */
static void fit_to_reference(rvec *xcoll, /* The coordinates to be fitted */
rvec transvec, /* The translation vector */
matrix rotmat, /* The rotation matrix */
t_edpar *edi) /* Just needed for do_edfit */
{
static rvec *xcopy=NULL; /* Working copy of the coordinates */
static int alloc=0; /* Keep track of buffer alloc size */
static rvec com; /* center of mass */
int i;
GMX_MPE_LOG(ev_fit_to_reference_start);
/* We do not touch the original coordinates but work on a copy.
* Take care with buffer allocation, since sref.nr can be different for
* each edi dataset */
if (alloc < edi->sref.nr)
{
alloc = edi->sref.nr;
srenew(xcopy, alloc);
}
for (i=0; i<edi->sref.nr; i++)
copy_rvec(xcoll[i], xcopy[i]);
/* Calculate the center of mass */
get_COM(edi->sref.nr, xcopy, edi->sref.m, edi->sref.mtot, com);
/* Subtract the center of mass from the copy */
subtract_COM(edi->sref.nr, xcopy, com);
/* Determine the rotation matrix */
do_edfit(edi->sref.nr, edi->sref.x, xcopy, rotmat, edi);
transvec[XX] = -com[XX];
transvec[YY] = -com[YY];
transvec[ZZ] = -com[ZZ];
GMX_MPE_LOG(ev_fit_to_reference_finish);
}
static void translate_and_rotate(rvec *x, /* The coordinates to be translated and rotated */
int nat, /* How many coordinates are there */
rvec transvec, /* The translation vector */
matrix rotmat) /* The rotation matrix */
{
int i;
/* Translation */
for (i=0; i<nat; i++)
rvec_inc(x[i], transvec);
/* Rotation */
rotate_x(nat, x, rotmat);
}
/* Gets the rms deviation of the x coordinates to the structure s */
/* fit_to_structure has to be called before calling this routine! */
static real rmsd_from_structure(rvec *x, /* The x coordinates under consideration */
struct gmx_edx *s) /* The structure from which the rmsd shall be computed */
{
real rmsd=0.0;
int i;
for (i=0; i < s->nr; i++)
rmsd += distance2(s->x[i], x[i]);
rmsd /= (real) s->nr;
rmsd = sqrt(rmsd);
return rmsd;
}
/* select the indices of the ED atoms which are local
*
* Only the indices that correspond to the structure s are
* taken into account and saved in s->c_ind[]
*/
static void dd_make_local_indices(gmx_domdec_t *dd, struct gmx_edx *s, t_mdatoms *md)
{
int i,ii;
gmx_ga2la_t *ga2la=NULL;
ga2la = dd->ga2la;
/* we have not yet found a local atom */
s->nr_loc = 0;
/* go through all the atom indices of the structure */
for(i=0; i<s->nr; i++)
{
if (ga2la[s->anrs[i]].cell == 0)
{
ii = ga2la[s->anrs[i]].a;
if (ii < md->start+md->homenr)
{
/* The atom with this index is a home atom, therefore
* save its local index in local atom numbers array */
s->anrs_loc[s->nr_loc] = ii;
/* keep track of where this local atom is in the collective c_ind array: */
s->c_ind[s->nr_loc] = i;
/* add one to the local atom count */
s->nr_loc++;
}
}
}
}
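/* Rebuild the local index arrays for all ED datasets after a domain decomposition repartitioning
* and flag that the ED shift vectors need to be updated at the next coordinate assembly */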
void dd_make_local_ed_indices(gmx_domdec_t *dd, struct gmx_edsam *ed,t_mdatoms *md)
{
t_edpar *edi;
if (ed->eEDtype != eEDnone)
{
/* Loop over ED datasets (usually there is just one dataset, though) */
edi=ed->edpar;
while (edi)
{
/* Local atoms of the reference structure (for fitting), need only be assembled
* if their indices differ from the average ones */
if (!edi->bRefEqAv)
dd_make_local_indices(dd, &edi->sref, md);
/* Local atoms of the average structure (on these ED will be performed) */
dd_make_local_indices(dd, &edi->sav , md);
/* Indicate that the ED shift vectors for this structure need to be updated
* at the next call to get_coordinates, since obviously we are in a NS step */
edi->buf->do_edsam->bUpdateShifts = TRUE;
/* Set the pointer to the next ED dataset (if any) */
edi=edi->next_edi;
}
}
}
static void ed_get_shifts(int npbcdim, matrix box,
rvec *xc, struct gmx_edx *s, ivec *shifts)
{
int i,m,d;
rvec dx;
/* Get the shifts such that each atom is within closest
* distance to its position at the last NS time step after shifting.
* If we start with a whole structure, and always keep track of
* shift changes, the structure will stay whole this way */
for (i=0; i < s->nr; i++)
clear_ivec(shifts[i]);
for (i=0; i<s->nr; i++)
{
/* The distance this atom moved since the last time step */
/* If this is more than just a bit, it has changed its home pbc box */
rvec_sub(xc[i],s->x_old[i],dx);
for(m=npbcdim-1; m>=0; m--)
{
while (dx[m] < -0.5*box[m][m])
{
for(d=0; d<DIM; d++)
dx[d] += box[m][d];
shifts[i][m]++;
}
while (dx[m] >= 0.5*box[m][m])
{
for(d=0; d<DIM; d++)
dx[d] -= box[m][d];
shifts[i][m]--;
}
}
}
}
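/* Apply the stored shift vectors to the nr ED coordinates; triclinic and rectangular boxes are handled separately */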
static void ed_shift_coords(matrix box, rvec x[], ivec *is, int nr)
{
int i,tx,ty,tz;
GMX_MPE_LOG(ev_shift_start);
/* Loop over the ED atoms */
if(TRICLINIC(box))
{
for (i=0; i < nr; i++)
{
tx=is[i][XX];
ty=is[i][YY];
tz=is[i][ZZ];
x[i][XX]=x[i][XX]+tx*box[XX][XX]+ty*box[YY][XX]+tz*box[ZZ][XX];
x[i][YY]=x[i][YY]+ty*box[YY][YY]+tz*box[ZZ][YY];
x[i][ZZ]=x[i][ZZ]+tz*box[ZZ][ZZ];
}
} else
{
for (i=0; i < nr; i++)
{
tx=is[i][XX];
ty=is[i][YY];
tz=is[i][ZZ];
x[i][XX]=x[i][XX]+tx*box[XX][XX];
x[i][YY]=x[i][YY]+ty*box[YY][YY];
x[i][ZZ]=x[i][ZZ]+tz*box[ZZ][ZZ];
}
}
GMX_MPE_LOG(ev_shift_finish);
}
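/* Undo the shift for a single coordinate (inverse of ed_shift_coords, applied to one atom) */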
static inline void ed_unshift_single_coord(matrix box, const rvec x, const ivec is, rvec xu)
{
int tx,ty,tz;
GMX_MPE_LOG(ev_unshift_start);
tx=is[XX];
ty=is[YY];
tz=is[ZZ];
if(TRICLINIC(box))
{
xu[XX] = x[XX]-tx*box[XX][XX]-ty*box[YY][XX]-tz*box[ZZ][XX];
xu[YY] = x[YY]-ty*box[YY][YY]-tz*box[ZZ][YY];
xu[ZZ] = x[ZZ]-tz*box[ZZ][ZZ];
} else
{
xu[XX] = x[XX]-tx*box[XX][XX];
xu[YY] = x[YY]-ty*box[YY][YY];
xu[ZZ] = x[ZZ]-tz*box[ZZ][ZZ];
}
GMX_MPE_LOG(ev_unshift_finish);
}
/* Assemble the coordinates such that every node has all of them.
* Get the indices from structure s */
static void get_coordinates(t_commrec *cr,
rvec *xc, /* Collective array of coordinates (write here) */
ivec *shifts_xc, /* Collective array of shifts */
ivec *extra_shifts_xc, /* Extra shifts since last time step */
bool bNeedShiftsUpdate, /* NS step, the shifts have changed */
rvec *x_loc, /* Local coordinates on this node (read coords from here) */
struct gmx_edx *s, /* The structure for which to get the current coordinates */
matrix box,
char title[])
{
int i;
GMX_MPE_LOG(ev_get_coords_start);
/* Zero out the collective coordinate array */
clear_rvecs(s->nr, xc);
/* Put the local coordinates that this node has into the right place of
* the collective array. Note that in the serial case, s->c_ind[i] = i */
for (i=0; i<s->nr_loc; i++)
copy_rvec(x_loc[s->anrs_loc[i]], xc[s->c_ind[i]]);
if (PAR(cr))
{
/* Add the arrays from all nodes together */
gmx_sum(s->nr*3, xc[0], cr);
/* To make the ED molecule whole, start with a whole structure and each
* step move the assembled coordinates at closest distance to the positions
* from the last step. First shift the coordinates with the saved ED shift
* vectors (these are 0 when this routine is called for the first time!) */
ed_shift_coords(box, xc, shifts_xc, s->nr);
/* Now check if some shifts changed since the last step.
* This only needs to be done when the shifts are expected to have changed,
* i.e. after neighboursearching */
if (bNeedShiftsUpdate)
{
ed_get_shifts(3, box, xc, s, extra_shifts_xc);
/* Shift with the additional shifts such that we get a whole molecule now */
ed_shift_coords(box, xc, extra_shifts_xc, s->nr);
/* Add the shift vectors together for the next time step */
for (i=0; i<s->nr; i++)
{
shifts_xc[i][XX] += extra_shifts_xc[i][XX];
shifts_xc[i][YY] += extra_shifts_xc[i][YY];
shifts_xc[i][ZZ] += extra_shifts_xc[i][ZZ];
}
/* Store current correctly-shifted coordinates for comparison in the next NS time step */
for (i=0; i<s->nr; i++)
copy_rvec(xc[i],s->x_old[i]);
}
}
GMX_MPE_LOG(ev_get_coords_finish);
}
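/* Fixed-increment linear ED: drive the projection onto each linfix eigenvector toward refproj + step*stpsz */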
static void do_linfix(rvec *xcoll, t_edpar *edi, int step, t_commrec *cr)
{
int i, j;
real proj, add;
rvec vec_dum;
/* loop over linfix vectors */
for (i=0; i<edi->vecs.linfix.neig; i++)
{
/* calculate the projection */
proj = projectx(edi, xcoll, edi->vecs.linfix.vec[i]);
/* calculate the correction */
add = edi->vecs.linfix.refproj[i] + step*edi->vecs.linfix.stpsz[i] - proj;
/* apply the correction */
add /= edi->sav.sqrtm[i];
for (j=0; j<edi->sav.nr; j++)
{
svmul(add, edi->vecs.linfix.vec[i][j], vec_dum);
rvec_inc(xcoll[j], vec_dum);
}
}
}
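/* Linear acceptance ED: motion along each linacc eigenvector is only accepted in the direction given by the
* sign of stpsz; motion in the other direction is corrected back, and the corrected projection becomes the
* new reference */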
static void do_linacc(rvec *xcoll, t_edpar *edi, t_commrec *cr)
{
int i, j;
real proj, add;
rvec vec_dum;
/* loop over linacc vectors */
for (i=0; i<edi->vecs.linacc.neig; i++)
{
/* calculate the projection */
proj=projectx(edi, xcoll, edi->vecs.linacc.vec[i]);
/* calculate the correction */
add = 0.0;
if (edi->vecs.linacc.stpsz[i] > 0.0)
{
if ((proj-edi->vecs.linacc.refproj[i]) < 0.0)
add = edi->vecs.linacc.refproj[i] - proj;
}
if (edi->vecs.linacc.stpsz[i] < 0.0)
{
if ((proj-edi->vecs.linacc.refproj[i]) > 0.0)
add = edi->vecs.linacc.refproj[i] - proj;
}
/* apply the correction */
add /= edi->sav.sqrtm[i];
for (j=0; j<edi->sav.nr; j++)
{
svmul(add, edi->vecs.linacc.vec[i][j], vec_dum);
rvec_inc(xcoll[j], vec_dum);
}
/* new positions will act as reference */
edi->vecs.linacc.refproj[i] = proj + add;
}
}
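/* Fixed-increment radius expansion: grow the radius in the subspace spanned by
 * the radfix eigenvectors by stpsz every step and rescale the projections
 * (relative to refproj) accordingly. */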
static void do_radfix(rvec *xcoll, t_edpar *edi, int step, t_commrec *cr)
{
int i,j;
real *proj, rad=0.0, ratio;
rvec vec_dum;
if (edi->vecs.radfix.neig == 0)
return;
snew(proj, edi->vecs.radfix.neig);
/* loop over radfix vectors */
for (i=0; i<edi->vecs.radfix.neig; i++)
{
/* calculate the projections, radius */
proj[i] = projectx(edi, xcoll, edi->vecs.radfix.vec[i]);
rad += pow(proj[i] - edi->vecs.radfix.refproj[i], 2);
}
rad = sqrt(rad);
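/* Scale factor that takes the current radius to the target radius (previous radius + stpsz) */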
ratio = (edi->vecs.radfix.stpsz[0]+edi->vecs.radfix.radius)/rad - 1.0;
edi->vecs.radfix.radius += edi->vecs.radfix.stpsz[0];
/* loop over radfix vectors */
for (i=0; i<edi->vecs.radfix.neig; i++)
{
proj[i] -= edi->vecs.radfix.refproj[i];
/* apply the correction */
proj[i] /= edi->sav.sqrtm[i];
proj[i] *= ratio;
for (j=0; j<edi->sav.nr; j++) {
svmul(proj[i], edi->vecs.radfix.vec[i][j], vec_dum);
rvec_inc(xcoll[j], vec_dum);
}
}
sfree(proj);
}
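/* Radius acceptance (expansion): if the radius in the radacc subspace has
 * shrunk, push it back out to the largest radius reached so far; otherwise
 * accept the new, larger radius. */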
static void do_radacc(rvec *xcoll, t_edpar *edi, t_commrec *cr)
{
int i,j;
real *proj, rad=0.0, ratio=0.0;
rvec vec_dum;
if (edi->vecs.radacc.neig == 0)
return;
snew(proj,edi->vecs.radacc.neig);
/* loop over radacc vectors */
for (i=0; i<edi->vecs.radacc.neig; i++)
{
/* calculate the projections, radius */
proj[i] = projectx(edi, xcoll, edi->vecs.radacc.vec[i]);
rad += pow(proj[i] - edi->vecs.radacc.refproj[i], 2);
}
rad = sqrt(rad);
/* only correct when radius decreased */
if (rad < edi->vecs.radacc.radius)
{
ratio = edi->vecs.radacc.radius/rad - 1.0;
rad = edi->vecs.radacc.radius;
}
else
edi->vecs.radacc.radius = rad;
/* loop over radacc vectors */
for (i=0; i<edi->vecs.radacc.neig; i++)
{
proj[i] -= edi->vecs.radacc.refproj[i];
/* apply the correction */
proj[i] /= edi->sav.sqrtm[i];
proj[i] *= ratio;
for (j=0; j<edi->sav.nr; j++)
{
svmul(proj[i], edi->vecs.radacc.vec[i][j], vec_dum);
rvec_inc(xcoll[j], vec_dum);
}
}
sfree(proj);
}
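/* Buffer that keeps the per-eigenvector projections between calls to do_radcon */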
struct t_do_radcon {
real *proj;
};
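/* Radius contraction: if the radius in the radcon subspace has grown beyond the
 * stored radius, scale the projections back onto the allowed sphere; otherwise
 * accept the new, smaller radius. */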
static void do_radcon(rvec *xcoll, t_edpar *edi, t_commrec *cr)
{
int i,j;
real rad=0.0, ratio=0.0;
struct t_do_radcon *loc;
bool bFirst;
rvec vec_dum;
if(edi->buf->do_radcon != NULL)
{
bFirst = FALSE;
loc = edi->buf->do_radcon;
}
else
{
bFirst = TRUE;
snew(edi->buf->do_radcon, 1);
}
loc = edi->buf->do_radcon;
if (edi->vecs.radcon.neig == 0)
return;
if (bFirst)
snew(loc->proj, edi->vecs.radcon.neig);
/* loop over radcon vectors */
for (i=0; i<edi->vecs.radcon.neig; i++)
{
/* calculate the projections, radius */
loc->proj[i] = projectx(edi, xcoll, edi->vecs.radcon.vec[i]);
rad += pow(loc->proj[i] - edi->vecs.radcon.refproj[i], 2);
}
rad = sqrt(rad);
/* only correct when radius increased */
if (rad > edi->vecs.radcon.radius)
{
ratio = edi->vecs.radcon.radius/rad - 1.0;
/* loop over radcon vectors */
for (i=0; i<edi->vecs.radcon.neig; i++)
{
/* apply the correction */
loc->proj[i] -= edi->vecs.radcon.refproj[i];
loc->proj[i] /= edi->sav.sqrtm[i];
loc->proj[i] *= ratio;
for (j=0; j<edi->sav.nr; j++)
{
svmul(loc->proj[i], edi->vecs.radcon.vec[i][j], vec_dum);
rvec_inc(xcoll[j], vec_dum);
}
}
}
else
edi->vecs.radcon.radius = rad;
if (rad != edi->vecs.radcon.radius)
{
rad = 0.0;
for (i=0; i<edi->vecs.radcon.neig; i++)
{
/* calculate the projections, radius */
loc->proj[i] = projectx(edi, xcoll, edi->vecs.radcon.vec[i]);
rad += pow(loc->proj[i] - edi->vecs.radcon.refproj[i], 2);
}
rad = sqrt(rad);
}
}
static void ed_apply_constraints(rvec *xcoll, t_edpar *edi, int step, t_commrec *cr)
{
int i;
GMX_MPE_LOG(ev_ed_apply_cons_start);
/* subtract the average positions */
for (i=0; i<edi->sav.nr; i++)
rvec_dec(xcoll[i], edi->sav.x[i]);
/* apply the constraints */
if (step >= 0)
do_linfix(xcoll, edi, step, cr);
do_linacc(xcoll, edi, cr);
if (step >= 0)
do_radfix(xcoll, edi, step, cr);
do_radacc(xcoll, edi, cr);
do_radcon(xcoll, edi, cr);
/* add back the average positions */
for (i=0; i<edi->sav.nr; i++)
rvec_inc(xcoll[i], edi->sav.x[i]);
GMX_MPE_LOG(ev_ed_apply_cons_finish);
}
/* Write out the projections onto the eigenvectors */
static void write_edo(int nr_edi, t_edpar *edi, gmx_edsam_t ed, int step,real rmsd)
{
int i;
if (edi->bNeedDoEdsam)
{
if (step == -1)
fprintf(ed->edo, "Initial projections:\n");
else
{
fprintf(ed->edo,"Step %d, ED #%d ",step,nr_edi);
fprintf(ed->edo," RMSD %f nm\n",rmsd);
if (ed->eEDtype == eEDflood)
fprintf(ed->edo, " Efl=%f deltaF=%f Vfl=%f\n",edi->flood.Efl,edi->flood.deltaF,edi->flood.Vfl);
}
if (edi->vecs.mon.neig)
{
fprintf(ed->edo," Monitor eigenvectors");
for (i=0; i<edi->vecs.mon.neig; i++)
fprintf(ed->edo," %d: %12.5e ",edi->vecs.mon.ieig[i],edi->vecs.mon.xproj[i]);
fprintf(ed->edo,"\n");
}
if (edi->vecs.linfix.neig)
{
fprintf(ed->edo," Linfix eigenvectors");
for (i=0; i<edi->vecs.linfix.neig; i++)
fprintf(ed->edo," %d: %12.5e ",edi->vecs.linfix.ieig[i],edi->vecs.linfix.xproj[i]);
fprintf(ed->edo,"\n");
}
if (edi->vecs.linacc.neig)
{
fprintf(ed->edo," Linacc eigenvectors");
for (i=0; i<edi->vecs.linacc.neig; i++)
fprintf(ed->edo," %d: %12.5e ",edi->vecs.linacc.ieig[i],edi->vecs.linacc.xproj[i]);
fprintf(ed->edo,"\n");
}
if (edi->vecs.radfix.neig)
{
fprintf(ed->edo," Radfix eigenvectors");
for (i=0; i<edi->vecs.radfix.neig; i++)
fprintf(ed->edo," %d: %12.5e ",edi->vecs.radfix.ieig[i],edi->vecs.radfix.xproj[i]);
fprintf(ed->edo,"\n");
fprintf(ed->edo," fixed increment radius = %f\n", calc_radius(&edi->vecs.radfix));
}
if (edi->vecs.radacc.neig)
{
fprintf(ed->edo," Radacc eigenvectors");
for (i=0; i<edi->vecs.radacc.neig; i++)
fprintf(ed->edo," %d: %12.5e ",edi->vecs.radacc.ieig[i],edi->vecs.radacc.xproj[i]);
fprintf(ed->edo,"\n");
fprintf(ed->edo," acceptance radius = %f\n", calc_radius(&edi->vecs.radacc));
}
if (edi->vecs.radcon.neig)
{
fprintf(ed->edo," Radcon eigenvectors");
for (i=0; i<edi->vecs.radcon.neig; i++)
fprintf(ed->edo," %d: %12.5e ",edi->vecs.radcon.ieig[i],edi->vecs.radcon.xproj[i]);
fprintf(ed->edo,"\n");
fprintf(ed->edo," contracting radius = %f\n", calc_radius(&edi->vecs.radcon));
}
}
else
{
fprintf(ed->edo, " NOTE: none of the ED options mon/linfix/linacc/radfix/radacc/radcon were chosen for dataset #%d!\n", nr_edi);
}
}
/* Returns non-zero if any ED constraints are switched on */
static int ed_constraints(bool edtype, t_edpar *edi)
{
if (edtype == eEDedsam || edtype == eEDflood)
{
return (edi->vecs.linfix.neig || edi->vecs.linacc.neig ||
edi->vecs.radfix.neig || edi->vecs.radacc.neig ||
edi->vecs.radcon.neig);
}
return 0;
}
void init_edsam(gmx_mtop_t *mtop, /* global topology */
t_inputrec *ir, /* input record */
t_commrec *cr, /* communication record */
gmx_edsam_t ed, /* contains all ED data */
rvec x[], /* coordinates of the whole MD system */
matrix box) /* the box */
{
t_edpar *edi = NULL; /* points to a single edi data set */
int numedis=0; /* keep track of the number of ED data sets in edi file */
int i,nr_edi;
rvec *x_pbc = NULL; /* coordinates of the whole MD system with pbc removed */
rvec *xfit = NULL; /* the coordinates which will be fitted to the reference structure */
rvec *xstart = NULL; /* the coordinates which are subject to ED sampling */
rvec fit_transvec; /* translation ... */
matrix fit_rotmat; /* ... and rotation from fit to reference structure */
if (!DOMAINDECOMP(cr) && PAR(cr) && MASTER(cr))
gmx_fatal(FARGS, "Please switch on domain decomposition to use essential dynamics in parallel.");
GMX_MPE_LOG(ev_edsam_start);
if (MASTER(cr))
fprintf(stderr, "ED: Initialzing essential dynamics constraints.\n");
/* The input file is read by the master and the edi structures are
* initialized here. Input is stored in ed->edpar. Then the edi
* structures are transferred to the other nodes */
if (MASTER(cr))
{
snew(ed->edpar,1);
/* Read the whole edi file at once: */
read_edi_file(ed,ed->edpar,mtop->natoms,cr);
/* Initialization for every ED/flooding dataset. Flooding uses one edi dataset per
 * flooding vector; essential dynamics can be applied to more than one structure
 * as well, but will be done in the order given in the edi file, so
 * expect different results for a different order of edi file concatenation! */
edi=ed->edpar;
while(edi != NULL)
{
init_edi(mtop,ir,cr,ed,edi);
/* Init flooding parameters if needed */
init_flood(edi,ed,ir->delta_t,cr);
edi=edi->next_edi;
numedis++;
}
}
/* The master does the work here. The other nodes do not get the coordinates
 * before dd_partition_system, which is called after init_edsam */
if (MASTER(cr))
{
/* Remove pbc, make molecule whole.
* When ir->bContinuation=TRUE this has already been done, but ok.
*/
snew(x_pbc,mtop->natoms);
m_rveccopy(mtop->natoms,x,x_pbc);
do_pbc_first_mtop(NULL,ir->ePBC,box,mtop,x_pbc);
/* Reset pointer to first ED data set which contains the actual ED data */
edi=ed->edpar;
/* Loop over all ED/flooding data sets (usually only one, though) */
for (nr_edi = 1; nr_edi <= numedis; nr_edi++)
{
/* We use srenew to allocate memory since the size of the buffers
* is likely to change with every ED dataset */
srenew(xfit , edi->sref.nr );
srenew(xstart, edi->sav.nr );
/* Extract the coordinates of the atoms that will be fitted to the reference structure */
for (i=0; i < edi->sref.nr; i++)
{
copy_rvec(x_pbc[edi->sref.anrs[i]], xfit[i]);
/* Save the sref coordinates such that in the next time step the molecule can
* be made whole again (in the parallel case) */
if (PAR(cr))
copy_rvec(xfit[i], edi->sref.x_old[i]);
}
/* Extract the coordinates of the atoms subject to ED sampling */
for (i=0; i < edi->sav.nr; i++)
{
copy_rvec(x_pbc[edi->sav.anrs[i]], xstart[i]);
/* Save the sav coordinates such that in the next time step the molecule can
* be made whole again (in the parallel case) */
if (PAR(cr))
copy_rvec(xstart[i], edi->sav.x_old[i]);
}
/* Make the fit to the REFERENCE structure, get translation and rotation */
fit_to_reference(xfit, fit_transvec, fit_rotmat, edi);
/* Output how well we fit to the reference at the start */
translate_and_rotate(xfit, edi->sref.nr, fit_transvec, fit_rotmat);
fprintf(stderr, "ED: Initial RMSD from reference after fit = %f nm (dataset #%d)\n",
rmsd_from_structure(xfit, &edi->sref), nr_edi);
/* Now apply the translation and rotation to the atoms on which ED sampling will be performed */
translate_and_rotate(xstart, edi->sav.nr, fit_transvec, fit_rotmat);
/* calculate initial projections */
project(xstart, edi);
/* process target structure, if required */
if (edi->star.nr > 0)
{
/* get translation & rotation for fit of target structure to reference structure */
fit_to_reference(edi->star.x, fit_transvec, fit_rotmat, edi);
/* do the fit */
translate_and_rotate(edi->star.x, edi->sav.nr, fit_transvec, fit_rotmat);
rad_project(edi, edi->star.x, &edi->vecs.radcon, cr);
} else
rad_project(edi, xstart, &edi->vecs.radcon, cr);
/* process structure that will serve as origin of expansion circle */
if (edi->sori.nr > 0)
{
/* fit this structure to reference structure */
fit_to_reference(edi->sori.x, fit_transvec, fit_rotmat, edi);
/* do the fit */
translate_and_rotate(edi->sori.x, edi->sav.nr, fit_transvec, fit_rotmat);
rad_project(edi, edi->sori.x, &edi->vecs.radacc, cr);
rad_project(edi, edi->sori.x, &edi->vecs.radfix, cr);
if (ed->eEDtype == eEDflood)
{
/* Set center of flooding potential to the ORIGIN structure */
rad_project(edi, edi->sori.x, &edi->flood.vecs, cr);
}
}
else
{
rad_project(edi, xstart, &edi->vecs.radacc, cr);
rad_project(edi, xstart, &edi->vecs.radfix, cr);
if (ed->eEDtype == eEDflood)
{
/* Set center of flooding potential to the center of the covariance matrix,
* i.e. the average structure, i.e. zero in the projected system */
for (i=0; i<edi->flood.vecs.neig; i++)
edi->flood.vecs.refproj[i] = 0.0;
}
}
/* set starting projections for linsam */
rad_project(edi, xstart, &edi->vecs.linacc, cr);
rad_project(edi, xstart, &edi->vecs.linfix, cr);
/* Output to file, set the step to -1 so that write_edo knows it was called from init_edsam */
if (ed->edo)
write_edo(nr_edi, edi, ed, -1, 0);
/* Prepare for the next edi data set: */
edi=edi->next_edi;
}
/* Cleaning up on the master node: */
sfree(x_pbc);
sfree(xfit);
sfree(xstart);
} /* end of MASTER only section */
if (PAR(cr))
{
/* First let everybody know how many ED data sets to expect */
gmx_bcast(sizeof(numedis), &numedis, cr);
/* Broadcast the essential dynamics / flooding data to all nodes */
broadcast_ed_data(cr, ed, numedis);
}
else
{
/* In the single-CPU case, point the local atom number pointers to the global
 * ones, so that we can use the same notation in the serial and parallel cases: */
/* Loop over all ED data sets (usually only one, though) */
edi=ed->edpar;
for (nr_edi = 1; nr_edi <= numedis; nr_edi++)
{
edi->sref.anrs_loc = edi->sref.anrs;
edi->sav.anrs_loc = edi->sav.anrs;
edi->star.anrs_loc = edi->star.anrs;
edi->sori.anrs_loc = edi->sori.anrs;
/* For the same reason as above, make a dummy c_ind array: */
snew(edi->sav.c_ind, edi->sav.nr);
/* Initialize the array */
for (i=0; i<edi->sav.nr; i++)
edi->sav.c_ind[i] = i;
/* In the general case we will need a different-sized array for the reference indices: */
if (!edi->bRefEqAv)
{
snew(edi->sref.c_ind, edi->sref.nr);
for (i=0; i<edi->sref.nr; i++)
edi->sref.c_ind[i] = i;
}
/* Point to the very same array in case of other structures: */
edi->star.c_ind = edi->sav.c_ind;
edi->sori.c_ind = edi->sav.c_ind;
/* In the serial case, the local number of atoms is the global one: */
edi->sref.nr_loc = edi->sref.nr;
edi->sav.nr_loc = edi->sav.nr;
edi->star.nr_loc = edi->star.nr;
edi->sori.nr_loc = edi->sori.nr;
/* And on we go to the next edi dataset */
edi=edi->next_edi;
}
}
/* Allocate space for ED buffer variables */
/* Again, loop over ED data sets */
edi=ed->edpar;
for (nr_edi = 1; nr_edi <= numedis; nr_edi++)
{
/* Allocate space for ED buffer */
snew(edi->buf, 1);
snew(edi->buf->do_edsam, 1);
/* Space for collective ED buffer variables */
/* Collective coordinates of atoms with the average indices */
snew(edi->buf->do_edsam->xcoll , edi->sav.nr);
snew(edi->buf->do_edsam->shifts_xcoll , edi->sav.nr); /* buffer for xcoll shifts */
snew(edi->buf->do_edsam->extra_shifts_xcoll , edi->sav.nr);
/* Collective coordinates of atoms with the reference indices */
if (!edi->bRefEqAv)
{
snew(edi->buf->do_edsam->xc_ref , edi->sref.nr);
snew(edi->buf->do_edsam->shifts_xc_ref , edi->sref.nr); /* To store the shifts in */
snew(edi->buf->do_edsam->extra_shifts_xc_ref, edi->sref.nr);
}
/* Get memory for flooding forces */
snew(edi->flood.forces_cartesian , edi->sav.nr);
#ifdef DUMPEDI
/* Dump it all into one file per process */
dump_edi(edi, cr, nr_edi);
#endif
/* And on we go to the next edi dataset */
edi=edi->next_edi;
}
/* Flush the edo file so that the user can check some things
* when the simulation has started */
if (ed->edo)
fflush(ed->edo);
GMX_MPE_LOG(ev_edsam_finish);
}
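/* Apply essential dynamics sampling/constraints at the current MD step: assemble
 * the collective ED coordinates on every node, fit them to the reference
 * structure, apply the linear and radius constraints, and feed the resulting
 * corrections back into the local coordinates and velocities. */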
void do_edsam(t_inputrec *ir,
int step,
t_mdatoms *md,
t_commrec *cr,
rvec xs[], /* The local current coordinates on this processor */
rvec v[], /* The velocities */
matrix box,
gmx_edsam_t ed)
{
int i,edinr,iupdate=500;
matrix rotmat; /* rotation matrix */
rvec transvec; /* translation vector */
rvec dv,dx,x_unsh; /* tmp vectors for velocity, distance, unshifted x coordinate */
real dt_1; /* 1/dt */
static bool bFirst=TRUE;
struct t_do_edsam *buf;
t_edpar *edi;
real rmsdev=-1; /* RMSD from reference structure prior to applying the constraints */
#ifdef DEBUGPRINT
static FILE *fdebug;
char fname[255];
if (bFirst)
{
sprintf(fname, "debug%2.2d", cr->nodeid);
fdebug = fopen(fname, "w");
}
#endif
/* Check if ED sampling has to be performed */
if ( ed->eEDtype==eEDnone )
return;
dt_1 = 1.0/ir->delta_t;
/* Loop over all ED datasets (usually one) */
edi = ed->edpar;
edinr = 0;
while (edi != NULL)
{
edinr++;
if (edi->bNeedDoEdsam)
{
buf=edi->buf->do_edsam;
if (bFirst)
/* initialise radacc radius for slope criterion */
buf->oldrad=calc_radius(&edi->vecs.radacc);
/* Copy the coordinates into buf->xc* arrays and after ED
* feed back corrections to the official coordinates */
/* Broadcast the ED coordinates such that every node has all of them.
 * Every node contributes its local coordinates xs and stores them in
 * the collective buf->xcoll array. Note that for edinr > 1
 * xs could already have been modified by an earlier ED */
get_coordinates(cr, buf->xcoll, buf->shifts_xcoll, buf->extra_shifts_xcoll, buf->bUpdateShifts, xs, &edi->sav, box, "XC_AVERAGE");
/* Only assemble the reference coordinates if their indices differ from the average ones */
if (!edi->bRefEqAv)
get_coordinates(cr, buf->xc_ref, buf->shifts_xc_ref, buf->extra_shifts_xc_ref, buf->bUpdateShifts, xs, &edi->sref, box, "XC_REFERENCE");
/* If bUpdateShifts was TRUE then the shifts have just been updated in get_coordinates.
 * We do not need to update the shifts until the next NS step */
buf->bUpdateShifts = FALSE;
/* Now all nodes have all of the ED coordinates in edi->sav->xcoll,
* as well as the indices in edi->sav.anrs */
/* Fit the reference indices to the reference structure */
if (edi->bRefEqAv)
fit_to_reference(buf->xcoll , transvec, rotmat, edi);
else
fit_to_reference(buf->xc_ref, transvec, rotmat, edi);
/* Now apply the translation and rotation to the ED structure */
translate_and_rotate(buf->xcoll, edi->sav.nr, transvec, rotmat);
/* Find out how well we fit to the reference (just for output steps) */
if (do_per_step(step,edi->outfrq) && MASTER(cr))
{
if (edi->bRefEqAv)
{
/* Indices of reference and average structures are identical,
* thus we can calculate the rmsd to SREF using xcoll */
rmsdev = rmsd_from_structure(buf->xcoll,&edi->sref);
}
else
{
/* We have to translate & rotate the reference atoms first */
translate_and_rotate(buf->xc_ref, edi->sref.nr, transvec, rotmat);
rmsdev = rmsd_from_structure(buf->xc_ref,&edi->sref);
}
}
/* update radsam references, when required */
if (do_per_step(step,edi->maxedsteps) && step > edi->presteps)
{
project(buf->xcoll, edi);
rad_project(edi, buf->xcoll, &edi->vecs.radacc, cr);
rad_project(edi, buf->xcoll, &edi->vecs.radfix, cr);
buf->oldrad=-1.e5;
}
/* update radacc references, when required */
if (do_per_step(step,iupdate) && step > edi->presteps)
{
edi->vecs.radacc.radius = calc_radius(&edi->vecs.radacc);
if (edi->vecs.radacc.radius - buf->oldrad < edi->slope)
{
project(buf->xcoll, edi);
rad_project(edi, buf->xcoll, &edi->vecs.radacc, cr);
buf->oldrad = 0.0;
} else
buf->oldrad = edi->vecs.radacc.radius;
}
/* apply the constraints */
if (step > edi->presteps && ed_constraints(ed->eEDtype, edi))
ed_apply_constraints(buf->xcoll, edi, step, cr);
/* write to edo, when required */
if (do_per_step(step,edi->outfrq))
{
project(buf->xcoll, edi);
if (MASTER(cr))
write_edo(edinr, edi, ed, step, rmsdev);
}
/* Copy back the coordinates unless monitoring only */
if (ed_constraints(ed->eEDtype, edi))
{
/* remove fitting */
rmfit(edi->sav.nr, buf->xcoll, transvec, rotmat);
/* Copy the ED corrected coordinates into the coordinate array */
/* Each node copies its local part. In the serial case, nr_loc is the
 * total number of ED atoms */
for (i=0; i<edi->sav.nr_loc; i++)
{
/* Unshift local ED coordinate and store in x_unsh */
ed_unshift_single_coord(box, buf->xcoll[edi->sav.c_ind[i]],
buf->shifts_xcoll[edi->sav.c_ind[i]], x_unsh);
/* dx is the ED correction to the coordinates: */
rvec_sub(x_unsh, xs[edi->sav.anrs_loc[i]], dx);
/* dv is the ED correction to the velocity: */
svmul(dt_1, dx, dv);
/* apply the velocity correction: */
rvec_inc(v[edi->sav.anrs_loc[i]], dv);
/* Finally apply the position correction due to ED: */
copy_rvec(x_unsh, xs[edi->sav.anrs_loc[i]]);
}
}
} /* END of if (edi->bNeedDoEdsam) */
/* Prepare for the next ED dataset */
edi = edi->next_edi;
} /* END of loop over ED datasets */
bFirst = FALSE;
GMX_MPE_LOG(ev_edsam_finish);
}
| tectronics/force-distribution-analysis.gromacs-4-0-5 | src/mdlib/edsam.c | C | gpl-2.0 | 88,744 |
/*
Theme Name: Website
Adding support for languages written in a Right To Left (RTL) direction is easy -
it's just a matter of overwriting all the horizontal positioning attributes
of your CSS stylesheet in a separate stylesheet file named rtl.css.
http://codex.wordpress.org/Right_to_Left_Language_Support
*/
/*
body {
direction: rtl;
unicode-bidi: embed;
}
*/
| surajajency/wordpress | wp-content/themes/website/rtl.css | CSS | gpl-2.0 | 366 |
/*
* Android display memory setup for OMAP4+ displays
*
* Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
*
* Author: Lajos Molnar
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed "as is" WITHOUT ANY WARRANTY of any
* kind, whether express or implied; without even the implied warranty
* of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <video/omapdss.h>
#include <mach/tiler.h>
#include <plat/android-display.h>
#include <plat/dsscomp.h>
#include <plat/vram.h>
struct omap_android_display_data {
/* members with default values */
u32 width;
u32 height;
u32 bpp; /* must be 2 or 4 */
/* members with no default value */
u32 tiler1d_mem;
};
/*
* We need to peek at omapdss settings so that we have enough memory for swap
* chain and vram. While this could be done by omapdss, omapdss could be
* compiled as a module, which is too late to get this information.
*/
static char default_display[16];
static int __init get_default_display(char *str)
{
strncpy(default_display, str, sizeof(default_display));
if (strlen(str) >= sizeof(default_display))
pr_warn("android_display: cannot set default display larger "
"than %d characters", sizeof(default_display) - 1);
default_display[sizeof(default_display) - 1] = '\0';
return 0;
}
early_param("omapdss.def_disp", get_default_display);
bool omap_android_display_is_default(struct omap_dss_device *device)
{
if (!strcmp(default_display, device->name))
return true;
else
return false;
}
static unsigned int hdmi_width, hdmi_height;
static int __init get_hdmi_options(char *str)
{
unsigned int width, height;
char dummy;
if (sscanf(str, "%ux%u%c", &width, &height, &dummy) == 2) {
hdmi_width = width;
hdmi_height = height;
}
return 0;
}
early_param("omapdss.hdmi_options", get_hdmi_options);
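/* Determine the resolution and bytes per pixel of the default display (or the
 * one named via omapdss.def_disp); for HDMI panels the omapdss.hdmi_options
 * resolution takes precedence. */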
static void get_display_size(struct omap_dss_board_info *info,
struct omap_android_display_data *mem)
{
struct omap_dss_device *device = NULL;
int i;
if (!info)
goto done;
device = info->default_device;
for (i = 0; i < info->num_devices; i++) {
if (!strcmp(default_display, info->devices[i]->name)) {
device = info->devices[i];
break;
}
}
if (!device)
goto done;
if (device->type == OMAP_DISPLAY_TYPE_HDMI &&
hdmi_width && hdmi_height) {
mem->width = hdmi_width;
mem->height = hdmi_height;
} else if (device->panel.timings.x_res && device->panel.timings.y_res) {
mem->width = device->panel.timings.x_res;
mem->height = device->panel.timings.y_res;
}
if (device->ctrl.pixel_size)
mem->bpp = ALIGN(device->ctrl.pixel_size, 16) >> 3;
pr_info("android_display: setting default resolution %u*%u, bpp=%u\n",
mem->width, mem->height, mem->bpp);
done:
return;
}
static void set_tiler1d_slot_size(struct dsscomp_platform_data *dsscomp,
struct omap_android_display_data *mem)
{
struct dsscomp_platform_data data = {
.tiler1d_slotsz = 0,
};
if (dsscomp)
data = *dsscomp;
/* do not change board specified value if given */
if (data.tiler1d_slotsz)
goto done;
/*
* 4 bytes per pixel, and ICS factor of 4. The ICS factor
* is chosen somewhat arbitrarily to support the home screen layers
* to be displayed by DSS. The size of the home screen layers is
* roughly (1 + 2.5 + 0.1 + 0.1) * size_of_the_screen
* for the icons, wallpaper, status bar and navigation bar. Boards
* that wish to use a different factor should supply their tiler1D
* slot size directly.
*/
data.tiler1d_slotsz =
PAGE_ALIGN(mem->width * mem->height * 4 * 4);
done:
if (dsscomp)
*dsscomp = data;
dsscomp_set_platform_data(&data);
/* remember setting for ion carveouts */
mem->tiler1d_mem =
NUM_ANDROID_TILER1D_SLOTS * data.tiler1d_slotsz;
pr_info("android_display: tiler1d %u\n", mem->tiler1d_mem);
}
static u32 vram_size(struct omap_android_display_data *mem)
{
/* calculate required VRAM */
return PAGE_ALIGN(ALIGN(mem->width, 64) * mem->height * mem->bpp);
}
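/* Work out how much VRAM to reserve: at least one buffer for fb0, plus any SGX
 * swap-chain buffers and board-specified reserve, plus additional fb regions
 * that have no fixed physical address. */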
static void set_vram_sizes(struct sgx_omaplfb_config *sgx_config,
struct omapfb_platform_data *fb,
struct omap_android_display_data *mem)
{
u32 num_vram_buffers = 0;
u32 vram = 0;
int i;
if (fb && fb->mem_desc.region_cnt >= 1) {
/* Need at least 1 VRAM buffer for fb0 */
num_vram_buffers = 1;
}
if (sgx_config) {
#if defined(CONFIG_GCBV)
/* Add 2 extra VRAM buffers for gc320 composition - 4470 only*/
/* TODO: cpu_is_omap447x() is not returning the proper value
at this stage. Need to fix it */
if (1/*cpu_is_omap447x()*/)
sgx_config->vram_buffers += 2;
#endif
vram += sgx_config->vram_reserve;
num_vram_buffers = max(sgx_config->vram_buffers,
num_vram_buffers);
}
vram += num_vram_buffers * vram_size(mem);
if (fb) {
/* set fb0 vram needs */
if (fb->mem_desc.region_cnt >= 1) {
fb->mem_desc.region[0].size = vram;
pr_info("android_display: setting fb0.vram to %u\n",
vram);
}
/* set global vram needs incl. additional regions specified */
for (i = 1; i < fb->mem_desc.region_cnt; i++)
if (!fb->mem_desc.region[i].paddr)
vram += fb->mem_desc.region[i].size;
}
pr_info("android_display: setting vram to %u\n", vram);
omap_vram_set_sdram_vram(vram, 0);
}
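/* Size the ION TILER2D carveouts: the non-secure carveout holds the frame
 * buffers, while the 128 MiB 2D container is trimmed by the pages those buffers
 * use and by the TILER1D reservation. */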
static void set_ion_carveouts(struct sgx_omaplfb_config *sgx_config,
struct omap_ion_platform_data *ion,
struct omap_android_display_data *mem)
{
u32 alloc_pages, width;
enum tiler_fmt fmt;
u32 num_buffers = 2;
BUG_ON(!mem || (mem->bpp == 0));
if (sgx_config)
num_buffers = sgx_config->tiler2d_buffers;
/* width must be aligned to 128 bytes */
width = ALIGN(mem->width, 128 / mem->bpp);
fmt = mem->bpp <= 2 ? TILFMT_16BIT : TILFMT_32BIT;
/* max pages used from TILER2D container */
alloc_pages = tiler_backpages(fmt,
ALIGN(width, PAGE_SIZE / mem->bpp),
mem->height);
/* actual pages used is the same */
ion->nonsecure_tiler2d_size = alloc_pages * PAGE_SIZE * num_buffers;
ion->tiler2d_size = SZ_128M;
/* min pages used from TILER2D container */
alloc_pages = tiler_backpages(fmt,
ALIGN(width, PAGE_SIZE / mem->bpp),
mem->height * num_buffers);
ion->tiler2d_size -= alloc_pages * PAGE_SIZE;
/*
* On OMAP4 tiler1d and tiler2d are in the same container. However,
* leftover space must be in 32-page bands
*/
ion->tiler2d_size -= ALIGN(mem->tiler1d_mem, PAGE_SIZE * 32);
pr_info("android_display: ion carveouts: %u tiler2d, %u nonsecure\n",
ion->tiler2d_size, ion->nonsecure_tiler2d_size);
}
/* coordinate between sgx, omapdss, dsscomp and ion needs */
void omap_android_display_setup(struct omap_dss_board_info *dss,
struct dsscomp_platform_data *dsscomp,
struct sgx_omaplfb_platform_data *sgx,
struct omapfb_platform_data *fb,
struct omap_ion_platform_data *ion)
{
struct sgx_omaplfb_config *p_sgx_config = NULL;
struct omap_android_display_data mem = {
.bpp = 4,
.width = 428,
.height = 240,
};
if (!sgx || !sgx->configs)
p_sgx_config = sgx_omaplfb_get(0);
else
p_sgx_config = &(sgx->configs[0]);
get_display_size(dss, &mem);
set_tiler1d_slot_size(dsscomp, &mem);
set_vram_sizes(p_sgx_config, fb, &mem);
if (ion)
set_ion_carveouts(p_sgx_config, ion, &mem);
sgx_omaplfb_set(0, p_sgx_config);
}
| ReconInstruments/jet_kernel | arch/arm/plat-omap/android-display.c | C | gpl-2.0 | 7,608 |
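// Fetch the widget's shortcode form via admin-ajax and show it in a modal jQuery UI dialog.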
function gp_loadMediaButtonPopup(widget_name, shortcode, widget_title)
{
gp_hide_all_widget_popups();
var tag = jQuery('<div id="gold_plugins_popup"></div>');
var params = {
action: 'gold_plugins_insert_widget_popup',
widget_name: widget_name,
shortcode: shortcode
};
if (typeof(widget_title) == 'undefined') {
widget_title = '';
}
jQuery.ajax({
url: ajaxurl,
data: params,
success: function(data) {
dialog_options = {
modal: true,
width: 450,
title: widget_title
};
tag.html(data).dialog(dialog_options).dialog('open');
}
});
gp_hide_all_media_button_menus();
//gp_close_popup_on_outside_click();
}
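// Compose a shortcode string from the popup form's fields and insert it into the post editor.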
function gp_insertWidgetIntoPost()
{
// get the real names of all fields
var frm = jQuery('#gold_plugins_popup form');
var shortcode = frm.data('shortcode');
if (shortcode.length == 0) {
return; // invalid shortcode, abort
}
var output = '[' + shortcode;
frm.find(':input').each(function ()
{
var val = jQuery(this).val();
// skip values that are marked 'shortcode-hidden'
if (jQuery(this).data('shortcode-hidden')) {
return true;
}
// skip checkboxes and radio buttons that are not checked
if (jQuery(this).attr('type') == 'radio' && jQuery(this).attr('checked') != 'checked' ) {
return true;
}
else if (jQuery(this).attr('type') == 'checkbox' && jQuery(this).attr('checked') != 'checked' ) {
if(!jQuery(this).data('shortcode-value-if-unchecked')){
return true;
} else {
// if this is a checkbox and it has the value if unchecked data attribute
// set the value used in the composed string to the value of this attribute
val = jQuery(this).data('shortcode-value-if-unchecked');
}
}
else if (jQuery(this).attr('type') == 'hidden') {
return true;
}
var old_name = jQuery(this).attr('name');
var override_name = jQuery(this).data('shortcode-key');
var real_name = '';
if (override_name) {
real_name = override_name;
} else {
var pos = old_name.indexOf('[__i__]');
if (pos > 0) {
var real_name = old_name.substr(pos + 8);
real_name = real_name.substr(0, real_name.length - 1); // chop off the trailing ']'
}
}
if (real_name && val.length > 0) {
output += ' ' + real_name + '="' + val + '"';
}
});
output += ']'; // close the shortcode
wp.media.editor.insert(output)
gp_hide_all_widget_popups();
gp_hide_all_media_button_menus();
}
var gp_hide_all_media_button_menus = function (button_group) {
jQuery('.gp_media_button_group_dropdown').css('display', 'none');
};
var gp_hide_all_widget_popups = function (button_group) {
jQuery('#gold_plugins_popup').each(function () {
jQuery(this).dialog('destroy');
});
};
var gp_toggle_media_button_menu = function (button_group) {
button_group = jQuery(button_group);
if (!button_group.is('.gp_media_button_group')) {
button_group = button_group.parents('.gp_media_button_group:first');
}
var dd = button_group.find('.gp_media_button_group_dropdown');
if (dd.is(':visible')) {
dd.css('display', 'none');
} else {
dd.css('display', 'block');
}
};
jQuery(function () {
var groups = jQuery('.gp_media_button_group');
groups.on('click', '.gp_media_button_group_toggle', function () {
gp_toggle_media_button_menu(this);
});
});
| divakar111/codoid.com | wp-content/plugins/easy-testimonials/include/lib/GP_Media_Button/js/gp_media_button_v1.js | JavaScript | gpl-2.0 | 3,414 |
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/rbtree.h>
#include <soc/qcom/rpm-notifier.h>
#include <soc/qcom/rpm-smd.h>
#include <soc/qcom/smd.h>
#define CREATE_TRACE_POINTS
#include <trace/events/trace_rpm_smd.h>
/* Debug Definitions */
enum {
MSM_RPM_LOG_REQUEST_PRETTY = BIT(0),
MSM_RPM_LOG_REQUEST_RAW = BIT(1),
MSM_RPM_LOG_REQUEST_SHOW_MSG_ID = BIT(2),
};
static int msm_rpm_debug_mask;
module_param_named(
debug_mask, msm_rpm_debug_mask, int, S_IRUGO | S_IWUSR
);
struct msm_rpm_driver_data {
const char *ch_name;
uint32_t ch_type;
smd_channel_t *ch_info;
struct work_struct work;
spinlock_t smd_lock_write;
spinlock_t smd_lock_read;
struct completion smd_open;
};
#define DEFAULT_BUFFER_SIZE 256
#define DEBUG_PRINT_BUFFER_SIZE 512
#define MAX_SLEEP_BUFFER 128
#define GFP_FLAG(noirq) (noirq ? GFP_ATOMIC : GFP_KERNEL)
#define INV_RSC "resource does not exist"
#define ERR "err\0"
#define MAX_ERR_BUFFER_SIZE 128
#define MAX_WAIT_ON_ACK 24
#define INIT_ERROR 1
static ATOMIC_NOTIFIER_HEAD(msm_rpm_sleep_notifier);
static bool standalone;
static int probe_status = -EPROBE_DEFER;
int msm_rpm_register_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_register(&msm_rpm_sleep_notifier, nb);
}
int msm_rpm_unregister_notifier(struct notifier_block *nb)
{
return atomic_notifier_chain_unregister(&msm_rpm_sleep_notifier, nb);
}
static struct workqueue_struct *msm_rpm_smd_wq;
enum {
MSM_RPM_MSG_REQUEST_TYPE = 0,
MSM_RPM_MSG_TYPE_NR,
};
static const uint32_t msm_rpm_request_service[MSM_RPM_MSG_TYPE_NR] = {
0x716572, /* 'req\0' */
};
/* The order of fields matters and reflects the order expected by the RPM */
struct rpm_request_header {
uint32_t service_type;
uint32_t request_len;
};
struct rpm_message_header {
uint32_t msg_id;
enum msm_rpm_set set;
uint32_t resource_type;
uint32_t resource_id;
uint32_t data_len;
};
struct kvp {
unsigned int k;
unsigned int s;
};
struct msm_rpm_kvp_data {
uint32_t key;
uint32_t nbytes; /* number of bytes */
uint8_t *value;
bool valid;
};
struct slp_buf {
struct rb_node node;
char ubuf[MAX_SLEEP_BUFFER];
char *buf;
bool valid;
};
static struct rb_root tr_root = RB_ROOT;
static int msm_rpm_send_smd_buffer(char *buf, uint32_t size, bool noirq);
static uint32_t msm_rpm_get_next_msg_id(void);
static inline unsigned int get_rsc_type(char *buf)
{
struct rpm_message_header *h;
h = (struct rpm_message_header *)
(buf + sizeof(struct rpm_request_header));
return h->resource_type;
}
static inline unsigned int get_rsc_id(char *buf)
{
struct rpm_message_header *h;
h = (struct rpm_message_header *)
(buf + sizeof(struct rpm_request_header));
return h->resource_id;
}
#define get_data_len(buf) \
(((struct rpm_message_header *) \
(buf + sizeof(struct rpm_request_header)))->data_len)
#define get_req_len(buf) \
(((struct rpm_request_header *)(buf))->request_len)
#define get_msg_id(buf) \
(((struct rpm_message_header *) \
(buf + sizeof(struct rpm_request_header)))->msg_id)
static inline int get_buf_len(char *buf)
{
return get_req_len(buf) + sizeof(struct rpm_request_header);
}
static inline struct kvp *get_first_kvp(char *buf)
{
return (struct kvp *)(buf + sizeof(struct rpm_request_header)
+ sizeof(struct rpm_message_header));
}
static inline struct kvp *get_next_kvp(struct kvp *k)
{
return (struct kvp *)((void *)k + sizeof(*k) + k->s);
}
static inline void *get_data(struct kvp *k)
{
return (void *)k + sizeof(*k);
}
static void delete_kvp(char *msg, struct kvp *d)
{
struct kvp *n;
int dec;
uint32_t size;
n = get_next_kvp(d);
dec = (void *)n - (void *)d;
size = get_data_len(msg) - ((void *)n - (void *)get_first_kvp(msg));
memcpy((void *)d, (void *)n, size);
get_data_len(msg) -= dec;
get_req_len(msg) -= dec;
}
static inline void update_kvp_data(struct kvp *dest, struct kvp *src)
{
memcpy(get_data(dest), get_data(src), src->s);
}
static void add_kvp(char *buf, struct kvp *n)
{
uint32_t inc = sizeof(*n) + n->s;
BUG_ON((get_req_len(buf) + inc) > MAX_SLEEP_BUFFER);
memcpy(buf + get_buf_len(buf), n, inc);
get_data_len(buf) += inc;
get_req_len(buf) += inc;
}
static struct slp_buf *tr_search(struct rb_root *root, char *slp)
{
unsigned int type = get_rsc_type(slp);
unsigned int id = get_rsc_id(slp);
struct rb_node *node = root->rb_node;
while (node) {
struct slp_buf *cur = rb_entry(node, struct slp_buf, node);
unsigned int ctype = get_rsc_type(cur->buf);
unsigned int cid = get_rsc_id(cur->buf);
if (type < ctype)
node = node->rb_left;
else if (type > ctype)
node = node->rb_right;
else if (id < cid)
node = node->rb_left;
else if (id > cid)
node = node->rb_right;
else
return cur;
}
return NULL;
}
static int tr_insert(struct rb_root *root, struct slp_buf *slp)
{
unsigned int type = get_rsc_type(slp->buf);
unsigned int id = get_rsc_id(slp->buf);
struct rb_node **node = &(root->rb_node), *parent = NULL;
while (*node) {
struct slp_buf *curr = rb_entry(*node, struct slp_buf, node);
unsigned int ctype = get_rsc_type(curr->buf);
unsigned int cid = get_rsc_id(curr->buf);
parent = *node;
if (type < ctype)
node = &((*node)->rb_left);
else if (type > ctype)
node = &((*node)->rb_right);
else if (id < cid)
node = &((*node)->rb_left);
else if (id > cid)
node = &((*node)->rb_right);
else
return -EINVAL;
}
rb_link_node(&slp->node, parent, node);
rb_insert_color(&slp->node, root);
slp->valid = true;
return 0;
}
#define for_each_kvp(buf, k) \
for (k = (struct kvp *)get_first_kvp(buf); \
((void *)k - (void *)get_first_kvp(buf)) < get_data_len(buf);\
k = get_next_kvp(k))
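/* Merge the key/value pairs of a new sleep-set request (buf) into the cached
 * request s, marking the cached entry as pending (valid) only when a value
 * actually changed. */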
static void tr_update(struct slp_buf *s, char *buf)
{
struct kvp *e, *n;
for_each_kvp(buf, n) {
bool found = false;
for_each_kvp(s->buf, e) {
if (n->k == e->k) {
found = true;
if (n->s == e->s) {
void *e_data = get_data(e);
void *n_data = get_data(n);
if (memcmp(e_data, n_data, n->s)) {
update_kvp_data(e, n);
s->valid = true;
}
} else {
delete_kvp(s->buf, e);
add_kvp(s->buf, n);
s->valid = true;
}
break;
}
}
if (!found) {
add_kvp(s->buf, n);
s->valid = true;
}
}
}
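/* Cache a sleep-set request in the red-black tree keyed by (resource type,
 * resource id): new resources are inserted, existing ones are merged via
 * tr_update instead of being sent immediately. */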
int msm_rpm_smd_buffer_request(char *buf, uint32_t size, gfp_t flag)
{
struct slp_buf *slp;
static DEFINE_SPINLOCK(slp_buffer_lock);
unsigned long flags;
if (size > MAX_SLEEP_BUFFER)
return -ENOMEM;
spin_lock_irqsave(&slp_buffer_lock, flags);
slp = tr_search(&tr_root, buf);
if (!slp) {
slp = kzalloc(sizeof(struct slp_buf), GFP_ATOMIC);
if (!slp) {
spin_unlock_irqrestore(&slp_buffer_lock, flags);
return -ENOMEM;
}
slp->buf = PTR_ALIGN(&slp->ubuf[0], sizeof(u32));
memcpy(slp->buf, buf, size);
if (tr_insert(&tr_root, slp))
pr_err("%s(): Error updating sleep request\n",
__func__);
} else {
/* handle unsent requests */
tr_update(slp, buf);
}
spin_unlock_irqrestore(&slp_buffer_lock, flags);
return 0;
}
static void msm_rpm_print_sleep_buffer(struct slp_buf *s)
{
char buf[DEBUG_PRINT_BUFFER_SIZE] = {0};
int pos;
int buflen = DEBUG_PRINT_BUFFER_SIZE;
char ch[5] = {0};
u32 type;
struct kvp *e;
if (!s)
return;
if (!s->valid)
return;
type = get_rsc_type(s->buf);
memcpy(ch, &type, sizeof(u32));
pos = scnprintf(buf, buflen,
"Sleep request type = 0x%08x(%s)",
get_rsc_type(s->buf), ch);
pos += scnprintf(buf + pos, buflen - pos, " id = 0%x",
get_rsc_id(s->buf));
for_each_kvp(s->buf, e) {
uint32_t i;
char *data = get_data(e);
memcpy(ch, &e->k, sizeof(u32));
pos += scnprintf(buf + pos, buflen - pos,
"\n\t\tkey = 0x%08x(%s)",
e->k, ch);
pos += scnprintf(buf + pos, buflen - pos,
" sz= %d data =", e->s);
for (i = 0; i < e->s; i++)
pos += scnprintf(buf + pos, buflen - pos,
" 0x%02X", data[i]);
}
pos += scnprintf(buf + pos, buflen - pos, "\n");
printk("%s", buf);
}
static struct msm_rpm_driver_data msm_rpm_data = {
.smd_open = COMPLETION_INITIALIZER(msm_rpm_data.smd_open),
};
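/* Write every pending (valid) cached sleep-set request to the SMD channel,
 * assigning fresh message ids and draining acks once MAX_WAIT_ON_ACK messages
 * are outstanding so the SMD FIFO does not overflow. */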
static int msm_rpm_flush_requests(bool print)
{
struct rb_node *t;
int ret;
int pkt_sz;
char buf[MAX_ERR_BUFFER_SIZE] = {0};
int count = 0;
for (t = rb_first(&tr_root); t; t = rb_next(t)) {
struct slp_buf *s = rb_entry(t, struct slp_buf, node);
if (!s->valid)
continue;
if (print)
msm_rpm_print_sleep_buffer(s);
get_msg_id(s->buf) = msm_rpm_get_next_msg_id();
ret = msm_rpm_send_smd_buffer(s->buf,
get_buf_len(s->buf), true);
WARN_ON(ret != get_buf_len(s->buf));
s->valid = false;
count++;
trace_rpm_send_message(true, MSM_RPM_CTX_SLEEP_SET,
get_rsc_type(s->buf),
get_rsc_id(s->buf),
get_msg_id(s->buf));
/*
* RPM acks need to be handled here if we have sent 24
* messages such that we do not overrun SMD buffer. Since
* we expect only sleep sets at this point (RPM PC would be
* disallowed if we had pending active requests), we need not
* process these sleep set acks.
*/
if (count >= MAX_WAIT_ON_ACK) {
int len;
int timeout = 10;
while (timeout) {
if (smd_is_pkt_avail(msm_rpm_data.ch_info))
break;
/*
* Sleep for 50us at a time before checking
* for packet availability. The 50us is based
* on the time rpm could take to process
* and send an ack for the sleep set request.
*/
udelay(50);
timeout--;
}
/*
* On timeout return an error and give up control on this cpu.
* This prevents any other core that has woken up and is trying
* to acquire the spinlock from being locked out.
*/
if (!timeout) {
pr_err("%s: Timed out waiting for RPM ACK\n",
__func__);
return -EAGAIN;
}
pkt_sz = smd_cur_packet_size(msm_rpm_data.ch_info);
len = smd_read(msm_rpm_data.ch_info, buf, pkt_sz);
count--;
}
}
return 0;
}
static atomic_t msm_rpm_msg_id = ATOMIC_INIT(0);
struct msm_rpm_request {
struct rpm_request_header req_hdr;
struct rpm_message_header msg_hdr;
struct msm_rpm_kvp_data *kvp;
uint32_t num_elements;
uint32_t write_idx;
uint8_t *buf;
uint32_t numbytes;
};
/*
* Data related to message acknowledgement
*/
LIST_HEAD(msm_rpm_wait_list);
struct msm_rpm_wait_data {
struct list_head list;
uint32_t msg_id;
bool ack_recd;
int errno;
struct completion ack;
};
DEFINE_SPINLOCK(msm_rpm_list_lock);
struct msm_rpm_ack_msg {
uint32_t req;
uint32_t req_len;
uint32_t rsc_id;
uint32_t msg_len;
uint32_t id_ack;
};
LIST_HEAD(msm_rpm_ack_list);
static DECLARE_COMPLETION(data_ready);
static void msm_rpm_notify_sleep_chain(struct rpm_message_header *hdr,
struct msm_rpm_kvp_data *kvp)
{
struct msm_rpm_notifier_data notif;
notif.rsc_type = hdr->resource_type;
notif.rsc_id = hdr->resource_id;
notif.key = kvp->key;
notif.size = kvp->nbytes;
notif.value = kvp->value;
atomic_notifier_call_chain(&msm_rpm_sleep_notifier, 0, ¬if);
}
static int msm_rpm_add_kvp_data_common(struct msm_rpm_request *handle,
uint32_t key, const uint8_t *data, int size, bool noirq)
{
uint32_t i;
uint32_t data_size, msg_size;
if (probe_status)
return probe_status;
if (!handle) {
pr_err("%s(): Invalid handle\n", __func__);
return -EINVAL;
}
if (size < 0)
return -EINVAL;
data_size = ALIGN(size, SZ_4);
msg_size = data_size + sizeof(struct rpm_request_header);
for (i = 0; i < handle->write_idx; i++) {
if (handle->kvp[i].key != key)
continue;
if (handle->kvp[i].nbytes != data_size) {
kfree(handle->kvp[i].value);
handle->kvp[i].value = NULL;
} else {
if (!memcmp(handle->kvp[i].value, data, data_size))
return 0;
}
break;
}
if (i >= handle->num_elements) {
pr_err("%s(): Number of resources exceeds max allocated\n",
__func__);
return -ENOMEM;
}
if (i == handle->write_idx)
handle->write_idx++;
if (!handle->kvp[i].value) {
handle->kvp[i].value = kzalloc(data_size, GFP_FLAG(noirq));
if (!handle->kvp[i].value) {
pr_err("%s(): Failed malloc\n", __func__);
return -ENOMEM;
}
} else {
/* We enter the else case if a key already exists but the
 * data doesn't match, in which case we zero the data out.
 */
memset(handle->kvp[i].value, 0, data_size);
}
if (!handle->kvp[i].valid)
handle->msg_hdr.data_len += msg_size;
else
handle->msg_hdr.data_len += (data_size - handle->kvp[i].nbytes);
handle->kvp[i].nbytes = data_size;
handle->kvp[i].key = key;
memcpy(handle->kvp[i].value, data, size);
handle->kvp[i].valid = true;
return 0;
}
static struct msm_rpm_request *msm_rpm_create_request_common(
enum msm_rpm_set set, uint32_t rsc_type, uint32_t rsc_id,
int num_elements, bool noirq)
{
struct msm_rpm_request *cdata;
if (probe_status)
return ERR_PTR(probe_status);
cdata = kzalloc(sizeof(struct msm_rpm_request),
GFP_FLAG(noirq));
if (!cdata) {
pr_err("%s():Cannot allocate memory for client data\n",
__func__);
goto cdata_alloc_fail;
}
cdata->msg_hdr.set = set;
cdata->msg_hdr.resource_type = rsc_type;
cdata->msg_hdr.resource_id = rsc_id;
cdata->msg_hdr.data_len = 0;
cdata->num_elements = num_elements;
cdata->write_idx = 0;
cdata->kvp = kzalloc(sizeof(struct msm_rpm_kvp_data) * num_elements,
GFP_FLAG(noirq));
if (!cdata->kvp) {
pr_warn("%s(): Cannot allocate memory for key value data\n",
__func__);
goto kvp_alloc_fail;
}
cdata->buf = kzalloc(DEFAULT_BUFFER_SIZE, GFP_FLAG(noirq));
if (!cdata->buf)
goto buf_alloc_fail;
cdata->numbytes = DEFAULT_BUFFER_SIZE;
return cdata;
buf_alloc_fail:
kfree(cdata->kvp);
kvp_alloc_fail:
kfree(cdata);
cdata_alloc_fail:
return NULL;
}
void msm_rpm_free_request(struct msm_rpm_request *handle)
{
int i;
if (!handle)
return;
for (i = 0; i < handle->num_elements; i++)
kfree(handle->kvp[i].value);
kfree(handle->kvp);
kfree(handle->buf);
kfree(handle);
}
EXPORT_SYMBOL(msm_rpm_free_request);
struct msm_rpm_request *msm_rpm_create_request(
enum msm_rpm_set set, uint32_t rsc_type,
uint32_t rsc_id, int num_elements)
{
return msm_rpm_create_request_common(set, rsc_type, rsc_id,
num_elements, false);
}
EXPORT_SYMBOL(msm_rpm_create_request);
struct msm_rpm_request *msm_rpm_create_request_noirq(
enum msm_rpm_set set, uint32_t rsc_type,
uint32_t rsc_id, int num_elements)
{
return msm_rpm_create_request_common(set, rsc_type, rsc_id,
num_elements, true);
}
EXPORT_SYMBOL(msm_rpm_create_request_noirq);
int msm_rpm_add_kvp_data(struct msm_rpm_request *handle,
uint32_t key, const uint8_t *data, int size)
{
return msm_rpm_add_kvp_data_common(handle, key, data, size, false);
}
EXPORT_SYMBOL(msm_rpm_add_kvp_data);
int msm_rpm_add_kvp_data_noirq(struct msm_rpm_request *handle,
uint32_t key, const uint8_t *data, int size)
{
return msm_rpm_add_kvp_data_common(handle, key, data, size, true);
}
EXPORT_SYMBOL(msm_rpm_add_kvp_data_noirq);
/* Runs in interrupt context */
static void msm_rpm_notify(void *data, unsigned event)
{
struct msm_rpm_driver_data *pdata = (struct msm_rpm_driver_data *)data;
BUG_ON(!pdata);
if (!(pdata->ch_info))
return;
switch (event) {
case SMD_EVENT_DATA:
complete(&data_ready);
break;
case SMD_EVENT_OPEN:
complete(&pdata->smd_open);
break;
case SMD_EVENT_CLOSE:
case SMD_EVENT_STATUS:
case SMD_EVENT_REOPEN_READY:
break;
default:
pr_info("Unknown SMD event\n");
}
}
bool msm_rpm_waiting_for_ack(void)
{
bool ret;
unsigned long flags;
spin_lock_irqsave(&msm_rpm_list_lock, flags);
ret = list_empty(&msm_rpm_wait_list);
spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
return !ret;
}
static struct msm_rpm_wait_data *msm_rpm_get_entry_from_msg_id(uint32_t msg_id)
{
struct list_head *ptr;
struct msm_rpm_wait_data *elem = NULL;
unsigned long flags;
spin_lock_irqsave(&msm_rpm_list_lock, flags);
list_for_each(ptr, &msm_rpm_wait_list) {
elem = list_entry(ptr, struct msm_rpm_wait_data, list);
if (elem && (elem->msg_id == msg_id))
break;
elem = NULL;
}
spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
return elem;
}
static uint32_t msm_rpm_get_next_msg_id(void)
{
uint32_t id;
/*
* A message id of 0 is used by the driver to indicate an error
* condition. The RPM driver uses an id of 1 to indicate unsent data
* when the data sent over hasn't been modified. This isn't an error
* scenario and wait for ack returns success when the message id is 1.
*/
do {
id = atomic_inc_return(&msm_rpm_msg_id);
} while ((id == 0) || (id == 1) || msm_rpm_get_entry_from_msg_id(id));
return id;
}
static int msm_rpm_add_wait_list(uint32_t msg_id)
{
unsigned long flags;
struct msm_rpm_wait_data *data =
kzalloc(sizeof(struct msm_rpm_wait_data), GFP_ATOMIC);
if (!data)
return -ENOMEM;
init_completion(&data->ack);
data->ack_recd = false;
data->msg_id = msg_id;
data->errno = INIT_ERROR;
spin_lock_irqsave(&msm_rpm_list_lock, flags);
list_add(&data->list, &msm_rpm_wait_list);
spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
return 0;
}
static void msm_rpm_free_list_entry(struct msm_rpm_wait_data *elem)
{
unsigned long flags;
spin_lock_irqsave(&msm_rpm_list_lock, flags);
list_del(&elem->list);
spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
kfree(elem);
}
static void msm_rpm_process_ack(uint32_t msg_id, int errno)
{
struct list_head *ptr;
struct msm_rpm_wait_data *elem = NULL;
unsigned long flags;
spin_lock_irqsave(&msm_rpm_list_lock, flags);
list_for_each(ptr, &msm_rpm_wait_list) {
elem = list_entry(ptr, struct msm_rpm_wait_data, list);
if (elem && (elem->msg_id == msg_id)) {
elem->errno = errno;
elem->ack_recd = true;
complete(&elem->ack);
break;
}
elem = NULL;
}
/* Special case where the sleep driver doesn't
* wait for ACKs. This would decrease the latency involved with
* entering RPM assisted power collapse.
*/
if (!elem)
trace_rpm_ack_recd(0, msg_id);
spin_unlock_irqrestore(&msm_rpm_list_lock, flags);
}
struct msm_rpm_kvp_packet {
uint32_t id;
uint32_t len;
uint32_t val;
};
static inline uint32_t msm_rpm_get_msg_id_from_ack(uint8_t *buf)
{
return ((struct msm_rpm_ack_msg *)buf)->id_ack;
}
static inline int msm_rpm_get_error_from_ack(uint8_t *buf)
{
uint8_t *tmp;
uint32_t req_len = ((struct msm_rpm_ack_msg *)buf)->req_len;
int rc = -ENODEV;
req_len -= sizeof(struct msm_rpm_ack_msg);
req_len += 2 * sizeof(uint32_t);
if (!req_len)
return 0;
tmp = buf + sizeof(struct msm_rpm_ack_msg);
BUG_ON(memcmp(tmp, ERR, sizeof(uint32_t)));
tmp += 2 * sizeof(uint32_t);
if (!(memcmp(tmp, INV_RSC, min_t(uint32_t, req_len,
sizeof(INV_RSC))-1))) {
pr_err("%s(): RPM NACK Unsupported resource\n", __func__);
rc = -EINVAL;
} else {
pr_err("%s(): RPM NACK Invalid header\n", __func__);
}
return rc;
}
static int msm_rpm_read_smd_data(char *buf)
{
int pkt_sz;
int bytes_read = 0;
pkt_sz = smd_cur_packet_size(msm_rpm_data.ch_info);
if (!pkt_sz)
return -EAGAIN;
BUG_ON(pkt_sz > MAX_ERR_BUFFER_SIZE);
if (pkt_sz != smd_read_avail(msm_rpm_data.ch_info))
return -EAGAIN;
do {
int len;
len = smd_read(msm_rpm_data.ch_info, buf + bytes_read, pkt_sz);
pkt_sz -= len;
bytes_read += len;
} while (pkt_sz > 0);
BUG_ON(pkt_sz < 0);
return 0;
}
static void msm_rpm_smd_work(struct work_struct *work)
{
uint32_t msg_id;
int errno;
char buf[MAX_ERR_BUFFER_SIZE] = {0};
while (1) {
wait_for_completion_interruptible(&data_ready);
spin_lock(&msm_rpm_data.smd_lock_read);
while (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
if (msm_rpm_read_smd_data(buf))
break;
msg_id = msm_rpm_get_msg_id_from_ack(buf);
errno = msm_rpm_get_error_from_ack(buf);
msm_rpm_process_ack(msg_id, errno);
}
spin_unlock(&msm_rpm_data.smd_lock_read);
}
}
static void msm_rpm_log_request(struct msm_rpm_request *cdata)
{
char buf[DEBUG_PRINT_BUFFER_SIZE];
size_t buflen = DEBUG_PRINT_BUFFER_SIZE;
char name[5];
u32 value;
uint32_t i;
int j, prev_valid;
int valid_count = 0;
int pos = 0;
name[4] = 0;
for (i = 0; i < cdata->write_idx; i++)
if (cdata->kvp[i].valid)
valid_count++;
pos += scnprintf(buf + pos, buflen - pos, "%sRPM req: ", KERN_INFO);
if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_SHOW_MSG_ID)
pos += scnprintf(buf + pos, buflen - pos, "msg_id=%u, ",
cdata->msg_hdr.msg_id);
pos += scnprintf(buf + pos, buflen - pos, "s=%s",
(cdata->msg_hdr.set == MSM_RPM_CTX_ACTIVE_SET ? "act" : "slp"));
if ((msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY)
&& (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_RAW)) {
/* Both pretty and raw formatting */
memcpy(name, &cdata->msg_hdr.resource_type, sizeof(uint32_t));
pos += scnprintf(buf + pos, buflen - pos,
", rsc_type=0x%08X (%s), rsc_id=%u; ",
cdata->msg_hdr.resource_type, name,
cdata->msg_hdr.resource_id);
for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
if (!cdata->kvp[i].valid)
continue;
memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
pos += scnprintf(buf + pos, buflen - pos,
"[key=0x%08X (%s), value=%s",
cdata->kvp[i].key, name,
(cdata->kvp[i].nbytes ? "0x" : "null"));
for (j = 0; j < cdata->kvp[i].nbytes; j++)
pos += scnprintf(buf + pos, buflen - pos,
"%02X ",
cdata->kvp[i].value[j]);
if (cdata->kvp[i].nbytes)
pos += scnprintf(buf + pos, buflen - pos, "(");
for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
value = 0;
memcpy(&value, &cdata->kvp[i].value[j],
min_t(uint32_t, sizeof(uint32_t),
cdata->kvp[i].nbytes - j));
pos += scnprintf(buf + pos, buflen - pos, "%u",
value);
if (j + 4 < cdata->kvp[i].nbytes)
pos += scnprintf(buf + pos,
buflen - pos, " ");
}
if (cdata->kvp[i].nbytes)
pos += scnprintf(buf + pos, buflen - pos, ")");
pos += scnprintf(buf + pos, buflen - pos, "]");
if (prev_valid + 1 < valid_count)
pos += scnprintf(buf + pos, buflen - pos, ", ");
prev_valid++;
}
} else if (msm_rpm_debug_mask & MSM_RPM_LOG_REQUEST_PRETTY) {
/* Pretty formatting only */
memcpy(name, &cdata->msg_hdr.resource_type, sizeof(uint32_t));
pos += scnprintf(buf + pos, buflen - pos, " %s %u; ", name,
cdata->msg_hdr.resource_id);
for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
if (!cdata->kvp[i].valid)
continue;
memcpy(name, &cdata->kvp[i].key, sizeof(uint32_t));
pos += scnprintf(buf + pos, buflen - pos, "%s=%s",
name, (cdata->kvp[i].nbytes ? "" : "null"));
for (j = 0; j < cdata->kvp[i].nbytes; j += 4) {
value = 0;
memcpy(&value, &cdata->kvp[i].value[j],
min_t(uint32_t, sizeof(uint32_t),
cdata->kvp[i].nbytes - j));
pos += scnprintf(buf + pos, buflen - pos, "%u",
value);
if (j + 4 < cdata->kvp[i].nbytes)
pos += scnprintf(buf + pos,
buflen - pos, " ");
}
if (prev_valid + 1 < valid_count)
pos += scnprintf(buf + pos, buflen - pos, ", ");
prev_valid++;
}
} else {
/* Raw formatting only */
pos += scnprintf(buf + pos, buflen - pos,
", rsc_type=0x%08X, rsc_id=%u; ",
cdata->msg_hdr.resource_type,
cdata->msg_hdr.resource_id);
for (i = 0, prev_valid = 0; i < cdata->write_idx; i++) {
if (!cdata->kvp[i].valid)
continue;
pos += scnprintf(buf + pos, buflen - pos,
"[key=0x%08X, value=%s",
cdata->kvp[i].key,
(cdata->kvp[i].nbytes ? "0x" : "null"));
for (j = 0; j < cdata->kvp[i].nbytes; j++) {
pos += scnprintf(buf + pos, buflen - pos,
"%02X",
cdata->kvp[i].value[j]);
if (j + 1 < cdata->kvp[i].nbytes)
pos += scnprintf(buf + pos,
buflen - pos, " ");
}
pos += scnprintf(buf + pos, buflen - pos, "]");
if (prev_valid + 1 < valid_count)
pos += scnprintf(buf + pos, buflen - pos, ", ");
prev_valid++;
}
}
pos += scnprintf(buf + pos, buflen - pos, "\n");
printk("%s", buf);
}
static int msm_rpm_send_smd_buffer(char *buf, uint32_t size, bool noirq)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
ret = smd_write_avail(msm_rpm_data.ch_info);
while ((ret = smd_write_avail(msm_rpm_data.ch_info)) < size) {
if (ret < 0)
break;
if (!noirq) {
spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write,
flags);
cpu_relax();
spin_lock_irqsave(&msm_rpm_data.smd_lock_write, flags);
} else
udelay(5);
}
if (ret < 0) {
pr_err("%s(): SMD not initialized\n", __func__);
spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
return ret;
}
ret = smd_write(msm_rpm_data.ch_info, buf, size);
spin_unlock_irqrestore(&msm_rpm_data.smd_lock_write, flags);
return ret;
}
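/* Serialise the request (request header, message header and all valid KVPs)
 * into cdata->buf, then either cache it for the sleep set or write it to the
 * SMD channel; returns the message id on success (1 for cached or empty
 * requests) and 0 on failure. */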
static int msm_rpm_send_data(struct msm_rpm_request *cdata,
int msg_type, bool noirq)
{
uint8_t *tmpbuff;
int ret;
uint32_t i;
uint32_t msg_size;
int req_hdr_sz, msg_hdr_sz;
if (probe_status)
return probe_status;
if (!cdata->msg_hdr.data_len)
return 1;
req_hdr_sz = sizeof(cdata->req_hdr);
msg_hdr_sz = sizeof(cdata->msg_hdr);
cdata->req_hdr.service_type = msm_rpm_request_service[msg_type];
cdata->req_hdr.request_len = cdata->msg_hdr.data_len + msg_hdr_sz;
msg_size = cdata->req_hdr.request_len + req_hdr_sz;
/* populate data_len */
if (msg_size > cdata->numbytes) {
kfree(cdata->buf);
cdata->numbytes = msg_size;
cdata->buf = kzalloc(msg_size, GFP_FLAG(noirq));
}
if (!cdata->buf) {
pr_err("%s(): Failed malloc\n", __func__);
return 0;
}
tmpbuff = cdata->buf;
tmpbuff += req_hdr_sz + msg_hdr_sz;
for (i = 0; (i < cdata->write_idx); i++) {
/* Sanity check */
BUG_ON((tmpbuff - cdata->buf) > cdata->numbytes);
if (!cdata->kvp[i].valid)
continue;
memcpy(tmpbuff, &cdata->kvp[i].key, sizeof(uint32_t));
tmpbuff += sizeof(uint32_t);
memcpy(tmpbuff, &cdata->kvp[i].nbytes, sizeof(uint32_t));
tmpbuff += sizeof(uint32_t);
memcpy(tmpbuff, cdata->kvp[i].value, cdata->kvp[i].nbytes);
tmpbuff += cdata->kvp[i].nbytes;
if (cdata->msg_hdr.set == MSM_RPM_CTX_SLEEP_SET)
msm_rpm_notify_sleep_chain(&cdata->msg_hdr,
&cdata->kvp[i]);
}
memcpy(cdata->buf, &cdata->req_hdr, req_hdr_sz + msg_hdr_sz);
if ((cdata->msg_hdr.set == MSM_RPM_CTX_SLEEP_SET) &&
!msm_rpm_smd_buffer_request(cdata->buf, msg_size,
GFP_FLAG(noirq)))
return 1;
cdata->msg_hdr.msg_id = msm_rpm_get_next_msg_id();
memcpy(cdata->buf + req_hdr_sz, &cdata->msg_hdr, msg_hdr_sz);
if (msm_rpm_debug_mask
& (MSM_RPM_LOG_REQUEST_PRETTY | MSM_RPM_LOG_REQUEST_RAW))
msm_rpm_log_request(cdata);
if (standalone) {
for (i = 0; (i < cdata->write_idx); i++)
cdata->kvp[i].valid = false;
cdata->msg_hdr.data_len = 0;
ret = cdata->msg_hdr.msg_id;
return ret;
}
msm_rpm_add_wait_list(cdata->msg_hdr.msg_id);
ret = msm_rpm_send_smd_buffer(&cdata->buf[0], msg_size, noirq);
if (ret == msg_size) {
trace_rpm_send_message(noirq, cdata->msg_hdr.set,
cdata->msg_hdr.resource_type,
cdata->msg_hdr.resource_id,
cdata->msg_hdr.msg_id);
for (i = 0; (i < cdata->write_idx); i++)
cdata->kvp[i].valid = false;
cdata->msg_hdr.data_len = 0;
ret = cdata->msg_hdr.msg_id;
} else if (ret < msg_size) {
struct msm_rpm_wait_data *rc;
pr_err("Failed to write data msg_size:%d ret:%d\n",
msg_size, ret);
ret = 0;
rc = msm_rpm_get_entry_from_msg_id(cdata->msg_hdr.msg_id);
if (rc)
msm_rpm_free_list_entry(rc);
}
return ret;
}
int msm_rpm_send_request(struct msm_rpm_request *handle)
{
int ret;
static DEFINE_MUTEX(send_mtx);
mutex_lock(&send_mtx);
ret = msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, false);
mutex_unlock(&send_mtx);
return ret;
}
EXPORT_SYMBOL(msm_rpm_send_request);
int msm_rpm_send_request_noirq(struct msm_rpm_request *handle)
{
return msm_rpm_send_data(handle, MSM_RPM_MSG_REQUEST_TYPE, true);
}
EXPORT_SYMBOL(msm_rpm_send_request_noirq);
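/*
 * Block until the RPM acknowledges the given message id. Returns immediately
 * for the reserved id 1, in standalone mode, or when no wait-list entry
 * exists; a missing ack after 10 seconds is treated as fatal via BUG().
 */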
int msm_rpm_wait_for_ack(uint32_t msg_id)
{
struct msm_rpm_wait_data *elem;
int rc = 0;
if (!msg_id) {
pr_err("%s(): Invalid msg id\n", __func__);
return -ENOMEM;
}
if (msg_id == 1)
return rc;
if (standalone)
return rc;
elem = msm_rpm_get_entry_from_msg_id(msg_id);
if (!elem)
return rc;
if (!wait_for_completion_timeout(&elem->ack, 10*HZ)) {
pr_err("%s TIMEOUT msg_id %d\n", __func__, msg_id);
BUG();
}
trace_rpm_ack_recd(0, msg_id);
rc = elem->errno;
msm_rpm_free_list_entry(elem);
return rc;
}
EXPORT_SYMBOL(msm_rpm_wait_for_ack);
int msm_rpm_wait_for_ack_noirq(uint32_t msg_id)
{
struct msm_rpm_wait_data *elem;
unsigned long flags;
int rc = 0;
uint32_t id = 0;
if (!msg_id) {
pr_err("%s(): Invalid msg id\n", __func__);
return -ENOMEM;
}
if (msg_id == 1)
return 0;
if (standalone)
return 0;
spin_lock_irqsave(&msm_rpm_data.smd_lock_read, flags);
elem = msm_rpm_get_entry_from_msg_id(msg_id);
if (!elem)
/* Should this be a BUG()?
* Is it okay for another thread to have read the msg?
*/
goto wait_ack_cleanup;
if (elem->errno != INIT_ERROR) {
rc = elem->errno;
msm_rpm_free_list_entry(elem);
goto wait_ack_cleanup;
}
while (id != msg_id) {
if (smd_is_pkt_avail(msm_rpm_data.ch_info)) {
int errno;
char buf[MAX_ERR_BUFFER_SIZE] = {};
msm_rpm_read_smd_data(buf);
id = msm_rpm_get_msg_id_from_ack(buf);
errno = msm_rpm_get_error_from_ack(buf);
msm_rpm_process_ack(id, errno);
}
}
rc = elem->errno;
trace_rpm_ack_recd(1, msg_id);
msm_rpm_free_list_entry(elem);
wait_ack_cleanup:
spin_unlock_irqrestore(&msm_rpm_data.smd_lock_read, flags);
if (smd_is_pkt_avail(msm_rpm_data.ch_info))
complete(&data_ready);
return rc;
}
EXPORT_SYMBOL(msm_rpm_wait_for_ack_noirq);
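/*
 * Convenience wrapper: build a request from the caller's KVP array, send it
 * and wait for the ack. Illustrative (hypothetical) call, assuming a resource
 * type/id and key understood by the RPM:
 *
 *   struct msm_rpm_kvp kvp = {
 *           .key = key,
 *           .data = (uint8_t *)&value,
 *           .length = sizeof(value),
 *   };
 *   rc = msm_rpm_send_message(MSM_RPM_CTX_SLEEP_SET, rsc_type, rsc_id, &kvp, 1);
 */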
int msm_rpm_send_message(enum msm_rpm_set set, uint32_t rsc_type,
uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
{
int i, rc;
struct msm_rpm_request *req =
msm_rpm_create_request(set, rsc_type, rsc_id, nelems);
if (IS_ERR(req))
return PTR_ERR(req);
if (!req)
return -ENOMEM;
for (i = 0; i < nelems; i++) {
rc = msm_rpm_add_kvp_data(req, kvp[i].key,
kvp[i].data, kvp[i].length);
if (rc)
goto bail;
}
rc = msm_rpm_wait_for_ack(msm_rpm_send_request(req));
bail:
msm_rpm_free_request(req);
return rc;
}
EXPORT_SYMBOL(msm_rpm_send_message);
int msm_rpm_send_message_noirq(enum msm_rpm_set set, uint32_t rsc_type,
uint32_t rsc_id, struct msm_rpm_kvp *kvp, int nelems)
{
int i, rc;
struct msm_rpm_request *req =
msm_rpm_create_request_noirq(set, rsc_type, rsc_id, nelems);
if (IS_ERR(req))
return PTR_ERR(req);
if (!req)
return -ENOMEM;
for (i = 0; i < nelems; i++) {
rc = msm_rpm_add_kvp_data_noirq(req, kvp[i].key,
kvp[i].data, kvp[i].length);
if (rc)
goto bail;
}
rc = msm_rpm_wait_for_ack_noirq(msm_rpm_send_request_noirq(req));
bail:
msm_rpm_free_request(req);
return rc;
}
EXPORT_SYMBOL(msm_rpm_send_message_noirq);
/**
 * During power collapse, the RPM driver disables the SMD interrupts to make
 * sure that the interrupt does not wake us from sleep.
 */
int msm_rpm_enter_sleep(bool print, const struct cpumask *cpumask)
{
int ret = 0;
if (standalone)
return 0;
ret = smd_mask_receive_interrupt(msm_rpm_data.ch_info, true, cpumask);
if (!ret) {
ret = msm_rpm_flush_requests(print);
if (ret)
smd_mask_receive_interrupt(msm_rpm_data.ch_info,
false, NULL);
}
return ret;
}
EXPORT_SYMBOL(msm_rpm_enter_sleep);
/**
 * When the system resumes from power collapse, the SMD interrupt that was
 * disabled by the enter function has to be re-enabled to continue processing
 * SMD messages.
 */
void msm_rpm_exit_sleep(void)
{
if (standalone)
return;
smd_mask_receive_interrupt(msm_rpm_data.ch_info, false, NULL);
}
EXPORT_SYMBOL(msm_rpm_exit_sleep);
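/*
 * Probe: read the SMD channel name/type from the device tree, open the RPM
 * channel (unless "rpm-standalone" is set), start the receive workqueue and
 * populate child platform devices.
 */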
static int msm_rpm_dev_probe(struct platform_device *pdev)
{
char *key = NULL;
int ret = 0;
key = "rpm-channel-name";
ret = of_property_read_string(pdev->dev.of_node, key,
&msm_rpm_data.ch_name);
if (ret) {
pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
pdev->dev.of_node->full_name, key);
goto fail;
}
key = "rpm-channel-type";
ret = of_property_read_u32(pdev->dev.of_node, key,
&msm_rpm_data.ch_type);
if (ret) {
pr_err("%s(): Failed to read node: %s, key=%s\n", __func__,
pdev->dev.of_node->full_name, key);
goto fail;
}
key = "rpm-standalone";
standalone = of_property_read_bool(pdev->dev.of_node, key);
if (standalone) {
probe_status = 0;
goto skip_smd_init;
}
ret = smd_named_open_on_edge(msm_rpm_data.ch_name,
msm_rpm_data.ch_type,
&msm_rpm_data.ch_info,
&msm_rpm_data,
msm_rpm_notify);
if (ret) {
if (ret != -EPROBE_DEFER) {
pr_err("%s: Cannot open RPM channel %s %d\n",
__func__, msm_rpm_data.ch_name,
msm_rpm_data.ch_type);
}
goto fail;
}
spin_lock_init(&msm_rpm_data.smd_lock_write);
spin_lock_init(&msm_rpm_data.smd_lock_read);
INIT_WORK(&msm_rpm_data.work, msm_rpm_smd_work);
wait_for_completion(&msm_rpm_data.smd_open);
smd_disable_read_intr(msm_rpm_data.ch_info);
msm_rpm_smd_wq = alloc_workqueue("rpm-smd",
WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_HIGHPRI, 1);
if (!msm_rpm_smd_wq) {
pr_err("%s: Unable to alloc rpm-smd workqueue\n", __func__);
ret = -EINVAL;
goto fail;
}
queue_work(msm_rpm_smd_wq, &msm_rpm_data.work);
probe_status = ret;
skip_smd_init:
of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
if (standalone)
pr_info("%s: RPM running in standalone mode\n", __func__);
fail:
return probe_status;
}
static struct of_device_id msm_rpm_match_table[] = {
{.compatible = "qcom,rpm-smd"},
{},
};
static struct platform_driver msm_rpm_device_driver = {
.probe = msm_rpm_dev_probe,
.driver = {
.name = "rpm-smd",
.owner = THIS_MODULE,
.of_match_table = msm_rpm_match_table,
},
};
int __init msm_rpm_driver_init(void)
{
static bool registered;
if (registered)
return 0;
registered = true;
return platform_driver_register(&msm_rpm_device_driver);
}
EXPORT_SYMBOL(msm_rpm_driver_init);
arch_initcall(msm_rpm_driver_init);
|
SlimLG2/android_kernel_motorola_msm8992
|
drivers/soc/qcom/rpm-smd.c
|
C
|
gpl-2.0
| 34,820
|
package com.tinkerlad.chemistry.utils.vector;
import com.google.common.io.ByteArrayDataInput;
import com.tinkerlad.chemistry.utils.interfaces.IVector2;
import net.minecraft.nbt.NBTTagCompound;
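/**
 * Simple mutable 2D vector with x and z components, including constructors
 * for byte-stream and NBT data and basic arithmetic helpers.
 */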
public class Vector2 implements Cloneable, IVector2 {
public double x;
public double z;
public Vector2() {
this(0, 0);
}
public Vector2(double x, double z) {
this.x = x;
this.z = z;
}
public Vector2(IVector2 vec) {
this.x = vec.x();
this.z = vec.z();
}
public Vector2(ByteArrayDataInput data) {
this(data.readDouble(), data.readDouble());
}
public Vector2(NBTTagCompound nbt) {
this(nbt.getDouble("x"), nbt.getDouble("z"));
}
public static double distance(Vector2 point1, Vector2 point2) {
return point1.clone().distance(point2);
}
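/**
 * Returns the slope (z difference over x difference) of the line through the
 * two points; yields Infinity or NaN when the x components are equal.
 */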
public static double slope(Vector2 point1, Vector2 point2) {
double xDifference = point1.x - point2.x;
double zDifference = point1.z - point2.z;
return zDifference / xDifference;
}
@Override
public double x() {
return this.x;
}
@Override
public double z() {
return this.z;
}
public int intX() {
return (int) Math.floor(this.x);
}
public int intY() {
return (int) Math.floor(this.z);
}
/**
* Makes a new copy of this Vector. Prevents variable referencing problems.
*/
@Override
public Vector2 clone() {
return new Vector2(this.x, this.z);
}
/**
* ---------------------- MAGNITUDE FUNCTIONS ----------------------------
*/
public double getMagnitude() {
return Math.sqrt(this.getMagnitudeSquared());
}
public double getMagnitudeSquared() {
return this.x * this.x + this.z * this.z;
}
public Vector2 normalize() {
double d = this.getMagnitude();
if (d != 0) {
this.scale(1 / d);
}
return this;
}
public Vector2 midPoint(Vector2 pos) {
return new Vector2((x + pos.x) / 2, (z + pos.z) / 2);
}
public double distance(Vector2 target) {
Vector2 difference = this.clone().subtract(target);
return difference.getMagnitude();
}
public Vector2 add(Vector2 par1) {
this.x += par1.x;
this.z += par1.z;
return this;
}
public Vector2 add(double par1) {
this.x += par1;
this.z += par1;
return this;
}
public Vector2 subtract(Vector2 par1) {
this.x -= par1.x;
this.z -= par1.z;
return this;
}
public Vector2 invert() {
this.scale(-1);
return this;
}
public Vector2 scale(double amount) {
this.x *= amount;
this.z *= amount;
return this;
}
@Deprecated
public Vector2 multiply(double amount) {
return this.scale(amount);
}
public Vector2 round() {
return new Vector2(Math.round(this.x), Math.round(this.z));
}
public Vector2 ceil() {
return new Vector2(Math.ceil(this.x), Math.ceil(this.z));
}
public Vector2 floor() {
return new Vector2(Math.floor(this.x), Math.floor(this.z));
}
@Override
public int hashCode() {
long x = Double.doubleToLongBits(this.x);
long y = Double.doubleToLongBits(this.z);
return 31 * (int) (x ^ (x >>> 32)) + (int) (y ^ (y >>> 32));
}
@Override
public boolean equals(Object o) {
if (o instanceof Vector2) {
Vector2 vector = (Vector2) o;
return this.x == vector.x && this.z == vector.z;
}
return false;
}
@Override
public String toString() {
return "Vector2 [" + this.x + "," + this.z + "]";
}
}
|
Tinkerlad0/Periodic-Production
|
src/main/java/com/tinkerlad/chemistry/utils/vector/Vector2.java
|
Java
|
gpl-2.0
| 3,794
|
<?php
/**
* Export support class
*
* PHP version 5
*
* Copyright (C) Villanova University 2010.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* @category VuFind2
* @package Export
* @author Demian Katz <demian.katz@villanova.edu>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link http://vufind.org Main Site
*/
namespace VuFind;
use VuFind\SimpleXML, Zend\Config\Config;
/**
* Export support class
*
* @category VuFind2
* @package Export
* @author Demian Katz <demian.katz@villanova.edu>
* @license http://opensource.org/licenses/gpl-2.0.php GNU General Public License
* @link http://vufind.org Main Site
*/
class Export
{
/**
* Main VuFind configuration
*
* @var Config
*/
protected $mainConfig;
/**
* Export-specific configuration
*
* @var Config
*/
protected $exportConfig;
/**
* Constructor
*
* @param Config $mainConfig Main VuFind configuration
* @param Config $exportConfig Export-specific configuration
*/
public function __construct(Config $mainConfig, Config $exportConfig)
{
$this->mainConfig = $mainConfig;
$this->exportConfig = $exportConfig;
}
/**
* Get bulk export options.
*
* @return array
*/
public function getBulkOptions()
{
static $options = false;
if ($options === false) {
$options = array();
if (isset($this->mainConfig->BulkExport->enabled)
&& isset($this->mainConfig->BulkExport->options)
&& $this->mainConfig->BulkExport->enabled
) {
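// BulkExport->options is expected to be a colon-separated list of format names (e.g. "MARC:EndNote").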
$config = explode(':', $this->mainConfig->BulkExport->options);
foreach ($config as $option) {
if (isset($this->mainConfig->Export->$option)
&& $this->mainConfig->Export->$option == true
) {
$options[] = $option;
}
}
}
}
return $options;
}
/**
* Get the URL for bulk export.
*
* @param \Zend\View\Renderer\RendererInterface $view View object (needed for
* URL generation)
* @param string $format Export format being used
* @param array $ids Array of IDs to export
* (in source|id format)
*
* @return string
*/
public function getBulkUrl($view, $format, $ids)
{
$params = array();
$params[] = 'f=' . urlencode($format);
foreach ($ids as $id) {
$params[] = urlencode('i[]') . '=' . urlencode($id);
}
// $serverUrlHelper = $view->plugin('serverurl');
$urlHelper = $view->plugin('url');
// $url = $serverUrlHelper($urlHelper('cart-doexport'))
// . '?' . implode('&', $params);
$url = $this->mainConfig->Site->url .
$urlHelper('cart-doexport') . '?' . implode('&', $params);
return $this->needsRedirect($format)
? $this->getRedirectUrl($format, $url) : $url;
}
/**
* Build callback URL for export.
*
* @param string $format Export format being used
* @param string $callback Callback URL for retrieving record(s)
*
* @return string
*/
public function getRedirectUrl($format, $callback)
{
// Fill in special tokens in template:
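// A redirectUrl template may contain tokens of the form {config|Section|setting|default},
// {encodedConfig|Section|setting|default} and {encodedCallback}; they are expanded below.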
$template = $this->exportConfig->$format->redirectUrl;
preg_match_all('/\{([^}]+)\}/', $template, $matches);
foreach ($matches[1] as $current) {
$parts = explode('|', $current);
switch ($parts[0]) {
case 'config':
case 'encodedConfig':
if (isset($this->mainConfig->{$parts[1]}->{$parts[2]})) {
$value = $this->mainConfig->{$parts[1]}->{$parts[2]};
} else {
$value = $parts[3];
}
if ($parts[0] == 'encodedConfig') {
$value = urlencode($value);
}
$template = str_replace('{' . $current . '}', $value, $template);
break;
case 'encodedCallback':
$template = str_replace(
'{' . $current . '}', urlencode($callback), $template
);
break;
}
}
return $template;
}
/**
* Does the requested format require a redirect?
*
* @param string $format Format to check
*
* @return bool
*/
public function needsRedirect($format)
{
return isset($this->exportConfig->$format->redirectUrl);
}
/**
* Convert an array of individual records into a single string for display.
*
* @param string $format Format of records to process
* @param array $parts Multiple records to process
*
* @return string
*/
public function processGroup($format, $parts)
{
// If we're in XML mode, we need to do some special processing:
if (isset($this->exportConfig->$format->combineXpath)) {
$ns = isset($this->exportConfig->$format->combineNamespaces)
? $this->exportConfig->$format->combineNamespaces->toArray()
: array();
$ns = array_map(
function ($current) {
return explode('|', $current, 2);
}, $ns
);
foreach ($parts as $part) {
// Convert text into XML object:
$current = simplexml_load_string($part);
// The first record gets kept as-is; subsequent records get merged
// in based on the configured XPath (currently only one level is
// supported)...
if (!isset($retVal)) {
$retVal = $current;
} else {
foreach ($ns as $n) {
$current->registerXPathNamespace($n[0], $n[1]);
}
$matches = $current->xpath(
$this->exportConfig->$format->combineXpath
);
foreach ($matches as $match) {
SimpleXML::appendElement($retVal, $match);
}
}
}
return $retVal->asXML();
} else {
// Not in XML mode -- just concatenate everything together:
return implode('', $parts);
}
}
/**
* Does the specified record support the specified export format?
*
* @param \VuFind\RecordDriver\AbstractBase $driver Record driver
* @param string $format Format to check
*
* @return bool
*/
public function recordSupportsFormat($driver, $format)
{
// Check if the driver explicitly disallows the format:
if ($driver->tryMethod('exportDisabled', array($format))) {
return false;
}
// Check the requirements for export in the requested format:
if (isset($this->exportConfig->$format)) {
if (isset($this->exportConfig->$format->requiredMethods)) {
foreach ($this->exportConfig->$format->requiredMethods as $method) {
// If a required method is missing, give up now:
if (!is_callable(array($driver, $method))) {
return false;
}
}
}
// If we got this far, we didn't encounter a problem, and the
// requested export format is valid, so we can report success!
return true;
}
// If we got this far, we couldn't find evidence of support:
return false;
}
/**
* Get an array of strings representing formats in which a specified record's
* data may be exported (empty if none). Legal values: "BibTeX", "EndNote",
* "MARC", "MARCXML", "RDF", "RefWorks".
*
* @param \VuFind\RecordDriver\AbstractBase $driver Record driver
*
* @return array Strings representing export formats.
*/
public function getFormatsForRecord($driver)
{
// Get an array of enabled export formats (from config, or use defaults
// if nothing in config array).
$active = isset($this->mainConfig->Export)
? $this->mainConfig->Export->toArray()
: array('RefWorks' => true, 'EndNote' => true);
// Loop through all possible formats:
$formats = array();
foreach (array_keys($this->exportConfig->toArray()) as $format) {
if (isset($active[$format]) && $active[$format]
&& $this->recordSupportsFormat($driver, $format)
) {
$formats[] = $format;
}
}
// Send back the results:
return $formats;
}
/**
* Same return value as getFormatsForRecord(), but filtered to reflect bulk
* export configuration and to list only values supported by a set of records.
*
* @param array $drivers Array of record drivers
*
* @return array
*/
public function getFormatsForRecords($drivers)
{
$formats = $this->getBulkOptions();
foreach ($drivers as $driver) {
// Filter out unsupported export formats:
$newFormats = array();
foreach ($formats as $current) {
if ($this->recordSupportsFormat($driver, $current)) {
$newFormats[] = $current;
}
}
$formats = $newFormats;
}
return $formats;
}
/**
* Get headers for the requested format.
*
* @param string $format Selected export format
*
* @return array
*/
public function getHeaders($format)
{
return isset($this->exportConfig->$format->headers)
? $this->exportConfig->$format->headers : array();
}
}
|
IISH/vufind
|
module/VuFind/src/VuFind/Export.php
|
PHP
|
gpl-2.0
| 10,752
|
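# SAE J2012 "U" (network/communication) diagnostic trouble codes, keyed by code.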
ucodes = {
"U0001" : "High Speed CAN Communication Bus" ,
"U0002" : "High Speed CAN Communication Bus (Performance)" ,
"U0003" : "High Speed CAN Communication Bus (Open)" ,
"U0004" : "High Speed CAN Communication Bus (Low)" ,
"U0005" : "High Speed CAN Communication Bus (High)" ,
"U0006" : "High Speed CAN Communication Bus (Open)" ,
"U0007" : "High Speed CAN Communication Bus (Low)" ,
"U0008" : "High Speed CAN Communication Bus (High)" ,
"U0009" : "High Speed CAN Communication Bus (shorted to Bus)" ,
"U0010" : "Medium Speed CAN Communication Bus" ,
"U0011" : "Medium Speed CAN Communication Bus (Performance)" ,
"U0012" : "Medium Speed CAN Communication Bus (Open)" ,
"U0013" : "Medium Speed CAN Communication Bus (Low)" ,
"U0014" : "Medium Speed CAN Communication Bus (High)" ,
"U0015" : "Medium Speed CAN Communication Bus (Open)" ,
"U0016" : "Medium Speed CAN Communication Bus (Low)" ,
"U0017" : "Medium Speed CAN Communication Bus (High)" ,
"U0018" : "Medium Speed CAN Communication Bus (shorted to Bus)" ,
"U0019" : "Low Speed CAN Communication Bus" ,
"U0020" : "Low Speed CAN Communication Bus (Performance)" ,
"U0021" : "Low Speed CAN Communication Bus (Open)" ,
"U0022" : "Low Speed CAN Communication Bus (Low)" ,
"U0023" : "Low Speed CAN Communication Bus (High)" ,
"U0024" : "Low Speed CAN Communication Bus (Open)" ,
"U0025" : "Low Speed CAN Communication Bus (Low)" ,
"U0026" : "Low Speed CAN Communication Bus (High)" ,
"U0027" : "Low Speed CAN Communication Bus (shorted to Bus)" ,
"U0028" : "Vehicle Communication Bus A" ,
"U0029" : "Vehicle Communication Bus A (Performance)" ,
"U0030" : "Vehicle Communication Bus A (Open)" ,
"U0031" : "Vehicle Communication Bus A (Low)" ,
"U0032" : "Vehicle Communication Bus A (High)" ,
"U0033" : "Vehicle Communication Bus A (Open)" ,
"U0034" : "Vehicle Communication Bus A (Low)" ,
"U0035" : "Vehicle Communication Bus A (High)" ,
"U0036" : "Vehicle Communication Bus A (shorted to Bus A)" ,
"U0037" : "Vehicle Communication Bus B" ,
"U0038" : "Vehicle Communication Bus B (Performance)" ,
"U0039" : "Vehicle Communication Bus B (Open)" ,
"U0040" : "Vehicle Communication Bus B (Low)" ,
"U0041" : "Vehicle Communication Bus B (High)" ,
"U0042" : "Vehicle Communication Bus B (Open)" ,
"U0043" : "Vehicle Communication Bus B (Low)" ,
"U0044" : "Vehicle Communication Bus B (High)" ,
"U0045" : "Vehicle Communication Bus B (shorted to Bus B)" ,
"U0046" : "Vehicle Communication Bus C" ,
"U0047" : "Vehicle Communication Bus C (Performance)" ,
"U0048" : "Vehicle Communication Bus C (Open)" ,
"U0049" : "Vehicle Communication Bus C (Low)" ,
"U0050" : "Vehicle Communication Bus C (High)" ,
"U0051" : "Vehicle Communication Bus C (Open)" ,
"U0052" : "Vehicle Communication Bus C (Low)" ,
"U0053" : "Vehicle Communication Bus C (High)" ,
"U0054" : "Vehicle Communication Bus C (shorted to Bus C)" ,
"U0055" : "Vehicle Communication Bus D" ,
"U0056" : "Vehicle Communication Bus D (Performance)" ,
"U0057" : "Vehicle Communication Bus D (Open)" ,
"U0058" : "Vehicle Communication Bus D (Low)" ,
"U0059" : "Vehicle Communication Bus D (High)" ,
"U0060" : "Vehicle Communication Bus D (Open)" ,
"U0061" : "Vehicle Communication Bus D (Low)" ,
"U0062" : "Vehicle Communication Bus D (High)" ,
"U0063" : "Vehicle Communication Bus D (shorted to Bus D)" ,
"U0064" : "Vehicle Communication Bus E" ,
"U0065" : "Vehicle Communication Bus E (Performance)" ,
"U0066" : "Vehicle Communication Bus E (Open)" ,
"U0067" : "Vehicle Communication Bus E (Low)" ,
"U0068" : "Vehicle Communication Bus E (High)" ,
"U0069" : "Vehicle Communication Bus E (Open)" ,
"U0070" : "Vehicle Communication Bus E (Low)" ,
"U0071" : "Vehicle Communication Bus E (High)" ,
"U0072" : "Vehicle Communication Bus E (shorted to Bus E)" ,
"U0073" : "Control Module Communication Bus Off" ,
"U0074" : "Reserved by J2012" ,
"U0075" : "Reserved by J2012" ,
"U0076" : "Reserved by J2012" ,
"U0077" : "Reserved by J2012" ,
"U0078" : "Reserved by J2012" ,
"U0079" : "Reserved by J2012" ,
"U0080" : "Reserved by J2012" ,
"U0081" : "Reserved by J2012" ,
"U0082" : "Reserved by J2012" ,
"U0083" : "Reserved by J2012" ,
"U0084" : "Reserved by J2012" ,
"U0085" : "Reserved by J2012" ,
"U0086" : "Reserved by J2012" ,
"U0087" : "Reserved by J2012" ,
"U0088" : "Reserved by J2012" ,
"U0089" : "Reserved by J2012" ,
"U0090" : "Reserved by J2012" ,
"U0091" : "Reserved by J2012" ,
"U0092" : "Reserved by J2012" ,
"U0093" : "Reserved by J2012" ,
"U0094" : "Reserved by J2012" ,
"U0095" : "Reserved by J2012" ,
"U0096" : "Reserved by J2012" ,
"U0097" : "Reserved by J2012" ,
"U0098" : "Reserved by J2012" ,
"U0099" : "Reserved by J2012" ,
"U0100" : "Lost Communication With ECM/PCM A" ,
"U0101" : "Lost Communication with TCM" ,
"U0102" : "Lost Communication with Transfer Case Control Module" ,
"U0103" : "Lost Communication With Gear Shift Module" ,
"U0104" : "Lost Communication With Cruise Control Module" ,
"U0105" : "Lost Communication With Fuel Injector Control Module" ,
"U0106" : "Lost Communication With Glow Plug Control Module" ,
"U0107" : "Lost Communication With Throttle Actuator Control Module" ,
"U0108" : "Lost Communication With Alternative Fuel Control Module" ,
"U0109" : "Lost Communication With Fuel Pump Control Module" ,
"U0110" : "Lost Communication With Drive Motor Control Module" ,
"U0111" : "Lost Communication With Battery Energy Control Module 'A'" ,
"U0112" : "Lost Communication With Battery Energy Control Module 'B'" ,
"U0113" : "Lost Communication With Emissions Critical Control Information" ,
"U0114" : "Lost Communication With Four-Wheel Drive Clutch Control Module" ,
"U0115" : "Lost Communication With ECM/PCM B" ,
"U0116" : "Reserved by J2012" ,
"U0117" : "Reserved by J2012" ,
"U0118" : "Reserved by J2012" ,
"U0119" : "Reserved by J2012" ,
"U0120" : "Reserved by J2012" ,
"U0121" : "Lost Communication With Anti-Lock Brake System (ABS) Control Module" ,
"U0122" : "Lost Communication With Vehicle Dynamics Control Module" ,
"U0123" : "Lost Communication With Yaw Rate Sensor Module" ,
"U0124" : "Lost Communication With Lateral Acceleration Sensor Module" ,
"U0125" : "Lost Communication With Multi-axis Acceleration Sensor Module" ,
"U0126" : "Lost Communication With Steering Angle Sensor Module" ,
"U0127" : "Lost Communication With Tire Pressure Monitor Module" ,
"U0128" : "Lost Communication With Park Brake Control Module" ,
"U0129" : "Lost Communication With Brake System Control Module" ,
"U0130" : "Lost Communication With Steering Effort Control Module" ,
"U0131" : "Lost Communication With Power Steering Control Module" ,
"U0132" : "Lost Communication With Ride Level Control Module" ,
"U0133" : "Reserved by J2012" ,
"U0134" : "Reserved by J2012" ,
"U0135" : "Reserved by J2012" ,
"U0136" : "Reserved by J2012" ,
"U0137" : "Reserved by J2012" ,
"U0138" : "Reserved by J2012" ,
"U0139" : "Reserved by J2012" ,
"U0140" : "Lost Communication With Body Control Module" ,
"U0141" : "Lost Communication With Body Control Module 'A'" ,
"U0142" : "Lost Communication With Body Control Module 'B'" ,
"U0143" : "Lost Communication With Body Control Module 'C'" ,
"U0144" : "Lost Communication With Body Control Module 'D'" ,
"U0145" : "Lost Communication With Body Control Module 'E'" ,
"U0146" : "Lost Communication With Gateway 'A'" ,
"U0147" : "Lost Communication With Gateway 'B'" ,
"U0148" : "Lost Communication With Gateway 'C'" ,
"U0149" : "Lost Communication With Gateway 'D'" ,
"U0150" : "Lost Communication With Gateway 'E'" ,
"U0151" : "Lost Communication With Restraints Control Module" ,
"U0152" : "Lost Communication With Side Restraints Control Module Left" ,
"U0153" : "Lost Communication With Side Restraints Control Module Right" ,
"U0154" : "Lost Communication With Restraints Occupant Sensing Control Module" ,
"U0155" : "Lost Communication With Instrument Panel Cluster (IPC) Control Module" ,
"U0156" : "Lost Communication With Information Center 'A'" ,
"U0157" : "Lost Communication With Information Center 'B'" ,
"U0158" : "Lost Communication With Head Up Display" ,
"U0159" : "Lost Communication With Parking Assist Control Module" ,
"U0160" : "Lost Communication With Audible Alert Control Module" ,
"U0161" : "Lost Communication With Compass Module" ,
"U0162" : "Lost Communication With Navigation Display Module" ,
"U0163" : "Lost Communication With Navigation Control Module" ,
"U0164" : "Lost Communication With HVAC Control Module" ,
"U0165" : "Lost Communication With HVAC Control Module Rear" ,
"U0166" : "Lost Communication With Auxiliary Heater Control Module" ,
"U0167" : "Lost Communication With Vehicle Immobilizer Control Module" ,
"U0168" : "Lost Communication With Vehicle Security Control Module" ,
"U0169" : "Lost Communication With Sunroof Control Module" ,
"U0170" : "Lost Communication With 'Restraints System Sensor A'" ,
"U0171" : "Lost Communication With 'Restraints System Sensor B'" ,
"U0172" : "Lost Communication With 'Restraints System Sensor C'" ,
"U0173" : "Lost Communication With 'Restraints System Sensor D'" ,
"U0174" : "Lost Communication With 'Restraints System Sensor E'" ,
"U0175" : "Lost Communication With 'Restraints System Sensor F'" ,
"U0176" : "Lost Communication With 'Restraints System Sensor G'" ,
"U0177" : "Lost Communication With 'Restraints System Sensor H'" ,
"U0178" : "Lost Communication With 'Restraints System Sensor I'" ,
"U0179" : "Lost Communication With 'Restraints System Sensor J'" ,
"U0180" : "Lost Communication With Automatic Lighting Control Module" ,
"U0181" : "Lost Communication With Headlamp Leveling Control Module" ,
"U0182" : "Lost Communication With Lighting Control Module Front" ,
"U0183" : "Lost Communication With Lighting Control Module Rear" ,
"U0184" : "Lost Communication With Radio" ,
"U0185" : "Lost Communication With Antenna Control Module" ,
"U0186" : "Lost Communication With Audio Amplifier" ,
"U0187" : "Lost Communication With Digital Disc Player/Changer Module 'A'" ,
"U0188" : "Lost Communication With Digital Disc Player/Changer Module 'B'" ,
"U0189" : "Lost Communication With Digital Disc Player/Changer Module 'C'" ,
"U0190" : "Lost Communication With Digital Disc Player/Changer Module 'D'" ,
"U0191" : "Lost Communication With Television" ,
"U0192" : "Lost Communication With Personal Computer" ,
"U0193" : "Lost Communication With 'Digital Audio Control Module A'" ,
"U0194" : "Lost Communication With 'Digital Audio Control Module B'" ,
"U0195" : "Lost Communication With Subscription Entertainment Receiver Module" ,
"U0196" : "Lost Communication With Rear Seat Entertainment Control Module" ,
"U0197" : "Lost Communication With Telephone Control Module" ,
"U0198" : "Lost Communication With Telematic Control Module" ,
"U0199" : "Lost Communication With 'Door Control Module A'" ,
"U0200" : "Lost Communication With 'Door Control Module B'" ,
"U0201" : "Lost Communication With 'Door Control Module C'" ,
"U0202" : "Lost Communication With 'Door Control Module D'" ,
"U0203" : "Lost Communication With 'Door Control Module E'" ,
"U0204" : "Lost Communication With 'Door Control Module F'" ,
"U0205" : "Lost Communication With 'Door Control Module G'" ,
"U0206" : "Lost Communication With Folding Top Control Module" ,
"U0207" : "Lost Communication With Moveable Roof Control Module" ,
"U0208" : "Lost Communication With 'Seat Control Module A'" ,
"U0209" : "Lost Communication With 'Seat Control Module B'" ,
"U0210" : "Lost Communication With 'Seat Control Module C'" ,
"U0211" : "Lost Communication With 'Seat Control Module D'" ,
"U0212" : "Lost Communication With Steering Column Control Module" ,
"U0213" : "Lost Communication With Mirror Control Module" ,
"U0214" : "Lost Communication With Remote Function Actuation" ,
"U0215" : "Lost Communication With 'Door Switch A'" ,
"U0216" : "Lost Communication With 'Door Switch B'" ,
"U0217" : "Lost Communication With 'Door Switch C'" ,
"U0218" : "Lost Communication With 'Door Switch D'" ,
"U0219" : "Lost Communication With 'Door Switch E'" ,
"U0220" : "Lost Communication With 'Door Switch F'" ,
"U0221" : "Lost Communication With 'Door Switch G'" ,
"U0222" : "Lost Communication With 'Door Window Motor A'" ,
"U0223" : "Lost Communication With 'Door Window Motor B'" ,
"U0224" : "Lost Communication With 'Door Window Motor C'" ,
"U0225" : "Lost Communication With 'Door Window Motor D'" ,
"U0226" : "Lost Communication With 'Door Window Motor E'" ,
"U0227" : "Lost Communication With 'Door Window Motor F'" ,
"U0228" : "Lost Communication With 'Door Window Motor G'" ,
"U0229" : "Lost Communication With Heated Steering Wheel Module" ,
"U0230" : "Lost Communication With Rear Gate Module" ,
"U0231" : "Lost Communication With Rain Sensing Module" ,
"U0232" : "Lost Communication With Side Obstacle Detection Control Module Left" ,
"U0233" : "Lost Communication With Side Obstacle Detection Control Module Right" ,
"U0234" : "Lost Communication With Convenience Recall Module" ,
"U0235" : "Lost Communication With Cruise Control Front Distance Range Sensor" ,
"U0300" : "Internal Control Module Software Incompatibility" ,
"U0301" : "Software Incompatibility with ECM/PCM" ,
"U0302" : "Software Incompatibility with Transmission Control Module" ,
"U0303" : "Software Incompatibility with Transfer Case Control Module" ,
"U0304" : "Software Incompatibility with Gear Shift Control Module" ,
"U0305" : "Software Incompatibility with Cruise Control Module" ,
"U0306" : "Software Incompatibility with Fuel Injector Control Module" ,
"U0307" : "Software Incompatibility with Glow Plug Control Module" ,
"U0308" : "Software Incompatibility with Throttle Actuator Control Module" ,
"U0309" : "Software Incompatibility with Alternative Fuel Control Module" ,
"U0310" : "Software Incompatibility with Fuel Pump Control Module" ,
"U0311" : "Software Incompatibility with Drive Motor Control Module" ,
"U0312" : "Software Incompatibility with Battery Energy Control Module A" ,
"U0313" : "Software Incompatibility with Battery Energy Control Module B" ,
"U0314" : "Software Incompatibility with Four-Wheel Drive Clutch Control Module" ,
"U0315" : "Software Incompatibility with Anti-Lock Brake System Control Module" ,
"U0316" : "Software Incompatibility with Vehicle Dynamics Control Module" ,
"U0317" : "Software Incompatibility with Park Brake Control Module" ,
"U0318" : "Software Incompatibility with Brake System Control Module" ,
"U0319" : "Software Incompatibility with Steering Effort Control Module" ,
"U0320" : "Software Incompatibility with Power Steering Control Module" ,
"U0321" : "Software Incompatibility with Ride Level Control Module" ,
"U0322" : "Software Incompatibility with Body Control Module" ,
"U0323" : "Software Incompatibility with Instrument Panel Control Module" ,
"U0324" : "Software Incompatibility with HVAC Control Module" ,
"U0325" : "Software Incompatibility with Auxiliary Heater Control Module" ,
"U0326" : "Software Incompatibility with Vehicle Immobilizer Control Module" ,
"U0327" : "Software Incompatibility with Vehicle Security Control Module" ,
"U0328" : "Software Incompatibility with Steering Angle Sensor Module" ,
"U0329" : "Software Incompatibility with Steering Column Control Module" ,
"U0330" : "Software Incompatibility with Tire Pressure Monitor Module" ,
"U0331" : "Software Incompatibility with Body Control Module 'A'" ,
"U0400" : "Invalid Data Received" ,
"U0401" : "Invalid Data Received From ECM/PCM" ,
"U0402" : "Invalid Data Received From Transmission Control Module" ,
"U0403" : "Invalid Data Received From Transfer Case Control Module" ,
"U0404" : "Invalid Data Received From Gear Shift Control Module" ,
"U0405" : "Invalid Data Received From Cruise Control Module" ,
"U0406" : "Invalid Data Received From Fuel Injector Control Module" ,
"U0407" : "Invalid Data Received From Glow Plug Control Module" ,
"U0408" : "Invalid Data Received From Throttle Actuator Control Module" ,
"U0409" : "Invalid Data Received From Alternative Fuel Control Module" ,
"U0410" : "Invalid Data Received From Fuel Pump Control Module" ,
"U0411" : "Invalid Data Received From Drive Motor Control Module" ,
"U0412" : "Invalid Data Received From Battery Energy Control Module A" ,
"U0413" : "Invalid Data Received From Battery Energy Control Module B" ,
"U0414" : "Invalid Data Received From Four-Wheel Drive Clutch Control Module" ,
"U0415" : "Invalid Data Received From Anti-Lock Brake System Control Module" ,
"U0416" : "Invalid Data Received From Vehicle Dynamics Control Module" ,
"U0417" : "Invalid Data Received From Park Brake Control Module" ,
"U0418" : "Invalid Data Received From Brake System Control Module" ,
"U0419" : "Invalid Data Received From Steering Effort Control Module" ,
"U0420" : "Invalid Data Received From Power Steering Control Module" ,
"U0421" : "Invalid Data Received From Ride Level Control Module" ,
"U0422" : "Invalid Data Received From Body Control Module" ,
"U0423" : "Invalid Data Received From Instrument Panel Control Module" ,
"U0424" : "Invalid Data Received From HVAC Control Module" ,
"U0425" : "Invalid Data Received From Auxiliary Heater Control Module" ,
"U0426" : "Invalid Data Received From Vehicle Immobilizer Control Module" ,
"U0427" : "Invalid Data Received From Vehicle Security Control Module" ,
"U0428" : "Invalid Data Received From Steering Angle Sensor Module" ,
"U0429" : "Invalid Data Received From Steering Column Control Module" ,
"U0430" : "Invalid Data Received From Tire Pressure Monitor Module" ,
"U0431" : "Invalid Data Received From Body Control Module 'A'"
}
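# Example lookup:
#     ucodes.get("U0100", "Unknown code")  # -> "Lost Communication With ECM/PCM A"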
|
lkarsten/pyobd
|
network_codes.py
|
Python
|
gpl-2.0
| 18,096
|
#ifndef KBS_STRING_H
#define KBS_STRING_H
#include "kbs_Alphabet.h"
#include "kbs_Types.h"
typedef struct kbs_ustring{
Kbs_Ulong strLength; /** length of the string */
Kbs_Uchar *str; /** character array of length strLength plus terminating 0 */
Kbs_Alphabet *alphabet; /** the alphabet of the string, if it is determined previously */
}Kbs_Ustring;
#define KBS_STRING_EXTENSION_SIZE 32
/*----------------------------------------------------------------------------*/
/**
* Gets the Kbs_Ustring from a given file without the alphabet
* @param filename - file containing the string.
* @return Kbs_Ustring located in filename
* @see kbs_getUstringWithAlphabet_FromFile
*/
Kbs_Ustring* kbs_getUstring_FromFile(const Kbs_Char *const filename);
/*----------------------------------------------------------------------------*/
/**
* Gets the Kbs_Ustring with its alphabet from a given file
* @param filename - file containing the string.
* @return Kbs_Ustring located in filename
* @see kbs_getUstring_FromFile
*/
Kbs_Ustring* kbs_getUstringWithAlphabet_FromFile(Kbs_Char *filename);
/*----------------------------------------------------------------------------*/
/**
* frees a Kbs_Ustring
* @param oldStr - Kbs_Ustring string to free
*/
void kbs_delete_Ustring(Kbs_Ustring* oldStr);
/**
 * Determines the alphabet of the given Kbs_Ustring and stores it in the
 * string's alphabet field
 * @param thisString string whose alphabet is to be determined
 * @see Kbs_Ustring
 */
void kbs_get_AlphabetForUstring(Kbs_Ustring *thisString);
#endif
|
anadon/BPR2
|
src/Standard/kbs_String.h
|
C
|
gpl-2.0
| 1,520
|
#
# Makefile for misc devices that really don't fit anywhere else.
#
obj-$(CONFIG_IBM_ASM) += ibmasm/
obj-$(CONFIG_AD525X_DPOT) += ad525x_dpot.o
obj-$(CONFIG_AD525X_DPOT_I2C) += ad525x_dpot-i2c.o
obj-$(CONFIG_AD525X_DPOT_SPI) += ad525x_dpot-spi.o
obj-$(CONFIG_INTEL_MID_PTI) += pti.o
obj-$(CONFIG_ATMEL_PWM) += atmel_pwm.o
obj-$(CONFIG_ATMEL_SSC) += atmel-ssc.o
obj-$(CONFIG_ATMEL_TCLIB) += atmel_tclib.o
obj-$(CONFIG_BMP085) += bmp085.o
obj-$(CONFIG_BMP085_I2C) += bmp085-i2c.o
obj-$(CONFIG_BMP085_SPI) += bmp085-spi.o
obj-$(CONFIG_DUMMY_IRQ) += dummy-irq.o
obj-$(CONFIG_ICS932S401) += ics932s401.o
obj-$(CONFIG_LKDTM) += lkdtm.o
obj-$(CONFIG_TIFM_CORE) += tifm_core.o
obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
obj-$(CONFIG_PHANTOM) += phantom.o
obj-$(CONFIG_SENSORS_BH1780) += bh1780gli.o
obj-$(CONFIG_SENSORS_BH1770) += bh1770glc.o
obj-$(CONFIG_SENSORS_APDS990X) += apds990x.o
obj-$(CONFIG_SGI_IOC4) += ioc4.o
obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
obj-$(CONFIG_KGDB_TESTS) += kgdbts.o
obj-$(CONFIG_SGI_XP) += sgi-xp/
obj-$(CONFIG_SGI_GRU) += sgi-gru/
obj-$(CONFIG_CS5535_MFGPT) += cs5535-mfgpt.o
obj-$(CONFIG_HP_ILO) += hpilo.o
obj-$(CONFIG_APDS9802ALS) += apds9802als.o
obj-$(CONFIG_ISL29003) += isl29003.o
obj-$(CONFIG_ISL29020) += isl29020.o
obj-$(CONFIG_SENSORS_TSL2550) += tsl2550.o
obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
obj-$(CONFIG_DS1682) += ds1682.o
obj-$(CONFIG_TI_DAC7512) += ti_dac7512.o
obj-$(CONFIG_UID_STAT) += uid_stat.o
obj-$(CONFIG_C2PORT) += c2port/
obj-$(CONFIG_HMC6352) += hmc6352.o
obj-y += eeprom/
obj-y += cb710/
obj-$(CONFIG_SPEAR13XX_PCIE_GADGET) += spear13xx_pcie_gadget.o
obj-$(CONFIG_VMWARE_BALLOON) += vmw_balloon.o
obj-$(CONFIG_ARM_CHARLCD) += arm-charlcd.o
obj-$(CONFIG_PCH_PHUB) += pch_phub.o
obj-y += ti-st/
obj-y += lis3lv02d/
obj-y += carma/
obj-$(CONFIG_USB_SWITCH_FSA9480) += fsa9480.o
obj-$(CONFIG_ALTERA_STAPL) +=altera-stapl/
obj-$(CONFIG_INTEL_MEI) += mei/
obj-$(CONFIG_VMWARE_VMCI) += vmw_vmci/
obj-$(CONFIG_LATTICE_ECP3_CONFIG) += lattice-ecp3-config.o
obj-$(CONFIG_SRAM) += sram.o
obj-$(CONFIG_SENSORS_MMA8452) += mma8452.o
obj-$(CONFIG_SENSORS_MMA7660) += mma7660.o
obj-$(CONFIG_SENSORS_LIS3DH) += lis3dh_acc.o
obj-$(CONFIG_SENSORS_MMC328x) += mmc328x.o
obj-$(CONFIG_ECOMPASS) += mecs.o
obj-$(CONFIG_BOSCH_BMA250) += bma250.o
obj-$(CONFIG_BOSCH_BMA250_MEMSIC) += bma250_memsic.o
obj-$(CONFIG_BOSCH_BMA222) += bma222.o
obj-$(CONFIG_SENSORS_MC32X0) += mc32x0.o
obj-$(CONFIG_SENSORS_DMARD06) += dmard06.o
obj-$(CONFIG_SENSORS_AFA750) += afa750.o
obj-$(CONFIG_CALI_NEGATIVE) += afa750_cali_i2c.o
obj-$(CONFIG_CALI_POSITIVE) += afa750_cali_i2c_positive.o
obj-$(CONFIG_SENSORS_MMA865X) += mma865x.o
obj-$(CONFIG_SENSORS_CM36283) += cm36283.o
obj-$(CONFIG_SENSORS_CM3232) += cm3232.o
obj-$(CONFIG_SENSORS_MMC328xMA) += mmc328x.o
obj-$(CONFIG_SENSORS_LSM303D) += lsm303d.o
obj-$(CONFIG_LIGHTSENSOR_EPL6814)+= elan_epl6814.o
obj-$(CONFIG_SENSORS_STK8313) += stk831x.o
obj-$(CONFIG_SENSORS_STK8312) += stk831x.o
obj-$(CONFIG_SENSORS_DMARD10) += dmt10.o
obj-$(CONFIG_SENSORS_KXTJ9) += kionix_accel.o
obj-$(CONFIG_SENSORS_LTR501) += ltr501_als.o
obj-$(CONFIG_MPU3050_v4_1_2) += inv_mpu/
obj-$(CONFIG_SENSORS_MM3A310) += mm3a310.o
|
jjm2473/AMLOGIC_M8
|
drivers/misc/Makefile
|
Makefile
|
gpl-2.0
| 3,280
|
<?php
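// Ambulance request screen: handles insert/edit/delete of ambulance requests,
// shows status messages and renders the request-list and add/edit-form tabs.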
$obj_ambulance = new Hmgt_ambulance();
if(isset($_REQUEST['save_ambulance']))
{
if(isset($_REQUEST['action']) && ($_REQUEST['action'] == 'insert' || $_REQUEST['action'] == 'edit'))
{
$result = $obj_ambulance->hmgt_add_ambulance($_POST);
if($result)
{
if($_REQUEST['action'] == 'edit')
{
?><div id="message" class="updated below-h2"><?php
_e("Record updated successfully",'hospital_mgt');
?>
</div>
<?php }
else
{?>
<div id="message" class="updated below-h2">
<?php
_e('Record inserted successfully','hospital_mgt');
?></div><?php }
}
}
}
if(isset($_REQUEST['save_ambulance_request']))
{
if(isset($_REQUEST['action']) && ($_REQUEST['action'] == 'insert' || $_REQUEST['action'] == 'edit'))
{
$result = $obj_ambulance->hmgt_add_ambulance_request($_POST);
if($result)
{
if($_REQUEST['action'] == 'edit')
{
wp_redirect ( home_url() . '?dashboard=user&page=ambulance&tab=ambulance_req_list&message=2');
}
else
{
wp_redirect ( home_url() . '?dashboard=user&page=ambulance&tab=ambulance_req_list&message=1');
}
}
}
}
if(isset($_REQUEST['action']) && $_REQUEST['action'] == 'delete')
{
if($_GET['page'] == 'ambulance')
{
$result = $obj_ambulance->delete_ambulance_req($_REQUEST['amb_req_id']);
}
if($result)
{
wp_redirect ( home_url() . '?dashboard=user&page=ambulance&tab=ambulance_req_list&message=3');
}
}
$edit=0;
if(isset($_REQUEST['action']) && $_REQUEST['action'] == 'edit'){
$edit=1;
$result= $obj_ambulance->get_single_ambulance_req($_REQUEST['amb_req_id']);
}
if(isset($_REQUEST['message']))
{
$message =$_REQUEST['message'];
if($message == 1)
{?>
<div id="message" class="updated below-h2 ">
<p>
<?php
_e('Record inserted successfully','hospital_mgt');
?></p></div>
<?php
}
elseif($message == 2)
{?><div id="message" class="updated below-h2 "><p><?php
_e("Record updated successfully.",'hospital_mgt');
?></p>
</div>
<?php
}
elseif($message == 3)
{?>
<div id="message" class="updated below-h2"><p>
<?php
_e('Record deleted successfully','hospital_mgt');
?></p></div><?php
}
}
$active_tab = isset($_GET['tab'])?$_GET['tab']:'ambulance_req_list';
?>
<script type="text/javascript">
$(document).ready(function() {
$('.request_time').timepicker();
$('.dispatch_time').timepicker();
$('#request_date').datepicker({
changeMonth: true,
changeYear: true,
dateFormat: 'yy-mm-dd',
yearRange:'-65:+0',
onChangeMonthYear: function(year, month, inst) {
$(this).val(month + "/" + year);
}
});
} );
</script>
<div class="panel-body panel-white">
<ul class="nav nav-tabs panel_tabs" role="tablist">
<li class="<?php if($active_tab == 'ambulance_req_list'){?>active<?php }?>">
<a href="?dashboard=user&page=ambulance&tab=ambulance_req_list">
<i class="fa fa-align-justify"></i> <?php _e('Request List', 'hospital_mgt'); ?></a>
</a>
</li>
<li class="<?php if($active_tab == 'add_ambulance_req'){?>active<?php }?>">
<a href="?dashboard=user&page=ambulance&tab=add_ambulance_req">
<i class="fa fa-plus-circle"></i>
<?php
if(isset($_REQUEST['action']) && $_REQUEST['action'] =='edit')
_e('Edit Request', 'hospital_mgt');
else
_e('Add Request', 'hospital_mgt');
?></a>
</li>
</ul>
<script type="text/javascript">
$(document).ready(function() {
jQuery('#ambulance_list').DataTable({
"order": [[ 2, "Desc" ]],
"aoColumns":[
{"bSortable": true},
{"bSortable": true},
{"bSortable": true},
{"bSortable": true},
{"bSortable": true},
{"bSortable": false}
]
});
} );
</script>
<div class="tab-content">
<?php if($active_tab == 'ambulance_req_list'){?>
<div class="tab-pane fade active in" id="prescription">
<?php
// $retrieve_class = get_all_data($tablename);
?>
<div class="panel-body">
<div class="table-responsive">
<table id="ambulance_list" class="display dataTable" cellspacing="0" width="100%">
<thead>
<tr>
<th><?php _e( 'Ambulance', 'hospital_mgt' ) ;?></th>
<th><?php _e( 'Patient', 'hospital_mgt' ) ;?></th>
<th><?php _e( 'Date', 'hospital_mgt' ) ;?></th>
<th><?php _e( 'Time', 'hospital_mgt' ) ;?></th>
<th><?php _e( 'Dispatch Time', 'hospital_mgt' ) ;?></th>
<th><?php _e( 'Action', 'hospital_mgt' ) ;?></th>
</tr>
</thead>
<tfoot>
<tr>
<th><?php _e( 'Ambulance', 'hospital_mgt' ) ;?></th>
<th><?php _e( 'Patient', 'hospital_mgt' ) ;?></th>
<th><?php _e( 'Date', 'hospital_mgt' ) ;?></th>
<th><?php _e( 'Time', 'hospital_mgt' ) ;?></th>
<th><?php _e( 'Dispatch Time', 'hospital_mgt' ) ;?></th>
<th><?php _e( 'Action', 'hospital_mgt' ) ;?></th>
</tr>
</tfoot>
<tbody>
<?php
$ambulancereq_data=$obj_ambulance->get_all_ambulance_request();
if(!empty($ambulancereq_data))
{
foreach ($ambulancereq_data as $retrieved_data){
$patient_data = get_user_detail_byid($retrieved_data->patient_id);?>
<tr>
<td class="ambulanceid"><?php echo $obj_ambulance->get_ambulance_id($retrieved_data->ambulance_id);?></td>
<td class="patient"><?php echo $patient_data['first_name']." ".$patient_data['last_name']."(".$patient_data['patient_id'].")";?></td>
<td class="date"><?php echo $retrieved_data->request_date;?></td>
<td class="time"><?php echo $retrieved_data->request_time;?></td>
<td class="dispatchtime"><?php echo $retrieved_data->dispatch_time;?></td>
<td class="action">
<a href="?dashboard=user&page=ambulance&tab=add_ambulance_req&action=edit&amb_req_id=<?php echo $retrieved_data->amb_req_id;?>" class="btn btn-info"> <?php _e('Edit', 'hospital_mgt' ) ;?></a>
<a href="?dashboard=user&page=ambulance&tab=ambulance_req_list&action=delete&amb_req_id=<?php echo $retrieved_data->amb_req_id;?>" class="btn btn-danger"
onclick="return confirm('<?php _e('Are you sure you want to delete this record?','hospital_mgt');?>');">
<?php _e( 'Delete', 'hospital_mgt' ) ;?> </a>
</td>
</tr>
<?php }
}?>
</tbody>
</table>
</div>
</div>
</div>
<?php }
if($active_tab == 'add_ambulance_req'){
?>
<div class="tab-pane fade active in" id="add_req">
<script type="text/javascript">
$(document).ready(function() {
$('#patient_form').validationEngine();
$('#request_date').datepicker({
changeMonth: true,
changeYear: true,
dateFormat: 'yy-mm-dd',
yearRange:'-65:+0',
onChangeMonthYear: function(year, month, inst) {
$(this).val(month + "/" + year);
}
});
} );
</script>
<div class="panel-body">
<form name="patient_form" action="" method="post" class="form-horizontal" id="patient_form">
<?php $action = isset($_REQUEST['action'])?$_REQUEST['action']:'insert';?>
<input type="hidden" name="action" value="<?php echo $action;?>">
<input type="hidden" name="amb_req_id" value="<?php if(isset($_REQUEST['amb_req_id']))echo $_REQUEST['amb_req_id'];?>" />
<div class="form-group">
<label class="col-sm-2 control-label" for="ambulance_id"><?php _e('Ambulance','hospital_mgt');?><span class="require-field">*</span></label>
<div class="col-sm-8">
<select name="ambulance_id" class="form-control validate[required] " id="ambulance_id">
<option value=""><?php _e('select Ambulance','hospital_mgt');?></option>
<?php
if($edit)
$amb_id = $result->ambulance_id;
elseif(isset($_REQUEST['ambulance_id']))
$amb_id = $_REQUEST['ambulance_id'];
else
$amb_id = "";
$ambulance_data=$obj_ambulance->get_all_ambulance();
if(!empty($ambulance_data))
{
foreach ($ambulance_data as $retrieved_data)
{
echo '<option value = '.$retrieved_data->amb_id.' '.selected($amb_id,$retrieved_data->amb_id).'>'.$retrieved_data->ambulance_id.'</option>';
}
}
?>
</select>
</div>
</div>
<div class="form-group">
<label class="col-sm-2 control-label" for="patient_id"><?php _e('Patient','hospital_mgt');?></label>
<div class="col-sm-8">
<select name="patient_id" id="patient_id" class="form-control validate[required] ">
<option><?php _e('Select Patient','hospital_mgt');?></option>
<?php
if($edit)
$patient_id1 = $result->patient_id;
elseif(isset($_REQUEST['patient_id']))
$patient_id1 = $_REQUEST['patient_id'];
else
$patient_id1 = "";
$patients = hmgt_patientid_list();
//print_r($patient);
if(!empty($patients))
{
foreach($patients as $patient)
{
echo '<option value="'.$patient['id'].'" '.selected($patient_id1,$patient['id']).'>'.$patient['patient_id'].' - '.$patient['first_name'].' '.$patient['last_name'].'</option>';
}
}
?>
</select>
</div>
</div>
<div class="form-group">
<label class="col-sm-2 control-label" for="address"><?php _e('Address','hospital_mgt');?><span class="require-field">*</span></label>
<div class="col-sm-8">
<textarea name = "address" id="address" class="form-control validate[required]"><?php if($edit){ echo $result->address;}elseif(isset($_POST['address'])) echo $_POST['address'];?></textarea>
</div>
</div>
<div class="form-group">
<label class="col-sm-2 control-label" for="charges"><?php _e('Charges','hospital_mgt');?><span class="require-field">*</span></label>
<div class="col-sm-8">
<input id="charges" class="form-control validate[required]" type="text" value="<?php if($edit){ echo $result->charge;}elseif(isset($_POST['charge'])) echo $_POST['charge'];?>" name="charge">
</div>
</div>
<div class="form-group">
<label class="col-sm-2 control-label" for="request_date"><?php _e('Request Date','hospital_mgt');?><span class="require-field">*</span></label>
<div class="col-sm-8">
<input id="request_date" class="form-control validate[required]" type="text" value="<?php if($edit){ echo $result->request_date;}elseif(isset($_POST['request_date'])) echo $_POST['request_date'];?>" name="request_date">
</div>
</div>
<div class="form-group">
<label class="col-sm-2 control-label" for="request_time"><?php _e('Request Time','hospital_mgt');?></label>
<div class="col-sm-8">
<input id="request_time" class="form-control request_time" type="text" data-show-meridian="false" data-default-time="00:15" value="<?php if($edit){ echo $result->request_time;}elseif(isset($_POST['request_time'])) echo $_POST['request_time'];?>" name="request_time">
</div>
</div>
<div class="form-group">
<label class="col-sm-2 control-label" for="dispatch_time"><?php _e('Dispatch Time','hospital_mgt');?></label>
<div class="col-sm-8">
<input id="dispatch_time" class="form-control dispatch_time" data-show-meridian="false" data-minute-step="15"type="text" value="<?php if($edit){ echo $result->dispatch_time;}elseif(isset($_POST['dispatch_time'])) echo $_POST['dispatch_time'];?>" name="dispatch_time">
</div>
</div>
<div class="col-sm-offset-2 col-sm-8">
<input type="submit" value="<?php if($edit){ _e('Save Request','hospital_mgt'); }else{ _e('Add Ambulance Request','hospital_mgt');}?>" name="save_ambulance_request" class="btn btn-success"/>
</div>
</form>
</div>
</div>
<?php }?>
</div>
</div>
<?php ?>
|
ahmedalaahagag/HospitalManagementSystem
|
wp-content/plugins/hospital-management/template/ambulance.php
|
PHP
|
gpl-2.0
| 11,992
|
<?php
defined('_JEXEC') or die;
if(!defined('DS'))
{
define( 'DS', DIRECTORY_SEPARATOR );
}
?>
<?php
JLoader::import('joomla.filesystem.file');
define( 'YOURBASEPATH', dirname(__FILE__) );
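// YOURBASEPATH resolves to this template's directory; the includes below are loaded relative to it.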
require(YOURBASEPATH . DS . "includes/var.php");
?>
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="<?php echo $this->language; ?>" lang="<?php echo $this->language; ?>" dir="<?php echo $this->direction; ?>">
<head>
<meta name="viewport" content="width=device-width, initial-scale=1.0" />
<jdoc:include type="head" />
<?php
require(YOURBASEPATH . DS . "includes/var_js.php");
require(YOURBASEPATH . DS . "includes/ict_slide_load.php");
require(YOURBASEPATH . DS . "includes/css_style.php");
require(YOURBASEPATH . DS . "includes/css3_style.php");
?>
</head>
<body class="site <?php echo $option . " view-" . $view . " layout-" . $layout . " task-" . $task . " itemid-" . $itemid . " ";?> <?php if ($this->params->get('fluidContainer')) { echo "fluid"; } ?>">
<!-- Body -->
<div class="body">
<div class="container<?php if ($this->params->get('fluidContainer')) { echo "-fluid"; } ?>">
<!-- ///////////////// START extra CONTAINER INNER div ////////////////// -->
<!-- //////////////////////////////////////////////////////////////////// -->
<div id="container_inner">
<!-- ///////////////// START HEADER ////////////////// -->
<!-- //////////////////////////////////////////////////// -->
<div class="header">
<div class="header-inner">
<a class="brand pull-left" href="<?php echo $this->baseurl; ?>">
<?php echo $logo;?> <?php if ($this->params->get('sitedescription'))
{ echo '<div class="site-description">'. htmlspecialchars($this->params->get('sitedescription')) .'</div>'; } ?>
</a>
<div class="header-search pull-right">
<jdoc:include type="modules" name="search" style="none" />
</div>
<div class="clearfix"></div>
</div>
</div>
<!-- ///////////////// END HEADER ////////////////// -->
<!-- //////////////////////////////////////////////////// -->
<!-- ///////////////// START SOCIAL MEDIA ////////////////// -->
<!-- //////////////////////////////////////////////////// -->
<?php
require(YOURBASEPATH . DS . "includes/socialmedia.php");
?>
<!-- ///////////////// END SOCIAL MEDIA ////////////////// -->
<!-- //////////////////////////////////////////////////// -->
<!-- ///////////////////// START HOR MENU /////////////////// -->
<!-- //////////////////////////////////////////////////// -->
<div id="hor_nav_outer">
<div id="hor_nav"><?php echo $top_hornav; ?></div>
</div>
<jdoc:include type="modules" name="banner" style="xhtml" />
<div class="row-fluid">
<!-- ///////////////////// START LEFT COLUMN ///////////// -->
<!-- //////////////////////////////////////////////////// -->
<?php if ($mod_left) { ?>
<div id="sidebar" class="span3">
<div class="sidebar-nav">
<jdoc:include type="modules" name="left" style="well" />
<jdoc:include type="modules" name="dropdown" style="style_hd" headerLevel="3" state="0" />
</div>
</div>
<?php } ?>
<!-- ///////////////////// END LEFT COLUMN ///////////// -->
<!-- //////////////////////////////////////////////////// -->
<!-- ///////////////// START CONTENT ////////////////// -->
<!-- //////////////////////////////////////////////////// -->
<div id="content" class="<?php echo $span;?>">
<!-- ///////////////// START SLIDESHOW ////////////////// -->
<?php
if ($menu->getActive() == $menu->getDefault())
{
require(YOURBASEPATH . DS . "includes/ict_slideshow.php");
}
?>
<!-- ///////////////// END SLIDESHOW ///////////////////// -->
<jdoc:include type="message" />
<!--
<?php if ($this->countModules('breadcrumbs')) : ?>
<jdoc:include type="modules" name="breadcrumbs" style="none" />
<?php endif; ?>
-->
<!-- //////////////// START COMPONENT /////////////////// -->
<?php if ($ict_frontpage == "yes") { ?>
<jdoc:include type="component" />
<?php } ?>
<?php if ($this->countModules('tabs')): ?>
<jdoc:include type="modules" name="tabs" style="style_tb" headerLevel="2" id="3" />
<?php endif; ?>
<!-- //////////////// END COMPONENT /////////////////// -->
</div>
<!-- ///////////////// END CONTENT ////////////////// -->
<!-- //////////////////////////////////////////////////// -->
<!-- ///////////////////// START RIGHT COLUMN ///////////// -->
<!-- //////////////////////////////////////////////////// -->
<?php if ($mod_right || $mod_dropdown) { ?>
<div id="aside" class="span3">
<jdoc:include type="modules" name="right" style="well" />
</div>
<?php } ?>
</div>
<!-- ///////////////////// END RIGHT COLUMN ///////////// -->
<!-- //////////////////////////////////////////////////// -->
<?php if ($this->countModules('shownews')): ?>
<jdoc:include type="modules" name="shownews" style="well"/>
<?php endif; ?>
<!-- start footer 960px -->
<!-- ///////////////// START BOTTOM MODULES ////////////////// -->
<?php if ($this->countModules('bottom1 + bottom2 + bottom3 + bottom4')) { ?>
<div id="footer_inner" class="row">
<div class="<?php echo $footerspan;?>">
<jdoc:include type="modules" name="bottom1" style="well" />
</div>
<div class="<?php echo $footerspan;?>">
<jdoc:include type="modules" name="bottom2" style="well" />
</div>
<div class="<?php echo $footerspan;?>">
<jdoc:include type="modules" name="bottom3" style="well" />
</div>
<div class="<?php echo $footerspan;?>">
<jdoc:include type="modules" name="bottom4" style="well" />
</div>
</div>
<?php } ?>
<!-- END BOTTOM MODULES -->
<!-- end footer 960px -->
</div>
<!-- ///////////////// END extra CONTAINER INNER div //////////////////// -->
<!-- //////////////////////////////////////////////////////////////////// -->
</div>
</div>
<!-- ///////////////// START FOOTER ////////////////// -->
<!-- ///////////////////////////////////////////////// -->
<div class="footer">
<div class="container<?php if ($this->params->get('fluidContainer')) { echo "-fluid"; } ?>">
<!-- start footer 100% -->
<!-- end footer 100% -->
<hr />
<jdoc:include type="modules" name="footer" style="none" />
<p class="pull-right"><a href="#top" id="back-top"><?php echo JText::_('TPL_BACKTOTOP'); ?></a></p>
<p>© <?php echo $sitename; ?> <?php echo date('Y');?></p>
</div>
</div>
<!-- ///////////////// END FOOTER ////////////////// -->
<!-- ////////////////////////////////////////////// -->
<jdoc:include type="modules" name="debug" style="none" />
</body>
</html>
|
tomlagier/SLORTA
|
templates/at_newscenter/index.php
|
PHP
|
gpl-2.0
| 6,315
|
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<title>QuaZIP: Member List</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<link href="doxygen.css" rel="stylesheet" type="text/css"/>
</head>
<body>
<!-- Generated by Doxygen 1.7.4 -->
<div id="top">
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
<tbody>
<tr style="height: 56px;">
<td style="padding-left: 0.5em;">
<div id="projectname">QuaZIP <span id="projectnumber">quazip-0-4-4</span></div>
</td>
</tr>
</tbody>
</table>
</div>
<div id="navrow1" class="tabs">
<ul class="tablist">
<li><a href="index.html"><span>Main Page</span></a></li>
<li><a href="pages.html"><span>Related Pages</span></a></li>
<li class="current"><a href="annotated.html"><span>Classes</span></a></li>
<li><a href="files.html"><span>Files</span></a></li>
<li><a href="dirs.html"><span>Directories</span></a></li>
</ul>
</div>
<div id="navrow2" class="tabs2">
<ul class="tablist">
<li><a href="annotated.html"><span>Class List</span></a></li>
<li><a href="inherits.html"><span>Class Hierarchy</span></a></li>
<li><a href="functions.html"><span>Class Members</span></a></li>
</ul>
</div>
</div>
<div class="header">
<div class="headertitle">
<div class="title">JlCompress Member List</div> </div>
</div>
<div class="contents">
This is the complete list of members for <a class="el" href="classJlCompress.html">JlCompress</a>, including all inherited members.<table>
<tr class="memlist"><td><a class="el" href="classJlCompress.html#a8708eafcadc5c192a1d492e784cfc98f">compressDir</a>(QString fileCompressed, QString dir=QString(), bool recursive=true)</td><td><a class="el" href="classJlCompress.html">JlCompress</a></td><td><code> [static]</code></td></tr>
<tr class="memlist"><td><a class="el" href="classJlCompress.html#a4a4de9c62ecf161bb658d4d80495ea97">compressFile</a>(QString fileCompressed, QString file)</td><td><a class="el" href="classJlCompress.html">JlCompress</a></td><td><code> [static]</code></td></tr>
<tr class="memlist"><td><a class="el" href="classJlCompress.html#a9cdb92d29a94c6b13a718a3249685846">compressFiles</a>(QString fileCompressed, QStringList files)</td><td><a class="el" href="classJlCompress.html">JlCompress</a></td><td><code> [static]</code></td></tr>
<tr class="memlist"><td><a class="el" href="classJlCompress.html#a365a153baa4c11812d93cbca60b6a293">extractDir</a>(QString fileCompressed, QString dir=QString())</td><td><a class="el" href="classJlCompress.html">JlCompress</a></td><td><code> [static]</code></td></tr>
<tr class="memlist"><td><a class="el" href="classJlCompress.html#a38c0d58bfe3bbbcb3cf4e98d126633a3">extractFile</a>(QString fileCompressed, QString fileName, QString fileDest=QString())</td><td><a class="el" href="classJlCompress.html">JlCompress</a></td><td><code> [static]</code></td></tr>
<tr class="memlist"><td><a class="el" href="classJlCompress.html#a309e9ee366719a4d8aa28f837fab73ae">extractFiles</a>(QString fileCompressed, QStringList files, QString dir=QString())</td><td><a class="el" href="classJlCompress.html">JlCompress</a></td><td><code> [static]</code></td></tr>
<tr class="memlist"><td><a class="el" href="classJlCompress.html#ab42422be913f817d7e04c1b1cd5d0156">getFileList</a>(QString fileCompressed)</td><td><a class="el" href="classJlCompress.html">JlCompress</a></td><td><code> [static]</code></td></tr>
</table></div>
<hr class="footer"/><address class="footer"><small>Generated on Sat Jan 14 2012 14:54:06 for QuaZIP by 
<a href="http://www.doxygen.org/index.html">
<img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.7.4 </small></address>
</body>
</html>
|
Kurios/mudlet-dev
|
src/lib/quazip-0.4.4/doc/html/classJlCompress-members.html
|
HTML
|
gpl-2.0
| 3,984
|
<?php
/**
* cc.elearning.dev.com (e-learning page for creating/editing course applications)
* Index document that receives all requests
* @author Robert Leonhardt
* @date 2014/08/12
* @version 0.2
*/
## Start the session
$Session = new Session;
## Start the Moodle API
$MoodleAPI = new MoodleAPI;
## Define the request constants usable for further processing
define( 'REQUEST_DO', Request::DATA( 'do', false ) );
define( 'REQUEST_VIEW', Request::DATA( 'view', 'main' ) );
## Load the controller, pass ?do=foo&view=bar, validation is handled by the class
require_once( new Controller( REQUEST_DO, REQUEST_VIEW ) );
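## Illustrative request, not taken from this codebase: a URL such as
## index.php?do=save&view=course would make REQUEST_DO = 'save' and
## REQUEST_VIEW = 'course'; without parameters, REQUEST_DO stays false and
## REQUEST_VIEW falls back to 'main' (the defaults passed to Request::DATA above).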
?>
|
relecand/moodle-course-order
|
index.php
|
PHP
|
gpl-2.0
| 668
|
\documentclass[cjk,dvipdfmx,12pt,%
hyperref={bookmarks=true,bookmarksnumbered=true,bookmarksopen=false,%
colorlinks=false,%
pdftitle={The 57th Kansai Debian Meeting},%
pdfauthor={Kurashiki, Nogata, Kawada, Sasaki},%
%pdfinstitute={Kansai Debian Meeting},%
pdfsubject={Handout},%
}]{beamer}
\title{The 57th Kansai Debian Meeting}
\subtitle{{\scriptsize Handout}}
\author[Youhei Sasaki]{{\large\bf Kurashiki, Nogata, Kawada, Sasaki}}
\institute[Debian JP]{{\normalsize\tt Kansai Debian Meeting}}
\date{{\small March 25, 2012}}
%\usepackage{amsmath}
%\usepackage{amssymb}
\usepackage{graphicx}
\usepackage{moreverb}
\usepackage[varg]{txfonts}
\AtBeginDvi{\special{pdf:tounicode EUC-UCS2}}
\usetheme{Kyoto}
\def\museincludegraphics{%
\begingroup
\catcode`\|=0
\catcode`\\=12
\catcode`\#=12
\includegraphics[width=0.9\textwidth]}
%\renewcommand{\familydefault}{\sfdefault}
%\renewcommand{\kanjifamilydefault}{\sfdefault}
\begin{document}
\settitleslide
\begin{frame}
\titlepage
\end{frame}
\setdefaultslide
\begin{frame}[fragile]
\frametitle{Agenda}
\tableofcontents
\end{frame}
\section{Recent Debian-related events}
\takahashi[40]{Recent Debian-\\related events}
\begin{frame}[fragile]
\frametitle{The 56th Kansai Debian Meeting}
\begin{itemize}
\item Date: February 26
\item Venue: Osaka Fukushima Ward Community Center
\end{itemize}
\begin{block}{Topics}
\begin{itemize}
\item A multi-user environment built with Autofs and pam\_chroot
\item Monthly(?) t-code
\item Monthly Debian Policy
\end{itemize}
\end{block}
We are collecting topic ideas at any time! Everyone, please contribute!!
\end{frame}
\begin{frame}[fragile]
\frametitle{The 86th Tokyo Area Debian Meeting}
\begin{itemize}
\item Date: March 17
\item Venue: OSC 2012 Tokyo Spring
\end{itemize}
\begin{block}{Topics}
\begin{itemize}
\item Booth exhibition
\item Session: ``Starting Debian from the Apache2 HTTP server''
\begin{itemize}
\item ``A study session on Apache2 on Debian, the most widely adopted Linux
distribution for web servers, told from a user's point of view''
\end{itemize}
\end{itemize}
\end{block}
\end{frame}
\takahashi[50]{With that\\said}
\takahashi[120]{Next}
\section{Presentation of the pre-meeting assignments}
\takahashi[50]{Assignments}
\begin{frame}[fragile]
\frametitle{Pre-meeting assignments}
\begin{block}{This time's pre-meeting assignments}
\begin{description}
\item[Assignment 1] Please read chapter 5 of the Debian Policy.
\item[Assignment 2] Fetch one package and read its
debian/control file. Briefly go through the contents of that
package's control file, look up anything you do not understand,
and present it on the day.
\item[Assignment 3] Through which media and by which methods would you like
the meeting announcements to be published?
\end{description}
\end{block}
\end{frame}
\takahashi[50]{Assignment\\presentations}
\begin{frame}[fragile]
\frametitle{ Shinji Enoki }
\begin{center}
(no answer)
\end{center}
\end{frame}
\begin{frame}[fragile]
\frametitle{ Yasunari Yamashita }
\begin{center}
Cough, cough!
\end{center}
\end{frame}
\begin{frame}[fragile]
\frametitle{ Kawae }
\begin{enumerate}
\item I will read it.
\item I will do my best.
\item I think the current approach is fine, but speaking for myself as a
``busy'' person, I would appreciate a way to take part casually.
\end{enumerate}
\end{frame}
\begin{frame}[fragile]
\frametitle{ Tetsutaro Kawada }
\begin{enumerate}
\item I will read it.
\item I read the control file of cpp.
\begin{itemize}
\item On sid:amd64 this seems to be the only package with "Multi-Arch: allowed". I still do not quite understand in which cases it should be specified.
\end{itemize}
\item The Debian JP site...
\end{enumerate}
\end{frame}
\begin{frame}[fragile]
\frametitle{ kozo2 }
\begin{enumerate}
\item I will read it.
\item I will read t-code's control file.
\item For media and methods, I can only think of twitter and the ML.
\end{enumerate}
\end{frame}
\begin{frame}[fragile]
\frametitle{ Youhei Yamada }
\begin{enumerate}
\item I will read it before the day.
\item I will read it before the day.
\item Calendar events
\end{enumerate}
\end{frame}
\begin{frame}[fragile]
\frametitle{ Tadanori Sakai }
\begin{enumerate}
\item I skimmed it.
\item I skimmed the debian/control file of ruby1.8.
In connection with Konoha, which I will be presenting this time,
my questions about the debian/control file are listed below. (See the next slide.)
\item I think the current approach is fine, but using something like ATND might catch the attention of more people.
\end{enumerate}
\end{frame}
\begin{frame}[fragile]
\frametitle{ Tadanori Sakai }
\begin{description}
\item[Maintainer]
There seem to be problems when the name part of the email address
contains periods. Is it correct to assume that anything other than
the following cases is fine?
\begin{itemize}
\item consecutive periods
\item a period immediately before the '@'
\end{itemize}
\item[Priority]
For a new package that does not conflict with anything else,
is it correct to assume that optional is fine?
\item[Architecture]
Is it correct to assume that the architectures specified here are the
ones that will actually be auto-built and placed in the Debian
archive?
\end{description}
\end{frame}
\begin{frame}[fragile]
\frametitle{ Hiroshi Kubo, resident of the old Yamashiro Province }
\begin{itemize}
\item Yes, I will read it.
\item Somehow, before the day.
\item On twitter, with hashtags of words closely related to the talks
\end{itemize}
\end{frame}
\begin{frame}[fragile]
\frametitle{ Tomohiro Yoshida }
\begin{itemize}
\item I went through the Japanese translation by the Debian JP Project.
\item I looked at e2wm's debian/control.
\item I think the Debian JP mailing list is fine.
\end{itemize}
\end{frame}
\begin{frame}[fragile]
\frametitle{ Jun Nogata }
I am coming back from OSC Ehime by local train on Sunday morning, so I will not make it.
\begin{itemize}
\item We used to post announcements on mixi as well; nowadays it would be Google+ or Facebook, but neither feels like a great fit, so I suspect twitter is about it.
\end{itemize}
\end{frame}
\begin{frame}[fragile]
\frametitle{ yyatsuo }
3. No particular problems with the way things are now.
\end{frame}
\begin{frame}[fragile]
\frametitle{ Youhei Sasaki }
\begin{itemize}
\item Understood.
\item I have gone through the control files under
{\scriptsize{\tt{git.debian.org:/git/pkg-ruby-extras/pkg-ruby-extras.git}}}.
\item Automating twitter notifications aside, is there anything else?
(Assuming it can be automated reasonably rather than done by hand.)
It would be nice to send the announcement plus a reminder before the deadline.
Any other ideas that might help us attract new members(?)
\end{itemize}
\end{frame}
\takahashi[50]{With that\\said}
\takahashi[120]{Next}
\section{On packaging Konoha for Debian}
\takahashi[25]{On packaging Konoha for Debian\\by\\Tadanori Sakai}
\takahashi[50]{With that\\said}
\takahashi[120]{Next}
\section{Monthly t-code package fixes}
\takahashi[25]{Monthly t-code package fixes \\ by \\Kozo Nishida}
\section{Monthly Debian Policy, part 2: ``About the control file''}
\takahashi[25]{Monthly Debian Policy, part 2: ``About the control file'' \\by\\ Yusuke Yatsuo}
\section{Topics and schedule from the new fiscal year}
\takahashi[25]{Topics and schedule from the new fiscal year \\by\\moderator: Youhei Sasaki}
\takahashi[25]{$<$digression$>$}
\takahashi[50]{The pxdvi package is ready, Yo!!}
\begin{frame}[fragile]
\frametitle{pxdvi: build}
If you run unstable, please test it:
\begin{commandline}
$ aptitude install git-buildpackage
$ gbp-clone http://dennou-k.gfd-dennou.org/member/uwabami/tmp/pxdvi.git
$ cd pxdvi
$ git-buildpackage
\end{commandline}
\end{frame}
\takahashi[25]{$<$/digression$>$}
\begin{frame}[fragile]
\frametitle{Topics / schedule / announcements}
\begin{itemize}
\item Topics
\begin{itemize}
\item See the handout
\end{itemize}
\item Schedule and announcements
\begin{itemize}
\item ML: one week before, and just before the deadline.
\begin{itemize}
\item What is the deadline for, anyway? Just for printing?
\item What about OSC-Kansai and Ubuntu-JP?
\end{itemize}
\item Web announcements, twitter. And the IT study-group calendar, while we are at it?
\end{itemize}
\end{itemize}
\end{frame}
\takahashi[50]{With that\\said}
\takahashi[120]{Next}
\begin{frame}[fragile]
\frametitle{Upcoming schedule}
\begin{block}{The 58th Kansai Debian Meeting}
\begin{itemize}
\item Date: April 22
\item Venue: Fukushima Ward Community Center
\item Topics: Monthly Debian Policy, monthly t-code(?), licenses and copyright(?)
\end{itemize}
\end{block}
\end{frame}
\takahashi[50]{ }
\end{document}
%%% Local Variables:
%%% mode: japanese-latex
%%% TeX-master: t
%%% End:
|
dancerj/monthly-report
|
2012/debianmeetingresume201203-kansai-presentation.tex
|
TeX
|
gpl-2.0
| 9,803
|
# server-based syntax
# ======================
# Defines a single server with a list of roles and multiple properties.
# You can define all roles on a single server, or split them:
server 'louisecrow.com', user: 'deploy', roles: %w{app db web}
# server 'example.com', user: 'deploy', roles: %w{app web}, other_property: :other_value
# server 'db.example.com', user: 'deploy', roles: %w{db}
# role-based syntax
# ==================
# Defines a role with one or multiple servers. The primary server in each
# group is considered to be the first unless any hosts have the primary
# property set. Specify the username and a domain or IP for the server.
# Don't use `:all`, it's a meta role.
# role :app, %w{deploy@example.com}, my_property: :my_value
# role :web, %w{user1@primary.com user2@additional.com}, other_property: :other_value
# role :db, %w{deploy@example.com}
# Configuration
# =============
# You can set any configuration variable like in config/deploy.rb
# These variables are then only loaded and set in this stage.
# For available Capistrano configuration variables see the documentation page.
# http://capistranorb.com/documentation/getting-started/configuration/
# Feel free to add new variables to customise your setup.
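# For example (illustrative values, not part of this deployment):
# set :branch, 'master'
# set :rails_env, 'production'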
# Custom SSH Options
# ==================
# You may pass any option but keep in mind that net/ssh understands a
# limited set of options, consult the Net::SSH documentation.
# http://net-ssh.github.io/net-ssh/classes/Net/SSH.html#method-c-start
#
# Global options
# --------------
# set :ssh_options, {
# keys: %w(/home/rlisowski/.ssh/id_rsa),
# forward_agent: false,
# auth_methods: %w(password)
# }
#
# The server-based syntax can be used to override options:
# ------------------------------------
# server 'example.com',
# user: 'user_name',
# roles: %w{web app},
# ssh_options: {
# user: 'user_name', # overrides user setting above
# keys: %w(/home/user_name/.ssh/id_rsa),
# forward_agent: false,
# auth_methods: %w(publickey password)
# # password: 'please use keys'
# }
|
crowbot/planningalerts-app
|
config/deploy/production.rb
|
Ruby
|
gpl-2.0
| 2,066
|
#define IDL4_INTERNAL
#include <idl4/test.h>
#define dprintf(a...)
void pager_thread(void)
{
int r;
l4_threadid_t src;
l4_umword_t dw0, dw1;
l4_msgdope_t result;
unsigned int dummyint;
l4_threadid_t mypagerid, dummyid, myid;
dprintf("Pager starting up (%X.%d)\n", l4_myself().id.task, l4_myself().id.lthread);
// *** find out the threadID of the boss pager (this would be sigma0)
myid = l4_myself();
dummyid = mypagerid = L4_INVALID_ID;
l4_thread_ex_regs(myid,
0xffffffff, /* invalid eip */
0xffffffff, /* invalid esp */
&dummyid, &mypagerid,
&dummyint, &dummyint, &dummyint);
// *** wait for the first pagefault IPC to occur
while (1)
{
r = l4_i386_ipc_wait(&src, (void*)L4_IPC_OPEN_IPC, &dw0, &dw1,
L4_IPC_NEVER, &result);
while (1)
{
if (r != 0)
panic("error on pagefault IPC");
dprintf("Handling pagefault from %X.%X, dw0=%xh\n", src.lh.high, src.lh.low, dw0);
l4_i386_ipc_call(mypagerid,L4_IPC_SHORT_MSG,dw0,dw1,(void*)((L4_WHOLE_ADDRESS_SPACE<<2) + (int)L4_IPC_SHORT_FPAGE),
&dw0,&dw1,L4_IPC_NEVER,&result);
dprintf("Sigma0 replies: %xh, %xh (dope %xh)\n", dw0, dw1, result.msgdope);
// *** apply the mapping and wait for next fault
r = l4_i386_ipc_reply_and_wait(src, L4_IPC_SHORT_FPAGE, dw0, dw1,
&src, (void*)L4_IPC_OPEN_IPC, &dw0, &dw1,
L4_IPC_NEVER, &result);
}
}
}
|
l4ka/idl4
|
src/test/api/v2/pager.c
|
C
|
gpl-2.0
| 1,629
|
#ifndef VECTOR_H
#define VECTOR_H
typedef struct Vector Vector;
// Allocates Vector |v| with bytes per element |bs|.
int VecNew(Vector **v, size_t bs);
// Adds element to end of Vector |v|.
int VecPush(Vector *v, void *data);
// Removes last element in Vector |v|.
int VecPop(Vector *v);
// Overwrites element at |index| with |data| in Vector |v|.
int VecReplace(Vector *v, size_t index, void *data);
// Shifts right elements then overwrites element at |index| with |data| in
// Vector |v|.
int VecInsert(Vector *v, size_t index, void *data);
// Removes element at |index| in Vector |v|.
int VecRemove(Vector *v, size_t index);
// Copies |get| into Vector |v| at |index|.
int VecGet(Vector *v, size_t index, void *get);
// Removes all elements in Vector |v|.
int VecClear(Vector *v);
// Clears and frees Vector |v|.
int VecDelete(Vector **v);
// Returns the number of elements of Vector |v|.
int VecSize(Vector *v);
// Returns the element size of Vector |v|.
int VecElemSize(Vector *v);
// Returns the memory capacity of Vector |v|.
int VecCapacity(Vector *v);
// Returns an iterator to the first element of Vector |v|.
void *VecBegin(Vector *v);
// Returns an iterator to the last element of Vector |v|.
void *VecEnd(Vector *v);
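// A minimal usage sketch based only on the declarations above; it assumes the
// int return codes are 0 on success, which this header does not state:
//
//   Vector *v = NULL;
//   int x = 42, out = 0;
//   if (VecNew(&v, sizeof(int)) == 0) {
//       VecPush(v, &x);      /* copies sizeof(int) bytes from &x */
//       VecGet(v, 0, &out);  /* out == 42 */
//       VecDelete(&v);       /* clears and frees the vector */
//   }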
#endif
|
jneg/Vector
|
Vector.h
|
C
|
gpl-2.0
| 1,253
|
<?php
if(IN_MANAGER_MODE!="true") die("<b>INCLUDE_ORDERING_ERROR</b><br /><br />Please use the MODX Content Manager instead of accessing this file directly.");
if(!$modx->hasPermission('exec_module')) {
$modx->webAlertAndQuit($_lang["error_no_privileges"]);
}
$id = isset($_GET['id'])? intval($_GET['id']) : 0;
if($id==0) {
$modx->webAlertAndQuit($_lang["error_no_id"]);
}
// check if user has access permission, except admins
if($_SESSION['mgrRole']!=1){
$rs = $modx->db->select(
'sma.usergroup,mg.member',
$modx->getFullTableName("site_module_access")." sma
LEFT JOIN ".$modx->getFullTableName("member_groups")." mg ON mg.user_group = sma.usergroup AND member='".$modx->getLoginUserID()."'",
"sma.module = '{$id}'"
);
//initialize permission to -1, if it stays -1 no permissions
//attached so permission granted
$permissionAccessInt = -1;
while ($row = $modx->db->getRow($rs)) {
if($row["usergroup"] && $row["member"]) {
//if there are permissions and this member has permission, of course
//access is granted
$permissionAccessInt = 1;
} elseif ($permissionAccessInt==-1) {
//if there are permissions but this member has none and the variable is
//still in its initial state, set permission to 0: no access
$permissionAccessInt = 0;
}
}
if($permissionAccessInt==0) {
$modx->webAlertAndQuit("You do not sufficient privileges to execute this module.", "index.php?a=106");
}
}
// get module data
$rs = $modx->db->select('*', $modx->getFullTableName("site_modules"), "id='{$id}'");
$content = $modx->db->getRow($rs);
if(!$content) {
$modx->webAlertAndQuit("No record found for id {$id}.", "index.php?a=106");
}
if($content['disabled']) {
$modx->webAlertAndQuit("This module is disabled and cannot be executed.", "index.php?a=106");
}
// Set the item name for logger
$_SESSION['itemname'] = $content['name'];
// load module configuration
$parameter = array();
if(!empty($content["properties"])){
$tmpParams = explode("&",$content["properties"]);
for($x=0; $x<count($tmpParams); $x++) {
$pTmp = explode("=", $tmpParams[$x]);
$pvTmp = explode(";", trim($pTmp[1]));
if ($pvTmp[1]=='list' && $pvTmp[3]!="") $parameter[$pTmp[0]] = $pvTmp[3]; //list default
else if($pvTmp[1]!='list' && $pvTmp[2]!="") $parameter[$pTmp[0]] = $pvTmp[2];
}
}
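// Illustrative only, not a string from this codebase: a properties value like
// "debug=Debug mode;list;yes,no;no&cacheTime=Cache time;int;30" would yield
// $parameter['debug'] = 'no' (list default, index 3) and
// $parameter['cacheTime'] = '30' (non-list default, index 2).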
// Set the item name for logger
$_SESSION['itemname'] = $content['name'];
$output = evalModule($content["modulecode"],$parameter);
echo $output;
include MODX_MANAGER_PATH."includes/sysalert.display.inc.php";
// evalModule
function evalModule($moduleCode,$params){
global $modx;
$etomite = &$modx;
$modx->event->params = &$params; // store params inside event object
if(is_array($params)) {
extract($params, EXTR_SKIP);
}
ob_start();
$mod = eval($moduleCode);
$msg = ob_get_contents();
ob_end_clean();
if (isset($php_errormsg))
{
$error_info = error_get_last();
switch($error_info['type'])
{
case E_NOTICE :
case E_USER_NOTICE :
$error_level = 1;
break;
case E_DEPRECATED :
case E_USER_DEPRECATED :
case E_STRICT :
$error_level = 2;
break;
default:
$error_level = 99;
}
if($modx->config['error_reporting']==='99' || 2<$error_level)
{
$modx->messageQuit('PHP Parse Error', '', true, $error_info['type'], $error_info['file'], $_SESSION['itemname'] . ' - Module', $error_info['message'], $error_info['line'], $msg);
$modx->event->alert("An error occurred while loading. Please see the event log for more information<p>{$msg}</p>");
}
}
unset($modx->event->params);
return $mod.$msg;
}
|
fortunto2/automodx
|
modx.evo.custom-master/manager/processors/execute_module.processor.php
|
PHP
|
gpl-2.0
| 3,648
|
/* asn_public.h
*
* Copyright (C) 2006-2016 wolfSSL Inc.
*
* This file is part of wolfSSL.
*
* wolfSSL is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* wolfSSL is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1335, USA
*/
#ifndef WOLF_CRYPT_ASN_PUBLIC_H
#define WOLF_CRYPT_ASN_PUBLIC_H
#include <wolfssl/wolfcrypt/types.h>
#ifdef HAVE_ECC
#include <wolfssl/wolfcrypt/ecc.h>
#endif
#if defined(WOLFSSL_CERT_GEN) && !defined(NO_RSA)
#include <wolfssl/wolfcrypt/rsa.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
/* Certificate file Type */
enum CertType {
CERT_TYPE = 0,
PRIVATEKEY_TYPE,
DH_PARAM_TYPE,
DSA_PARAM_TYPE,
CRL_TYPE,
CA_TYPE,
ECC_PRIVATEKEY_TYPE,
DSA_PRIVATEKEY_TYPE,
CERTREQ_TYPE,
DSA_TYPE,
ECC_TYPE,
RSA_TYPE,
PUBLICKEY_TYPE,
RSA_PUBLICKEY_TYPE,
ECC_PUBLICKEY_TYPE,
TRUSTED_PEER_TYPE
};
/* Signature type, by OID sum */
enum Ctc_SigType {
CTC_SHAwDSA = 517,
CTC_MD2wRSA = 646,
CTC_MD5wRSA = 648,
CTC_SHAwRSA = 649,
CTC_SHAwECDSA = 520,
CTC_SHA224wRSA = 658,
CTC_SHA224wECDSA = 527,
CTC_SHA256wRSA = 655,
CTC_SHA256wECDSA = 524,
CTC_SHA384wRSA = 656,
CTC_SHA384wECDSA = 525,
CTC_SHA512wRSA = 657,
CTC_SHA512wECDSA = 526
};
enum Ctc_Encoding {
CTC_UTF8 = 0x0c, /* utf8 */
CTC_PRINTABLE = 0x13 /* printable */
};
enum Ctc_Misc {
CTC_COUNTRY_SIZE = 2,
CTC_NAME_SIZE = 64,
CTC_DATE_SIZE = 32,
CTC_MAX_ALT_SIZE = 16384, /* may be huge */
CTC_SERIAL_SIZE = 8,
#ifdef WOLFSSL_CERT_EXT
/* AKID can contain: hash + (optionally) AuthCertIssuer, AuthCertSerialNum.
* We support only the hash */
CTC_MAX_SKID_SIZE = 32, /* SHA256_DIGEST_SIZE */
CTC_MAX_AKID_SIZE = 32, /* SHA256_DIGEST_SIZE */
CTC_MAX_CERTPOL_SZ = 64,
CTC_MAX_CERTPOL_NB = 2 /* Max number of Certificate Policy */
#endif /* WOLFSSL_CERT_EXT */
};
#ifdef WOLFSSL_CERT_GEN
#ifndef HAVE_ECC
typedef struct ecc_key ecc_key;
#endif
#ifdef NO_RSA
typedef struct RsaKey RsaKey;
#endif
typedef struct CertName {
char country[CTC_NAME_SIZE];
char countryEnc;
char state[CTC_NAME_SIZE];
char stateEnc;
char locality[CTC_NAME_SIZE];
char localityEnc;
char sur[CTC_NAME_SIZE];
char surEnc;
char org[CTC_NAME_SIZE];
char orgEnc;
char unit[CTC_NAME_SIZE];
char unitEnc;
char commonName[CTC_NAME_SIZE];
char commonNameEnc;
char email[CTC_NAME_SIZE]; /* !!!! email has to be last !!!! */
} CertName;
/* for user to fill for certificate generation */
typedef struct Cert {
int version; /* x509 version */
byte serial[CTC_SERIAL_SIZE]; /* serial number */
int sigType; /* signature algo type */
CertName issuer; /* issuer info */
int daysValid; /* validity days */
int selfSigned; /* self signed flag */
CertName subject; /* subject info */
int isCA; /* is this going to be a CA */
/* internal use only */
int bodySz; /* pre sign total size */
int keyType; /* public key type of subject */
#ifdef WOLFSSL_ALT_NAMES
byte altNames[CTC_MAX_ALT_SIZE]; /* altNames copy */
int altNamesSz; /* altNames size in bytes */
byte beforeDate[CTC_DATE_SIZE]; /* before date copy */
int beforeDateSz; /* size of copy */
byte afterDate[CTC_DATE_SIZE]; /* after date copy */
int afterDateSz; /* size of copy */
#endif
#ifdef WOLFSSL_CERT_EXT
byte skid[CTC_MAX_SKID_SIZE]; /* Subject Key Identifier */
int skidSz; /* SKID size in bytes */
byte akid[CTC_MAX_AKID_SIZE]; /* Authority Key Identifier */
int akidSz; /* AKID size in bytes */
word16 keyUsage; /* Key Usage */
char certPolicies[CTC_MAX_CERTPOL_NB][CTC_MAX_CERTPOL_SZ];
word16 certPoliciesNb; /* Number of Cert Policy */
#endif
#ifdef WOLFSSL_CERT_REQ
char challengePw[CTC_NAME_SIZE];
#endif
void* heap; /* heap hint */
} Cert;
/* Initialize and Set Certificate defaults:
version = 3 (0x2)
serial = 0 (Will be randomly generated)
sigType = SHA_WITH_RSA
issuer = blank
daysValid = 500
selfSigned = 1 (true) use subject as issuer
subject = blank
isCA = 0 (false)
keyType = RSA_KEY (default)
*/
WOLFSSL_API void wc_InitCert(Cert*);
WOLFSSL_API int wc_MakeCert(Cert*, byte* derBuffer, word32 derSz, RsaKey*,
ecc_key*, WC_RNG*);
#ifdef WOLFSSL_CERT_REQ
WOLFSSL_API int wc_MakeCertReq(Cert*, byte* derBuffer, word32 derSz,
RsaKey*, ecc_key*);
#endif
WOLFSSL_API int wc_SignCert(int requestSz, int sigType, byte* derBuffer,
word32 derSz, RsaKey*, ecc_key*, WC_RNG*);
WOLFSSL_API int wc_MakeSelfCert(Cert*, byte* derBuffer, word32 derSz, RsaKey*,
WC_RNG*);
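/* A hedged usage sketch of the declarations above; the buffer size, the
 * already-initialized rsaKey/rng variables and the error handling are
 * illustrative assumptions, not part of this header:
 *
 *     Cert cert;
 *     byte der[4096];
 *     wc_InitCert(&cert);                          // defaults documented above
 *     strncpy(cert.subject.commonName, "example.com", CTC_NAME_SIZE);
 *     int derSz = wc_MakeSelfCert(&cert, der, sizeof(der), &rsaKey, &rng);
 *     // a negative derSz indicates an error; otherwise it is the DER length
 */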
WOLFSSL_API int wc_SetIssuer(Cert*, const char*);
WOLFSSL_API int wc_SetSubject(Cert*, const char*);
#ifdef WOLFSSL_ALT_NAMES
WOLFSSL_API int wc_SetAltNames(Cert*, const char*);
#endif
WOLFSSL_API int wc_SetIssuerBuffer(Cert*, const byte*, int);
WOLFSSL_API int wc_SetSubjectBuffer(Cert*, const byte*, int);
WOLFSSL_API int wc_SetAltNamesBuffer(Cert*, const byte*, int);
WOLFSSL_API int wc_SetDatesBuffer(Cert*, const byte*, int);
#ifdef WOLFSSL_CERT_EXT
WOLFSSL_API int wc_SetAuthKeyIdFromPublicKey(Cert *cert, RsaKey *rsakey,
ecc_key *eckey);
WOLFSSL_API int wc_SetAuthKeyIdFromCert(Cert *cert, const byte *der, int derSz);
WOLFSSL_API int wc_SetAuthKeyId(Cert *cert, const char* file);
WOLFSSL_API int wc_SetSubjectKeyIdFromPublicKey(Cert *cert, RsaKey *rsakey,
ecc_key *eckey);
WOLFSSL_API int wc_SetSubjectKeyId(Cert *cert, const char* file);
#ifdef HAVE_NTRU
WOLFSSL_API int wc_SetSubjectKeyIdFromNtruPublicKey(Cert *cert, byte *ntruKey,
word16 ntruKeySz);
#endif
/* Set the KeyUsage.
* Value is a string separated tokens with ','. Accepted tokens are :
* digitalSignature,nonRepudiation,contentCommitment,keyCertSign,cRLSign,
* dataEncipherment,keyAgreement,keyEncipherment,encipherOnly and decipherOnly.
*
* nonRepudiation and contentCommitment are for the same usage.
*/
WOLFSSL_API int wc_SetKeyUsage(Cert *cert, const char *value);
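/* Illustrative call; the tokens are taken from the list documented above:
 *     wc_SetKeyUsage(&cert, "digitalSignature,keyEncipherment");
 */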
#endif /* WOLFSSL_CERT_EXT */
#ifdef HAVE_NTRU
WOLFSSL_API int wc_MakeNtruCert(Cert*, byte* derBuffer, word32 derSz,
const byte* ntruKey, word16 keySz,
WC_RNG*);
#endif
#endif /* WOLFSSL_CERT_GEN */
#if defined(WOLFSSL_CERT_EXT) || defined(WOLFSSL_PUB_PEM_TO_DER)
#ifndef WOLFSSL_PEMPUBKEY_TODER_DEFINED
#ifndef NO_FILESYSTEM
/* forward from wolfssl */
WOLFSSL_API int wolfSSL_PemPubKeyToDer(const char* fileName,
unsigned char* derBuf, int derSz);
#endif
/* forward from wolfssl */
WOLFSSL_API int wolfSSL_PubKeyPemToDer(const unsigned char*, int,
unsigned char*, int);
#define WOLFSSL_PEMPUBKEY_TODER_DEFINED
#endif /* WOLFSSL_PEMPUBKEY_TODER_DEFINED */
#endif /* WOLFSSL_CERT_EXT || WOLFSSL_PUB_PEM_TO_DER */
#if defined(WOLFSSL_KEY_GEN) || defined(WOLFSSL_CERT_GEN) || !defined(NO_DSA)
WOLFSSL_API int wc_DerToPem(const byte* der, word32 derSz, byte* output,
word32 outputSz, int type);
WOLFSSL_API int wc_DerToPemEx(const byte* der, word32 derSz, byte* output,
word32 outputSz, byte *cipherIno, int type);
#endif
#ifdef HAVE_ECC
/* private key helpers */
WOLFSSL_API int wc_EccPrivateKeyDecode(const byte*, word32*,
ecc_key*, word32);
WOLFSSL_API int wc_EccKeyToDer(ecc_key*, byte* output, word32 inLen);
/* public key helper */
WOLFSSL_API int wc_EccPublicKeyDecode(const byte*, word32*,
ecc_key*, word32);
#if (defined(WOLFSSL_CERT_GEN) || defined(WOLFSSL_KEY_GEN))
WOLFSSL_API int wc_EccPublicKeyToDer(ecc_key*, byte* output,
word32 inLen, int with_AlgCurve);
#endif
#endif
/* DER encode signature */
WOLFSSL_API word32 wc_EncodeSignature(byte* out, const byte* digest,
word32 digSz, int hashOID);
WOLFSSL_API int wc_GetCTC_HashOID(int type);
/* Time */
/* Returns seconds (Epoch/UTC)
* timePtr: is "time_t", which is typically "long"
* Example:
long lTime;
rc = wc_GetTime(&lTime, (word32)sizeof(lTime));
*/
WOLFSSL_API int wc_GetTime(void* timePtr, word32 timeSize);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* WOLF_CRYPT_ASN_PUBLIC_H */
|
jay/wolfssl
|
wolfssl/wolfcrypt/asn_public.h
|
C
|
gpl-2.0
| 9,857
|
/*global angular*/
(function(angular, undefined) {
"use strict";
function CardsDeckCtrl() {
var ctrl = this;
}
CardsDeckCtrl.$inject = [];
angular
.module("cards.controllers.components")
.controller("CardsDeckCtrl", CardsDeckCtrl);
})(angular);
|
Temoto-kun/cards-against-html
|
app/js/angular/controllers/components/CardsDeckCtrl.js
|
JavaScript
|
gpl-2.0
| 295
|
#ifndef __GC0307_H__
#define __GC0307_H__
static const struct regval_list gc0307_init_regs[] = {
// Initial sequence write-in.
//========= close output
{0xf0,0x00},
{0x43,0x00},
{0x44,0xe2},
// {0x44,0xe6},
//========= close some functions
// open them after configuring their parameters
// {0x40,0x10},
// {0x41,0x00},//08
// {0x42,0x10},
// {0x47,0x00},//mode1,
{0x40,0x7e},
{0x41,0x2f},
{0x43,0x40},
// {0x44,0xE2},
// {0x44,0xE6},
{0x45, 0x26},
{0x47, 0x28},
{0x48,0xc7},//mode2, 0xc1
{0x49,0x00},//dither_mode
{0x4a,0x00},//clock_gating_en
{0x4b,0x00},//mode_reg3
{0x4E,0x23},//sync mode 23 20120410
{0x4F,0x01},//AWB, AEC, every N frame 01
//========= frame timing
{0x1C,0x00},//Vs_st
{0x1D,0x00},//Vs_et
{0x11,0x05},//row_tail, AD_pipe_number
{0x03,0x01},//row_start
{0x04,0x2c},
//========= windowing
{0x05,0x00},//row_start
{0x06,0x00},
{0x07,0x00},//col start
{0x08,0x00},
{0x09,0x01},//win height
{0x0A,0xea},//ea 0xe8 james
{0x0B,0x02},//win width, pixel array only 640
{0x0C,0x80},//0x80
//========= analog
{0x0D,0x22},//rsh_width
{0x0E,0x02},//CISCTL mode2,
{0x0F,0xa2},//CISCTL mode1
{0x12,0x70},//7 hrst, 6_4 darsg,
{0x13,0x00},//7 CISCTL_restart, 0 apwd
{0x14,0x00},//NA
{0x15,0xba},//7_4 vref
{0x16,0x13},//5to4 _coln_r, __1to0__da18
{0x17,0x52},//opa_r, ref_r, sRef_r
// {0x18,0xc0},//analog_mode, best case for left band.
{0x18,0x00},
{0x1E,0x0d},//tsp_width
{0x1F,0x32},//sh_delay
//========= offset
//{0x47,0x00}, //7__test_image, __6__fixed_pga, //__5__auto_DN,__4__CbCr_fix,
//__3to2__dark_sequence, __1__allow_pclk_vcync, __0__LSC_test_image
{0x19,0x06}, //pga_o
{0x1a,0x06}, //pga_e
{0x31,0x00}, //4 //pga_oFFset , high 8bits of 11bits
{0x3B,0x00}, //global_oFFset, low 8bits of 11bits
{0x59,0x0f}, //offset_mode
{0x58,0x88}, //DARK_VALUE_RATIO_G, DARK_VALUE_RATIO_RB
{0x57,0x08}, //DARK_CURRENT_RATE
{0x56,0x77}, //PGA_OFFSET_EVEN_RATIO, PGA_OFFSET_ODD_RATIO
//========= blk
{0x35,0xd8}, //blk_mode
{0x36,0x40},
{0x3C,0x00},
{0x3D,0x00},
{0x3E,0x00},
{0x3F,0x00},
{0xb5,0x70},
{0xb6,0x40},
{0xb7,0x00},
{0xb8,0x38},
{0xb9,0xc3},
{0xba,0x0f},
{0x7e,0x45},
{0x7f,0x66},
{0x5c,0x38},//78 48
{0x5d,0x48},//88 58
//========= manual_gain
{0x61,0x80},//manual_gain_g1
{0x63,0x80},//manual_gain_r
{0x65,0x98},//manual_gai_b, 0xa0=1.25, 0x98=1.1875
{0x67,0x80},//manual_gain_g2
{0x68,0x18},//global_manual_gain 2.4bits
//=========CC _R
{0x69,0x58}, //54//58
{0x6A,0xf6}, //ff
{0x6B,0xfb}, //fe
{0x6C,0xf4}, //ff
{0x6D,0x5a}, //5f
{0x6E,0xe6}, //e1
{0x6f,0x00},
//=========lsc
{0x70,0x14},
{0x71,0x1c},
{0x72,0x20},
{0x73,0x10},
{0x74,0x3c},//480/8
{0x75,0x52},//640/8
//=========dn
{0x7d,0x2f},//dn_mode
{0x80,0x0c},//when auto_dn, check 7e,7f
{0x81,0x0c},
{0x82,0x44},
//dd
{0x83,0x18}, //DD_TH1
{0x84,0x18}, //DD_TH2
{0x85,0x04}, //DD_TH3
{0x87,0x34}, //32 b DNDD_low_range X16, DNDD_low_range_C_weight_center
//=========intp-ee
{0x88,0x04},
{0x89,0x01},
{0x8a,0x50},//60
{0x8b,0x50},//60
{0x8c,0x07},
{0x50,0x0c},
{0x5f,0x3c},
{0x8e,0x02},
{0x86,0x02},
{0x51,0x20},
{0x52,0x08},
{0x53,0x00},
//========= YCP
//contrast_center
{0x77,0x80},//contrast_center
{0x78,0x00},//fixed_Cb
{0x79,0x00},//fixed_Cr
//{0x7a,0x00},//luma_offset
{0x7b,0x40},//hue_cos
{0x7c,0x00},//hue_sin
//saturation
{0xa0,0x40},//global_saturation\
{0xa1,0x40},//luma_contrast
{0xa2,0x40},//saturation_Cb//0x34
{0xa3,0x40},// 34 saturation_Cr//0x34
{0xa4,0xc8},
{0xa5,0x02},
{0xa6,0x28},
{0xa7,0x02},
//skin
{0xa8,0xee},
{0xa9,0x12},
{0xaa,0x01},
{0xab,0x20},
{0xac,0xf0},
{0xad,0x10},
//========= ABS
{0xae,0x18},// black_pixel_target_number
{0xaf,0x74},
{0xb0,0xe0},
{0xb1,0x20},
{0xb2,0x6c},
{0xb3,0x40},
{0xb4,0x04},
//========= AWB
{0xbb,0x42},
{0xbc,0x60},
{0xbd,0x50},
{0xbe,0x50},
{0xbf,0x0c},
{0xc0,0x06},
{0xc1,0x60},
{0xc2,0xf1},//f4
{0xc3,0x40},
{0xc4,0x1c},//18
{0xc5,0x56},
{0xc6,0x1d},
{0xca,0x70},//0x70
{0xcb,0x6a},//0x70
{0xcc,0x70},//0x78
{0xcd,0x80},//R_ratio 0x80
{0xce,0x80},//G_ratio , cold_white white
{0xcf,0x86},//B_ratio 0x80
//========= aecT
{0x20,0x06},//02
{0x21,0xc0},
{0x22,0x40},
{0x23,0x88},
{0x24,0x96},
{0x25,0x30},
{0x26,0xd0},
{0x27,0x00},
/////
{0x01,0xfa},//HB
{0x02,0x70},//VB//0c
{0x10,0x01},//high 4 bits of VB, HB
{0xd6,0x64},//antiflicker_step //96
{0x28,0x02},//AEC_exp_level_1bit11to8
{0x29,0x58},//AEC_exp_level_1bit7to0
{0x2a,0x02},//AEC_exp_level_2bit11to8
{0x2b,0x58},//AEC_exp_level_2bit7to0
{0x2c,0x02},//AEC_exp_level_3bit11to8 659 - 8FPS, 8ca - 6FPS //
{0x2d,0x58},//AEC_exp_level_3bit7to0
{0x2e,0x02},//AEC_exp_level_4bit11to8 4FPS
{0x2f,0x58},//AEC_exp_level_4bit7to0
{0x30,0x20},
{0x31,0x00},
{0x32,0x1c},
{0x33,0x90},
{0x34,0x10},
{0xd0,0x34},//[2] 1 before gamma, 0 after gamma
{0xd1,0x60},//AEC_target_Y//0x50
{0xd2,0x61},//f2
{0xd4,0x4b},//96
{0xd5,0x01},// 10
{0xd7,0x03},//AEC_exp_time_min //10
{0xd8,0x02},
{0xdd,0x12},
//========= measure window
{0xe0,0x03},
{0xe1,0x02},
{0xe2,0x27},
{0xe3,0x1e},
{0xe8,0x3b},
{0xe9,0x6e},
{0xea,0x2c},
{0xeb,0x50},
{0xec,0x73},
//========= close_frame
{0xed,0x00},//close_frame_num1 ,can be use to reduce FPS
{0xee,0x00},//close_frame_num2
{0xef,0x00},//close_frame_num
// page1 /////////////////////////////////////////////
{0xf0,0x01},//select page1
{0x00,0x20},
{0x01,0x20},
{0x02,0x20},
{0x03,0x20},
{0x04,0x78},
{0x05,0x78},
{0x06,0x78},
{0x07,0x78},
{0x10,0x04},
{0x11,0x04},
{0x12,0x04},
{0x13,0x04},
{0x14,0x01},
{0x15,0x01},
{0x16,0x01},
{0x17,0x01},
{0x20,0x00},
{0x21,0x00},
{0x22,0x00},
{0x23,0x00},
{0x24,0x00},
{0x25,0x00},
{0x26,0x00},
{0x27,0x00},
{0x40,0x11},
//=============================lscP
{0x45,0x08}, // r
{0x46,0x07}, //g
{0x47,0x08}, //b
{0x48,0x04},
{0x49,0x03},
{0x4a,0x03},
{0x62,0xd8},
{0x63,0x24},
{0x64,0x24},
{0x65,0x24},
{0x66,0xd8},
{0x67,0x24},
{0x5a,0x00},
{0x5b,0x00},
{0x5c,0x00},
{0x5d,0x00},
{0x5e,0x00},
{0x5f,0x00},
//============================= ccP
{0x69,0x03},//cc_mode
//CC_G
{0x70,0x5d},
{0x71,0xed},
{0x72,0xff},
{0x73,0xe5},
{0x74,0x5f},
{0x75,0xe6},
//CC_B
{0x76,0x41},
{0x77,0xef},
{0x78,0xff},
{0x79,0xff},
{0x7a,0x5f},
{0x7b,0xfa},
//============================= AGP
{0x7e,0x00},
{0x7f,0x10}, // 30 0x40 select gamma
{0x80,0x48}, // c8
{0x81,0x06},
{0x82,0x08},
{0x83,0x23},
{0x84,0x38},
{0x85,0x4F},
{0x86,0x61},
{0x87,0x72},
{0x88,0x80},
{0x89,0x8D},
{0x8a,0xA2},
{0x8b,0xB2},
{0x8c,0xC0},
{0x8d,0xCA},
{0x8e,0xD3},
{0x8f,0xDB},
{0x90,0xE2},
{0x91,0xED},
{0x92,0xF6},
{0x93,0xFD},
//about gamma1 is hex r oct
{0x94,0x04},
{0x95,0x0E},
{0x96,0x1B},
{0x97,0x28},
{0x98,0x35},
{0x99,0x41},
{0x9a,0x4E},
{0x9b,0x67},
{0x9c,0x7E},
{0x9d,0x94},
{0x9e,0xA7},
{0x9f,0xBA},
{0xa0,0xC8},
{0xa1,0xD4},
{0xa2,0xE7},
{0xa3,0xF4},
{0xa4,0xFA},
//========= open functions
{0xf0,0x00},//set back to page0
// {0x40,0x7e},
// {0x41,0x2f},
// {0x43,0x40},
// {0x44,0xE2},
// {0x44,0xE6},
// {0x0f, 0xa2},
// {0x45, 0x26},
// {0x47, 0x28},
#if 1
//AEC
{0x03, 0x01},
{0x04, 0x85},
{0xDB, 0x3c},
{0xDC, 0x40},
//Window
{0x05, 0x00},
{0x06, 0x00},
{0x07, 0x00},
{0x08, 0x00},
{0x09, 0x01},
{0x0A, 0xe8},
{0x0B, 0x02},
{0x0C, 0x80},
#endif
{0xff,0xff},
};
#endif
|
lshw/loongson1-linux-3.0
|
drivers/media/video/ls1x_gc0307.h
|
C
|
gpl-2.0
| 7,508
|
<?php
if($permission < 4 || $username != 'admin'){
header("location: ../../index.php");
exit;
}
$time = time();
require '../../includes/main/pdoTablePrefix.php';
// "Example table."
$sql="CREATE TABLE {$skeleton}example(
`id` INT(10) NOT NULL AUTO_INCREMENT,
`example` VARCHAR(1) NOT NULL default 'y',
PRIMARY KEY (`id`)
) ENGINE=myisam DEFAULT CHARSET=utf8 ";
try {
$dbh->exec($sql);
echo "Table example created successfully.<br>";
} catch(PDOException $e){
echo $e->getMessage().' in '.$e->getFile().' on line '.$e->getLine() . "<br>";
}
$example = "n";
try {
$stmt = $dbh->prepare("INSERT INTO {$skeleton}example (example) VALUES (:example)");
$stmt->bindParam(':example', $example);
$stmt->execute();
} catch (PDOException $e) {
echo $e->getMessage().' in '.$e->getFile().' on line '.$e->getLine();
exit;
}
?>
|
eahe/zzbulletin
|
plugins/skeleton/sql/install/skeleton.php
|
PHP
|
gpl-2.0
| 882
|
var utils = require('./utils'),
path = require('path'),
semver = require('semver'),
async = require('async');
exports.load = async.memoize(function (dir, callback) {
var settings_file = path.resolve(dir, 'package.json');
utils.readJSON(settings_file, function (err, settings) {
if (err) {
return callback(err);
}
try {
exports.validate(settings, settings_file);
}
catch (e) {
return callback(e);
}
callback(null, settings);
});
});
exports.validate = function (settings, filename) {
if (!settings.name) {
throw new Error('Missing name property in ' + filename);
}
if (!/^[a-zA-Z][a-zA-Z0-9_\-\.]*$/.test(settings.name)) {
throw new Error(
'Invalid name property in ' + filename + ', ' +
'package names can only contain numbers, upper or lowercase ' +
'letters and "_", "-" or ".", and must start with a letter'
);
}
if (!settings.version) {
throw new Error('Missing version property in ' + filename);
}
if (!settings.description) {
throw new Error('Missing description property in ' + filename);
}
if (!semver.valid(settings.version)) {
throw new Error(
'Invalid version number in ' + filename + '\n' +
'Version numbers should follow the format described at ' +
'http://semver.org (eg, 1.2.3 or 4.5.6-jam.1)'
);
}
};
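// For reference, a minimal package.json that passes validate() (values are
// illustrative): { "name": "my-package", "version": "1.2.3", "description": "A package" }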
|
berkmancenter/spectacle
|
node_modules/bbb/node_modules/jamjs/lib/settings.js
|
JavaScript
|
gpl-2.0
| 1,497
|
#include "rule.hh"
using namespace iNA::Ast;
Rule::Rule(Node::NodeType type, GiNaC::ex rule)
: Node(type), rule(rule)
{
// pass
}
Rule::~Rule()
{
}
GiNaC::ex
Rule::getRule() const
{
return this->rule;
}
void
Rule::setRule(GiNaC::ex rule)
{
this->rule = rule;
}
void
Rule::accept(Ast::Visitor &visitor) const
{
if (Rule::Visitor *rule_vis = dynamic_cast<Rule::Visitor *>(&visitor)) {
rule_vis->visit(this);
} else {
Node::accept(visitor);
}
}
void
Rule::apply(Ast::Operator &op)
{
if (Rule::Operator *rule_op = dynamic_cast<Rule::Operator *>(&op)) {
rule_op->act(this);
} else {
Node::apply(op);
}
}
/*
* Implementation of AssignmentRule
*/
AssignmentRule::AssignmentRule(GiNaC::ex rule)
: Rule(Node::ASSIGNMENT_RULE, rule)
{
// Pass.
}
void
AssignmentRule::dump(std::ostream &str)
{
str << " := " << this->rule;
}
void
AssignmentRule::accept(Ast::Visitor &visitor) const
{
if (AssignmentRule::Visitor *rule_vis = dynamic_cast<AssignmentRule::Visitor *>(&visitor)) {
rule_vis->visit(this);
} else {
Rule::accept(visitor);
}
}
void
AssignmentRule::apply(Ast::Operator &op)
{
if (AssignmentRule::Operator *rule_op = dynamic_cast<AssignmentRule::Operator *>(&op)) {
rule_op->act(this);
} else {
Rule::apply(op);
}
}
/*
* Implementation of RateRule
*/
RateRule::RateRule(GiNaC::ex rule)
: Rule(Node::RATE_RULE, rule)
{
// Pass.
}
void
RateRule::dump(std::ostream &str)
{
str << " := " << this->rule;
}
void
RateRule::accept(Ast::Visitor &visitor) const
{
if (RateRule::Visitor *rule_vis = dynamic_cast<RateRule::Visitor *>(&visitor)) {
rule_vis->visit(this);
} else {
Rule::accept(visitor);
}
}
void
RateRule::apply(Ast::Operator &op)
{
if (RateRule::Operator *rule_op = dynamic_cast<RateRule::Operator *>(&op)) {
rule_op->act(this);
} else {
Rule::apply(op);
}
}
|
hmatuschek/intrinsic-noise-analyzer
|
lib/ast/rule.cc
|
C++
|
gpl-2.0
| 1,896
|
package mx.gob.imss.cit.bp.ws.bovedapersonalcommonschema;
import javax.xml.bind.annotation.XmlAccessType;
import javax.xml.bind.annotation.XmlAccessorType;
import javax.xml.bind.annotation.XmlElement;
import javax.xml.bind.annotation.XmlSeeAlso;
import javax.xml.bind.annotation.XmlType;
import mx.gob.imss.cit.bp.ws.AddDocumentActorRequest;
import mx.gob.imss.cit.bp.ws.AllDocumentVersionsByDocRequest;
import mx.gob.imss.cit.bp.ws.AllDocumentVersionsMetadataByDocRequest;
import mx.gob.imss.cit.bp.ws.AllMetadataByMetadataRequest;
import mx.gob.imss.cit.bp.ws.CreateDocumentRequest;
import mx.gob.imss.cit.bp.ws.DeleteDocumentRequest;
import mx.gob.imss.cit.bp.ws.DocumentRequest;
import mx.gob.imss.cit.bp.ws.DocumentsByMetadataRequest;
import mx.gob.imss.cit.bp.ws.MetadataByDocRequest;
/**
* <p>Java class for BaseRequest complex type.
*
* <p>The following schema fragment specifies the expected content contained within this class.
*
* <pre>
* <complexType name="BaseRequest">
* <complexContent>
* <restriction base="{http://www.w3.org/2001/XMLSchema}anyType">
* <sequence>
* <element name="Tramite" type="{http://ws.bp.cit.imss.gob.mx/bovedaPersonalCommonSchema}Tramite"/>
* </sequence>
* </restriction>
* </complexContent>
* </complexType>
* </pre>
*
*
*/
@XmlAccessorType(XmlAccessType.FIELD)
@XmlType(name = "BaseRequest", propOrder = {
"tramite"
})
@XmlSeeAlso({
AllMetadataByMetadataRequest.class,
DocumentRequest.class,
MetadataByDocRequest.class,
AddDocumentActorRequest.class,
CreateDocumentRequest.class,
DocumentsByMetadataRequest.class,
AllDocumentVersionsByDocRequest.class,
AllDocumentVersionsMetadataByDocRequest.class,
DeleteDocumentRequest.class
})
public class BaseRequest {
@XmlElement(name = "Tramite", required = true)
protected Tramite tramite;
/**
* Gets the value of the tramite property.
*
* @return
* possible object is
* {@link Tramite }
*
*/
public Tramite getTramite() {
return tramite;
}
/**
* Sets the value of the tramite property.
*
* @param value
* allowed object is
* {@link Tramite }
*
*/
public void setTramite(Tramite value) {
this.tramite = value;
}
}
|
sidlors/BovedaPersonal
|
boveda-personal-ws/src/main/java/mx/gob/imss/cit/bp/ws/bovedapersonalcommonschema/BaseRequest.java
|
Java
|
gpl-2.0
| 2,455
|
<?php
/*
* @version $Id: softwareversion.class.php 23327 2015-02-02 17:15:33Z tsmr $
-------------------------------------------------------------------------
GLPI - Gestionnaire Libre de Parc Informatique
Copyright (C) 2003-2014 by the INDEPNET Development Team.
http://indepnet.net/ http://glpi-project.org
-------------------------------------------------------------------------
LICENSE
This file is part of GLPI.
GLPI is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
GLPI is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GLPI. If not, see <http://www.gnu.org/licenses/>.
--------------------------------------------------------------------------
*/
/** @file
* @brief
*/
if (!defined('GLPI_ROOT')) {
die("Sorry. You can't access directly to this file");
}
/**
* SoftwareVersion Class
**/
class SoftwareVersion extends CommonDBChild {
// From CommonDBTM
public $dohistory = true;
// From CommonDBChild
static public $itemtype = 'Software';
static public $items_id = 'softwares_id';
static function getTypeName($nb=0) {
return _n('Version', 'Versions', $nb);
}
function cleanDBonPurge() {
global $DB;
$csv = new Computer_SoftwareVersion();
$csv->cleanDBonItemDelete(__CLASS__, $this->fields['id']);
}
function defineTabs($options=array()) {
$ong = array();
$this->addDefaultFormTab($ong);
$this->addStandardTab('Computer_SoftwareVersion',$ong, $options);
$this->addStandardTab('Log',$ong, $options);
return $ong;
}
/**
* @since version 0.84
*
* @see CommonDBTM::getPreAdditionalInfosForName
**/
function getPreAdditionalInfosForName() {
$soft = new Software();
if ($soft->getFromDB($this->fields['softwares_id'])) {
return $soft->getName();
}
return '';
}
/**
* Print the Software / version form
*
* @param $ID integer Id of the version or the template to print
* @param $options array of possible options:
* - target form target
* - softwares_id ID of the software for add process
*
* @return true if displayed false if item not found or not right to display
*
**/
function showForm($ID, $options=array()) {
global $CFG_GLPI;
if ($ID > 0) {
$this->check($ID, READ);
$softwares_id = $this->fields['softwares_id'];
} else {
$softwares_id = $options['softwares_id'];
$this->check(-1, CREATE, $options);
}
$this->showFormHeader($options);
echo "<tr class='tab_bg_1'><td>"._n('Software', 'Software', Session::getPluralNumber())."</td>";
echo "<td>";
if ($this->isNewID($ID)) {
echo "<input type='hidden' name='softwares_id' value='$softwares_id'>";
}
echo "<a href='software.form.php?id=".$softwares_id."'>".
Dropdown::getDropdownName("glpi_softwares", $softwares_id)."</a>";
echo "</td>";
echo "<td rowspan='4' class='middle'>".__('Comments')."</td>";
echo "<td class='center middle' rowspan='4'>";
echo "<textarea cols='45' rows='3' name='comment' >".$this->fields["comment"];
echo "</textarea></td></tr>\n";
echo "<tr class='tab_bg_1'><td>".__('Name')."</td>";
echo "<td>";
Html::autocompletionTextField($this,"name");
echo "</td></tr>\n";
echo "<tr class='tab_bg_1'><td>" . __('Operating system') . "</td><td>";
OperatingSystem::dropdown(array('value' => $this->fields["operatingsystems_id"]));
echo "</td></tr>\n";
echo "<tr class='tab_bg_1'><td>" . __('Status') . "</td><td>";
State::dropdown(array('value' => $this->fields["states_id"],
'entity' => $this->fields["entities_id"],
'condition' => "`is_visible_softwareversion`"));
echo "</td></tr>\n";
// Only count softwareversions_id_buy (don't care about softwareversions_id_use if there is no installation)
if ((SoftwareLicense::countForVersion($ID) > 0)
|| (Computer_SoftwareVersion::countForVersion($ID) > 0)) {
$options['candel'] = false;
}
$this->showFormButtons($options);
return true;
}
function getSearchOptions() {
$tab = array();
$tab['common'] = __('Characteristics');
$tab[2]['table'] = $this->getTable();
$tab[2]['field'] = 'name';
$tab[2]['name'] = __('Name');
$tab[2]['datatype'] = 'string';
$tab[4]['table'] = 'glpi_operatingsystems';
$tab[4]['field'] = 'name';
$tab[4]['name'] = __('Operating system');
$tab[4]['datatype'] = 'dropdown';
$tab[16]['table'] = $this->getTable();
$tab[16]['field'] = 'comment';
$tab[16]['name'] = __('Comments');
$tab[16]['datatype'] = 'text';
$tab[31]['table'] = 'glpi_states';
$tab[31]['field'] = 'completename';
$tab[31]['name'] = __('Status');
$tab[31]['datatype'] = 'dropdown';
$tab[31]['condition'] = "`is_visible_softwareversion`";
return $tab;
}
/**
* Make a select box for software to install
*
* @param $options array of possible options:
* - name : string / name of the select (default is softwareversions_id)
* - softwares_id : integer / ID of the software
* - value : integer / value of the selected version
* - used : array / already used items
*
* @return nothing (print out an HTML select box)
**/
static function dropdown($options=array()) {
global $CFG_GLPI, $DB;
//$softwares_id,$value=0
$p['softwares_id'] = 0;
$p['value'] = 0;
$p['name'] = 'softwareversions_id';
$p['used'] = array();
if (is_array($options) && count($options)) {
foreach ($options as $key => $val) {
$p[$key] = $val;
}
}
$where = '';
if (count($p['used'])) {
$where = " AND `glpi_softwareversions`.`id` NOT IN (".implode(",",$p['used']).")";
}
// Make a select box
$query = "SELECT DISTINCT `glpi_softwareversions`.*,
`glpi_states`.`name` AS sname
FROM `glpi_softwareversions`
LEFT JOIN `glpi_states` ON (`glpi_softwareversions`.`states_id` = `glpi_states`.`id`)
WHERE `glpi_softwareversions`.`softwares_id` = '".$p['softwares_id']."'
$where
ORDER BY `name`";
$result = $DB->query($query);
$number = $DB->numrows($result);
$values = array(0 => Dropdown::EMPTY_VALUE);
if ($number) {
while ($data = $DB->fetch_assoc($result)) {
$ID = $data['id'];
$output = $data['name'];
if (empty($output) || $_SESSION['glpiis_ids_visible']) {
$output = sprintf(__('%1$s (%2$s)'), $output, $ID);
}
if (!empty($data['sname'])) {
$output = sprintf(__('%1$s - %2$s'), $output, $data['sname']);
}
$values[$ID] = $output;
}
}
return Dropdown::showFromArray($p['name'], $values, $p);
}
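// Illustrative call, using only the options documented in the docblock above
// ($softwares_id is assumed to hold the software's id):
//   SoftwareVersion::dropdown(array('name'         => 'softwareversions_id',
//                                   'softwares_id' => $softwares_id,
//                                   'used'         => array()));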
/**
* Show Versions of a software
*
* @param $soft Software object
*
* @return nothing
**/
static function showForSoftware(Software $soft) {
global $DB, $CFG_GLPI;
$softwares_id = $soft->getField('id');
if (!$soft->can($softwares_id, READ)) {
return false;
}
$canedit = $soft->canEdit($softwares_id);
echo "<div class='spaced'>";
if ($canedit) {
echo "<div class='center firstbloc'>";
echo "<a class='vsubmit' href='softwareversion.form.php?softwares_id=$softwares_id'>".
_x('button', 'Add a version')."</a>";
echo "</div>";
}
$query = "SELECT `glpi_softwareversions`.*,
`glpi_states`.`name` AS sname
FROM `glpi_softwareversions`
LEFT JOIN `glpi_states` ON (`glpi_states`.`id` = `glpi_softwareversions`.`states_id`)
WHERE `softwares_id` = '$softwares_id'
ORDER BY `name`";
Session::initNavigateListItems('SoftwareVersion',
//TRANS : %1$s is the itemtype name,
// %2$s is the name of the item (used for headings of a list)
sprintf(__('%1$s = %2$s'), Software::getTypeName(1),
$soft->getName()));
if ($result = $DB->query($query)) {
if ($DB->numrows($result)) {
echo "<table class='tab_cadre_fixehov'><tr>";
echo "<th>".self::getTypeName(Session::getPluralNumber())."</th>";
echo "<th>".__('Status')."</th>";
echo "<th>".__('Operating system')."</th>";
echo "<th>"._n('Installation', 'Installations', Session::getPluralNumber())."</th>";
echo "<th>".__('Comments')."</th>";
echo "</tr>\n";
for ($tot=$nb=0 ; $data=$DB->fetch_assoc($result) ; $tot+=$nb) {
Session::addToNavigateListItems('SoftwareVersion',$data['id']);
$nb = Computer_SoftwareVersion::countForVersion($data['id']);
echo "<tr class='tab_bg_2'>";
echo "<td><a href='softwareversion.form.php?id=".$data['id']."'>";
echo $data['name'].(empty($data['name'])?"(".$data['id'].")":"")."</a></td>";
echo "<td>".$data['sname']."</td>";
echo "<td class='right'>".Dropdown::getDropdownName('glpi_operatingsystems',
$data['operatingsystems_id']);
echo "</td>";
echo "<td class='numeric'>$nb</td>";
echo "<td>".$data['comment']."</td></tr>\n";
}
echo "<tr class='tab_bg_1 noHover'><td class='right b' colspan='3'>".__('Total')."</td>";
echo "<td class='numeric b'>$tot</td><td></td></tr>";
echo "</table>\n";
} else {
echo "<table class='tab_cadre_fixe'>";
echo "<tr><th>".__('No item found')."</th></tr>";
echo "</table>\n";
}
}
echo "</div>";
}
function getTabNameForItem(CommonGLPI $item, $withtemplate=0) {
if (!$withtemplate) {
switch ($item->getType()) {
case 'Software' :
if ($_SESSION['glpishow_count_on_tabs']) {
return self::createTabEntry(self::getTypeName(Session::getPluralNumber()),
countElementsInTable($this->getTable(),
"softwares_id
= '".$item->getID()."'"));
}
return self::getTypeName(Session::getPluralNumber());
}
}
return '';
}
static function displayTabContentForItem(CommonGLPI $item, $tabnum=1, $withtemplate=0) {
if ($item->getType() == 'Software') {
self::showForSoftware($item);
}
return true;
}
}
?>
|
dtiguarulhos/suporteguarulhos
|
inc/softwareversion.class.php
|
PHP
|
gpl-2.0
| 11,716
|
//
// PatternFormatterTest.cpp
//
// $Id: //poco/1.4/Foundation/testsuite/src/PatternFormatterTest.cpp#1 $
//
// Copyright (c) 2004-2006, Applied Informatics Software Engineering GmbH.
// and Contributors.
//
// Permission is hereby granted, free of charge, to any person or organization
// obtaining a copy of the software and accompanying documentation covered by
// this license (the "Software") to use, reproduce, display, distribute,
// execute, and transmit the Software, and to prepare derivative works of the
// Software, and to permit third-parties to whom the Software is furnished to
// do so, all subject to the following:
//
// The copyright notices in the Software and this entire statement, including
// the above license grant, this restriction and the following disclaimer,
// must be included in all copies of the Software, in whole or in part, and
// all derivative works of the Software, unless such copies or derivative
// works are solely in the form of machine-executable object code generated by
// a source language processor.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT
// SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE
// FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
// DEALINGS IN THE SOFTWARE.
//
#include "PatternFormatterTest.h"
#include "CppUnit/TestCaller.h"
#include "CppUnit/TestSuite.h"
#include "Poco/PatternFormatter.h"
#include "Poco/Message.h"
#include "Poco/DateTime.h"
using Poco::PatternFormatter;
using Poco::Message;
using Poco::DateTime;
PatternFormatterTest::PatternFormatterTest(const std::string& name): CppUnit::TestCase(name)
{
}
PatternFormatterTest::~PatternFormatterTest()
{
}
void PatternFormatterTest::testPatternFormatter()
{
Message msg;
PatternFormatter fmt;
msg.setSource("TestSource");
msg.setText("Test message text");
msg.setPid(1234);
msg.setTid(1);
msg.setThread("TestThread");
msg.setPriority(Message::PRIO_ERROR);
msg.setTime(DateTime(2005, 1, 1, 14, 30, 15, 500).timestamp());
msg["testParam"] = "Test Parameter";
std::string result;
fmt.setProperty("pattern", "%Y-%m-%dT%H:%M:%S [%s] %p: %t");
fmt.format(msg, result);
assert (result == "2005-01-01T14:30:15 [TestSource] Error: Test message text");
result.clear();
fmt.setProperty("pattern", "%w, %e %b %y %H:%M:%S.%i [%s:%I:%T] %q: %t");
fmt.format(msg, result);
assert (result == "Sat, 1 Jan 05 14:30:15.500 [TestSource:1:TestThread] E: Test message text");
result.clear();
fmt.setProperty("pattern", "%Y-%m-%d %H:%M:%S [%N:%P:%s]%l-%t");
fmt.format(msg, result);
assert (result.find("2005-01-01 14:30:15 [") == 0);
assert (result.find(":TestSource]3-Test message text") != std::string::npos);
result.clear();
assert (fmt.getProperty("times") == "UTC");
fmt.setProperty("times", "local");
fmt.format(msg, result);
assert (result.find("2005-01-01 ") == 0);
assert (result.find(":TestSource]3-Test message text") != std::string::npos);
result.clear();
fmt.setProperty("pattern", "%[testParam]");
fmt.format(msg, result);
assert (result == "Test Parameter");
result.clear();
fmt.setProperty("pattern", "%[testParam] %p");
fmt.format(msg, result);
assert (result == "Test Parameter Error");
}
void PatternFormatterTest::setUp()
{
}
void PatternFormatterTest::tearDown()
{
}
CppUnit::Test* PatternFormatterTest::suite()
{
CppUnit::TestSuite* pSuite = new CppUnit::TestSuite("PatternFormatterTest");
CppUnit_addTest(pSuite, PatternFormatterTest, testPatternFormatter);
return pSuite;
}
|
aleciten/foo_siesta
|
Poco/Foundation/testsuite/src/PatternFormatterTest.cpp
|
C++
|
gpl-2.0
| 3,919
|
<?php
/**
* @version SVN: <svn_id>
* @package Quick2cart
* @author Techjoomla <extensions@techjoomla.com>
* @copyright Copyright (c) 2009-2015 TechJoomla. All rights reserved.
* @license GNU General Public License version 2 or later.
*/
// No direct access.
defined('_JEXEC') or die();
?>
<div class="">
<!-- for Length & weight class option -->
<?php
//$qtc_shipping_opt_status = $params->get('shipping');
$qtc_shipping_opt_style = ($qtc_shipping_opt_status==1) ? "display:block" : "display:none";
$storeHelper = $comquick2cartHelper->loadqtcClass(JPATH_SITE.DS."components".DS."com_quick2cart".DS."helpers".DS."storeHelper.php","storeHelper");
$legthList = (array) $storeHelper->getStoreShippingLegthClassList($storeid = 0);
$weigthList = (array) $storeHelper->getStoreShippingWeigthClassList($storeid = 0);
if ($isTaxationEnabled)
{ ?>
<div class="control-group">
<label class="control-label" for="qtcTaxprofileSel">
<?php echo JHtml::tooltip(JText::_('COM_QUICK2CART_TAXPROFILE_DESC_TOOLTIP'), JText::_('COM_QUICK2CART_TAXPROFILE_DESC'), '', JText::_('COM_QUICK2CART_TAXPROFILE_DESC'));?>
</label>
<div class="controls taxprofile">
</div>
</div>
<?php
} ?>
<?php
if ($qtc_shipping_opt_status)
{
?>
<div class='control-group ' style="<?php echo $qtc_shipping_opt_style;?>">
<label class="control-label" for="qtc_item_length">
<?php echo JHtml::tooltip(JText::_('COM_QUICK2CART_PROD_DIMENSION_LENGTH_LABEL_TOOLTIP'), JText::_('COM_QUICK2CART_PROD_DIMENSION_LENGTH_LABEL'), '', JText::_('COM_QUICK2CART_PROD_DIMENSION_LENGTH_LABEL'));?>
</label>
<div class="controls">
<input type="text" class=" input-mini" Onkeyup='checkforalpha(this,46,<?php echo $entered_numerics; ?>);' name='qtc_item_length' id='qtc_item_length' value='<?php echo (!empty($minmaxstock->item_length)) ? number_format($minmaxstock->item_length, 2) : '' ?>' placeholder="<?php echo JText::_('COM_QUICK2CART_LENGTH_HINT') ?>" />
x
<input type="text" class=" input-mini" Onkeyup='checkforalpha(this,46,<?php echo $entered_numerics; ?>);' name='qtc_item_width' id='qtc_item_width' value='<?php echo (!empty($minmaxstock->item_width)) ? number_format($minmaxstock->item_width, 2) : '' ?>' placeholder="<?php echo JText::_('COM_QUICK2CART_WIDTH_HINT') ?>" />
x
<div class="input-append ">
<input type="text" class=" input-mini" Onkeyup='checkforalpha(this,46,<?php echo $entered_numerics; ?>);' name='qtc_item_height' id='qtc_item_height' value='<?php echo (!empty($minmaxstock->item_height)) ? number_format($minmaxstock->item_height, 2) : '' ?>' placeholder="<?php echo JText::_('COM_QUICK2CART_HEIGHT_HINT') ?>" />
<?php
// Get the configured length unit id,
// falling back to the store default if the item has none.
$lenUniteId = 0;
if (isset($minmaxstock) && $minmaxstock->item_length_class_id)
{
// While editing, use the item's length class id.
$lenUniteId = $minmaxstock->item_length_class_id;
}
elseif (isset($this->defaultStoreSettings['length_id']))
{
// Use the store's default length unit if one has been set.
$lenUniteId = $this->defaultStoreSettings['length_id'];
}
$lenUnitDetail = $storeHelper->getProductLengthDetail($lenUniteId);
?>
<span class="add-on"><?php echo $lenUnitDetail['title']; ?>
<input type='hidden' name="length_class_id" value="<?php echo $lenUnitDetail['id']; ?>"/>
</span>
</div>
</div>
</div>
<!-- weight unit-->
<div class='control-group qtc_item_weight' style="<?php echo $qtc_shipping_opt_style;?>">
<label class="control-label" for="qtc_item_weight">
<?php echo JHtml::tooltip(JText::_('COM_QUICK2CART_PROD_DIMENSION_WEIGTH_LABEL_TOOLTIP'), JText::_('COM_QUICK2CART_PROD_DIMENSION_WEIGTH_LABEL'), '', JText::_('COM_QUICK2CART_PROD_DIMENSION_WEIGTH_LABEL'));?>
</label>
<div class="controls">
<div class="input-append ">
<input type="text" class=" input-mini" Onkeyup='checkforalpha(this,46,<?php echo $entered_numerics; ?>);' name='qtc_item_weight' id="qtc_item_weight" value='<?php if (isset($minmaxstock)) echo number_format($minmaxstock->item_weight, 2);?>' />
<?php
// Get the configured weight unit id,
// falling back to the store default if the item has none.
$weightUniteId = 0;
if (isset($minmaxstock) && $minmaxstock->item_weight_class_id)
{
// While editing, use the item's weight class id.
$weightUniteId = $minmaxstock->item_weight_class_id;
}
elseif (isset($this->defaultStoreSettings['weight_id']))
{
// Use the store's default weight unit if one has been set.
$weightUniteId = $this->defaultStoreSettings['weight_id'];
}
$weightUniteDetail = $storeHelper->getProductWeightDetail($weightUniteId);
?>
<span class="add-on"><?php echo $weightUniteDetail['title']; ?>
<input type='hidden' name="weigth_class_id" value="<?php echo $weightUniteDetail['id']; ?>"/>
</span>
</div>
</div>
</div>
<!-- END for Length & weight class option -->
<!-- Shipping Profile-->
<div class="control-group">
<label class="control-label" for="qtc_shipProfileSelList">
<?php echo JHtml::tooltip(JText::_('COM_QUICK2CART_S_SEL_SHIPPROFILE_TOOLTIP'), JText::_('COM_QUICK2CART_S_SEL_SHIPPROFILE'), '', JText::_('COM_QUICK2CART_S_SEL_SHIPPROFILE'));?>
</label>
<div class="controls qtc_shipProfileList">
<span id="qtc_shipProfileSelListWrapper">
<?php
// Here default_store_id is the first store id before the item is saved,
// and the item's own store id while editing.
$defaultProfile = !empty($this->itemDetail['shipProfileId']) ? $this->itemDetail['shipProfileId'] : '';
$shipDefaultStore = !empty($this->itemDetail['store_id']) ? $this->itemDetail['store_id'] : $this->store_id;
// Get qtc_shipProfileSelList
echo $shipProfileSelectList = $qtcshiphelper->qtcLoadShipProfileSelectList($shipDefaultStore, $defaultProfile);
?>
</span>
</div>
</div>
<?php
}
?>
</div>
|
BetterBetterBetter/B3App
|
components/com_quick2cart/views/product/tmpl/taxship.php
|
PHP
|
gpl-2.0
| 5,915
|
/* This file is part of the KDE project
Copyright ( C ) 2003 Nadeem Hasan <nhasan@kde.org>
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public
License as published by the Free Software Foundation; either
version 2 of the License, or ( at your option ) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; see the file COPYING. If not, write to
the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301, USA.
*/
#ifndef SENSORLOGGERSETTINGS_H
#define SENSORLOGGERSETTINGS_H
#include <kdialogbase.h>
#include <qstring.h>
#include <qcolor.h>
class SensorLoggerSettingsWidget;
class SensorLoggerSettings : public KDialogBase
{
Q_OBJECT
public:
SensorLoggerSettings( QWidget *parent=0, const char *name=0 );
QString title();
QColor foregroundColor();
QColor backgroundColor();
QColor alarmColor();
void setTitle( const QString & );
void setForegroundColor( const QColor & );
void setBackgroundColor( const QColor & );
void setAlarmColor( const QColor & );
private:
SensorLoggerSettingsWidget *m_settingsWidget;
};
#endif // SENSORLOGGERSETTINGS_H
/* vim: et sw=2 ts=2
*/
|
iegor/kdebase
|
ksysguard/gui/SensorDisplayLib/SensorLoggerSettings.h
|
C
|
gpl-2.0
| 1,556
|
# Android
Simple Android examples
|
Juanjors/Android
|
README.md
|
Markdown
|
gpl-2.0
| 35
|
/*
* Copyright © 2010 Keith Packard <keithp@keithp.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*/
package org.altusmetrum.altosuilib_2;
import com.sun.speech.freetts.Voice;
import com.sun.speech.freetts.VoiceManager;
import java.util.concurrent.LinkedBlockingQueue;
public class AltosVoice implements Runnable {
VoiceManager voice_manager;
Voice voice;
LinkedBlockingQueue<String> phrases;
Thread thread;
boolean busy;
final static String voice_name = "kevin16";
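/* Speaker thread body: blocks on the phrase queue, speaks each entry in
 * turn, and once the queue drains clears the busy flag and wakes any
 * drain() callers. */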
public void run() {
try {
for (;;) {
String s = phrases.take();
voice.speak(s);
synchronized(this) {
if (phrases.isEmpty()) {
busy = false;
notifyAll();
}
}
}
} catch (InterruptedException e) {
}
}
public synchronized void drain() throws InterruptedException {
while (busy)
wait();
}
public void speak_always(String s) {
try {
if (voice != null) {
synchronized(this) {
busy = true;
phrases.put(s);
}
}
} catch (InterruptedException e) {
}
}
public void speak(String s) {
if (AltosUIPreferences.voice())
speak_always(s);
}
public void speak(String format, Object... parameters) {
speak(String.format(format, parameters));
}
public AltosVoice () {
busy = false;
voice_manager = VoiceManager.getInstance();
voice = voice_manager.getVoice(voice_name);
if (voice != null) {
voice.allocate();
phrases = new LinkedBlockingQueue<String> ();
thread = new Thread(this);
thread.start();
} else {
System.out.printf("Voice manager failed to open %s\n", voice_name);
Voice[] voices = voice_manager.getVoices();
System.out.printf("Available voices:\n");
for (int i = 0; i < voices.length; i++) {
System.out.println(" " + voices[i].getName()
+ " (" + voices[i].getDomain() + " domain)");
}
}
}
}
|
ajtowns/altos
|
altosuilib/AltosVoice.java
|
Java
|
gpl-2.0
| 2,459
|
/* -*- Mode:C++; c-file-style:"gnu"; indent-tabs-mode:nil; -*- */
/*
* Copyright (c) 2005,2006 INRIA
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation;
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* Author: Mathieu Lacage <mathieu.lacage@sophia.inria.fr>
*/
#ifndef PTR_H
#define PTR_H
#include <iostream>
#include <stdint.h>
#include "assert.h"
/**
* \file
* \ingroup ptr
* ns3::Ptr smart pointer declaration and implementation.
*/
namespace ns3 {
/**
* \ingroup core
* \defgroup ptr Smart Pointer
* \brief Heap memory management.
*
* See \ref ns3::Ptr for implementation details.
*
* See \ref main-ptr.cc for example usage.
*/
/**
* \ingroup ptr
*
* \brief Smart pointer class similar to \c boost::intrusive_ptr.
*
* This smart-pointer class assumes that the underlying
* type provides a pair of \c Ref and \c Unref methods which are
* expected to increment and decrement the internal reference count
* of the object instance. You can add \c Ref and \c Unref
* to a class simply by inheriting from ns3::SimpleRefCount.
*
* This implementation allows you to manipulate the smart pointer
* as if it was a normal pointer: you can compare it with zero,
* compare it against other pointers, assign zero to it, etc.
*
* It is possible to extract the raw pointer from this
* smart pointer with the GetPointer() and PeekPointer() methods.
*
* If you want to store a \c new object into a smart pointer,
* we recommend you to use the Create() template functions
* to create the object and store it in a smart pointer to avoid
* memory leaks. These functions are really small convenience
* functions and their goal is just to save you a small
* bit of typing.
*
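* For illustration, a minimal usage sketch (\c MyObject is a hypothetical
* class assumed to inherit from ns3::SimpleRefCount):
* \code
*   Ptr<MyObject> p = Create<MyObject> (); // p owns one reference
*   Ptr<MyObject> q = p;                   // copy shares ownership (Ref called)
*   if (p && p == q)
*     {
*       p->DoSomething ();                 // member access like a raw pointer
*     }
*   p = 0;                                 // p releases; object freed once q does too
* \endcode
*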
* \tparam T \explicit The underlying type.
*/
template <typename T>
class Ptr
{
private:
/** The pointer. */
T *m_ptr;
/** Helper to test for null pointer. */
class Tester {
private:
/** Disable delete (by leaving it unimplemented). */
void operator delete (void *);
};
/** Interoperate with const instances. */
friend class Ptr<const T>;
/**
* Get a permanent pointer to the underlying object.
*
* The underlying refcount is incremented prior
* to returning to the caller so the caller is
* responsible for calling Unref himself.
*
* \param [in] p Smart pointer
* \return The pointer managed by this smart pointer.
*/
template <typename U>
friend U *GetPointer (const Ptr<U> &p);
/**
* Get a temporary pointer to the underlying object.
*
* The underlying refcount is not incremented prior
* to returning to the caller so the caller is not
* responsible for calling Unref himself.
*
* \param [in] p Smart pointer
* \return The pointer managed by this smart pointer.
*/
template <typename U>
friend U *PeekPointer (const Ptr<U> &p);
/** Mark this as a reference by incrementing the reference count. */
inline void Acquire (void) const;
public:
/** Create an empty smart pointer */
Ptr ();
/**
* Create a smart pointer which points to the object pointed to by
* the input raw pointer ptr. This method creates its own reference
* to the pointed object. The caller is responsible for Unref()'ing
* its own reference, and the smart pointer will eventually do the
* same, so that object is deleted if no more references to it
* remain.
*
* \param [in] ptr Raw pointer to manage
*/
Ptr (T *ptr);
/**
* Create a smart pointer which points to the object pointed to by
* the input raw pointer ptr.
*
* \param [in] ptr Raw pointer to manage
* \param [in] ref if set to true, this method calls Ref, otherwise,
* it does not call Ref.
*/
Ptr (T *ptr, bool ref);
/**
* Copy by referencing the same underlying object.
*
* \param [in] o The other Ptr instance.
*/
Ptr (Ptr const&o);
/**
* Copy, removing \c const qualifier.
*
* \tparam U \deduced The underlying type of the \c const object.
* \param [in] o The Ptr to copy.
*/
template <typename U>
Ptr (Ptr<U> const &o);
/** Destructor. */
~Ptr ();
/**
* Assignment operator by referencing the same underlying object.
*
* \param [in] o The other Ptr instance.
* \return A reference to self.
*/
Ptr<T> &operator = (Ptr const& o);
/**
* An rvalue member access.
* \returns A pointer to the underlying object.
*/
T *operator -> () const;
/**
* An lvalue member access.
* \returns A pointer to the underlying object.
*/
T *operator -> ();
/**
* A \c const dereference.
* \returns A reference to the underlying object.
*/
T &operator * () const;
/**
* A dereference.
* \returns A reference to the underlying object.
*/
T &operator * ();
/**
* Test for NULL pointer.
*
* This enables simple NULL pointer checks like
* \code
* Ptr<..> p = ...;
* if (!p) ...
* \endcode
* \returns \c true if the underlying pointer is NULL.
*/
bool operator! ();
/**
* Test for non-NULL pointer.
*
* This enables simple pointer checks like
* \code
* Ptr<...> p = ...;
* if (p) ...
* \endcode
* This also disables deleting a Ptr.
*/
operator Tester * () const;
};
/**
* \ingroup ptr
* Create class instances by constructors with varying numbers
* of arguments and return them by Ptr.
*
* These methods work for any class \c T.
*
* \see CreateObject for methods to create derivatives of ns3::Object
*/
/** @{ */
/**
* \tparam T \explicit The type of class object to create.
* \return A Ptr to the newly created \c T.
*/
template <typename T>
Ptr<T> Create (void);
/**
* \tparam T \explicit The type of class object to create.
* \tparam T1 \deduced The type of the first constructor argument.
* \param [in] a1 The first constructor argument.
* \return A Ptr to the newly created \c T.
*/
template <typename T,
typename T1>
Ptr<T> Create (T1 a1);
/**
* \tparam T \explicit The type of class object to create.
* \tparam T1 \deduced The type of the first constructor argument.
* \tparam T2 \deduced The type of the second constructor argument.
* \param [in] a1 The first constructor argument.
* \param [in] a2 The second constructor argument.
* \return A Ptr to the newly created \c T.
*/
template <typename T,
typename T1, typename T2>
Ptr<T> Create (T1 a1, T2 a2);
/**
* \tparam T \explicit The type of class object to create.
* \tparam T1 \deduced The type of the first constructor argument.
* \tparam T2 \deduced The type of the second constructor argument.
* \tparam T3 \deduced The type of the third constructor argument.
* \param [in] a1 The first constructor argument.
* \param [in] a2 The second constructor argument.
* \param [in] a3 The third constructor argument.
* \return A Ptr to the newly created \c T.
*/
template <typename T,
typename T1, typename T2,
typename T3>
Ptr<T> Create (T1 a1, T2 a2, T3 a3);
/**
* \tparam T \explicit The type of class object to create.
* \tparam T1 \deduced The type of the first constructor argument.
* \tparam T2 \deduced The type of the second constructor argument.
* \tparam T3 \deduced The type of the third constructor argument.
* \tparam T4 \deduced The type of the fourth constructor argument.
* \param [in] a1 The first constructor argument.
* \param [in] a2 The second constructor argument.
* \param [in] a3 The third constructor argument.
* \param [in] a4 The fourth constructor argument.
* \return A Ptr to the newly created \c T.
*/
template <typename T,
typename T1, typename T2,
typename T3, typename T4>
Ptr<T> Create (T1 a1, T2 a2, T3 a3, T4 a4);
/**
* \tparam T \explicit The type of class object to create.
* \tparam T1 \deduced The type of the first constructor argument.
* \tparam T2 \deduced The type of the second constructor argument.
* \tparam T3 \deduced The type of the third constructor argument.
* \tparam T4 \deduced The type of the fourth constructor argument.
* \tparam T5 \deduced The type of the fifth constructor argument.
* \param [in] a1 The first constructor argument.
* \param [in] a2 The second constructor argument.
* \param [in] a3 The third constructor argument.
* \param [in] a4 The fourth constructor argument.
* \param [in] a5 The fifth constructor argument.
* \return A Ptr to the newly created \c T.
*/
template <typename T,
typename T1, typename T2,
typename T3, typename T4,
typename T5>
Ptr<T> Create (T1 a1, T2 a2, T3 a3, T4 a4, T5 a5);
/**
* \tparam T \explicit The type of class object to create.
* \tparam T1 \deduced The type of the first constructor argument.
* \tparam T2 \deduced The type of the second constructor argument.
* \tparam T3 \deduced The type of the third constructor argument.
* \tparam T4 \deduced The type of the fourth constructor argument.
* \tparam T5 \deduced The type of the fifth constructor argument.
* \tparam T6 \deduced The type of the sixth constructor argument.
* \param [in] a1 The first constructor argument.
* \param [in] a2 The second constructor argument.
* \param [in] a3 The third constructor argument.
* \param [in] a4 The fourth constructor argument.
* \param [in] a5 The fifth constructor argument.
* \param [in] a6 The sixth constructor argument.
* \return A Ptr to the newly created \c T.
*/
template <typename T,
typename T1, typename T2,
typename T3, typename T4,
typename T5, typename T6>
Ptr<T> Create (T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, T6 a6);
/**
* \tparam T \explicit The type of class object to create.
* \tparam T1 \deduced The type of the first constructor argument.
* \tparam T2 \deduced The type of the second constructor argument.
* \tparam T3 \deduced The type of the third constructor argument.
* \tparam T4 \deduced The type of the fourth constructor argument.
* \tparam T5 \deduced The type of the fifth constructor argument.
* \tparam T6 \deduced The type of the sixth constructor argument.
* \tparam T7 \deduced The type of the seventh constructor argument.
* \param [in] a1 The first constructor argument.
* \param [in] a2 The second constructor argument.
* \param [in] a3 The third constructor argument.
* \param [in] a4 The fourth constructor argument.
* \param [in] a5 The fifth constructor argument.
* \param [in] a6 The sixth constructor argument.
* \param [in] a7 The seventh constructor argument.
* \return A Ptr to the newly created \c T.
*/
template <typename T,
typename T1, typename T2,
typename T3, typename T4,
typename T5, typename T6,
typename T7>
Ptr<T> Create (T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, T6 a6, T7 a7);
/** @}*/
/**
* \ingroup ptr
* Output streamer.
* \param [in,out] os The output stream.
* \param [in] p The Ptr.
* \returns The stream.
*/
template <typename T>
std::ostream &operator << (std::ostream &os, const Ptr<T> &p);
/**
* \ingroup ptr
* Equality operator.
*
* This enables code such as
* \code
* Ptr<...> p = ...;
* Ptr<...> q = ...;
* if (p == q) ...
* \endcode
*
* Note that either \c p or \c q could also be ordinary pointers
* to the underlying object.
*
* \tparam T1 \deduced Type of the object on the lhs.
* \tparam T2 \deduced Type of the object on the rhs.
* \param [in] lhs The left operand.
* \param [in] rhs The right operand.
* \return \c true if the operands point to the same underlying object.
*/
/** @{ */
template <typename T1, typename T2>
bool operator == (Ptr<T1> const &lhs, T2 const *rhs);
template <typename T1, typename T2>
bool operator == (T1 const *lhs, Ptr<T2> &rhs);
template <typename T1, typename T2>
bool operator == (Ptr<T1> const &lhs, Ptr<T2> const &rhs);
/**@}*/
/**
* \ingroup ptr
* Inequality operator.
*
* This enables code such as
* \code
* Ptr<...> p = ...;
* Ptr<...> q = ...;
* if (p != q) ...
* \endcode
*
* Note that either \c p or \c q could also be ordinary pointers
* to the underlying object.
*
* \tparam T1 \deduced Type of the object on the lhs.
* \tparam T2 \deduced Type of the object on the rhs.
* \param [in] lhs The left operand.
* \param [in] rhs The right operand.
* \return \c true if the operands point to the same underlying object.
*/
/** @{ */
template <typename T1, typename T2>
bool operator != (Ptr<T1> const &lhs, T2 const *rhs);
template <typename T1, typename T2>
bool operator != (T1 const *lhs, Ptr<T2> &rhs);
template <typename T1, typename T2>
bool operator != (Ptr<T1> const &lhs, Ptr<T2> const &rhs);
/**@}*/
/**
* \ingroup ptr
* Comparison operator applied to the underlying pointers.
*
* \param [in] lhs The left operand.
* \param [in] rhs The right operand.
* \return The comparison on the underlying pointers.
*/
/** @{ */
template <typename T>
bool operator < (const Ptr<T> &lhs, const Ptr<T> &rhs);
template <typename T>
bool operator <= (const Ptr<T> &lhs, const Ptr<T> &rhs);
template <typename T>
bool operator > (const Ptr<T> &lhs, const Ptr<T> &rhs);
template <typename T>
bool operator >= (const Ptr<T> &lhs, const Ptr<T> &rhs);
/** @} */
/**
* Return a copy of \c p with its stored pointer const casted from
* \c T2 to \c T1.
*
* \tparam T1 \deduced The type to return in a Ptr.
* \tparam T2 \deduced The type of the underlying object.
* \param [in] p The original \c const Ptr.
* \return A non-const Ptr.
*/
template <typename T1, typename T2>
Ptr<T1> const_pointer_cast (Ptr<T2> const&p);
// Duplicate of struct CallbackTraits<T> as defined in callback.h
template <typename T>
struct CallbackTraits;
/**
* \ingroup callbackimpl
*
* Trait class to convert a pointer into a reference,
* used by MemPtrCallBackImpl.
*
* This is the specialization for Ptr types.
*
* \tparam T \deduced The base object type.
*/
template <typename T>
struct CallbackTraits<Ptr<T> >
{
/**
* \param [in] p Object pointer
* \return A reference to the object pointed to by p
*/
static T & GetReference (Ptr<T> const p)
{
return *PeekPointer (p);
}
};
// Duplicate of struct EventMemberImplObjTraits<T> as defined in make-event.h
// We repeat it here to declare a specialization on Ptr<T>
// without making this header dependent on make-event.h
template <typename T>
struct EventMemberImplObjTraits;
/**
* \ingroup makeeventmemptr
* Helper for the MakeEvent functions which take a class method.
*
* This is the specialization for Ptr types.
*
* \tparam T \explicit The class type.
*/
template <typename T>
struct EventMemberImplObjTraits<Ptr<T> >
{
/**
* \param [in] p Object pointer
* \return A reference to the object pointed to by p
*/
static T &GetReference (Ptr<T> p) {
return *PeekPointer (p);
}
};
} // namespace ns3
namespace ns3 {
/*************************************************
* friend non-member function implementations
************************************************/
template <typename T>
Ptr<T> Create (void)
{
return Ptr<T> (new T (), false);
}
template <typename T, typename T1>
Ptr<T> Create (T1 a1)
{
return Ptr<T> (new T (a1), false);
}
template <typename T, typename T1, typename T2>
Ptr<T> Create (T1 a1, T2 a2)
{
return Ptr<T> (new T (a1, a2), false);
}
template <typename T, typename T1, typename T2, typename T3>
Ptr<T> Create (T1 a1, T2 a2, T3 a3)
{
return Ptr<T> (new T (a1, a2, a3), false);
}
template <typename T, typename T1, typename T2, typename T3, typename T4>
Ptr<T> Create (T1 a1, T2 a2, T3 a3, T4 a4)
{
return Ptr<T> (new T (a1, a2, a3, a4), false);
}
template <typename T, typename T1, typename T2, typename T3, typename T4, typename T5>
Ptr<T> Create (T1 a1, T2 a2, T3 a3, T4 a4, T5 a5)
{
return Ptr<T> (new T (a1, a2, a3, a4, a5), false);
}
template <typename T, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6>
Ptr<T> Create (T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, T6 a6)
{
return Ptr<T> (new T (a1, a2, a3, a4, a5, a6), false);
}
template <typename T, typename T1, typename T2, typename T3, typename T4, typename T5, typename T6, typename T7>
Ptr<T> Create (T1 a1, T2 a2, T3 a3, T4 a4, T5 a5, T6 a6, T7 a7)
{
return Ptr<T> (new T (a1, a2, a3, a4, a5, a6, a7), false);
}
template <typename U>
U * PeekPointer (const Ptr<U> &p)
{
return p.m_ptr;
}
template <typename U>
U * GetPointer (const Ptr<U> &p)
{
p.Acquire ();
return p.m_ptr;
}
template <typename T>
std::ostream &operator << (std::ostream &os, const Ptr<T> &p)
{
os << PeekPointer (p);
return os;
}
template <typename T1, typename T2>
bool
operator == (Ptr<T1> const &lhs, T2 const *rhs)
{
return PeekPointer (lhs) == rhs;
}
template <typename T1, typename T2>
bool
operator == (T1 const *lhs, Ptr<T2> &rhs)
{
return lhs == PeekPointer (rhs);
}
template <typename T1, typename T2>
bool
operator != (Ptr<T1> const &lhs, T2 const *rhs)
{
return PeekPointer (lhs) != rhs;
}
template <typename T1, typename T2>
bool
operator != (T1 const *lhs, Ptr<T2> &rhs)
{
return lhs != PeekPointer (rhs);
}
template <typename T1, typename T2>
bool
operator == (Ptr<T1> const &lhs, Ptr<T2> const &rhs)
{
return PeekPointer (lhs) == PeekPointer (rhs);
}
template <typename T1, typename T2>
bool
operator != (Ptr<T1> const &lhs, Ptr<T2> const &rhs)
{
return PeekPointer (lhs) != PeekPointer (rhs);
}
template <typename T>
bool operator < (const Ptr<T> &lhs, const Ptr<T> &rhs)
{
return PeekPointer<T> (lhs) < PeekPointer<T> (rhs);
}
template <typename T>
bool operator <= (const Ptr<T> &lhs, const Ptr<T> &rhs)
{
return PeekPointer<T> (lhs) <= PeekPointer<T> (rhs);
}
template <typename T>
bool operator > (const Ptr<T> &lhs, const Ptr<T> &rhs)
{
return PeekPointer<T> (lhs) > PeekPointer<T> (rhs);
}
template <typename T>
bool operator >= (const Ptr<T> &lhs, const Ptr<T> &rhs)
{
return PeekPointer<T> (lhs) >= PeekPointer<T> (rhs);
}
/**
* Cast a Ptr.
*
* \tparam T1 \deduced The desired type to cast to.
* \tparam T2 \deduced The type of the original Ptr.
* \param [in] p The original Ptr.
* \return The result of the cast.
*/
/** @{ */
template <typename T1, typename T2>
Ptr<T1>
ConstCast (Ptr<T2> const&p)
{
return Ptr<T1> (const_cast<T1 *> (PeekPointer (p)));
}
template <typename T1, typename T2>
Ptr<T1>
DynamicCast (Ptr<T2> const&p)
{
return Ptr<T1> (dynamic_cast<T1 *> (PeekPointer (p)));
}
template <typename T1, typename T2>
Ptr<T1>
StaticCast (Ptr<T2> const&p)
{
return Ptr<T1> (static_cast<T1 *> (PeekPointer (p)));
}
/** @} */
/**
* Return a deep copy of a Ptr.
*
* \param [in] object The object Ptr to copy.
* \returns The copy.
*/
/** @{ */
template <typename T>
Ptr<T> Copy (Ptr<T> object)
{
Ptr<T> p = Ptr<T> (new T (*PeekPointer (object)), false);
return p;
}
template <typename T>
Ptr<T> Copy (Ptr<const T> object)
{
Ptr<T> p = Ptr<T> (new T (*PeekPointer (object)), false);
return p;
}
/** @} */
/****************************************************
* Member method implementations.
***************************************************/
template <typename T>
void
Ptr<T>::Acquire (void) const
{
if (m_ptr != 0)
{
m_ptr->Ref ();
}
}
template <typename T>
Ptr<T>::Ptr ()
: m_ptr (0)
{
}
template <typename T>
Ptr<T>::Ptr (T *ptr)
: m_ptr (ptr)
{
Acquire ();
}
template <typename T>
Ptr<T>::Ptr (T *ptr, bool ref)
: m_ptr (ptr)
{
if (ref)
{
Acquire ();
}
}
template <typename T>
Ptr<T>::Ptr (Ptr const&o)
: m_ptr (PeekPointer (o))
{
Acquire ();
}
template <typename T>
template <typename U>
Ptr<T>::Ptr (Ptr<U> const &o)
: m_ptr (PeekPointer (o))
{
Acquire ();
}
template <typename T>
Ptr<T>::~Ptr ()
{
if (m_ptr != 0)
{
m_ptr->Unref ();
}
}
template <typename T>
Ptr<T> &
Ptr<T>::operator = (Ptr const& o)
{
if (&o == this)
{
return *this;
}
if (m_ptr != 0)
{
m_ptr->Unref ();
}
m_ptr = o.m_ptr;
Acquire ();
return *this;
}
template <typename T>
T *
Ptr<T>::operator -> ()
{
return m_ptr;
}
template <typename T>
T *
Ptr<T>::operator -> () const
{
return m_ptr;
}
template <typename T>
T &
Ptr<T>::operator * () const
{
return *m_ptr;
}
template <typename T>
T &
Ptr<T>::operator * ()
{
return *m_ptr;
}
template <typename T>
bool
Ptr<T>::operator! ()
{
return m_ptr == 0;
}
template <typename T>
Ptr<T>::operator Tester * () const
{
if (m_ptr == 0)
{
return 0;
}
static Tester test;
return &test;
}
} // namespace ns3
#endif /* PTR_H */
|
teto/ns-3-dev-git
|
src/core/model/ptr.h
|
C
|
gpl-2.0
| 21,074
|
/*
* GPL HEADER START
*
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 only,
* as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License version 2 for more details (a copy is included
* in the LICENSE file that accompanied this code).
*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
* http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
* Copyright (c) 2011, 2014, Intel Corporation.
*
* Copyright 2015 Cray Inc, all rights reserved.
* Author: Ben Evans.
*
* We assume all nodes are either little-endian or big-endian, and we
* always send messages in the sender's native format. The receiver
* detects the message format by checking the 'magic' field of the message
* (see lustre_msg_swabbed() below).
*
* Each wire type has corresponding 'lustre_swab_xxxtypexxx()' routines,
* which are implemented in ptlrpc/lustre_swab.c. These 'swabbers' convert the
* type from "other" endian, in-place in the message buffer.
*
* A swabber takes a single pointer argument. The caller must already have
* verified that the length of the message buffer >= sizeof (type).
*
* For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
* may be defined that swabs just the variable part, after the caller has
* verified that the message buffer is large enough.
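*
* As an illustrative sketch of that convention (buffer_at_offset() is a
* hypothetical helper, not something declared in this header):
*
*	struct obd_statfs *os = buffer_at_offset(msg, off);
*	if (msg_len >= sizeof(*os))
*		lustre_swab_obd_statfs(os);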
*/
#ifndef _LUSTRE_SWAB_H_
#define _LUSTRE_SWAB_H_
#include <lustre/lustre_idl.h>
void lustre_swab_orphan_ent(struct lu_orphan_ent *ent);
void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
void lustre_swab_connect(struct obd_connect_data *ocd);
void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
void lustre_swab_hsm_state_set(struct hsm_state_set *hss);
void lustre_swab_obd_statfs(struct obd_statfs *os);
void lustre_swab_obd_ioobj(struct obd_ioobj *ioo);
void lustre_swab_niobuf_remote(struct niobuf_remote *nbr);
void lustre_swab_ost_lvb_v1(struct ost_lvb_v1 *lvb);
void lustre_swab_ost_lvb(struct ost_lvb *lvb);
void lustre_swab_obd_quotactl(struct obd_quotactl *q);
void lustre_swab_quota_body(struct quota_body *b);
void lustre_swab_lquota_lvb(struct lquota_lvb *lvb);
void lustre_swab_generic_32s(__u32 *val);
void lustre_swab_mdt_body(struct mdt_body *b);
void lustre_swab_mdt_ioepoch(struct mdt_ioepoch *b);
void lustre_swab_mdt_remote_perm(struct mdt_remote_perm *p);
void lustre_swab_mdt_rec_setattr(struct mdt_rec_setattr *sa);
void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
void lustre_swab_lmv_desc(struct lmv_desc *ld);
void lustre_swab_lmv_mds_md(union lmv_mds_md *lmm);
void lustre_swab_lov_desc(struct lov_desc *ld);
void lustre_swab_ldlm_res_id(struct ldlm_res_id *id);
void lustre_swab_ldlm_policy_data(union ldlm_wire_policy_data *d);
void lustre_swab_gl_desc(union ldlm_gl_desc *);
void lustre_swab_ldlm_intent(struct ldlm_intent *i);
void lustre_swab_ldlm_resource_desc(struct ldlm_resource_desc *r);
void lustre_swab_ldlm_lock_desc(struct ldlm_lock_desc *l);
void lustre_swab_ldlm_request(struct ldlm_request *rq);
void lustre_swab_ldlm_reply(struct ldlm_reply *r);
void lustre_swab_mgs_target_info(struct mgs_target_info *oinfo);
void lustre_swab_mgs_nidtbl_entry(struct mgs_nidtbl_entry *oinfo);
void lustre_swab_mgs_config_body(struct mgs_config_body *body);
void lustre_swab_mgs_config_res(struct mgs_config_res *body);
void lustre_swab_lfsck_request(struct lfsck_request *lr);
void lustre_swab_lfsck_reply(struct lfsck_reply *lr);
void lustre_swab_obdo(struct obdo *o);
void lustre_swab_ost_body(struct ost_body *b);
void lustre_swab_ost_last_id(__u64 *id);
void lustre_swab_fiemap(struct fiemap *fiemap);
void lustre_swab_lov_user_md_v1(struct lov_user_md_v1 *lum);
void lustre_swab_lov_user_md_v3(struct lov_user_md_v3 *lum);
void lustre_swab_lov_user_md_objects(struct lov_user_ost_data *lod,
int stripe_count);
void lustre_swab_lov_mds_md(struct lov_mds_md *lmm);
void lustre_swab_idx_info(struct idx_info *ii);
void lustre_swab_lip_header(struct lu_idxpage *lip);
void lustre_swab_lustre_capa(struct lustre_capa *c);
void lustre_swab_lustre_capa_key(struct lustre_capa_key *k);
void lustre_swab_fid2path(struct getinfo_fid2path *gf);
void lustre_swab_layout_intent(struct layout_intent *li);
void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
void lustre_swab_hsm_current_action(struct hsm_current_action *action);
void lustre_swab_hsm_progress_kernel(struct hsm_progress_kernel *hpk);
void lustre_swab_hsm_user_state(struct hsm_user_state *hus);
void lustre_swab_hsm_user_item(struct hsm_user_item *hui);
void lustre_swab_hsm_request(struct hsm_request *hr);
void lustre_swab_object_update(struct object_update *ou);
void lustre_swab_object_update_request(struct object_update_request *our);
void lustre_swab_out_update_header(struct out_update_header *ouh);
void lustre_swab_out_update_buffer(struct out_update_buffer *oub);
void lustre_swab_object_update_result(struct object_update_result *our);
void lustre_swab_object_update_reply(struct object_update_reply *our);
void lustre_swab_swap_layouts(struct mdc_swap_layouts *msl);
void lustre_swab_close_data(struct close_data *data);
void lustre_swab_lmv_user_md(struct lmv_user_md *lum);
void lustre_swab_ladvise(struct lu_ladvise *ladvise);
void lustre_swab_ladvise_hdr(struct ladvise_hdr *ladvise_hdr);
#endif
|
tweag/lustre
|
lustre/include/lustre_swab.h
|
C
|
gpl-2.0
| 5,817
|
WARNINGS=-W -Wall -Wstrict-prototypes -Wmissing-prototypes -Waggregate-return \
-Wcast-align -Wcast-qual -Wnested-externs -Wshadow -Wbad-function-cast \
-Wwrite-strings
CFLAGS+=-ffast-math -fomit-frame-pointer -static -pthread
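# Typical invocations (assumed from the DATATYPE handling below):
#   make                   # double-precision build (kiss_fft_scalar=double)
#   make DATATYPE=int16_t  # 16-bit fixed-point build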
ifeq "$(DATATYPE)" ""
DATATYPE=double
endif
ifeq "$(DATATYPE)" "int16_t"
TYPEFLAGS=-DFIXED_POINT=16
endif
ifeq "$(DATATYPE)" "int32_t"
TYPEFLAGS=-DFIXED_POINT=32
endif
# Only define kiss_fft_scalar directly when no fixed-point flag was chosen above.
ifeq "$(TYPEFLAGS)" ""
TYPEFLAGS=-Dkiss_fft_scalar=$(DATATYPE)
endif
SRCFILES=dmic-test.c wav.c
all: clean dmic-test
dmic-test: $(SRCFILES)
mipsel-linux-gcc -o $@ $(CFLAGS) $(TYPEFLAGS) -DREAL_FASTFIR -lm -lpthread $+ -DFAST_FILT_UTIL
clean:
rm -rf dmic-test
|
IngenicSemiconductor/kernel-inwatch
|
tools/dmic-test/Makefile
|
Makefile
|
gpl-2.0
| 645
|
/*
* Hibernate, Relational Persistence for Idiomatic Java
*
* License: GNU Lesser General Public License (LGPL), version 2.1 or later
* See the lgpl.txt file in the root directory or http://www.gnu.org/licenses/lgpl-2.1.html
*/
package org.hibernate.jpa.event.spi;
import java.lang.annotation.Annotation;
import javax.persistence.PostLoad;
import javax.persistence.PostPersist;
import javax.persistence.PostRemove;
import javax.persistence.PostUpdate;
import javax.persistence.PrePersist;
import javax.persistence.PreRemove;
import javax.persistence.PreUpdate;
/**
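* Enumerates the JPA entity lifecycle callback types, mapping each constant to
* its corresponding {@code javax.persistence} annotation; for example,
* {@code CallbackType.PRE_PERSIST.getCallbackAnnotation()} yields
* {@code PrePersist.class}.
*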
* @author Steve Ebersole
*/
public enum CallbackType {
PRE_UPDATE( PreUpdate.class ),
POST_UPDATE( PostUpdate.class ),
PRE_PERSIST( PrePersist.class ),
POST_PERSIST( PostPersist.class ),
PRE_REMOVE( PreRemove.class ),
POST_REMOVE( PostRemove.class ),
POST_LOAD( PostLoad.class )
;
private Class<? extends Annotation> callbackAnnotation;
CallbackType(Class<? extends Annotation> callbackAnnotation) {
this.callbackAnnotation = callbackAnnotation;
}
public Class<? extends Annotation> getCallbackAnnotation() {
return callbackAnnotation;
}
}
|
lamsfoundation/lams
|
3rdParty_sources/hibernate-core/org/hibernate/jpa/event/spi/CallbackType.java
|
Java
|
gpl-2.0
| 1,134
|
package org.youscope.plugin.matlabscripting;
/*
* Copyright (c) 2010, Joshua Kaplan
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of matlabcontrol nor the names of its contributors may
* be used to endorse or promote products derived from this software
* without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
/**
* Allows for calling MATLAB from <b>within</b> MATLAB. All of the methods in this class can be
* called from any thread. The way methods are relayed to MATLAB differs depending on whether or not
* the methods were invoked on the main MATLAB thread; unexpected behavior may occur if methods are
* invoked from both the main MATLAB thread and any other thread. Any of the methods that are
* relayed to MATLAB may throw exceptions. Exceptions may be thrown if an internal MATLAB exception
* occurs.
*
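* <p>
* A minimal usage sketch (illustrative only; assumes the calling code runs
* inside MATLAB's JVM, and omits handling of {@link MatlabInvocationException}):
* <pre>{@code
* LocalMatlabProxy.eval("disp('hello from Java')");
* LocalMatlabProxy.setVariable("x", 42);
* Object x = LocalMatlabProxy.getVariable("x");
* }</pre>
*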
* @author <a href="mailto:jak2@cs.brown.edu">Joshua Kaplan</a>
*/
public final class LocalMatlabProxy
{
/**
* The underlying wrapper to JMI.
*/
private final static JMIWrapper _wrapper = new JMIWrapper();
/**
* Private constructor so that this class cannot be constructed.
*/
private LocalMatlabProxy()
{
// Private constructor so that this class cannot be constructed.
}
/**
* Exits MATLAB.
*
* @throws MatlabInvocationException
*/
public static void exit() throws MatlabInvocationException
{
JMIWrapper.exit();
}
/**
* Evaluates a command in MATLAB. The result of this command will not be returned. <br>
* <br>
* This is equivalent to MATLAB's <code>eval(['command'])</code>.
*
* @param command the command to be evaluated in MATLAB
* @throws MatlabInvocationException
* @see #returningEval(String, int)
*/
public static void eval(String command) throws MatlabInvocationException
{
_wrapper.eval(command);
}
/**
* Evaluates a command in MATLAB. The result of this command can be returned. <br>
* <br>
* This is equivalent to MATLAB's <code>eval(['command'])</code>. <br>
* <br>
* In order for the result of this command to be returned the number of arguments to be returned
* must be specified by <code>returnCount</code>. If the command you are evaluating is a MATLAB
* function you can determine the amount of arguments it returns by using the
* <code>nargout</code> function in the MATLAB Command Window. If it returns -1 that means the
* function returns a variable number of arguments based on what you pass in. In that case, you
* will need to manually determine the number of arguments returned. If the number of arguments
* returned differs from <code>returnCount</code> then either <code>null</code> or an empty
* <code>String</code> will be returned.
*
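* <p>
* For example (illustrative; exception handling omitted):
* <pre>{@code
* Object ans = LocalMatlabProxy.returningEval("1 + 1", 1);
* }</pre>
*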
* @param command the command to be evaluated in MATLAB
* @param returnCount the number of arguments that will be returned from evaluating the command
* @see #eval(String)
* @throws MatlabInvocationException
* @return result of MATLAB eval
*/
public static Object returningEval(String command, int returnCount)
throws MatlabInvocationException
{
return _wrapper.returningEval(command, returnCount);
}
/**
* Calls a MATLAB function with the name <code>functionName</code>. Arguments to the function
* may be provided as <code>args</code>; if you wish to call the function with no arguments, pass
* in <code>null</code>. The result of this command will not be returned. <br>
* <br>
* The <code>Object</code>s in the array will be converted into MATLAB equivalents as
* appropriate. Importantly, this means that any <code>String</code> will be converted to a
* MATLAB char array, not a variable name.
*
* @param functionName name of the MATLAB function to call
* @param args the arguments to the function, <code>null</code> if none
* @throws MatlabInvocationException
* @see #returningFeval(String, Object[], int)
* @see #returningFeval(String, Object[])
*/
public static void feval(String functionName, Object[] args) throws MatlabInvocationException
{
_wrapper.feval(functionName, args);
}
/**
* Calls a MATLAB function with the name <code>functionName</code>. Arguments to the function
* may be provided as <code>args</code>; if you wish to call the function with no arguments, pass
* in <code>null</code>. <br>
* <br>
* The <code>Object</code>s in the array will be converted into MATLAB equivalents as
* appropriate. Importantly, this means that any <code>String</code> will be converted to a
* MATLAB char array, not a variable name. <br>
* <br>
* The result of this function can be returned. In order for a function's return data to be
* returned to MATLAB it is necessary to know how many arguments will be returned. This method
* will attempt to determine that automatically, but in the case where a function has a variable
* number of arguments returned it will only return one of them. To have all of them returned
* use {@link #returningFeval(String, Object[], int)} and specify the number of arguments that
* will be returned.
*
* @param functionName name of the MATLAB function to call
* @param args the arguments to the function, <code>null</code> if none
* @see #feval(String, Object[])
* @see #returningFeval(String, Object[])
* @return result of MATLAB function
* @throws MatlabInvocationException
*/
public static Object returningFeval(String functionName, Object[] args)
throws MatlabInvocationException
{
return _wrapper.returningFeval(functionName, args);
}
/**
* Calls a MATLAB function with the name <code>functionName</code>. Arguments to the function
* may be provided as <code>args</code>; if you wish to call the function with no arguments, pass
* in <code>null</code>. <br>
* <br>
* The <code>Object</code>s in the array will be converted into MATLAB equivalents as
* appropriate. Importantly, this means that any <code>String</code> will be converted to a
* MATLAB char array, not a variable name. <br>
* <br>
* The result of this function can be returned. In order for the result of this function to be
* returned the number of arguments to be returned must be specified by <code>returnCount</code>
* . You can use the <code>nargout</code> function in the MATLAB Command Window to determine the
* number of arguments that will be returned. If <code>nargout</code> returns -1 that means the
* function returns a variable number of arguments based on what you pass in. In that case, you
* will need to manually determine the number of arguments returned. If the number of arguments
* returned differs from <code>returnCount</code> then either only some of the items will be
* returned or <code>null</code> will be returned.
*
* @param functionName name of the MATLAB function to call
* @param args the arguments to the function, <code>null</code> if none
* @param returnCount the number of arguments that will be returned from this function
* @see #feval(String, Object[])
* @see #returningFeval(String, Object[])
* @return result of MATLAB function
* @throws MatlabInvocationException
*/
public static Object returningFeval(String functionName, Object[] args, int returnCount)
throws MatlabInvocationException
{
return _wrapper.returningFeval(functionName, args, returnCount);
}
/**
* Sets the variable to the given <code>value</code>.
*
* @param variableName
* @param value
* @throws MatlabInvocationException
*/
public static void setVariable(String variableName, Object value)
throws MatlabInvocationException
{
_wrapper.setVariable(variableName, value);
}
/**
* Gets the value of the variable named <code>variableName</code> from MATLAB.
*
* @param variableName
* @return value
* @throws MatlabInvocationException
*/
public static Object getVariable(String variableName) throws MatlabInvocationException
{
return _wrapper.getVariable(variableName);
}
/**
* Allows for enabling a diagnostic mode that will show in MATLAB each time a Java method that
* calls into MATLAB is invoked.
*
* @param echo
* @throws MatlabInvocationException
*/
public static void setEchoEval(boolean echo) throws MatlabInvocationException
{
JMIWrapper.setEchoEval(echo);
}
}
|
langmo/youscope
|
plugins/matlab-scripting/src/main/java/org/youscope/plugin/matlabscripting/LocalMatlabProxy.java
|
Java
|
gpl-2.0
| 10,055
|
/* Build expressions with type checking for C compiler.
Copyright (C) 1987, 1988, 1991, 1992, 1993, 1994, 1995, 1996, 1997, 1998,
1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
Free Software Foundation, Inc.
This file is part of GCC.
GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.
GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
for more details.
You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3. If not see
<http://www.gnu.org/licenses/>. */
/* This file is part of the C front end.
It contains routines to build C expressions given their operands,
including computing the types of the result, C-specific error checks,
and some optimization. */
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "langhooks.h"
#include "c-tree.h"
#include "c-lang.h"
#include "flags.h"
#include "output.h"
#include "intl.h"
#include "target.h"
#include "tree-iterator.h"
#include "bitmap.h"
#include "gimple.h"
#include "c-family/c-objc.h"
/* Possible cases of implicit bad conversions. Used to select
diagnostic messages in convert_for_assignment. */
enum impl_conv {
ic_argpass,
ic_assign,
ic_init,
ic_return
};
/* Possible cases of scalar_to_vector conversion. */
enum stv_conv {
stv_error, /* Error occurred. */
stv_nothing, /* Nothing happened. */
stv_firstarg, /* First argument must be expanded. */
stv_secondarg /* Second argument must be expanded. */
};
/* The level of nesting inside "__alignof__". */
int in_alignof;
/* The level of nesting inside "sizeof". */
int in_sizeof;
/* The level of nesting inside "typeof". */
int in_typeof;
/* Nonzero if we've already printed a "missing braces around initializer"
message within this initializer. */
static int missing_braces_mentioned;
static int require_constant_value;
static int require_constant_elements;
static bool null_pointer_constant_p (const_tree);
static tree qualify_type (tree, tree);
static int tagged_types_tu_compatible_p (const_tree, const_tree, bool *,
bool *);
static int comp_target_types (location_t, tree, tree);
static int function_types_compatible_p (const_tree, const_tree, bool *,
bool *);
static int type_lists_compatible_p (const_tree, const_tree, bool *, bool *);
static tree lookup_field (tree, tree);
static int convert_arguments (tree, VEC(tree,gc) *, VEC(tree,gc) *, tree,
tree);
static tree pointer_diff (location_t, tree, tree);
static tree convert_for_assignment (location_t, tree, tree, tree,
enum impl_conv, bool, tree, tree, int);
static tree valid_compound_expr_initializer (tree, tree);
static void push_string (const char *);
static void push_member_name (tree);
static int spelling_length (void);
static char *print_spelling (char *);
static void warning_init (int, const char *);
static tree digest_init (location_t, tree, tree, tree, bool, bool, int);
static void output_init_element (tree, tree, bool, tree, tree, int, bool,
struct obstack *);
static void output_pending_init_elements (int, struct obstack *);
static int set_designator (int, struct obstack *);
static void push_range_stack (tree, struct obstack *);
static void add_pending_init (tree, tree, tree, bool, struct obstack *);
static void set_nonincremental_init (struct obstack *);
static void set_nonincremental_init_from_string (tree, struct obstack *);
static tree find_init_member (tree, struct obstack *);
static void readonly_warning (tree, enum lvalue_use);
static int lvalue_or_else (location_t, const_tree, enum lvalue_use);
static void record_maybe_used_decl (tree);
static int comptypes_internal (const_tree, const_tree, bool *, bool *);
/* Return true if EXP is a null pointer constant, false otherwise. */
static bool
null_pointer_constant_p (const_tree expr)
{
/* This should really operate on c_expr structures, but they aren't
yet available everywhere required. */
tree type = TREE_TYPE (expr);
return (TREE_CODE (expr) == INTEGER_CST
&& !TREE_OVERFLOW (expr)
&& integer_zerop (expr)
&& (INTEGRAL_TYPE_P (type)
|| (TREE_CODE (type) == POINTER_TYPE
&& VOID_TYPE_P (TREE_TYPE (type))
&& TYPE_QUALS (TREE_TYPE (type)) == TYPE_UNQUALIFIED)));
}
/* EXPR may appear in an unevaluated part of an integer constant
expression, but not in an evaluated part. Wrap it in a
C_MAYBE_CONST_EXPR, or mark it with TREE_OVERFLOW if it is just an
INTEGER_CST and we cannot create a C_MAYBE_CONST_EXPR. */
static tree
note_integer_operands (tree expr)
{
tree ret;
if (TREE_CODE (expr) == INTEGER_CST && in_late_binary_op)
{
ret = copy_node (expr);
TREE_OVERFLOW (ret) = 1;
}
else
{
ret = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (expr), NULL_TREE, expr);
C_MAYBE_CONST_EXPR_INT_OPERANDS (ret) = 1;
}
return ret;
}
/* Having checked whether EXPR may appear in an unevaluated part of an
integer constant expression and found that it may, remove any
C_MAYBE_CONST_EXPR noting this fact and return the resulting
expression. */
static inline tree
remove_c_maybe_const_expr (tree expr)
{
if (TREE_CODE (expr) == C_MAYBE_CONST_EXPR)
return C_MAYBE_CONST_EXPR_EXPR (expr);
else
return expr;
}
/* This is a cache to hold if two types are compatible or not. */
struct tagged_tu_seen_cache {
const struct tagged_tu_seen_cache * next;
const_tree t1;
const_tree t2;
/* The return value of tagged_types_tu_compatible_p if we had seen
these two types already. */
int val;
};
static const struct tagged_tu_seen_cache * tagged_tu_seen_base;
static void free_all_tagged_tu_seen_up_to (const struct tagged_tu_seen_cache *);
/* Do `exp = require_complete_type (exp);' to make sure exp
does not have an incomplete type. (That includes void types.) */
tree
require_complete_type (tree value)
{
tree type = TREE_TYPE (value);
if (value == error_mark_node || type == error_mark_node)
return error_mark_node;
/* First, detect a valid value with a complete type. */
if (COMPLETE_TYPE_P (type))
return value;
c_incomplete_type_error (value, type);
return error_mark_node;
}
/* Print an error message for invalid use of an incomplete type.
VALUE is the expression that was used (or 0 if that isn't known)
and TYPE is the type that was invalid. */
void
c_incomplete_type_error (const_tree value, const_tree type)
{
const char *type_code_string;
/* Avoid duplicate error message. */
if (TREE_CODE (type) == ERROR_MARK)
return;
if (value != 0 && (TREE_CODE (value) == VAR_DECL
|| TREE_CODE (value) == PARM_DECL))
error ("%qD has an incomplete type", value);
else
{
retry:
/* We must print an error message. Be clever about what it says. */
switch (TREE_CODE (type))
{
case RECORD_TYPE:
type_code_string = "struct";
break;
case UNION_TYPE:
type_code_string = "union";
break;
case ENUMERAL_TYPE:
type_code_string = "enum";
break;
case VOID_TYPE:
error ("invalid use of void expression");
return;
case ARRAY_TYPE:
if (TYPE_DOMAIN (type))
{
if (TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL)
{
error ("invalid use of flexible array member");
return;
}
type = TREE_TYPE (type);
goto retry;
}
error ("invalid use of array with unspecified bounds");
return;
default:
gcc_unreachable ();
}
if (TREE_CODE (TYPE_NAME (type)) == IDENTIFIER_NODE)
error ("invalid use of undefined type %<%s %E%>",
type_code_string, TYPE_NAME (type));
else
/* If this type has a typedef-name, the TYPE_NAME is a TYPE_DECL. */
error ("invalid use of incomplete typedef %qD", TYPE_NAME (type));
}
}
/* Given a type, apply default promotions wrt unnamed function
arguments and return the new type. */
tree
c_type_promotes_to (tree type)
{
if (TYPE_MAIN_VARIANT (type) == float_type_node)
return double_type_node;
if (c_promoting_integer_type_p (type))
{
/* Preserve unsignedness if not really getting any wider. */
if (TYPE_UNSIGNED (type)
&& (TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node)))
return unsigned_type_node;
return integer_type_node;
}
return type;
}
/* Return true if there is a superset named address space that encompasses
   both of the given address spaces. If there is such a superset, return
   it via *common. */
static bool
addr_space_superset (addr_space_t as1, addr_space_t as2, addr_space_t *common)
{
if (as1 == as2)
{
*common = as1;
return true;
}
else if (targetm.addr_space.subset_p (as1, as2))
{
*common = as2;
return true;
}
else if (targetm.addr_space.subset_p (as2, as1))
{
*common = as1;
return true;
}
else
return false;
}
/* Return a variant of TYPE which has all the type qualifiers of LIKE
as well as those of TYPE. */
static tree
qualify_type (tree type, tree like)
{
addr_space_t as_type = TYPE_ADDR_SPACE (type);
addr_space_t as_like = TYPE_ADDR_SPACE (like);
addr_space_t as_common;
/* If the two named address spaces are different, determine the common
superset address space. If there isn't one, raise an error. */
if (!addr_space_superset (as_type, as_like, &as_common))
{
as_common = as_type;
error ("%qT and %qT are in disjoint named address spaces",
type, like);
}
return c_build_qualified_type (type,
TYPE_QUALS_NO_ADDR_SPACE (type)
| TYPE_QUALS_NO_ADDR_SPACE (like)
| ENCODE_QUAL_ADDR_SPACE (as_common));
}
/* Return true iff the given tree T is a variable length array. */
bool
c_vla_type_p (const_tree t)
{
if (TREE_CODE (t) == ARRAY_TYPE
&& C_TYPE_VARIABLE_SIZE (t))
return true;
return false;
}
/* Return the composite type of two compatible types.
We assume that comptypes has already been done and returned
nonzero; if that isn't so, this may crash. In particular, we
assume that qualifiers match. */
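/* For example (C99 6.2.7): the composite type of 'int[]' and 'int[5]' is
   'int[5]', and the composite type of 'int f()' and 'int f(int)' is
   'int f(int)'. */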
tree
composite_type (tree t1, tree t2)
{
enum tree_code code1;
enum tree_code code2;
tree attributes;
/* Save time if the two types are the same. */
if (t1 == t2) return t1;
/* If one type is nonsense, use the other. */
if (t1 == error_mark_node)
return t2;
if (t2 == error_mark_node)
return t1;
code1 = TREE_CODE (t1);
code2 = TREE_CODE (t2);
/* Merge the attributes. */
attributes = targetm.merge_type_attributes (t1, t2);
/* If one is an enumerated type and the other is the compatible
integer type, the composite type might be either of the two
(DR#013 question 3). For consistency, use the enumerated type as
the composite type. */
if (code1 == ENUMERAL_TYPE && code2 == INTEGER_TYPE)
return t1;
if (code2 == ENUMERAL_TYPE && code1 == INTEGER_TYPE)
return t2;
gcc_assert (code1 == code2);
switch (code1)
{
case POINTER_TYPE:
/* For two pointers, do this recursively on the target type. */
{
tree pointed_to_1 = TREE_TYPE (t1);
tree pointed_to_2 = TREE_TYPE (t2);
tree target = composite_type (pointed_to_1, pointed_to_2);
t1 = build_pointer_type_for_mode (target, TYPE_MODE (t1), false);
t1 = build_type_attribute_variant (t1, attributes);
return qualify_type (t1, t2);
}
case ARRAY_TYPE:
{
tree elt = composite_type (TREE_TYPE (t1), TREE_TYPE (t2));
int quals;
tree unqual_elt;
tree d1 = TYPE_DOMAIN (t1);
tree d2 = TYPE_DOMAIN (t2);
bool d1_variable, d2_variable;
bool d1_zero, d2_zero;
bool t1_complete, t2_complete;
/* We should not have any type quals on arrays at all. */
gcc_assert (!TYPE_QUALS_NO_ADDR_SPACE (t1)
&& !TYPE_QUALS_NO_ADDR_SPACE (t2));
t1_complete = COMPLETE_TYPE_P (t1);
t2_complete = COMPLETE_TYPE_P (t2);
d1_zero = d1 == 0 || !TYPE_MAX_VALUE (d1);
d2_zero = d2 == 0 || !TYPE_MAX_VALUE (d2);
d1_variable = (!d1_zero
&& (TREE_CODE (TYPE_MIN_VALUE (d1)) != INTEGER_CST
|| TREE_CODE (TYPE_MAX_VALUE (d1)) != INTEGER_CST));
d2_variable = (!d2_zero
&& (TREE_CODE (TYPE_MIN_VALUE (d2)) != INTEGER_CST
|| TREE_CODE (TYPE_MAX_VALUE (d2)) != INTEGER_CST));
d1_variable = d1_variable || (d1_zero && c_vla_type_p (t1));
d2_variable = d2_variable || (d2_zero && c_vla_type_p (t2));
/* Save space: see if the result is identical to one of the args. */
if (elt == TREE_TYPE (t1) && TYPE_DOMAIN (t1)
&& (d2_variable || d2_zero || !d1_variable))
return build_type_attribute_variant (t1, attributes);
if (elt == TREE_TYPE (t2) && TYPE_DOMAIN (t2)
&& (d1_variable || d1_zero || !d2_variable))
return build_type_attribute_variant (t2, attributes);
if (elt == TREE_TYPE (t1) && !TYPE_DOMAIN (t2) && !TYPE_DOMAIN (t1))
return build_type_attribute_variant (t1, attributes);
if (elt == TREE_TYPE (t2) && !TYPE_DOMAIN (t2) && !TYPE_DOMAIN (t1))
return build_type_attribute_variant (t2, attributes);
/* Merge the element types, and have a size if either arg has
one. We may have qualifiers on the element types. To set
up TYPE_MAIN_VARIANT correctly, we need to form the
composite of the unqualified types and add the qualifiers
back at the end. */
quals = TYPE_QUALS (strip_array_types (elt));
unqual_elt = c_build_qualified_type (elt, TYPE_UNQUALIFIED);
t1 = build_array_type (unqual_elt,
TYPE_DOMAIN ((TYPE_DOMAIN (t1)
&& (d2_variable
|| d2_zero
|| !d1_variable))
? t1
: t2));
/* Ensure a composite type involving a zero-length array type
is a zero-length type not an incomplete type. */
if (d1_zero && d2_zero
&& (t1_complete || t2_complete)
&& !COMPLETE_TYPE_P (t1))
{
TYPE_SIZE (t1) = bitsize_zero_node;
TYPE_SIZE_UNIT (t1) = size_zero_node;
}
t1 = c_build_qualified_type (t1, quals);
return build_type_attribute_variant (t1, attributes);
}
case ENUMERAL_TYPE:
case RECORD_TYPE:
case UNION_TYPE:
if (attributes != NULL)
{
/* Try harder not to create a new aggregate type. */
if (attribute_list_equal (TYPE_ATTRIBUTES (t1), attributes))
return t1;
if (attribute_list_equal (TYPE_ATTRIBUTES (t2), attributes))
return t2;
}
return build_type_attribute_variant (t1, attributes);
case FUNCTION_TYPE:
/* Function types: prefer the one that specified arg types.
If both do, merge the arg types. Also merge the return types. */
{
tree valtype = composite_type (TREE_TYPE (t1), TREE_TYPE (t2));
tree p1 = TYPE_ARG_TYPES (t1);
tree p2 = TYPE_ARG_TYPES (t2);
int len;
tree newargs, n;
int i;
/* Save space: see if the result is identical to one of the args. */
if (valtype == TREE_TYPE (t1) && !TYPE_ARG_TYPES (t2))
return build_type_attribute_variant (t1, attributes);
if (valtype == TREE_TYPE (t2) && !TYPE_ARG_TYPES (t1))
return build_type_attribute_variant (t2, attributes);
/* Simple way if one arg fails to specify argument types. */
if (TYPE_ARG_TYPES (t1) == 0)
{
t1 = build_function_type (valtype, TYPE_ARG_TYPES (t2));
t1 = build_type_attribute_variant (t1, attributes);
return qualify_type (t1, t2);
}
if (TYPE_ARG_TYPES (t2) == 0)
{
t1 = build_function_type (valtype, TYPE_ARG_TYPES (t1));
t1 = build_type_attribute_variant (t1, attributes);
return qualify_type (t1, t2);
}
/* If both args specify argument types, we must merge the two
lists, argument by argument. */
len = list_length (p1);
newargs = 0;
for (i = 0; i < len; i++)
newargs = tree_cons (NULL_TREE, NULL_TREE, newargs);
n = newargs;
for (; p1;
p1 = TREE_CHAIN (p1), p2 = TREE_CHAIN (p2), n = TREE_CHAIN (n))
{
/* A null type means arg type is not specified.
Take whatever the other function type has. */
if (TREE_VALUE (p1) == 0)
{
TREE_VALUE (n) = TREE_VALUE (p2);
goto parm_done;
}
if (TREE_VALUE (p2) == 0)
{
TREE_VALUE (n) = TREE_VALUE (p1);
goto parm_done;
}
/* Given wait (union {union wait *u; int *i} *)
and wait (union wait *),
prefer union wait * as type of parm. */
if (TREE_CODE (TREE_VALUE (p1)) == UNION_TYPE
&& TREE_VALUE (p1) != TREE_VALUE (p2))
{
tree memb;
tree mv2 = TREE_VALUE (p2);
if (mv2 && mv2 != error_mark_node
&& TREE_CODE (mv2) != ARRAY_TYPE)
mv2 = TYPE_MAIN_VARIANT (mv2);
for (memb = TYPE_FIELDS (TREE_VALUE (p1));
memb; memb = DECL_CHAIN (memb))
{
tree mv3 = TREE_TYPE (memb);
if (mv3 && mv3 != error_mark_node
&& TREE_CODE (mv3) != ARRAY_TYPE)
mv3 = TYPE_MAIN_VARIANT (mv3);
if (comptypes (mv3, mv2))
{
TREE_VALUE (n) = composite_type (TREE_TYPE (memb),
TREE_VALUE (p2));
pedwarn (input_location, OPT_pedantic,
"function types not truly compatible in ISO C");
goto parm_done;
}
}
}
if (TREE_CODE (TREE_VALUE (p2)) == UNION_TYPE
&& TREE_VALUE (p2) != TREE_VALUE (p1))
{
tree memb;
tree mv1 = TREE_VALUE (p1);
if (mv1 && mv1 != error_mark_node
&& TREE_CODE (mv1) != ARRAY_TYPE)
mv1 = TYPE_MAIN_VARIANT (mv1);
for (memb = TYPE_FIELDS (TREE_VALUE (p2));
memb; memb = DECL_CHAIN (memb))
{
tree mv3 = TREE_TYPE (memb);
if (mv3 && mv3 != error_mark_node
&& TREE_CODE (mv3) != ARRAY_TYPE)
mv3 = TYPE_MAIN_VARIANT (mv3);
if (comptypes (mv3, mv1))
{
TREE_VALUE (n) = composite_type (TREE_TYPE (memb),
TREE_VALUE (p1));
pedwarn (input_location, OPT_pedantic,
"function types not truly compatible in ISO C");
goto parm_done;
}
}
}
TREE_VALUE (n) = composite_type (TREE_VALUE (p1), TREE_VALUE (p2));
parm_done: ;
}
t1 = build_function_type (valtype, newargs);
t1 = qualify_type (t1, t2);
/* ... falls through ... */
}
default:
return build_type_attribute_variant (t1, attributes);
}
}
/* Return the type of a conditional expression between pointers to
possibly differently qualified versions of compatible types.
We assume that comp_target_types has already been done and returned
nonzero; if that isn't so, this may crash. */
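/* For example, for the conditional expression
       cond ? (const int *) p : (volatile int *) q
   the target qualifiers are unioned and the result type is
   "const volatile int *".  For pointers to functions, mismatched
   const/volatile qualifiers are dropped instead (see below).  */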
static tree
common_pointer_type (tree t1, tree t2)
{
tree attributes;
tree pointed_to_1, mv1;
tree pointed_to_2, mv2;
tree target;
unsigned target_quals;
addr_space_t as1, as2, as_common;
int quals1, quals2;
/* Save time if the two types are the same. */
if (t1 == t2) return t1;
/* If one type is nonsense, use the other. */
if (t1 == error_mark_node)
return t2;
if (t2 == error_mark_node)
return t1;
gcc_assert (TREE_CODE (t1) == POINTER_TYPE
&& TREE_CODE (t2) == POINTER_TYPE);
/* Merge the attributes. */
attributes = targetm.merge_type_attributes (t1, t2);
/* Find the composite type of the target types, and combine the
qualifiers of the two types' targets. Do not lose qualifiers on
array element types by taking the TYPE_MAIN_VARIANT. */
mv1 = pointed_to_1 = TREE_TYPE (t1);
mv2 = pointed_to_2 = TREE_TYPE (t2);
if (TREE_CODE (mv1) != ARRAY_TYPE)
mv1 = TYPE_MAIN_VARIANT (pointed_to_1);
if (TREE_CODE (mv2) != ARRAY_TYPE)
mv2 = TYPE_MAIN_VARIANT (pointed_to_2);
target = composite_type (mv1, mv2);
/* For function types do not merge const qualifiers, but drop them
if used inconsistently. The middle-end uses these to mark const
and noreturn functions. */
quals1 = TYPE_QUALS_NO_ADDR_SPACE (pointed_to_1);
quals2 = TYPE_QUALS_NO_ADDR_SPACE (pointed_to_2);
if (TREE_CODE (pointed_to_1) == FUNCTION_TYPE)
target_quals = (quals1 & quals2);
else
target_quals = (quals1 | quals2);
/* If the two named address spaces are different, determine the common
superset address space. This is guaranteed to exist due to the
assumption that comp_target_type returned non-zero. */
as1 = TYPE_ADDR_SPACE (pointed_to_1);
as2 = TYPE_ADDR_SPACE (pointed_to_2);
if (!addr_space_superset (as1, as2, &as_common))
gcc_unreachable ();
target_quals |= ENCODE_QUAL_ADDR_SPACE (as_common);
t1 = build_pointer_type (c_build_qualified_type (target, target_quals));
return build_type_attribute_variant (t1, attributes);
}
/* Return the common type for two arithmetic types under the usual
arithmetic conversions. The default conversions have already been
applied, and enumerated types converted to their compatible integer
types. The resulting type is unqualified and has no attributes.
This is the type for the result of most arithmetic operations
if the operands have the given two types. */
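/* A few concrete cases, assuming a target where int and long are both
   32 bits wide:
       int   and unsigned int  -> unsigned int   (prefer the unsigned type)
       long  and unsigned int  -> unsigned long  (same precision; keep the
                                                  unsignedness, since long
                                                  cannot hold every unsigned
                                                  int value)
       float and int           -> float          (only one operand is real)  */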
static tree
c_common_type (tree t1, tree t2)
{
enum tree_code code1;
enum tree_code code2;
/* If one type is nonsense, use the other. */
if (t1 == error_mark_node)
return t2;
if (t2 == error_mark_node)
return t1;
if (TYPE_QUALS (t1) != TYPE_UNQUALIFIED)
t1 = TYPE_MAIN_VARIANT (t1);
if (TYPE_QUALS (t2) != TYPE_UNQUALIFIED)
t2 = TYPE_MAIN_VARIANT (t2);
if (TYPE_ATTRIBUTES (t1) != NULL_TREE)
t1 = build_type_attribute_variant (t1, NULL_TREE);
if (TYPE_ATTRIBUTES (t2) != NULL_TREE)
t2 = build_type_attribute_variant (t2, NULL_TREE);
/* Save time if the two types are the same. */
if (t1 == t2) return t1;
code1 = TREE_CODE (t1);
code2 = TREE_CODE (t2);
gcc_assert (code1 == VECTOR_TYPE || code1 == COMPLEX_TYPE
|| code1 == FIXED_POINT_TYPE || code1 == REAL_TYPE
|| code1 == INTEGER_TYPE);
gcc_assert (code2 == VECTOR_TYPE || code2 == COMPLEX_TYPE
|| code2 == FIXED_POINT_TYPE || code2 == REAL_TYPE
|| code2 == INTEGER_TYPE);
/* When one operand is a decimal float type, the other operand cannot be
a generic float type or a complex type. We also disallow vector types
here. */
if ((DECIMAL_FLOAT_TYPE_P (t1) || DECIMAL_FLOAT_TYPE_P (t2))
&& !(DECIMAL_FLOAT_TYPE_P (t1) && DECIMAL_FLOAT_TYPE_P (t2)))
{
if (code1 == VECTOR_TYPE || code2 == VECTOR_TYPE)
{
error ("can%'t mix operands of decimal float and vector types");
return error_mark_node;
}
if (code1 == COMPLEX_TYPE || code2 == COMPLEX_TYPE)
{
error ("can%'t mix operands of decimal float and complex types");
return error_mark_node;
}
if (code1 == REAL_TYPE && code2 == REAL_TYPE)
{
error ("can%'t mix operands of decimal float and other float types");
return error_mark_node;
}
}
/* If one type is a vector type, return that type. (How the usual
arithmetic conversions apply to the vector types extension is not
precisely specified.) */
if (code1 == VECTOR_TYPE)
return t1;
if (code2 == VECTOR_TYPE)
return t2;
/* If one type is complex, form the common type of the non-complex
components, then make that complex. Use T1 or T2 if it is the
required type. */
if (code1 == COMPLEX_TYPE || code2 == COMPLEX_TYPE)
{
tree subtype1 = code1 == COMPLEX_TYPE ? TREE_TYPE (t1) : t1;
tree subtype2 = code2 == COMPLEX_TYPE ? TREE_TYPE (t2) : t2;
tree subtype = c_common_type (subtype1, subtype2);
if (code1 == COMPLEX_TYPE && TREE_TYPE (t1) == subtype)
return t1;
else if (code2 == COMPLEX_TYPE && TREE_TYPE (t2) == subtype)
return t2;
else
return build_complex_type (subtype);
}
/* If only one is real, use it as the result. */
if (code1 == REAL_TYPE && code2 != REAL_TYPE)
return t1;
if (code2 == REAL_TYPE && code1 != REAL_TYPE)
return t2;
/* If both are real and either are decimal floating point types, use
the decimal floating point type with the greater precision. */
if (code1 == REAL_TYPE && code2 == REAL_TYPE)
{
if (TYPE_MAIN_VARIANT (t1) == dfloat128_type_node
|| TYPE_MAIN_VARIANT (t2) == dfloat128_type_node)
return dfloat128_type_node;
else if (TYPE_MAIN_VARIANT (t1) == dfloat64_type_node
|| TYPE_MAIN_VARIANT (t2) == dfloat64_type_node)
return dfloat64_type_node;
else if (TYPE_MAIN_VARIANT (t1) == dfloat32_type_node
|| TYPE_MAIN_VARIANT (t2) == dfloat32_type_node)
return dfloat32_type_node;
}
/* Deal with fixed-point types. */
if (code1 == FIXED_POINT_TYPE || code2 == FIXED_POINT_TYPE)
{
unsigned int unsignedp = 0, satp = 0;
enum machine_mode m1, m2;
unsigned int fbit1, ibit1, fbit2, ibit2, max_fbit, max_ibit;
m1 = TYPE_MODE (t1);
m2 = TYPE_MODE (t2);
/* If one input type is saturating, the result type is saturating. */
if (TYPE_SATURATING (t1) || TYPE_SATURATING (t2))
satp = 1;
/* If both fixed-point types are unsigned, the result type is unsigned.
When mixing fixed-point and integer types, follow the sign of the
fixed-point type.
Otherwise, the result type is signed. */
if ((TYPE_UNSIGNED (t1) && TYPE_UNSIGNED (t2)
&& code1 == FIXED_POINT_TYPE && code2 == FIXED_POINT_TYPE)
|| (code1 == FIXED_POINT_TYPE && code2 != FIXED_POINT_TYPE
&& TYPE_UNSIGNED (t1))
|| (code1 != FIXED_POINT_TYPE && code2 == FIXED_POINT_TYPE
&& TYPE_UNSIGNED (t2)))
unsignedp = 1;
/* The result type is signed. */
if (unsignedp == 0)
{
/* If the input type is unsigned, we need to convert to the
signed type. */
if (code1 == FIXED_POINT_TYPE && TYPE_UNSIGNED (t1))
{
enum mode_class mclass = (enum mode_class) 0;
if (GET_MODE_CLASS (m1) == MODE_UFRACT)
mclass = MODE_FRACT;
else if (GET_MODE_CLASS (m1) == MODE_UACCUM)
mclass = MODE_ACCUM;
else
gcc_unreachable ();
m1 = mode_for_size (GET_MODE_PRECISION (m1), mclass, 0);
}
if (code2 == FIXED_POINT_TYPE && TYPE_UNSIGNED (t2))
{
enum mode_class mclass = (enum mode_class) 0;
if (GET_MODE_CLASS (m2) == MODE_UFRACT)
mclass = MODE_FRACT;
else if (GET_MODE_CLASS (m2) == MODE_UACCUM)
mclass = MODE_ACCUM;
else
gcc_unreachable ();
m2 = mode_for_size (GET_MODE_PRECISION (m2), mclass, 0);
}
}
if (code1 == FIXED_POINT_TYPE)
{
fbit1 = GET_MODE_FBIT (m1);
ibit1 = GET_MODE_IBIT (m1);
}
else
{
fbit1 = 0;
/* Signed integers need to subtract one sign bit. */
ibit1 = TYPE_PRECISION (t1) - (!TYPE_UNSIGNED (t1));
}
if (code2 == FIXED_POINT_TYPE)
{
fbit2 = GET_MODE_FBIT (m2);
ibit2 = GET_MODE_IBIT (m2);
}
else
{
fbit2 = 0;
/* Signed integers need to subtract one sign bit. */
ibit2 = TYPE_PRECISION (t2) - (!TYPE_UNSIGNED (t2));
}
max_ibit = ibit1 >= ibit2 ? ibit1 : ibit2;
max_fbit = fbit1 >= fbit2 ? fbit1 : fbit2;
return c_common_fixed_point_type_for_size (max_ibit, max_fbit, unsignedp,
satp);
}
/* Both real or both integers; use the one with greater precision. */
if (TYPE_PRECISION (t1) > TYPE_PRECISION (t2))
return t1;
else if (TYPE_PRECISION (t2) > TYPE_PRECISION (t1))
return t2;
  /* Same precision.  Prefer long long to long to int, following the
     C99 rules on integer type rank (which are equivalent to the C90
     rules for C90 types). */
if (TYPE_MAIN_VARIANT (t1) == long_long_unsigned_type_node
|| TYPE_MAIN_VARIANT (t2) == long_long_unsigned_type_node)
return long_long_unsigned_type_node;
if (TYPE_MAIN_VARIANT (t1) == long_long_integer_type_node
|| TYPE_MAIN_VARIANT (t2) == long_long_integer_type_node)
{
if (TYPE_UNSIGNED (t1) || TYPE_UNSIGNED (t2))
return long_long_unsigned_type_node;
else
return long_long_integer_type_node;
}
if (TYPE_MAIN_VARIANT (t1) == long_unsigned_type_node
|| TYPE_MAIN_VARIANT (t2) == long_unsigned_type_node)
return long_unsigned_type_node;
if (TYPE_MAIN_VARIANT (t1) == long_integer_type_node
|| TYPE_MAIN_VARIANT (t2) == long_integer_type_node)
{
/* But preserve unsignedness from the other type,
since long cannot hold all the values of an unsigned int. */
if (TYPE_UNSIGNED (t1) || TYPE_UNSIGNED (t2))
return long_unsigned_type_node;
else
return long_integer_type_node;
}
/* Likewise, prefer long double to double even if same size. */
if (TYPE_MAIN_VARIANT (t1) == long_double_type_node
|| TYPE_MAIN_VARIANT (t2) == long_double_type_node)
return long_double_type_node;
/* Otherwise prefer the unsigned one. */
if (TYPE_UNSIGNED (t1))
return t1;
else
return t2;
}
/* Wrapper around c_common_type that is used by c-common.c and other
front end optimizations that remove promotions. ENUMERAL_TYPEs
are allowed here and are converted to their compatible integer types.
BOOLEAN_TYPEs are allowed here and return either boolean_type_node or
preferably a non-Boolean type as the common type. */
tree
common_type (tree t1, tree t2)
{
if (TREE_CODE (t1) == ENUMERAL_TYPE)
t1 = c_common_type_for_size (TYPE_PRECISION (t1), 1);
if (TREE_CODE (t2) == ENUMERAL_TYPE)
t2 = c_common_type_for_size (TYPE_PRECISION (t2), 1);
/* If both types are BOOLEAN_TYPE, then return boolean_type_node. */
if (TREE_CODE (t1) == BOOLEAN_TYPE
&& TREE_CODE (t2) == BOOLEAN_TYPE)
return boolean_type_node;
/* If either type is BOOLEAN_TYPE, then return the other. */
if (TREE_CODE (t1) == BOOLEAN_TYPE)
return t2;
if (TREE_CODE (t2) == BOOLEAN_TYPE)
return t1;
return c_common_type (t1, t2);
}
/* Return 1 if TYPE1 and TYPE2 are compatible types for assignment
or various other operations. Return 2 if they are compatible
but a warning may be needed if you use them together. */
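/* For instance, "int []" and "int [10]" are compatible (one size is
   simply missing) while "int [5]" and "int [10]" are not; an enum and
   its compatible integer type also yield 1, with the enum/int detail
   reported separately through comptypes_check_enum_int below.  */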
int
comptypes (tree type1, tree type2)
{
const struct tagged_tu_seen_cache * tagged_tu_seen_base1 = tagged_tu_seen_base;
int val;
val = comptypes_internal (type1, type2, NULL, NULL);
free_all_tagged_tu_seen_up_to (tagged_tu_seen_base1);
return val;
}
/* Like comptypes, but if it returns non-zero because enum and int are
compatible, it sets *ENUM_AND_INT_P to true. */
static int
comptypes_check_enum_int (tree type1, tree type2, bool *enum_and_int_p)
{
const struct tagged_tu_seen_cache * tagged_tu_seen_base1 = tagged_tu_seen_base;
int val;
val = comptypes_internal (type1, type2, enum_and_int_p, NULL);
free_all_tagged_tu_seen_up_to (tagged_tu_seen_base1);
return val;
}
/* Like comptypes, but if it returns nonzero for different types, it
sets *DIFFERENT_TYPES_P to true. */
int
comptypes_check_different_types (tree type1, tree type2,
bool *different_types_p)
{
const struct tagged_tu_seen_cache * tagged_tu_seen_base1 = tagged_tu_seen_base;
int val;
val = comptypes_internal (type1, type2, NULL, different_types_p);
free_all_tagged_tu_seen_up_to (tagged_tu_seen_base1);
return val;
}
/* Return 1 if TYPE1 and TYPE2 are compatible types for assignment
or various other operations. Return 2 if they are compatible
but a warning may be needed if you use them together. If
ENUM_AND_INT_P is not NULL, and one type is an enum and the other a
compatible integer type, then this sets *ENUM_AND_INT_P to true;
*ENUM_AND_INT_P is never set to false. If DIFFERENT_TYPES_P is not
NULL, and the types are compatible but different enough not to be
permitted in C11 typedef redeclarations, then this sets
*DIFFERENT_TYPES_P to true; *DIFFERENT_TYPES_P is never set to
false, but may or may not be set if the types are incompatible.
This differs from comptypes, in that we don't free the seen
types. */
static int
comptypes_internal (const_tree type1, const_tree type2, bool *enum_and_int_p,
bool *different_types_p)
{
const_tree t1 = type1;
const_tree t2 = type2;
int attrval, val;
/* Suppress errors caused by previously reported errors. */
if (t1 == t2 || !t1 || !t2
|| TREE_CODE (t1) == ERROR_MARK || TREE_CODE (t2) == ERROR_MARK)
return 1;
/* Enumerated types are compatible with integer types, but this is
not transitive: two enumerated types in the same translation unit
are compatible with each other only if they are the same type. */
if (TREE_CODE (t1) == ENUMERAL_TYPE && TREE_CODE (t2) != ENUMERAL_TYPE)
{
t1 = c_common_type_for_size (TYPE_PRECISION (t1), TYPE_UNSIGNED (t1));
if (TREE_CODE (t2) != VOID_TYPE)
{
if (enum_and_int_p != NULL)
*enum_and_int_p = true;
if (different_types_p != NULL)
*different_types_p = true;
}
}
else if (TREE_CODE (t2) == ENUMERAL_TYPE && TREE_CODE (t1) != ENUMERAL_TYPE)
{
t2 = c_common_type_for_size (TYPE_PRECISION (t2), TYPE_UNSIGNED (t2));
if (TREE_CODE (t1) != VOID_TYPE)
{
if (enum_and_int_p != NULL)
*enum_and_int_p = true;
if (different_types_p != NULL)
*different_types_p = true;
}
}
if (t1 == t2)
return 1;
/* Different classes of types can't be compatible. */
if (TREE_CODE (t1) != TREE_CODE (t2))
return 0;
/* Qualifiers must match. C99 6.7.3p9 */
if (TYPE_QUALS (t1) != TYPE_QUALS (t2))
return 0;
/* Allow for two different type nodes which have essentially the same
definition. Note that we already checked for equality of the type
qualifiers (just above). */
if (TREE_CODE (t1) != ARRAY_TYPE
&& TYPE_MAIN_VARIANT (t1) == TYPE_MAIN_VARIANT (t2))
return 1;
/* 1 if no need for warning yet, 2 if warning cause has been seen. */
if (!(attrval = comp_type_attributes (t1, t2)))
return 0;
/* 1 if no need for warning yet, 2 if warning cause has been seen. */
val = 0;
switch (TREE_CODE (t1))
{
case POINTER_TYPE:
/* Do not remove mode or aliasing information. */
if (TYPE_MODE (t1) != TYPE_MODE (t2)
|| TYPE_REF_CAN_ALIAS_ALL (t1) != TYPE_REF_CAN_ALIAS_ALL (t2))
break;
val = (TREE_TYPE (t1) == TREE_TYPE (t2)
? 1 : comptypes_internal (TREE_TYPE (t1), TREE_TYPE (t2),
enum_and_int_p, different_types_p));
break;
case FUNCTION_TYPE:
val = function_types_compatible_p (t1, t2, enum_and_int_p,
different_types_p);
break;
case ARRAY_TYPE:
{
tree d1 = TYPE_DOMAIN (t1);
tree d2 = TYPE_DOMAIN (t2);
bool d1_variable, d2_variable;
bool d1_zero, d2_zero;
val = 1;
/* Target types must match incl. qualifiers. */
if (TREE_TYPE (t1) != TREE_TYPE (t2)
&& 0 == (val = comptypes_internal (TREE_TYPE (t1), TREE_TYPE (t2),
enum_and_int_p,
different_types_p)))
return 0;
if (different_types_p != NULL
&& (d1 == 0) != (d2 == 0))
*different_types_p = true;
/* Sizes must match unless one is missing or variable. */
if (d1 == 0 || d2 == 0 || d1 == d2)
break;
d1_zero = !TYPE_MAX_VALUE (d1);
d2_zero = !TYPE_MAX_VALUE (d2);
d1_variable = (!d1_zero
&& (TREE_CODE (TYPE_MIN_VALUE (d1)) != INTEGER_CST
|| TREE_CODE (TYPE_MAX_VALUE (d1)) != INTEGER_CST));
d2_variable = (!d2_zero
&& (TREE_CODE (TYPE_MIN_VALUE (d2)) != INTEGER_CST
|| TREE_CODE (TYPE_MAX_VALUE (d2)) != INTEGER_CST));
d1_variable = d1_variable || (d1_zero && c_vla_type_p (t1));
d2_variable = d2_variable || (d2_zero && c_vla_type_p (t2));
if (different_types_p != NULL
&& d1_variable != d2_variable)
*different_types_p = true;
if (d1_variable || d2_variable)
break;
if (d1_zero && d2_zero)
break;
if (d1_zero || d2_zero
|| !tree_int_cst_equal (TYPE_MIN_VALUE (d1), TYPE_MIN_VALUE (d2))
|| !tree_int_cst_equal (TYPE_MAX_VALUE (d1), TYPE_MAX_VALUE (d2)))
val = 0;
break;
}
case ENUMERAL_TYPE:
case RECORD_TYPE:
case UNION_TYPE:
if (val != 1 && !same_translation_unit_p (t1, t2))
{
tree a1 = TYPE_ATTRIBUTES (t1);
tree a2 = TYPE_ATTRIBUTES (t2);
if (! attribute_list_contained (a1, a2)
&& ! attribute_list_contained (a2, a1))
break;
if (attrval != 2)
return tagged_types_tu_compatible_p (t1, t2, enum_and_int_p,
different_types_p);
val = tagged_types_tu_compatible_p (t1, t2, enum_and_int_p,
different_types_p);
}
break;
case VECTOR_TYPE:
val = (TYPE_VECTOR_SUBPARTS (t1) == TYPE_VECTOR_SUBPARTS (t2)
&& comptypes_internal (TREE_TYPE (t1), TREE_TYPE (t2),
enum_and_int_p, different_types_p));
break;
default:
break;
}
return attrval == 2 && val == 1 ? 2 : val;
}
/* Return 1 if TTL and TTR are pointers to types that are equivalent, ignoring
their qualifiers, except for named address spaces. If the pointers point to
different named addresses, then we must determine if one address space is a
subset of the other. */
static int
comp_target_types (location_t location, tree ttl, tree ttr)
{
int val;
tree mvl = TREE_TYPE (ttl);
tree mvr = TREE_TYPE (ttr);
addr_space_t asl = TYPE_ADDR_SPACE (mvl);
addr_space_t asr = TYPE_ADDR_SPACE (mvr);
addr_space_t as_common;
bool enum_and_int_p;
/* Fail if pointers point to incompatible address spaces. */
if (!addr_space_superset (asl, asr, &as_common))
return 0;
/* Do not lose qualifiers on element types of array types that are
pointer targets by taking their TYPE_MAIN_VARIANT. */
if (TREE_CODE (mvl) != ARRAY_TYPE)
mvl = TYPE_MAIN_VARIANT (mvl);
if (TREE_CODE (mvr) != ARRAY_TYPE)
mvr = TYPE_MAIN_VARIANT (mvr);
enum_and_int_p = false;
val = comptypes_check_enum_int (mvl, mvr, &enum_and_int_p);
if (val == 2)
pedwarn (location, OPT_pedantic, "types are not quite compatible");
if (val == 1 && enum_and_int_p && warn_cxx_compat)
warning_at (location, OPT_Wc___compat,
"pointer target types incompatible in C++");
return val;
}
/* Subroutines of `comptypes'. */
/* Determine whether two trees derive from the same translation unit.
If the CONTEXT chain ends in a null, that tree's context is still
being parsed, so if two trees have context chains ending in null,
they're in the same translation unit. */
int
same_translation_unit_p (const_tree t1, const_tree t2)
{
while (t1 && TREE_CODE (t1) != TRANSLATION_UNIT_DECL)
switch (TREE_CODE_CLASS (TREE_CODE (t1)))
{
case tcc_declaration:
t1 = DECL_CONTEXT (t1); break;
case tcc_type:
t1 = TYPE_CONTEXT (t1); break;
case tcc_exceptional:
t1 = BLOCK_SUPERCONTEXT (t1); break; /* assume block */
default: gcc_unreachable ();
}
while (t2 && TREE_CODE (t2) != TRANSLATION_UNIT_DECL)
switch (TREE_CODE_CLASS (TREE_CODE (t2)))
{
case tcc_declaration:
t2 = DECL_CONTEXT (t2); break;
case tcc_type:
t2 = TYPE_CONTEXT (t2); break;
case tcc_exceptional:
t2 = BLOCK_SUPERCONTEXT (t2); break; /* assume block */
default: gcc_unreachable ();
}
return t1 == t2;
}
/* Allocate a cache entry for the two types just seen, assuming that
   they are compatible. */
static struct tagged_tu_seen_cache *
alloc_tagged_tu_seen_cache (const_tree t1, const_tree t2)
{
struct tagged_tu_seen_cache *tu = XNEW (struct tagged_tu_seen_cache);
tu->next = tagged_tu_seen_base;
tu->t1 = t1;
tu->t2 = t2;
tagged_tu_seen_base = tu;
/* The C standard says that two structures in different translation
units are compatible with each other only if the types of their
fields are compatible (among other things). We assume that they
are compatible until proven otherwise when building the cache.
An example where this can occur is:
struct a
{
struct a *next;
};
If we are comparing this against a similar struct in another TU,
and did not assume they were compatible, we end up with an infinite
loop. */
tu->val = 1;
return tu;
}
/* Free the seen types until we get to TU_TIL. */
static void
free_all_tagged_tu_seen_up_to (const struct tagged_tu_seen_cache *tu_til)
{
const struct tagged_tu_seen_cache *tu = tagged_tu_seen_base;
while (tu != tu_til)
{
const struct tagged_tu_seen_cache *const tu1
= (const struct tagged_tu_seen_cache *) tu;
tu = tu1->next;
free (CONST_CAST (struct tagged_tu_seen_cache *, tu1));
}
tagged_tu_seen_base = tu_til;
}
/* Return 1 if two 'struct', 'union', or 'enum' types T1 and T2 are
compatible. If the two types are not the same (which has been
checked earlier), this can only happen when multiple translation
units are being compiled. See C99 6.2.7 paragraph 1 for the exact
rules. ENUM_AND_INT_P and DIFFERENT_TYPES_P are as in
comptypes_internal. */
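/* For example, a structure defined in two translation units as
       struct s { int i; double d; };
   is compatible with itself across those units; if the second unit
   instead declared the members in a different order, or under
   different names, the types would not be compatible and 0 is
   returned.  */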
static int
tagged_types_tu_compatible_p (const_tree t1, const_tree t2,
bool *enum_and_int_p, bool *different_types_p)
{
tree s1, s2;
bool needs_warning = false;
/* We have to verify that the tags of the types are the same. This
is harder than it looks because this may be a typedef, so we have
to go look at the original type. It may even be a typedef of a
typedef...
In the case of compiler-created builtin structs the TYPE_DECL
may be a dummy, with no DECL_ORIGINAL_TYPE. Don't fault. */
while (TYPE_NAME (t1)
&& TREE_CODE (TYPE_NAME (t1)) == TYPE_DECL
&& DECL_ORIGINAL_TYPE (TYPE_NAME (t1)))
t1 = DECL_ORIGINAL_TYPE (TYPE_NAME (t1));
while (TYPE_NAME (t2)
&& TREE_CODE (TYPE_NAME (t2)) == TYPE_DECL
&& DECL_ORIGINAL_TYPE (TYPE_NAME (t2)))
t2 = DECL_ORIGINAL_TYPE (TYPE_NAME (t2));
/* C90 didn't have the requirement that the two tags be the same. */
if (flag_isoc99 && TYPE_NAME (t1) != TYPE_NAME (t2))
return 0;
/* C90 didn't say what happened if one or both of the types were
incomplete; we choose to follow C99 rules here, which is that they
are compatible. */
if (TYPE_SIZE (t1) == NULL
|| TYPE_SIZE (t2) == NULL)
return 1;
{
const struct tagged_tu_seen_cache * tts_i;
for (tts_i = tagged_tu_seen_base; tts_i != NULL; tts_i = tts_i->next)
if (tts_i->t1 == t1 && tts_i->t2 == t2)
return tts_i->val;
}
switch (TREE_CODE (t1))
{
case ENUMERAL_TYPE:
{
struct tagged_tu_seen_cache *tu = alloc_tagged_tu_seen_cache (t1, t2);
/* Speed up the case where the type values are in the same order. */
tree tv1 = TYPE_VALUES (t1);
tree tv2 = TYPE_VALUES (t2);
if (tv1 == tv2)
{
return 1;
}
for (;tv1 && tv2; tv1 = TREE_CHAIN (tv1), tv2 = TREE_CHAIN (tv2))
{
if (TREE_PURPOSE (tv1) != TREE_PURPOSE (tv2))
break;
if (simple_cst_equal (TREE_VALUE (tv1), TREE_VALUE (tv2)) != 1)
{
tu->val = 0;
return 0;
}
}
if (tv1 == NULL_TREE && tv2 == NULL_TREE)
{
return 1;
}
if (tv1 == NULL_TREE || tv2 == NULL_TREE)
{
tu->val = 0;
return 0;
}
if (list_length (TYPE_VALUES (t1)) != list_length (TYPE_VALUES (t2)))
{
tu->val = 0;
return 0;
}
for (s1 = TYPE_VALUES (t1); s1; s1 = TREE_CHAIN (s1))
{
s2 = purpose_member (TREE_PURPOSE (s1), TYPE_VALUES (t2));
if (s2 == NULL
|| simple_cst_equal (TREE_VALUE (s1), TREE_VALUE (s2)) != 1)
{
tu->val = 0;
return 0;
}
}
return 1;
}
case UNION_TYPE:
{
struct tagged_tu_seen_cache *tu = alloc_tagged_tu_seen_cache (t1, t2);
if (list_length (TYPE_FIELDS (t1)) != list_length (TYPE_FIELDS (t2)))
{
tu->val = 0;
return 0;
}
/* Speed up the common case where the fields are in the same order. */
for (s1 = TYPE_FIELDS (t1), s2 = TYPE_FIELDS (t2); s1 && s2;
s1 = DECL_CHAIN (s1), s2 = DECL_CHAIN (s2))
{
int result;
if (DECL_NAME (s1) != DECL_NAME (s2))
break;
result = comptypes_internal (TREE_TYPE (s1), TREE_TYPE (s2),
enum_and_int_p, different_types_p);
if (result != 1 && !DECL_NAME (s1))
break;
if (result == 0)
{
tu->val = 0;
return 0;
}
if (result == 2)
needs_warning = true;
if (TREE_CODE (s1) == FIELD_DECL
&& simple_cst_equal (DECL_FIELD_BIT_OFFSET (s1),
DECL_FIELD_BIT_OFFSET (s2)) != 1)
{
tu->val = 0;
return 0;
}
}
if (!s1 && !s2)
{
tu->val = needs_warning ? 2 : 1;
return tu->val;
}
for (s1 = TYPE_FIELDS (t1); s1; s1 = DECL_CHAIN (s1))
{
bool ok = false;
for (s2 = TYPE_FIELDS (t2); s2; s2 = DECL_CHAIN (s2))
if (DECL_NAME (s1) == DECL_NAME (s2))
{
int result;
result = comptypes_internal (TREE_TYPE (s1), TREE_TYPE (s2),
enum_and_int_p,
different_types_p);
if (result != 1 && !DECL_NAME (s1))
continue;
if (result == 0)
{
tu->val = 0;
return 0;
}
if (result == 2)
needs_warning = true;
if (TREE_CODE (s1) == FIELD_DECL
&& simple_cst_equal (DECL_FIELD_BIT_OFFSET (s1),
DECL_FIELD_BIT_OFFSET (s2)) != 1)
break;
ok = true;
break;
}
if (!ok)
{
tu->val = 0;
return 0;
}
}
      tu->val = needs_warning ? 2 : 1;
return tu->val;
}
case RECORD_TYPE:
{
struct tagged_tu_seen_cache *tu = alloc_tagged_tu_seen_cache (t1, t2);
for (s1 = TYPE_FIELDS (t1), s2 = TYPE_FIELDS (t2);
s1 && s2;
s1 = DECL_CHAIN (s1), s2 = DECL_CHAIN (s2))
{
int result;
if (TREE_CODE (s1) != TREE_CODE (s2)
|| DECL_NAME (s1) != DECL_NAME (s2))
break;
result = comptypes_internal (TREE_TYPE (s1), TREE_TYPE (s2),
enum_and_int_p, different_types_p);
if (result == 0)
break;
if (result == 2)
needs_warning = true;
if (TREE_CODE (s1) == FIELD_DECL
&& simple_cst_equal (DECL_FIELD_BIT_OFFSET (s1),
DECL_FIELD_BIT_OFFSET (s2)) != 1)
break;
}
if (s1 && s2)
tu->val = 0;
else
tu->val = needs_warning ? 2 : 1;
return tu->val;
}
default:
gcc_unreachable ();
}
}
/* Return 1 if two function types F1 and F2 are compatible.
If either type specifies no argument types,
the other must specify a fixed number of self-promoting arg types.
Otherwise, if one type specifies only the number of arguments,
the other must specify that number of self-promoting arg types.
Otherwise, the argument types must match.
ENUM_AND_INT_P and DIFFERENT_TYPES_P are as in comptypes_internal. */
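/* For example, "int f ()" is compatible with "int f (int)" because int
   is self-promoting, but not with "int f (char)", because char would be
   widened to int by the default argument promotions.  */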
static int
function_types_compatible_p (const_tree f1, const_tree f2,
bool *enum_and_int_p, bool *different_types_p)
{
tree args1, args2;
/* 1 if no need for warning yet, 2 if warning cause has been seen. */
int val = 1;
int val1;
tree ret1, ret2;
ret1 = TREE_TYPE (f1);
ret2 = TREE_TYPE (f2);
/* 'volatile' qualifiers on a function's return type used to mean
the function is noreturn. */
if (TYPE_VOLATILE (ret1) != TYPE_VOLATILE (ret2))
pedwarn (input_location, 0, "function return types not compatible due to %<volatile%>");
if (TYPE_VOLATILE (ret1))
ret1 = build_qualified_type (TYPE_MAIN_VARIANT (ret1),
TYPE_QUALS (ret1) & ~TYPE_QUAL_VOLATILE);
if (TYPE_VOLATILE (ret2))
ret2 = build_qualified_type (TYPE_MAIN_VARIANT (ret2),
TYPE_QUALS (ret2) & ~TYPE_QUAL_VOLATILE);
val = comptypes_internal (ret1, ret2, enum_and_int_p, different_types_p);
if (val == 0)
return 0;
args1 = TYPE_ARG_TYPES (f1);
args2 = TYPE_ARG_TYPES (f2);
if (different_types_p != NULL
&& (args1 == 0) != (args2 == 0))
*different_types_p = true;
/* An unspecified parmlist matches any specified parmlist
whose argument types don't need default promotions. */
if (args1 == 0)
{
if (!self_promoting_args_p (args2))
return 0;
/* If one of these types comes from a non-prototype fn definition,
compare that with the other type's arglist.
If they don't match, ask for a warning (but no error). */
if (TYPE_ACTUAL_ARG_TYPES (f1)
&& 1 != type_lists_compatible_p (args2, TYPE_ACTUAL_ARG_TYPES (f1),
enum_and_int_p, different_types_p))
val = 2;
return val;
}
if (args2 == 0)
{
if (!self_promoting_args_p (args1))
return 0;
if (TYPE_ACTUAL_ARG_TYPES (f2)
&& 1 != type_lists_compatible_p (args1, TYPE_ACTUAL_ARG_TYPES (f2),
enum_and_int_p, different_types_p))
val = 2;
return val;
}
/* Both types have argument lists: compare them and propagate results. */
val1 = type_lists_compatible_p (args1, args2, enum_and_int_p,
different_types_p);
return val1 != 1 ? val1 : val;
}
/* Check two lists of types for compatibility, returning 0 for
incompatible, 1 for compatible, or 2 for compatible with
warning. ENUM_AND_INT_P and DIFFERENT_TYPES_P are as in
comptypes_internal. */
static int
type_lists_compatible_p (const_tree args1, const_tree args2,
bool *enum_and_int_p, bool *different_types_p)
{
/* 1 if no need for warning yet, 2 if warning cause has been seen. */
int val = 1;
int newval = 0;
while (1)
{
tree a1, mv1, a2, mv2;
if (args1 == 0 && args2 == 0)
return val;
/* If one list is shorter than the other,
they fail to match. */
if (args1 == 0 || args2 == 0)
return 0;
mv1 = a1 = TREE_VALUE (args1);
mv2 = a2 = TREE_VALUE (args2);
if (mv1 && mv1 != error_mark_node && TREE_CODE (mv1) != ARRAY_TYPE)
mv1 = TYPE_MAIN_VARIANT (mv1);
if (mv2 && mv2 != error_mark_node && TREE_CODE (mv2) != ARRAY_TYPE)
mv2 = TYPE_MAIN_VARIANT (mv2);
/* A null pointer instead of a type
means there is supposed to be an argument
but nothing is specified about what type it has.
So match anything that self-promotes. */
if (different_types_p != NULL
&& (a1 == 0) != (a2 == 0))
*different_types_p = true;
if (a1 == 0)
{
if (c_type_promotes_to (a2) != a2)
return 0;
}
else if (a2 == 0)
{
if (c_type_promotes_to (a1) != a1)
return 0;
}
/* If one of the lists has an error marker, ignore this arg. */
else if (TREE_CODE (a1) == ERROR_MARK
|| TREE_CODE (a2) == ERROR_MARK)
;
else if (!(newval = comptypes_internal (mv1, mv2, enum_and_int_p,
different_types_p)))
{
if (different_types_p != NULL)
*different_types_p = true;
/* Allow wait (union {union wait *u; int *i} *)
and wait (union wait *) to be compatible. */
if (TREE_CODE (a1) == UNION_TYPE
&& (TYPE_NAME (a1) == 0
|| TYPE_TRANSPARENT_AGGR (a1))
&& TREE_CODE (TYPE_SIZE (a1)) == INTEGER_CST
&& tree_int_cst_equal (TYPE_SIZE (a1),
TYPE_SIZE (a2)))
{
tree memb;
for (memb = TYPE_FIELDS (a1);
memb; memb = DECL_CHAIN (memb))
{
tree mv3 = TREE_TYPE (memb);
if (mv3 && mv3 != error_mark_node
&& TREE_CODE (mv3) != ARRAY_TYPE)
mv3 = TYPE_MAIN_VARIANT (mv3);
if (comptypes_internal (mv3, mv2, enum_and_int_p,
different_types_p))
break;
}
if (memb == 0)
return 0;
}
else if (TREE_CODE (a2) == UNION_TYPE
&& (TYPE_NAME (a2) == 0
|| TYPE_TRANSPARENT_AGGR (a2))
&& TREE_CODE (TYPE_SIZE (a2)) == INTEGER_CST
&& tree_int_cst_equal (TYPE_SIZE (a2),
TYPE_SIZE (a1)))
{
tree memb;
for (memb = TYPE_FIELDS (a2);
memb; memb = DECL_CHAIN (memb))
{
tree mv3 = TREE_TYPE (memb);
if (mv3 && mv3 != error_mark_node
&& TREE_CODE (mv3) != ARRAY_TYPE)
mv3 = TYPE_MAIN_VARIANT (mv3);
if (comptypes_internal (mv3, mv1, enum_and_int_p,
different_types_p))
break;
}
if (memb == 0)
return 0;
}
else
return 0;
}
/* comptypes said ok, but record if it said to warn. */
if (newval > val)
val = newval;
args1 = TREE_CHAIN (args1);
args2 = TREE_CHAIN (args2);
}
}
/* Compute the size to increment a pointer by. */
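/* This is the unit used for pointer arithmetic: for "int *p" on a
   target with 4-byte int, "p + 1" advances by 4 bytes.  Pointers to
   functions, to void and to erroneous types get a size of 1, matching
   the GNU C extension that permits arithmetic on such pointers.  */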
static tree
c_size_in_bytes (const_tree type)
{
enum tree_code code = TREE_CODE (type);
if (code == FUNCTION_TYPE || code == VOID_TYPE || code == ERROR_MARK)
return size_one_node;
if (!COMPLETE_OR_VOID_TYPE_P (type))
{
error ("arithmetic on pointer to an incomplete type");
return size_one_node;
}
/* Convert in case a char is more than one unit. */
return size_binop_loc (input_location, CEIL_DIV_EXPR, TYPE_SIZE_UNIT (type),
size_int (TYPE_PRECISION (char_type_node)
/ BITS_PER_UNIT));
}
/* Return either DECL or its known constant value (if it has one). */
tree
decl_constant_value (tree decl)
{
if (/* Don't change a variable array bound or initial value to a constant
in a place where a variable is invalid. Note that DECL_INITIAL
isn't valid for a PARM_DECL. */
current_function_decl != 0
&& TREE_CODE (decl) != PARM_DECL
&& !TREE_THIS_VOLATILE (decl)
&& TREE_READONLY (decl)
&& DECL_INITIAL (decl) != 0
&& TREE_CODE (DECL_INITIAL (decl)) != ERROR_MARK
/* This is invalid if initial value is not constant.
If it has either a function call, a memory reference,
or a variable, then re-evaluating it could give different results. */
&& TREE_CONSTANT (DECL_INITIAL (decl))
/* Check for cases where this is sub-optimal, even though valid. */
&& TREE_CODE (DECL_INITIAL (decl)) != CONSTRUCTOR)
return DECL_INITIAL (decl);
return decl;
}
/* Convert the array expression EXP to a pointer. */
static tree
array_to_pointer_conversion (location_t loc, tree exp)
{
tree orig_exp = exp;
tree type = TREE_TYPE (exp);
tree adr;
tree restype = TREE_TYPE (type);
tree ptrtype;
gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
STRIP_TYPE_NOPS (exp);
if (TREE_NO_WARNING (orig_exp))
TREE_NO_WARNING (exp) = 1;
ptrtype = build_pointer_type (restype);
if (TREE_CODE (exp) == INDIRECT_REF)
return convert (ptrtype, TREE_OPERAND (exp, 0));
adr = build_unary_op (loc, ADDR_EXPR, exp, 1);
return convert (ptrtype, adr);
}
/* Convert the function expression EXP to a pointer. */
static tree
function_to_pointer_conversion (location_t loc, tree exp)
{
tree orig_exp = exp;
gcc_assert (TREE_CODE (TREE_TYPE (exp)) == FUNCTION_TYPE);
STRIP_TYPE_NOPS (exp);
if (TREE_NO_WARNING (orig_exp))
TREE_NO_WARNING (exp) = 1;
return build_unary_op (loc, ADDR_EXPR, exp, 0);
}
/* Mark EXP as read, not just set, for set but not used -Wunused
warning purposes. */
void
mark_exp_read (tree exp)
{
switch (TREE_CODE (exp))
{
case VAR_DECL:
case PARM_DECL:
DECL_READ_P (exp) = 1;
break;
case ARRAY_REF:
case COMPONENT_REF:
case MODIFY_EXPR:
case REALPART_EXPR:
case IMAGPART_EXPR:
CASE_CONVERT:
case ADDR_EXPR:
mark_exp_read (TREE_OPERAND (exp, 0));
break;
case COMPOUND_EXPR:
case C_MAYBE_CONST_EXPR:
mark_exp_read (TREE_OPERAND (exp, 1));
break;
default:
break;
}
}
/* Perform the default conversion of arrays and functions to pointers.
Return the result of converting EXP. For any other expression, just
return EXP.
LOC is the location of the expression. */
struct c_expr
default_function_array_conversion (location_t loc, struct c_expr exp)
{
tree orig_exp = exp.value;
tree type = TREE_TYPE (exp.value);
enum tree_code code = TREE_CODE (type);
switch (code)
{
case ARRAY_TYPE:
{
bool not_lvalue = false;
bool lvalue_array_p;
while ((TREE_CODE (exp.value) == NON_LVALUE_EXPR
|| CONVERT_EXPR_P (exp.value))
&& TREE_TYPE (TREE_OPERAND (exp.value, 0)) == type)
{
if (TREE_CODE (exp.value) == NON_LVALUE_EXPR)
not_lvalue = true;
exp.value = TREE_OPERAND (exp.value, 0);
}
if (TREE_NO_WARNING (orig_exp))
TREE_NO_WARNING (exp.value) = 1;
lvalue_array_p = !not_lvalue && lvalue_p (exp.value);
if (!flag_isoc99 && !lvalue_array_p)
{
/* Before C99, non-lvalue arrays do not decay to pointers.
Normally, using such an array would be invalid; but it can
be used correctly inside sizeof or as a statement expression.
Thus, do not give an error here; an error will result later. */
return exp;
}
exp.value = array_to_pointer_conversion (loc, exp.value);
}
break;
case FUNCTION_TYPE:
exp.value = function_to_pointer_conversion (loc, exp.value);
break;
default:
break;
}
return exp;
}
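/* Like default_function_array_conversion, but also mark the expression
   as read (see mark_exp_read) so that it is not reported as set but
   never used.  */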
struct c_expr
default_function_array_read_conversion (location_t loc, struct c_expr exp)
{
mark_exp_read (exp.value);
return default_function_array_conversion (loc, exp);
}
/* EXP is an expression of integer type. Apply the integer promotions
to it and return the promoted value. */
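/* For example, in "char c; ... c + 1" the operand c is promoted to int
   here.  A bit-field narrower than int is promoted to int as well,
   while, on a target where short is as wide as int, an unsigned short
   keeps its unsignedness and promotes to unsigned int.  */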
tree
perform_integral_promotions (tree exp)
{
tree type = TREE_TYPE (exp);
enum tree_code code = TREE_CODE (type);
gcc_assert (INTEGRAL_TYPE_P (type));
/* Normally convert enums to int,
but convert wide enums to something wider. */
if (code == ENUMERAL_TYPE)
{
type = c_common_type_for_size (MAX (TYPE_PRECISION (type),
TYPE_PRECISION (integer_type_node)),
((TYPE_PRECISION (type)
>= TYPE_PRECISION (integer_type_node))
&& TYPE_UNSIGNED (type)));
return convert (type, exp);
}
/* ??? This should no longer be needed now bit-fields have their
proper types. */
if (TREE_CODE (exp) == COMPONENT_REF
&& DECL_C_BIT_FIELD (TREE_OPERAND (exp, 1))
/* If it's thinner than an int, promote it like a
c_promoting_integer_type_p, otherwise leave it alone. */
&& 0 > compare_tree_int (DECL_SIZE (TREE_OPERAND (exp, 1)),
TYPE_PRECISION (integer_type_node)))
return convert (integer_type_node, exp);
if (c_promoting_integer_type_p (type))
{
/* Preserve unsignedness if not really getting any wider. */
if (TYPE_UNSIGNED (type)
&& TYPE_PRECISION (type) == TYPE_PRECISION (integer_type_node))
return convert (unsigned_type_node, exp);
return convert (integer_type_node, exp);
}
return exp;
}
/* Perform default promotions for C data used in expressions.
   Enumeral types or short or char are converted to int.
   In addition, manifest constant symbols are replaced by their values. */
tree
default_conversion (tree exp)
{
tree orig_exp;
tree type = TREE_TYPE (exp);
enum tree_code code = TREE_CODE (type);
tree promoted_type;
mark_exp_read (exp);
/* Functions and arrays have been converted during parsing. */
gcc_assert (code != FUNCTION_TYPE);
if (code == ARRAY_TYPE)
return exp;
/* Constants can be used directly unless they're not loadable. */
if (TREE_CODE (exp) == CONST_DECL)
exp = DECL_INITIAL (exp);
/* Strip no-op conversions. */
orig_exp = exp;
STRIP_TYPE_NOPS (exp);
if (TREE_NO_WARNING (orig_exp))
TREE_NO_WARNING (exp) = 1;
if (code == VOID_TYPE)
{
error ("void value not ignored as it ought to be");
return error_mark_node;
}
exp = require_complete_type (exp);
if (exp == error_mark_node)
return error_mark_node;
promoted_type = targetm.promoted_type (type);
if (promoted_type)
return convert (promoted_type, exp);
if (INTEGRAL_TYPE_P (type))
return perform_integral_promotions (exp);
return exp;
}
/* Look up COMPONENT in a structure or union TYPE.
If the component name is not found, returns NULL_TREE. Otherwise,
the return value is a TREE_LIST, with each TREE_VALUE a FIELD_DECL
stepping down the chain to the component, which is in the last
TREE_VALUE of the list. Normally the list is of length one, but if
the component is embedded within (nested) anonymous structures or
unions, the list steps down the chain to the component. */
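/* For instance, given
       struct outer { union { int a; float b; }; int c; };
   looking up "b" yields a two-element list: first the unnamed union
   member, then the FIELD_DECL for b itself; looking up "c" yields a
   one-element list.  */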
static tree
lookup_field (tree type, tree component)
{
tree field;
/* If TYPE_LANG_SPECIFIC is set, then it is a sorted array of pointers
to the field elements. Use a binary search on this array to quickly
find the element. Otherwise, do a linear search. TYPE_LANG_SPECIFIC
will always be set for structures which have many elements. */
if (TYPE_LANG_SPECIFIC (type) && TYPE_LANG_SPECIFIC (type)->s)
{
int bot, top, half;
tree *field_array = &TYPE_LANG_SPECIFIC (type)->s->elts[0];
field = TYPE_FIELDS (type);
bot = 0;
top = TYPE_LANG_SPECIFIC (type)->s->len;
while (top - bot > 1)
{
half = (top - bot + 1) >> 1;
field = field_array[bot+half];
if (DECL_NAME (field) == NULL_TREE)
{
/* Step through all anon unions in linear fashion. */
while (DECL_NAME (field_array[bot]) == NULL_TREE)
{
field = field_array[bot++];
if (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE
|| TREE_CODE (TREE_TYPE (field)) == UNION_TYPE)
{
tree anon = lookup_field (TREE_TYPE (field), component);
if (anon)
return tree_cons (NULL_TREE, field, anon);
/* The Plan 9 compiler permits referring
directly to an anonymous struct/union field
using a typedef name. */
if (flag_plan9_extensions
&& TYPE_NAME (TREE_TYPE (field)) != NULL_TREE
&& (TREE_CODE (TYPE_NAME (TREE_TYPE (field)))
== TYPE_DECL)
&& (DECL_NAME (TYPE_NAME (TREE_TYPE (field)))
== component))
break;
}
}
/* Entire record is only anon unions. */
if (bot > top)
return NULL_TREE;
/* Restart the binary search, with new lower bound. */
continue;
}
if (DECL_NAME (field) == component)
break;
if (DECL_NAME (field) < component)
bot += half;
else
top = bot + half;
}
if (DECL_NAME (field_array[bot]) == component)
field = field_array[bot];
else if (DECL_NAME (field) != component)
return NULL_TREE;
}
else
{
for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
{
if (DECL_NAME (field) == NULL_TREE
&& (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE
|| TREE_CODE (TREE_TYPE (field)) == UNION_TYPE))
{
tree anon = lookup_field (TREE_TYPE (field), component);
if (anon)
return tree_cons (NULL_TREE, field, anon);
/* The Plan 9 compiler permits referring directly to an
anonymous struct/union field using a typedef
name. */
if (flag_plan9_extensions
&& TYPE_NAME (TREE_TYPE (field)) != NULL_TREE
&& TREE_CODE (TYPE_NAME (TREE_TYPE (field))) == TYPE_DECL
&& (DECL_NAME (TYPE_NAME (TREE_TYPE (field)))
== component))
break;
}
if (DECL_NAME (field) == component)
break;
}
if (field == NULL_TREE)
return NULL_TREE;
}
return tree_cons (NULL_TREE, field, NULL_TREE);
}
/* Make an expression to refer to the COMPONENT field of structure or
union value DATUM. COMPONENT is an IDENTIFIER_NODE. LOC is the
location of the COMPONENT_REF. */
tree
build_component_ref (location_t loc, tree datum, tree component)
{
tree type = TREE_TYPE (datum);
enum tree_code code = TREE_CODE (type);
tree field = NULL;
tree ref;
bool datum_lvalue = lvalue_p (datum);
if (!objc_is_public (datum, component))
return error_mark_node;
/* Detect Objective-C property syntax object.property. */
if (c_dialect_objc ()
&& (ref = objc_maybe_build_component_ref (datum, component)))
return ref;
/* See if there is a field or component with name COMPONENT. */
if (code == RECORD_TYPE || code == UNION_TYPE)
{
if (!COMPLETE_TYPE_P (type))
{
c_incomplete_type_error (NULL_TREE, type);
return error_mark_node;
}
field = lookup_field (type, component);
if (!field)
{
error_at (loc, "%qT has no member named %qE", type, component);
return error_mark_node;
}
/* Chain the COMPONENT_REFs if necessary down to the FIELD.
This might be better solved in future the way the C++ front
end does it - by giving the anonymous entities each a
separate name and type, and then have build_component_ref
recursively call itself. We can't do that here. */
do
{
tree subdatum = TREE_VALUE (field);
int quals;
tree subtype;
bool use_datum_quals;
if (TREE_TYPE (subdatum) == error_mark_node)
return error_mark_node;
/* If this is an rvalue, it does not have qualifiers in C
standard terms and we must avoid propagating such
qualifiers down to a non-lvalue array that is then
converted to a pointer. */
use_datum_quals = (datum_lvalue
|| TREE_CODE (TREE_TYPE (subdatum)) != ARRAY_TYPE);
quals = TYPE_QUALS (strip_array_types (TREE_TYPE (subdatum)));
if (use_datum_quals)
quals |= TYPE_QUALS (TREE_TYPE (datum));
subtype = c_build_qualified_type (TREE_TYPE (subdatum), quals);
ref = build3 (COMPONENT_REF, subtype, datum, subdatum,
NULL_TREE);
SET_EXPR_LOCATION (ref, loc);
if (TREE_READONLY (subdatum)
|| (use_datum_quals && TREE_READONLY (datum)))
TREE_READONLY (ref) = 1;
if (TREE_THIS_VOLATILE (subdatum)
|| (use_datum_quals && TREE_THIS_VOLATILE (datum)))
TREE_THIS_VOLATILE (ref) = 1;
if (TREE_DEPRECATED (subdatum))
warn_deprecated_use (subdatum, NULL_TREE);
datum = ref;
field = TREE_CHAIN (field);
}
while (field);
return ref;
}
else if (code != ERROR_MARK)
error_at (loc,
"request for member %qE in something not a structure or union",
component);
return error_mark_node;
}
/* Given an expression PTR for a pointer, return an expression
for the value pointed to.
ERRORSTRING is the name of the operator to appear in error messages.
LOC is the location to use for the generated tree. */
tree
build_indirect_ref (location_t loc, tree ptr, ref_operator errstring)
{
tree pointer = default_conversion (ptr);
tree type = TREE_TYPE (pointer);
tree ref;
if (TREE_CODE (type) == POINTER_TYPE)
{
if (CONVERT_EXPR_P (pointer)
|| TREE_CODE (pointer) == VIEW_CONVERT_EXPR)
{
/* If a warning is issued, mark it to avoid duplicates from
the backend. This only needs to be done at
warn_strict_aliasing > 2. */
if (warn_strict_aliasing > 2)
if (strict_aliasing_warning (TREE_TYPE (TREE_OPERAND (pointer, 0)),
type, TREE_OPERAND (pointer, 0)))
TREE_NO_WARNING (pointer) = 1;
}
if (TREE_CODE (pointer) == ADDR_EXPR
&& (TREE_TYPE (TREE_OPERAND (pointer, 0))
== TREE_TYPE (type)))
{
ref = TREE_OPERAND (pointer, 0);
protected_set_expr_location (ref, loc);
return ref;
}
else
{
tree t = TREE_TYPE (type);
ref = build1 (INDIRECT_REF, t, pointer);
if (!COMPLETE_OR_VOID_TYPE_P (t) && TREE_CODE (t) != ARRAY_TYPE)
{
error_at (loc, "dereferencing pointer to incomplete type");
return error_mark_node;
}
if (VOID_TYPE_P (t) && c_inhibit_evaluation_warnings == 0)
warning_at (loc, 0, "dereferencing %<void *%> pointer");
/* We *must* set TREE_READONLY when dereferencing a pointer to const,
so that we get the proper error message if the result is used
to assign to. Also, &* is supposed to be a no-op.
And ANSI C seems to specify that the type of the result
should be the const type. */
/* A de-reference of a pointer to const is not a const. It is valid
to change it via some other pointer. */
TREE_READONLY (ref) = TYPE_READONLY (t);
TREE_SIDE_EFFECTS (ref)
= TYPE_VOLATILE (t) || TREE_SIDE_EFFECTS (pointer);
TREE_THIS_VOLATILE (ref) = TYPE_VOLATILE (t);
protected_set_expr_location (ref, loc);
return ref;
}
}
else if (TREE_CODE (pointer) != ERROR_MARK)
invalid_indirection_error (loc, type, errstring);
return error_mark_node;
}
/* This handles expressions of the form "a[i]", which denotes
an array reference.
This is logically equivalent in C to *(a+i), but we may do it differently.
If A is a variable or a member, we generate a primitive ARRAY_REF.
This avoids forcing the array out of registers, and can work on
arrays that are not lvalues (for example, members of structures returned
by functions).
For vector types, allow vector[i] but not i[vector], and create
*(((type*)&vectortype) + i) for the expression.
LOC is the location to use for the returned expression. */
tree
build_array_ref (location_t loc, tree array, tree index)
{
tree ret;
bool swapped = false;
if (TREE_TYPE (array) == error_mark_node
|| TREE_TYPE (index) == error_mark_node)
return error_mark_node;
if (TREE_CODE (TREE_TYPE (array)) != ARRAY_TYPE
&& TREE_CODE (TREE_TYPE (array)) != POINTER_TYPE
/* Allow vector[index] but not index[vector]. */
&& TREE_CODE (TREE_TYPE (array)) != VECTOR_TYPE)
{
tree temp;
if (TREE_CODE (TREE_TYPE (index)) != ARRAY_TYPE
&& TREE_CODE (TREE_TYPE (index)) != POINTER_TYPE)
{
error_at (loc,
"subscripted value is neither array nor pointer nor vector");
return error_mark_node;
}
temp = array;
array = index;
index = temp;
swapped = true;
}
if (!INTEGRAL_TYPE_P (TREE_TYPE (index)))
{
error_at (loc, "array subscript is not an integer");
return error_mark_node;
}
if (TREE_CODE (TREE_TYPE (TREE_TYPE (array))) == FUNCTION_TYPE)
{
error_at (loc, "subscripted value is pointer to function");
return error_mark_node;
}
/* ??? Existing practice has been to warn only when the char
index is syntactically the index, not for char[array]. */
if (!swapped)
warn_array_subscript_with_type_char (index);
/* Apply default promotions *after* noticing character types. */
index = default_conversion (index);
gcc_assert (TREE_CODE (TREE_TYPE (index)) == INTEGER_TYPE);
/* For vector[index], convert the vector to a
pointer of the underlying type. */
if (TREE_CODE (TREE_TYPE (array)) == VECTOR_TYPE)
{
tree type = TREE_TYPE (array);
tree type1;
if (TREE_CODE (index) == INTEGER_CST)
if (!host_integerp (index, 1)
|| ((unsigned HOST_WIDE_INT) tree_low_cst (index, 1)
>= TYPE_VECTOR_SUBPARTS (TREE_TYPE (array))))
        warning_at (loc, OPT_Warray_bounds, "index value is out of bounds");
c_common_mark_addressable_vec (array);
type = build_qualified_type (TREE_TYPE (type), TYPE_QUALS (type));
type = build_pointer_type (type);
type1 = build_pointer_type (TREE_TYPE (array));
array = build1 (ADDR_EXPR, type1, array);
array = convert (type, array);
}
if (TREE_CODE (TREE_TYPE (array)) == ARRAY_TYPE)
{
tree rval, type;
/* An array that is indexed by a non-constant
cannot be stored in a register; we must be able to do
address arithmetic on its address.
Likewise an array of elements of variable size. */
if (TREE_CODE (index) != INTEGER_CST
|| (COMPLETE_TYPE_P (TREE_TYPE (TREE_TYPE (array)))
&& TREE_CODE (TYPE_SIZE (TREE_TYPE (TREE_TYPE (array)))) != INTEGER_CST))
{
if (!c_mark_addressable (array))
return error_mark_node;
}
/* An array that is indexed by a constant value which is not within
the array bounds cannot be stored in a register either; because we
would get a crash in store_bit_field/extract_bit_field when trying
to access a non-existent part of the register. */
if (TREE_CODE (index) == INTEGER_CST
&& TYPE_DOMAIN (TREE_TYPE (array))
&& !int_fits_type_p (index, TYPE_DOMAIN (TREE_TYPE (array))))
{
if (!c_mark_addressable (array))
return error_mark_node;
}
if (pedantic)
{
tree foo = array;
while (TREE_CODE (foo) == COMPONENT_REF)
foo = TREE_OPERAND (foo, 0);
if (TREE_CODE (foo) == VAR_DECL && C_DECL_REGISTER (foo))
pedwarn (loc, OPT_pedantic,
"ISO C forbids subscripting %<register%> array");
else if (!flag_isoc99 && !lvalue_p (foo))
pedwarn (loc, OPT_pedantic,
"ISO C90 forbids subscripting non-lvalue array");
}
type = TREE_TYPE (TREE_TYPE (array));
rval = build4 (ARRAY_REF, type, array, index, NULL_TREE, NULL_TREE);
/* Array ref is const/volatile if the array elements are
or if the array is. */
TREE_READONLY (rval)
|= (TYPE_READONLY (TREE_TYPE (TREE_TYPE (array)))
| TREE_READONLY (array));
TREE_SIDE_EFFECTS (rval)
|= (TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (array)))
| TREE_SIDE_EFFECTS (array));
TREE_THIS_VOLATILE (rval)
|= (TYPE_VOLATILE (TREE_TYPE (TREE_TYPE (array)))
/* This was added by rms on 16 Nov 91.
It fixes vol struct foo *a; a->elts[1]
in an inline function.
Hope it doesn't break something else. */
| TREE_THIS_VOLATILE (array));
ret = require_complete_type (rval);
protected_set_expr_location (ret, loc);
return ret;
}
else
{
tree ar = default_conversion (array);
if (ar == error_mark_node)
return ar;
gcc_assert (TREE_CODE (TREE_TYPE (ar)) == POINTER_TYPE);
gcc_assert (TREE_CODE (TREE_TYPE (TREE_TYPE (ar))) != FUNCTION_TYPE);
return build_indirect_ref
(loc, build_binary_op (loc, PLUS_EXPR, ar, index, 0),
RO_ARRAY_INDEXING);
}
}
/* Build an external reference to identifier ID. FUN indicates
whether this will be used for a function call. LOC is the source
location of the identifier. This sets *TYPE to the type of the
identifier, which is not the same as the type of the returned value
for CONST_DECLs defined as enum constants. If the type of the
identifier is not available, *TYPE is set to NULL. */
tree
build_external_ref (location_t loc, tree id, int fun, tree *type)
{
tree ref;
tree decl = lookup_name (id);
/* In Objective-C, an instance variable (ivar) may be preferred to
whatever lookup_name() found. */
decl = objc_lookup_ivar (decl, id);
*type = NULL;
if (decl && decl != error_mark_node)
{
ref = decl;
*type = TREE_TYPE (ref);
}
else if (fun)
/* Implicit function declaration. */
ref = implicitly_declare (loc, id);
else if (decl == error_mark_node)
/* Don't complain about something that's already been
complained about. */
return error_mark_node;
else
{
undeclared_variable (loc, id);
return error_mark_node;
}
if (TREE_TYPE (ref) == error_mark_node)
return error_mark_node;
if (TREE_DEPRECATED (ref))
warn_deprecated_use (ref, NULL_TREE);
/* Recursive call does not count as usage. */
if (ref != current_function_decl)
{
TREE_USED (ref) = 1;
}
if (TREE_CODE (ref) == FUNCTION_DECL && !in_alignof)
{
if (!in_sizeof && !in_typeof)
C_DECL_USED (ref) = 1;
else if (DECL_INITIAL (ref) == 0
&& DECL_EXTERNAL (ref)
&& !TREE_PUBLIC (ref))
record_maybe_used_decl (ref);
}
if (TREE_CODE (ref) == CONST_DECL)
{
used_types_insert (TREE_TYPE (ref));
if (warn_cxx_compat
&& TREE_CODE (TREE_TYPE (ref)) == ENUMERAL_TYPE
&& C_TYPE_DEFINED_IN_STRUCT (TREE_TYPE (ref)))
{
warning_at (loc, OPT_Wc___compat,
("enum constant defined in struct or union "
"is not visible in C++"));
inform (DECL_SOURCE_LOCATION (ref), "enum constant defined here");
}
ref = DECL_INITIAL (ref);
TREE_CONSTANT (ref) = 1;
}
else if (current_function_decl != 0
&& !DECL_FILE_SCOPE_P (current_function_decl)
&& (TREE_CODE (ref) == VAR_DECL
|| TREE_CODE (ref) == PARM_DECL
|| TREE_CODE (ref) == FUNCTION_DECL))
{
tree context = decl_function_context (ref);
if (context != 0 && context != current_function_decl)
DECL_NONLOCAL (ref) = 1;
}
/* C99 6.7.4p3: An inline definition of a function with external
linkage ... shall not contain a reference to an identifier with
internal linkage. */
else if (current_function_decl != 0
&& DECL_DECLARED_INLINE_P (current_function_decl)
&& DECL_EXTERNAL (current_function_decl)
&& VAR_OR_FUNCTION_DECL_P (ref)
&& (TREE_CODE (ref) != VAR_DECL || TREE_STATIC (ref))
&& ! TREE_PUBLIC (ref)
&& DECL_CONTEXT (ref) != current_function_decl)
record_inline_static (loc, current_function_decl, ref,
csi_internal);
return ref;
}
/* Record details of decls possibly used inside sizeof or typeof. */
struct maybe_used_decl
{
/* The decl. */
tree decl;
/* The level seen at (in_sizeof + in_typeof). */
int level;
/* The next one at this level or above, or NULL. */
struct maybe_used_decl *next;
};
static struct maybe_used_decl *maybe_used_decls;
/* Record that DECL, an undefined static function reference seen
inside sizeof or typeof, might be used if the operand of sizeof is
a VLA type or the operand of typeof is a variably modified
type. */
static void
record_maybe_used_decl (tree decl)
{
struct maybe_used_decl *t = XOBNEW (&parser_obstack, struct maybe_used_decl);
t->decl = decl;
t->level = in_sizeof + in_typeof;
t->next = maybe_used_decls;
maybe_used_decls = t;
}
/* Pop the stack of decls possibly used inside sizeof or typeof. If
USED is false, just discard them. If it is true, mark them used
(if no longer inside sizeof or typeof) or move them to the next
level up (if still inside sizeof or typeof). */
void
pop_maybe_used (bool used)
{
struct maybe_used_decl *p = maybe_used_decls;
int cur_level = in_sizeof + in_typeof;
while (p && p->level > cur_level)
{
if (used)
{
if (cur_level == 0)
C_DECL_USED (p->decl) = 1;
else
p->level = cur_level;
}
p = p->next;
}
if (!used || cur_level == 0)
maybe_used_decls = p;
}
/* Return the result of sizeof applied to EXPR. */
struct c_expr
c_expr_sizeof_expr (location_t loc, struct c_expr expr)
{
struct c_expr ret;
if (expr.value == error_mark_node)
{
ret.value = error_mark_node;
ret.original_code = ERROR_MARK;
ret.original_type = NULL;
pop_maybe_used (false);
}
else
{
bool expr_const_operands = true;
tree folded_expr = c_fully_fold (expr.value, require_constant_value,
&expr_const_operands);
ret.value = c_sizeof (loc, TREE_TYPE (folded_expr));
ret.original_code = ERROR_MARK;
ret.original_type = NULL;
if (c_vla_type_p (TREE_TYPE (folded_expr)))
{
/* sizeof is evaluated when given a vla (C99 6.5.3.4p2). */
ret.value = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (ret.value),
folded_expr, ret.value);
C_MAYBE_CONST_EXPR_NON_CONST (ret.value) = !expr_const_operands;
SET_EXPR_LOCATION (ret.value, loc);
}
pop_maybe_used (C_TYPE_VARIABLE_SIZE (TREE_TYPE (folded_expr)));
}
return ret;
}
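/* Example of the VLA case above (a sketch, not part of this file):
     void h (int n)
     {
       int a[n];
       size_t s = sizeof a;
     }
   Because a has variably modified type, sizeof a is evaluated at run
   time (C99 6.5.3.4p2) and yields n * sizeof (int); with a non-VLA
   operand the result would simply be an integer constant.  */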
/* Return the result of sizeof applied to T, a structure for the type
name passed to sizeof (rather than the type itself). LOC is the
location of the original expression. */
struct c_expr
c_expr_sizeof_type (location_t loc, struct c_type_name *t)
{
tree type;
struct c_expr ret;
tree type_expr = NULL_TREE;
bool type_expr_const = true;
type = groktypename (t, &type_expr, &type_expr_const);
ret.value = c_sizeof (loc, type);
ret.original_code = ERROR_MARK;
ret.original_type = NULL;
if ((type_expr || TREE_CODE (ret.value) == INTEGER_CST)
&& c_vla_type_p (type))
{
/* If the type is a [*] array, it is a VLA but is represented as
having a size of zero. In such a case we must ensure that
the result of sizeof does not get folded to a constant by
c_fully_fold, because if the size is evaluated the result is
not constant and so constraints on zero or negative size
arrays must not be applied when this sizeof call is inside
another array declarator. */
if (!type_expr)
type_expr = integer_zero_node;
ret.value = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (ret.value),
type_expr, ret.value);
C_MAYBE_CONST_EXPR_NON_CONST (ret.value) = !type_expr_const;
}
pop_maybe_used (type != error_mark_node
? C_TYPE_VARIABLE_SIZE (type) : false);
return ret;
}
/* Build a function call to function FUNCTION with parameters PARAMS.
The function call is at LOC.
PARAMS is a list--a chain of TREE_LIST nodes--in which the
TREE_VALUE of each node is a parameter-expression.
FUNCTION's data type may be a function type or a pointer-to-function. */
tree
build_function_call (location_t loc, tree function, tree params)
{
VEC(tree,gc) *vec;
tree ret;
vec = VEC_alloc (tree, gc, list_length (params));
for (; params; params = TREE_CHAIN (params))
VEC_quick_push (tree, vec, TREE_VALUE (params));
ret = build_function_call_vec (loc, function, vec, NULL);
VEC_free (tree, gc, vec);
return ret;
}
/* Give a note about the location of the declaration of DECL. */
static void
inform_declaration (tree decl)
{
if (decl && (TREE_CODE (decl) != FUNCTION_DECL || !DECL_BUILT_IN (decl)))
inform (DECL_SOURCE_LOCATION (decl), "declared here");
}
/* Build a function call to function FUNCTION with parameters PARAMS.
ORIGTYPES, if not NULL, is a vector of types; each element is
either NULL or the original type of the corresponding element in
PARAMS. The original type may differ from TREE_TYPE of the
parameter for enums. FUNCTION's data type may be a function type
or pointer-to-function. This function changes the elements of
PARAMS. */
tree
build_function_call_vec (location_t loc, tree function, VEC(tree,gc) *params,
VEC(tree,gc) *origtypes)
{
tree fntype, fundecl = 0;
tree name = NULL_TREE, result;
tree tem;
int nargs;
tree *argarray;
/* Strip NON_LVALUE_EXPRs, etc., since we aren't using FUNCTION as an lvalue. */
STRIP_TYPE_NOPS (function);
/* Convert anything with function type to a pointer-to-function. */
if (TREE_CODE (function) == FUNCTION_DECL)
{
/* Implement type-directed function overloading for builtins.
resolve_overloaded_builtin and targetm.resolve_overloaded_builtin
handle all the type checking. The result is a complete expression
that implements this function call. */
tem = resolve_overloaded_builtin (loc, function, params);
if (tem)
return tem;
name = DECL_NAME (function);
if (flag_tm)
tm_malloc_replacement (function);
fundecl = function;
/* Atomic functions have type checking/casting already done. They are
often rewritten and don't match the original parameter list. */
if (name && !strncmp (IDENTIFIER_POINTER (name), "__atomic_", 9))
origtypes = NULL;
}
if (TREE_CODE (TREE_TYPE (function)) == FUNCTION_TYPE)
function = function_to_pointer_conversion (loc, function);
/* For Objective-C, convert any calls via a cast to OBJC_TYPE_REF
expressions, like those used for ObjC messenger dispatches. */
if (!VEC_empty (tree, params))
function = objc_rewrite_function_call (function,
VEC_index (tree, params, 0));
function = c_fully_fold (function, false, NULL);
fntype = TREE_TYPE (function);
if (TREE_CODE (fntype) == ERROR_MARK)
return error_mark_node;
if (!(TREE_CODE (fntype) == POINTER_TYPE
&& TREE_CODE (TREE_TYPE (fntype)) == FUNCTION_TYPE))
{
if (!flag_diagnostics_show_caret)
error_at (loc,
"called object %qE is not a function or function pointer",
function);
else if (DECL_P (function))
{
error_at (loc,
"called object %qD is not a function or function pointer",
function);
inform_declaration (function);
}
else
error_at (loc,
"called object is not a function or function pointer");
return error_mark_node;
}
if (fundecl && TREE_THIS_VOLATILE (fundecl))
current_function_returns_abnormally = 1;
/* fntype now gets the type of function pointed to. */
fntype = TREE_TYPE (fntype);
/* Convert the parameters to the types declared in the
function prototype, or apply default promotions. */
nargs = convert_arguments (TYPE_ARG_TYPES (fntype), params, origtypes,
function, fundecl);
if (nargs < 0)
return error_mark_node;
/* Check that the function is called through a compatible prototype.
If it is not, replace the call by a trap, wrapped up in a compound
expression if necessary. This has the nice side effect of preventing
the tree inliner from generating invalid assignment trees, which may
blow up in the RTL expander later. */
if (CONVERT_EXPR_P (function)
&& TREE_CODE (tem = TREE_OPERAND (function, 0)) == ADDR_EXPR
&& TREE_CODE (tem = TREE_OPERAND (tem, 0)) == FUNCTION_DECL
&& !comptypes (fntype, TREE_TYPE (tem)))
{
tree return_type = TREE_TYPE (fntype);
tree trap = build_function_call (loc,
builtin_decl_explicit (BUILT_IN_TRAP),
NULL_TREE);
int i;
/* This situation leads to run-time undefined behavior. We can't,
therefore, simply error unless we can prove that all possible
executions of the program must execute the code. */
if (warning_at (loc, 0, "function called through a non-compatible type"))
/* We can, however, treat "undefined" any way we please.
Call abort to encourage the user to fix the program. */
inform (loc, "if this code is reached, the program will abort");
/* Before the abort, allow the function arguments to exit or
call longjmp. */
for (i = 0; i < nargs; i++)
trap = build2 (COMPOUND_EXPR, void_type_node,
VEC_index (tree, params, i), trap);
if (VOID_TYPE_P (return_type))
{
if (TYPE_QUALS (return_type) != TYPE_UNQUALIFIED)
pedwarn (loc, 0,
"function with qualified void return type called");
return trap;
}
else
{
tree rhs;
if (AGGREGATE_TYPE_P (return_type))
rhs = build_compound_literal (loc, return_type,
build_constructor (return_type, 0),
false);
else
rhs = build_zero_cst (return_type);
return require_complete_type (build2 (COMPOUND_EXPR, return_type,
trap, rhs));
}
}
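  /* Example of the mismatched-prototype case above (sketch):
       extern void f (void);
       void g (void) { ((int (*) (int)) f) (42); }
     The call goes through a pointer type incompatible with f's real
     type, so it is replaced by a __builtin_trap () call, with the
     evaluated arguments kept in COMPOUND_EXPRs in front of it.  */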
argarray = VEC_address (tree, params);
/* Check that arguments to builtin functions match the expectations. */
if (fundecl
&& DECL_BUILT_IN (fundecl)
&& DECL_BUILT_IN_CLASS (fundecl) == BUILT_IN_NORMAL
&& !check_builtin_function_arguments (fundecl, nargs, argarray))
return error_mark_node;
/* Check that the arguments to the function are valid. */
check_function_arguments (fntype, nargs, argarray);
if (name != NULL_TREE
&& !strncmp (IDENTIFIER_POINTER (name), "__builtin_", 10))
{
if (require_constant_value)
result =
fold_build_call_array_initializer_loc (loc, TREE_TYPE (fntype),
function, nargs, argarray);
else
result = fold_build_call_array_loc (loc, TREE_TYPE (fntype),
function, nargs, argarray);
if (TREE_CODE (result) == NOP_EXPR
&& TREE_CODE (TREE_OPERAND (result, 0)) == INTEGER_CST)
STRIP_TYPE_NOPS (result);
}
else
result = build_call_array_loc (loc, TREE_TYPE (fntype),
function, nargs, argarray);
if (VOID_TYPE_P (TREE_TYPE (result)))
{
if (TYPE_QUALS (TREE_TYPE (result)) != TYPE_UNQUALIFIED)
pedwarn (loc, 0,
"function with qualified void return type called");
return result;
}
return require_complete_type (result);
}
/* Build a VEC_PERM_EXPR if V0, V1 and MASK are not error_mark_nodes
and have vector types, V0 has the same type as V1, and the number of
elements of V0, V1, MASK is the same.
   If V1 is NULL_TREE, it is assumed that __builtin_shuffle was called
   with two arguments; in that case the implementation passes the first
   argument twice so that the same tree code can be shared.  A side
   effect is that mask values may behave as if the vector were twice its
   length; this is an implementation accident and these semantics are
   not guaranteed to the user. */
tree
c_build_vec_perm_expr (location_t loc, tree v0, tree v1, tree mask)
{
tree ret;
bool wrap = true;
bool maybe_const = false;
bool two_arguments = false;
if (v1 == NULL_TREE)
{
two_arguments = true;
v1 = v0;
}
if (v0 == error_mark_node || v1 == error_mark_node
|| mask == error_mark_node)
return error_mark_node;
if (TREE_CODE (TREE_TYPE (mask)) != VECTOR_TYPE
|| TREE_CODE (TREE_TYPE (TREE_TYPE (mask))) != INTEGER_TYPE)
{
error_at (loc, "__builtin_shuffle last argument must "
"be an integer vector");
return error_mark_node;
}
if (TREE_CODE (TREE_TYPE (v0)) != VECTOR_TYPE
|| TREE_CODE (TREE_TYPE (v1)) != VECTOR_TYPE)
{
error_at (loc, "__builtin_shuffle arguments must be vectors");
return error_mark_node;
}
if (TYPE_MAIN_VARIANT (TREE_TYPE (v0)) != TYPE_MAIN_VARIANT (TREE_TYPE (v1)))
{
error_at (loc, "__builtin_shuffle argument vectors must be of "
"the same type");
return error_mark_node;
}
if (TYPE_VECTOR_SUBPARTS (TREE_TYPE (v0))
!= TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask))
&& TYPE_VECTOR_SUBPARTS (TREE_TYPE (v1))
!= TYPE_VECTOR_SUBPARTS (TREE_TYPE (mask)))
{
error_at (loc, "__builtin_shuffle number of elements of the "
"argument vector(s) and the mask vector should "
"be the same");
return error_mark_node;
}
if (GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (v0))))
!= GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (TREE_TYPE (mask)))))
{
error_at (loc, "__builtin_shuffle argument vector(s) inner type "
"must have the same size as inner type of the mask");
return error_mark_node;
}
/* Avoid C_MAYBE_CONST_EXPRs inside VEC_PERM_EXPR. */
v0 = c_fully_fold (v0, false, &maybe_const);
wrap &= maybe_const;
if (two_arguments)
v1 = v0 = save_expr (v0);
else
{
v1 = c_fully_fold (v1, false, &maybe_const);
wrap &= maybe_const;
}
mask = c_fully_fold (mask, false, &maybe_const);
wrap &= maybe_const;
ret = build3_loc (loc, VEC_PERM_EXPR, TREE_TYPE (v0), v0, v1, mask);
if (!wrap)
ret = c_wrap_maybe_const (ret, true);
return ret;
}
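/* Example usage of __builtin_shuffle (sketch, hypothetical code):
     typedef int v4si __attribute__ ((vector_size (16)));
     v4si shuffle2 (v4si a, v4si mask) { return __builtin_shuffle (a, mask); }
     v4si shuffle3 (v4si a, v4si b, v4si mask)
     { return __builtin_shuffle (a, b, mask); }
   With a = {0,1,2,3}, b = {4,5,6,7} and mask = {0,5,2,7}, the
   two-argument form selects only from a and yields {0,1,2,3}, while the
   three-argument form indexes the concatenation of a and b and yields
   {0,5,2,7}.  */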
/* Convert the argument expressions in the vector VALUES
to the types in the list TYPELIST.
If TYPELIST is exhausted, or when an element has NULL as its type,
perform the default conversions.
ORIGTYPES is the original types of the expressions in VALUES. This
holds the type of enum values which have been converted to integral
types. It may be NULL.
FUNCTION is a tree for the called function. It is used only for
error messages, where it is formatted with %qE.
This is also where warnings about wrong number of args are generated.
Returns the actual number of arguments processed (which may be less
than the length of VALUES in some error situations), or -1 on
failure. */
static int
convert_arguments (tree typelist, VEC(tree,gc) *values,
VEC(tree,gc) *origtypes, tree function, tree fundecl)
{
tree typetail, val;
unsigned int parmnum;
bool error_args = false;
const bool type_generic = fundecl
&& lookup_attribute ("type generic", TYPE_ATTRIBUTES (TREE_TYPE (fundecl)));
bool type_generic_remove_excess_precision = false;
tree selector;
/* Change pointer to function to the function itself for
diagnostics. */
if (TREE_CODE (function) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (function, 0)) == FUNCTION_DECL)
function = TREE_OPERAND (function, 0);
/* Handle an ObjC selector specially for diagnostics. */
selector = objc_message_selector ();
/* For type-generic built-in functions, determine whether excess
precision should be removed (classification) or not
(comparison). */
if (type_generic
&& DECL_BUILT_IN (fundecl)
&& DECL_BUILT_IN_CLASS (fundecl) == BUILT_IN_NORMAL)
{
switch (DECL_FUNCTION_CODE (fundecl))
{
case BUILT_IN_ISFINITE:
case BUILT_IN_ISINF:
case BUILT_IN_ISINF_SIGN:
case BUILT_IN_ISNAN:
case BUILT_IN_ISNORMAL:
case BUILT_IN_FPCLASSIFY:
type_generic_remove_excess_precision = true;
break;
default:
type_generic_remove_excess_precision = false;
break;
}
}
/* Scan the given expressions and types, producing individual
converted arguments. */
for (typetail = typelist, parmnum = 0;
VEC_iterate (tree, values, parmnum, val);
++parmnum)
{
tree type = typetail ? TREE_VALUE (typetail) : 0;
tree valtype = TREE_TYPE (val);
tree rname = function;
int argnum = parmnum + 1;
const char *invalid_func_diag;
bool excess_precision = false;
bool npc;
tree parmval;
if (type == void_type_node)
{
if (selector)
error_at (input_location,
"too many arguments to method %qE", selector);
else
error_at (input_location,
"too many arguments to function %qE", function);
inform_declaration (fundecl);
return parmnum;
}
if (selector && argnum > 2)
{
rname = selector;
argnum -= 2;
}
npc = null_pointer_constant_p (val);
/* If there is excess precision and a prototype, convert once to
the required type rather than converting via the semantic
type. Likewise without a prototype a float value represented
as long double should be converted once to double. But for
type-generic classification functions excess precision must
be removed here. */
if (TREE_CODE (val) == EXCESS_PRECISION_EXPR
&& (type || !type_generic || !type_generic_remove_excess_precision))
{
val = TREE_OPERAND (val, 0);
excess_precision = true;
}
val = c_fully_fold (val, false, NULL);
STRIP_TYPE_NOPS (val);
val = require_complete_type (val);
if (type != 0)
{
/* Formal parm type is specified by a function prototype. */
if (type == error_mark_node || !COMPLETE_TYPE_P (type))
{
error ("type of formal parameter %d is incomplete", parmnum + 1);
parmval = val;
}
else
{
tree origtype;
/* Optionally warn about conversions that
differ from the default conversions. */
if (warn_traditional_conversion || warn_traditional)
{
unsigned int formal_prec = TYPE_PRECISION (type);
if (INTEGRAL_TYPE_P (type)
&& TREE_CODE (valtype) == REAL_TYPE)
warning (0, "passing argument %d of %qE as integer "
"rather than floating due to prototype",
argnum, rname);
if (INTEGRAL_TYPE_P (type)
&& TREE_CODE (valtype) == COMPLEX_TYPE)
warning (0, "passing argument %d of %qE as integer "
"rather than complex due to prototype",
argnum, rname);
else if (TREE_CODE (type) == COMPLEX_TYPE
&& TREE_CODE (valtype) == REAL_TYPE)
warning (0, "passing argument %d of %qE as complex "
"rather than floating due to prototype",
argnum, rname);
else if (TREE_CODE (type) == REAL_TYPE
&& INTEGRAL_TYPE_P (valtype))
warning (0, "passing argument %d of %qE as floating "
"rather than integer due to prototype",
argnum, rname);
else if (TREE_CODE (type) == COMPLEX_TYPE
&& INTEGRAL_TYPE_P (valtype))
warning (0, "passing argument %d of %qE as complex "
"rather than integer due to prototype",
argnum, rname);
else if (TREE_CODE (type) == REAL_TYPE
&& TREE_CODE (valtype) == COMPLEX_TYPE)
warning (0, "passing argument %d of %qE as floating "
"rather than complex due to prototype",
argnum, rname);
/* ??? At some point, messages should be written about
conversions between complex types, but that's too messy
to do now. */
else if (TREE_CODE (type) == REAL_TYPE
&& TREE_CODE (valtype) == REAL_TYPE)
{
/* Warn if any argument is passed as `float',
since without a prototype it would be `double'. */
if (formal_prec == TYPE_PRECISION (float_type_node)
&& type != dfloat32_type_node)
warning (0, "passing argument %d of %qE as %<float%> "
"rather than %<double%> due to prototype",
argnum, rname);
/* Warn if mismatch between argument and prototype
for decimal float types. Warn of conversions with
binary float types and of precision narrowing due to
prototype. */
else if (type != valtype
&& (type == dfloat32_type_node
|| type == dfloat64_type_node
|| type == dfloat128_type_node
|| valtype == dfloat32_type_node
|| valtype == dfloat64_type_node
|| valtype == dfloat128_type_node)
&& (formal_prec
<= TYPE_PRECISION (valtype)
|| (type == dfloat128_type_node
&& (valtype
!= dfloat64_type_node
&& (valtype
!= dfloat32_type_node)))
|| (type == dfloat64_type_node
&& (valtype
!= dfloat32_type_node))))
warning (0, "passing argument %d of %qE as %qT "
"rather than %qT due to prototype",
argnum, rname, type, valtype);
}
/* Detect integer changing in width or signedness.
These warnings are only activated with
-Wtraditional-conversion, not with -Wtraditional. */
else if (warn_traditional_conversion && INTEGRAL_TYPE_P (type)
&& INTEGRAL_TYPE_P (valtype))
{
tree would_have_been = default_conversion (val);
tree type1 = TREE_TYPE (would_have_been);
if (TREE_CODE (type) == ENUMERAL_TYPE
&& (TYPE_MAIN_VARIANT (type)
== TYPE_MAIN_VARIANT (valtype)))
/* No warning if function asks for enum
and the actual arg is that enum type. */
;
else if (formal_prec != TYPE_PRECISION (type1))
warning (OPT_Wtraditional_conversion,
"passing argument %d of %qE "
"with different width due to prototype",
argnum, rname);
else if (TYPE_UNSIGNED (type) == TYPE_UNSIGNED (type1))
;
/* Don't complain if the formal parameter type
is an enum, because we can't tell now whether
the value was an enum--even the same enum. */
else if (TREE_CODE (type) == ENUMERAL_TYPE)
;
else if (TREE_CODE (val) == INTEGER_CST
&& int_fits_type_p (val, type))
/* Change in signedness doesn't matter
if a constant value is unaffected. */
;
/* If the value is extended from a narrower
unsigned type, it doesn't matter whether we
pass it as signed or unsigned; the value
certainly is the same either way. */
else if (TYPE_PRECISION (valtype) < TYPE_PRECISION (type)
&& TYPE_UNSIGNED (valtype))
;
else if (TYPE_UNSIGNED (type))
warning (OPT_Wtraditional_conversion,
"passing argument %d of %qE "
"as unsigned due to prototype",
argnum, rname);
else
warning (OPT_Wtraditional_conversion,
"passing argument %d of %qE "
"as signed due to prototype", argnum, rname);
}
}
/* Possibly restore an EXCESS_PRECISION_EXPR for the
sake of better warnings from convert_and_check. */
if (excess_precision)
val = build1 (EXCESS_PRECISION_EXPR, valtype, val);
origtype = (origtypes == NULL
? NULL_TREE
: VEC_index (tree, origtypes, parmnum));
parmval = convert_for_assignment (input_location, type, val,
origtype, ic_argpass, npc,
fundecl, function,
parmnum + 1);
if (targetm.calls.promote_prototypes (fundecl ? TREE_TYPE (fundecl) : 0)
&& INTEGRAL_TYPE_P (type)
&& (TYPE_PRECISION (type) < TYPE_PRECISION (integer_type_node)))
parmval = default_conversion (parmval);
}
}
else if (TREE_CODE (valtype) == REAL_TYPE
&& (TYPE_PRECISION (valtype)
< TYPE_PRECISION (double_type_node))
&& !DECIMAL_FLOAT_MODE_P (TYPE_MODE (valtype)))
{
if (type_generic)
parmval = val;
else
{
/* Convert `float' to `double'. */
if (warn_double_promotion && !c_inhibit_evaluation_warnings)
warning (OPT_Wdouble_promotion,
"implicit conversion from %qT to %qT when passing "
"argument to function",
valtype, double_type_node);
parmval = convert (double_type_node, val);
}
}
else if (excess_precision && !type_generic)
/* A "double" argument with excess precision being passed
without a prototype or in variable arguments. */
parmval = convert (valtype, val);
else if ((invalid_func_diag =
targetm.calls.invalid_arg_for_unprototyped_fn (typelist, fundecl, val)))
{
error (invalid_func_diag);
return -1;
}
else
/* Convert `short' and `char' to full-size `int'. */
parmval = default_conversion (val);
VEC_replace (tree, values, parmnum, parmval);
if (parmval == error_mark_node)
error_args = true;
if (typetail)
typetail = TREE_CHAIN (typetail);
}
gcc_assert (parmnum == VEC_length (tree, values));
if (typetail != 0 && TREE_VALUE (typetail) != void_type_node)
{
error_at (input_location,
"too few arguments to function %qE", function);
inform_declaration (fundecl);
return -1;
}
return error_args ? -1 : (int) parmnum;
}
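/* Example of the default-promotion branches above (sketch):
     int old_style ();
     void call (char c, short s, float f) { old_style (c, s, f); }
   With no prototype for the parameters, c and s are promoted to int and
   f is promoted to double before the call; -Wdouble-promotion notes the
   float-to-double conversion.  */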
/* This is the entry point used by the parser to build unary operators
in the input. CODE, a tree_code, specifies the unary operator, and
ARG is the operand. For unary plus, the C parser currently uses
CONVERT_EXPR for code.
LOC is the location to use for the tree generated.
*/
struct c_expr
parser_build_unary_op (location_t loc, enum tree_code code, struct c_expr arg)
{
struct c_expr result;
result.value = build_unary_op (loc, code, arg.value, 0);
result.original_code = code;
result.original_type = NULL;
if (TREE_OVERFLOW_P (result.value) && !TREE_OVERFLOW_P (arg.value))
overflow_warning (loc, result.value);
return result;
}
/* This is the entry point used by the parser to build binary operators
in the input. CODE, a tree_code, specifies the binary operator, and
ARG1 and ARG2 are the operands. In addition to constructing the
expression, we check for operands that were written with other binary
operators in a way that is likely to confuse the user.
LOCATION is the location of the binary operator. */
struct c_expr
parser_build_binary_op (location_t location, enum tree_code code,
struct c_expr arg1, struct c_expr arg2)
{
struct c_expr result;
enum tree_code code1 = arg1.original_code;
enum tree_code code2 = arg2.original_code;
tree type1 = (arg1.original_type
? arg1.original_type
: TREE_TYPE (arg1.value));
tree type2 = (arg2.original_type
? arg2.original_type
: TREE_TYPE (arg2.value));
result.value = build_binary_op (location, code,
arg1.value, arg2.value, 1);
result.original_code = code;
result.original_type = NULL;
if (TREE_CODE (result.value) == ERROR_MARK)
return result;
if (location != UNKNOWN_LOCATION)
protected_set_expr_location (result.value, location);
/* Check for cases such as x+y<<z which users are likely
to misinterpret. */
if (warn_parentheses)
warn_about_parentheses (code, code1, arg1.value, code2, arg2.value);
if (warn_logical_op)
warn_logical_operator (input_location, code, TREE_TYPE (result.value),
code1, arg1.value, code2, arg2.value);
/* Warn about comparisons against string literals, with the exception
of testing for equality or inequality of a string literal with NULL. */
if (code == EQ_EXPR || code == NE_EXPR)
{
if ((code1 == STRING_CST && !integer_zerop (arg2.value))
|| (code2 == STRING_CST && !integer_zerop (arg1.value)))
warning_at (location, OPT_Waddress,
"comparison with string literal results in unspecified behavior");
}
else if (TREE_CODE_CLASS (code) == tcc_comparison
&& (code1 == STRING_CST || code2 == STRING_CST))
warning_at (location, OPT_Waddress,
"comparison with string literal results in unspecified behavior");
if (TREE_OVERFLOW_P (result.value)
&& !TREE_OVERFLOW_P (arg1.value)
&& !TREE_OVERFLOW_P (arg2.value))
overflow_warning (location, result.value);
/* Warn about comparisons of different enum types. */
if (warn_enum_compare
&& TREE_CODE_CLASS (code) == tcc_comparison
&& TREE_CODE (type1) == ENUMERAL_TYPE
&& TREE_CODE (type2) == ENUMERAL_TYPE
&& TYPE_MAIN_VARIANT (type1) != TYPE_MAIN_VARIANT (type2))
warning_at (location, OPT_Wenum_compare,
"comparison between %qT and %qT",
type1, type2);
return result;
}
/* Return a tree for the difference of pointers OP0 and OP1.
The resulting tree has type ptrdiff_t. */
static tree
pointer_diff (location_t loc, tree op0, tree op1)
{
tree restype = ptrdiff_type_node;
tree result, inttype;
addr_space_t as0 = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (op0)));
addr_space_t as1 = TYPE_ADDR_SPACE (TREE_TYPE (TREE_TYPE (op1)));
tree target_type = TREE_TYPE (TREE_TYPE (op0));
tree con0, con1, lit0, lit1;
tree orig_op1 = op1;
/* If the operands point into different address spaces, we need to
explicitly convert them to pointers into the common address space
before we can subtract the numerical address values. */
if (as0 != as1)
{
addr_space_t as_common;
tree common_type;
/* Determine the common superset address space. This is guaranteed
to exist because the caller verified that comp_target_types
returned non-zero. */
if (!addr_space_superset (as0, as1, &as_common))
gcc_unreachable ();
common_type = common_pointer_type (TREE_TYPE (op0), TREE_TYPE (op1));
op0 = convert (common_type, op0);
op1 = convert (common_type, op1);
}
/* Determine integer type to perform computations in. This will usually
be the same as the result type (ptrdiff_t), but may need to be a wider
type if pointers for the address space are wider than ptrdiff_t. */
if (TYPE_PRECISION (restype) < TYPE_PRECISION (TREE_TYPE (op0)))
inttype = c_common_type_for_size (TYPE_PRECISION (TREE_TYPE (op0)), 0);
else
inttype = restype;
if (TREE_CODE (target_type) == VOID_TYPE)
pedwarn (loc, pedantic ? OPT_pedantic : OPT_Wpointer_arith,
"pointer of type %<void *%> used in subtraction");
if (TREE_CODE (target_type) == FUNCTION_TYPE)
pedwarn (loc, pedantic ? OPT_pedantic : OPT_Wpointer_arith,
"pointer to a function used in subtraction");
/* If the conversion to ptrdiff_type does anything like widening or
converting a partial to an integral mode, we get a conversion expression
that gets in the way of any simplifications.
(fold-const.c doesn't know that the extra bits won't be needed.
split_tree uses STRIP_SIGN_NOPS, which leaves conversions to a
different mode in place.)
So first try to find a common term here 'by hand'; we want to cover
at least the cases that occur in legal static initializers. */
if (CONVERT_EXPR_P (op0)
&& (TYPE_PRECISION (TREE_TYPE (op0))
== TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (op0, 0)))))
con0 = TREE_OPERAND (op0, 0);
else
con0 = op0;
if (CONVERT_EXPR_P (op1)
&& (TYPE_PRECISION (TREE_TYPE (op1))
== TYPE_PRECISION (TREE_TYPE (TREE_OPERAND (op1, 0)))))
con1 = TREE_OPERAND (op1, 0);
else
con1 = op1;
if (TREE_CODE (con0) == POINTER_PLUS_EXPR)
{
lit0 = TREE_OPERAND (con0, 1);
con0 = TREE_OPERAND (con0, 0);
}
else
lit0 = integer_zero_node;
if (TREE_CODE (con1) == POINTER_PLUS_EXPR)
{
lit1 = TREE_OPERAND (con1, 1);
con1 = TREE_OPERAND (con1, 0);
}
else
lit1 = integer_zero_node;
if (operand_equal_p (con0, con1, 0))
{
op0 = lit0;
op1 = lit1;
}
/* First do the subtraction as integers;
then drop through to build the divide operator.
Do not do default conversions on the minus operator
in case restype is a short type. */
op0 = build_binary_op (loc,
MINUS_EXPR, convert (inttype, op0),
convert (inttype, op1), 0);
/* This generates an error if op1 is pointer to incomplete type. */
if (!COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (TREE_TYPE (orig_op1))))
error_at (loc, "arithmetic on pointer to an incomplete type");
/* This generates an error if op0 is pointer to incomplete type. */
op1 = c_size_in_bytes (target_type);
/* Divide by the size, in easiest possible way. */
result = fold_build2_loc (loc, EXACT_DIV_EXPR, inttype,
op0, convert (inttype, op1));
/* Convert to final result type if necessary. */
return convert (restype, result);
}
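/* Example of the hand folding above (sketch, assuming ptrdiff_t from
   <stddef.h>):
     static int a[10];
     static ptrdiff_t d = &a[7] - &a[2];
   Both operands are POINTER_PLUS_EXPRs on the same base, so the common
   term cancels and the difference folds to the constant 5, which keeps
   the initializer valid as a static initializer.  */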
/* Construct and perhaps optimize a tree representation
for a unary operation. CODE, a tree_code, specifies the operation
and XARG is the operand.
For any CODE other than ADDR_EXPR, FLAG nonzero suppresses
the default promotions (such as from short to int).
For ADDR_EXPR, the default promotions are not applied; FLAG nonzero
allows non-lvalues; this is only used to handle conversion of non-lvalue
arrays to pointers in C99.
LOCATION is the location of the operator. */
tree
build_unary_op (location_t location,
enum tree_code code, tree xarg, int flag)
{
/* No default_conversion here. It causes trouble for ADDR_EXPR. */
tree arg = xarg;
tree argtype = 0;
enum tree_code typecode;
tree val;
tree ret = error_mark_node;
tree eptype = NULL_TREE;
int noconvert = flag;
const char *invalid_op_diag;
bool int_operands;
int_operands = EXPR_INT_CONST_OPERANDS (xarg);
if (int_operands)
arg = remove_c_maybe_const_expr (arg);
if (code != ADDR_EXPR)
arg = require_complete_type (arg);
typecode = TREE_CODE (TREE_TYPE (arg));
if (typecode == ERROR_MARK)
return error_mark_node;
if (typecode == ENUMERAL_TYPE || typecode == BOOLEAN_TYPE)
typecode = INTEGER_TYPE;
if ((invalid_op_diag
= targetm.invalid_unary_op (code, TREE_TYPE (xarg))))
{
error_at (location, invalid_op_diag);
return error_mark_node;
}
if (TREE_CODE (arg) == EXCESS_PRECISION_EXPR)
{
eptype = TREE_TYPE (arg);
arg = TREE_OPERAND (arg, 0);
}
switch (code)
{
case CONVERT_EXPR:
/* This is used for unary plus, because a CONVERT_EXPR
is enough to prevent anybody from looking inside for
associativity, but won't generate any code. */
if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE
|| typecode == FIXED_POINT_TYPE || typecode == COMPLEX_TYPE
|| typecode == VECTOR_TYPE))
{
error_at (location, "wrong type argument to unary plus");
return error_mark_node;
}
else if (!noconvert)
arg = default_conversion (arg);
arg = non_lvalue_loc (location, arg);
break;
case NEGATE_EXPR:
if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE
|| typecode == FIXED_POINT_TYPE || typecode == COMPLEX_TYPE
|| typecode == VECTOR_TYPE))
{
error_at (location, "wrong type argument to unary minus");
return error_mark_node;
}
else if (!noconvert)
arg = default_conversion (arg);
break;
case BIT_NOT_EXPR:
/* ~ works on integer types and non float vectors. */
if (typecode == INTEGER_TYPE
|| (typecode == VECTOR_TYPE
&& !VECTOR_FLOAT_TYPE_P (TREE_TYPE (arg))))
{
if (!noconvert)
arg = default_conversion (arg);
}
else if (typecode == COMPLEX_TYPE)
{
code = CONJ_EXPR;
pedwarn (location, OPT_pedantic,
"ISO C does not support %<~%> for complex conjugation");
if (!noconvert)
arg = default_conversion (arg);
}
else
{
error_at (location, "wrong type argument to bit-complement");
return error_mark_node;
}
break;
case ABS_EXPR:
if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE))
{
error_at (location, "wrong type argument to abs");
return error_mark_node;
}
else if (!noconvert)
arg = default_conversion (arg);
break;
case CONJ_EXPR:
/* Conjugating a real value is a no-op, but allow it anyway. */
if (!(typecode == INTEGER_TYPE || typecode == REAL_TYPE
|| typecode == COMPLEX_TYPE))
{
error_at (location, "wrong type argument to conjugation");
return error_mark_node;
}
else if (!noconvert)
arg = default_conversion (arg);
break;
case TRUTH_NOT_EXPR:
if (typecode != INTEGER_TYPE && typecode != FIXED_POINT_TYPE
&& typecode != REAL_TYPE && typecode != POINTER_TYPE
&& typecode != COMPLEX_TYPE)
{
error_at (location,
"wrong type argument to unary exclamation mark");
return error_mark_node;
}
arg = c_objc_common_truthvalue_conversion (location, arg);
ret = invert_truthvalue_loc (location, arg);
/* If the TRUTH_NOT_EXPR has been folded, reset the location. */
if (EXPR_P (ret) && EXPR_HAS_LOCATION (ret))
location = EXPR_LOCATION (ret);
goto return_build_unary_op;
case REALPART_EXPR:
case IMAGPART_EXPR:
ret = build_real_imag_expr (location, code, arg);
if (ret == error_mark_node)
return error_mark_node;
if (eptype && TREE_CODE (eptype) == COMPLEX_TYPE)
eptype = TREE_TYPE (eptype);
goto return_build_unary_op;
case PREINCREMENT_EXPR:
case POSTINCREMENT_EXPR:
case PREDECREMENT_EXPR:
case POSTDECREMENT_EXPR:
if (TREE_CODE (arg) == C_MAYBE_CONST_EXPR)
{
tree inner = build_unary_op (location, code,
C_MAYBE_CONST_EXPR_EXPR (arg), flag);
if (inner == error_mark_node)
return error_mark_node;
ret = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (inner),
C_MAYBE_CONST_EXPR_PRE (arg), inner);
gcc_assert (!C_MAYBE_CONST_EXPR_INT_OPERANDS (arg));
C_MAYBE_CONST_EXPR_NON_CONST (ret) = 1;
goto return_build_unary_op;
}
/* Complain about anything that is not a true lvalue. In
Objective-C, skip this check for property_refs. */
if (!objc_is_property_ref (arg)
&& !lvalue_or_else (location,
arg, ((code == PREINCREMENT_EXPR
|| code == POSTINCREMENT_EXPR)
? lv_increment
: lv_decrement)))
return error_mark_node;
if (warn_cxx_compat && TREE_CODE (TREE_TYPE (arg)) == ENUMERAL_TYPE)
{
if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
warning_at (location, OPT_Wc___compat,
"increment of enumeration value is invalid in C++");
else
warning_at (location, OPT_Wc___compat,
"decrement of enumeration value is invalid in C++");
}
/* Ensure the argument is fully folded inside any SAVE_EXPR. */
arg = c_fully_fold (arg, false, NULL);
/* Increment or decrement the real part of the value,
and don't change the imaginary part. */
if (typecode == COMPLEX_TYPE)
{
tree real, imag;
pedwarn (location, OPT_pedantic,
"ISO C does not support %<++%> and %<--%> on complex types");
arg = stabilize_reference (arg);
real = build_unary_op (EXPR_LOCATION (arg), REALPART_EXPR, arg, 1);
imag = build_unary_op (EXPR_LOCATION (arg), IMAGPART_EXPR, arg, 1);
real = build_unary_op (EXPR_LOCATION (arg), code, real, 1);
if (real == error_mark_node || imag == error_mark_node)
return error_mark_node;
ret = build2 (COMPLEX_EXPR, TREE_TYPE (arg),
real, imag);
goto return_build_unary_op;
}
/* Report invalid types. */
if (typecode != POINTER_TYPE && typecode != FIXED_POINT_TYPE
&& typecode != INTEGER_TYPE && typecode != REAL_TYPE)
{
if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
error_at (location, "wrong type argument to increment");
else
error_at (location, "wrong type argument to decrement");
return error_mark_node;
}
{
tree inc;
argtype = TREE_TYPE (arg);
/* Compute the increment. */
if (typecode == POINTER_TYPE)
{
/* If pointer target is an undefined struct,
we just cannot know how to do the arithmetic. */
if (!COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (argtype)))
{
if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
error_at (location,
"increment of pointer to unknown structure");
else
error_at (location,
"decrement of pointer to unknown structure");
}
else if (TREE_CODE (TREE_TYPE (argtype)) == FUNCTION_TYPE
|| TREE_CODE (TREE_TYPE (argtype)) == VOID_TYPE)
{
if (code == PREINCREMENT_EXPR || code == POSTINCREMENT_EXPR)
pedwarn (location, pedantic ? OPT_pedantic : OPT_Wpointer_arith,
"wrong type argument to increment");
else
pedwarn (location, pedantic ? OPT_pedantic : OPT_Wpointer_arith,
"wrong type argument to decrement");
}
inc = c_size_in_bytes (TREE_TYPE (argtype));
inc = convert_to_ptrofftype_loc (location, inc);
}
else if (FRACT_MODE_P (TYPE_MODE (argtype)))
{
/* For signed fract types, we invert ++ to -- or
-- to ++, and change inc from 1 to -1, because
it is not possible to represent 1 in signed fract constants.
For unsigned fract types, the result always overflows and
we get an undefined (original) or the maximum value. */
if (code == PREINCREMENT_EXPR)
code = PREDECREMENT_EXPR;
else if (code == PREDECREMENT_EXPR)
code = PREINCREMENT_EXPR;
else if (code == POSTINCREMENT_EXPR)
code = POSTDECREMENT_EXPR;
else /* code == POSTDECREMENT_EXPR */
code = POSTINCREMENT_EXPR;
inc = integer_minus_one_node;
inc = convert (argtype, inc);
}
else
{
inc = integer_one_node;
inc = convert (argtype, inc);
}
/* If 'arg' is an Objective-C PROPERTY_REF expression, then we
need to ask Objective-C to build the increment or decrement
expression for it. */
if (objc_is_property_ref (arg))
return objc_build_incr_expr_for_property_ref (location, code,
arg, inc);
/* Report a read-only lvalue. */
if (TYPE_READONLY (argtype))
{
readonly_error (arg,
((code == PREINCREMENT_EXPR
|| code == POSTINCREMENT_EXPR)
? lv_increment : lv_decrement));
return error_mark_node;
}
else if (TREE_READONLY (arg))
readonly_warning (arg,
((code == PREINCREMENT_EXPR
|| code == POSTINCREMENT_EXPR)
? lv_increment : lv_decrement));
if (TREE_CODE (TREE_TYPE (arg)) == BOOLEAN_TYPE)
val = boolean_increment (code, arg);
else
val = build2 (code, TREE_TYPE (arg), arg, inc);
TREE_SIDE_EFFECTS (val) = 1;
if (TREE_CODE (val) != code)
TREE_NO_WARNING (val) = 1;
ret = val;
goto return_build_unary_op;
}
case ADDR_EXPR:
/* Note that this operation never does default_conversion. */
/* The operand of unary '&' must be an lvalue (which excludes
expressions of type void), or, in C99, the result of a [] or
unary '*' operator. */
if (VOID_TYPE_P (TREE_TYPE (arg))
&& TYPE_QUALS (TREE_TYPE (arg)) == TYPE_UNQUALIFIED
&& (TREE_CODE (arg) != INDIRECT_REF
|| !flag_isoc99))
pedwarn (location, 0, "taking address of expression of type %<void%>");
/* Let &* cancel out to simplify resulting code. */
if (TREE_CODE (arg) == INDIRECT_REF)
{
/* Don't let this be an lvalue. */
if (lvalue_p (TREE_OPERAND (arg, 0)))
return non_lvalue_loc (location, TREE_OPERAND (arg, 0));
ret = TREE_OPERAND (arg, 0);
goto return_build_unary_op;
}
/* For &x[y], return x+y */
if (TREE_CODE (arg) == ARRAY_REF)
{
tree op0 = TREE_OPERAND (arg, 0);
if (!c_mark_addressable (op0))
return error_mark_node;
}
/* Anything not already handled and not a true memory reference
or a non-lvalue array is an error. */
else if (typecode != FUNCTION_TYPE && !flag
&& !lvalue_or_else (location, arg, lv_addressof))
return error_mark_node;
/* Move address operations inside C_MAYBE_CONST_EXPR to simplify
folding later. */
if (TREE_CODE (arg) == C_MAYBE_CONST_EXPR)
{
tree inner = build_unary_op (location, code,
C_MAYBE_CONST_EXPR_EXPR (arg), flag);
ret = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (inner),
C_MAYBE_CONST_EXPR_PRE (arg), inner);
gcc_assert (!C_MAYBE_CONST_EXPR_INT_OPERANDS (arg));
C_MAYBE_CONST_EXPR_NON_CONST (ret)
= C_MAYBE_CONST_EXPR_NON_CONST (arg);
goto return_build_unary_op;
}
/* Ordinary case; arg is a COMPONENT_REF or a decl. */
argtype = TREE_TYPE (arg);
/* If the lvalue is const or volatile, merge that into the type
to which the address will point. This is only needed
for function types. */
if ((DECL_P (arg) || REFERENCE_CLASS_P (arg))
&& (TREE_READONLY (arg) || TREE_THIS_VOLATILE (arg))
&& TREE_CODE (argtype) == FUNCTION_TYPE)
{
int orig_quals = TYPE_QUALS (strip_array_types (argtype));
int quals = orig_quals;
if (TREE_READONLY (arg))
quals |= TYPE_QUAL_CONST;
if (TREE_THIS_VOLATILE (arg))
quals |= TYPE_QUAL_VOLATILE;
argtype = c_build_qualified_type (argtype, quals);
}
if (!c_mark_addressable (arg))
return error_mark_node;
gcc_assert (TREE_CODE (arg) != COMPONENT_REF
|| !DECL_C_BIT_FIELD (TREE_OPERAND (arg, 1)));
argtype = build_pointer_type (argtype);
/* ??? Cope with user tricks that amount to offsetof. Delete this
when we have proper support for integer constant expressions. */
val = get_base_address (arg);
if (val && TREE_CODE (val) == INDIRECT_REF
&& TREE_CONSTANT (TREE_OPERAND (val, 0)))
{
ret = fold_convert_loc (location, argtype, fold_offsetof_1 (arg));
goto return_build_unary_op;
}
val = build1 (ADDR_EXPR, argtype, arg);
ret = val;
goto return_build_unary_op;
default:
gcc_unreachable ();
}
if (argtype == 0)
argtype = TREE_TYPE (arg);
if (TREE_CODE (arg) == INTEGER_CST)
ret = (require_constant_value
? fold_build1_initializer_loc (location, code, argtype, arg)
: fold_build1_loc (location, code, argtype, arg));
else
ret = build1 (code, argtype, arg);
return_build_unary_op:
gcc_assert (ret != error_mark_node);
if (TREE_CODE (ret) == INTEGER_CST && !TREE_OVERFLOW (ret)
&& !(TREE_CODE (xarg) == INTEGER_CST && !TREE_OVERFLOW (xarg)))
ret = build1 (NOP_EXPR, TREE_TYPE (ret), ret);
else if (TREE_CODE (ret) != INTEGER_CST && int_operands)
ret = note_integer_operands (ret);
if (eptype)
ret = build1 (EXCESS_PRECISION_EXPR, eptype, ret);
protected_set_expr_location (ret, location);
return ret;
}
/* Return nonzero if REF is an lvalue valid for this language.
Lvalues can be assigned, unless their type has TYPE_READONLY.
Lvalues can have their address taken, unless they have C_DECL_REGISTER. */
bool
lvalue_p (const_tree ref)
{
const enum tree_code code = TREE_CODE (ref);
switch (code)
{
case REALPART_EXPR:
case IMAGPART_EXPR:
case COMPONENT_REF:
return lvalue_p (TREE_OPERAND (ref, 0));
case C_MAYBE_CONST_EXPR:
return lvalue_p (TREE_OPERAND (ref, 1));
case COMPOUND_LITERAL_EXPR:
case STRING_CST:
return 1;
case INDIRECT_REF:
case ARRAY_REF:
case VAR_DECL:
case PARM_DECL:
case RESULT_DECL:
case ERROR_MARK:
return (TREE_CODE (TREE_TYPE (ref)) != FUNCTION_TYPE
&& TREE_CODE (TREE_TYPE (ref)) != METHOD_TYPE);
case BIND_EXPR:
return TREE_CODE (TREE_TYPE (ref)) == ARRAY_TYPE;
default:
return 0;
}
}
/* Give a warning for storing in something that is read-only in GCC
terms but not const in ISO C terms. */
static void
readonly_warning (tree arg, enum lvalue_use use)
{
switch (use)
{
case lv_assign:
warning (0, "assignment of read-only location %qE", arg);
break;
case lv_increment:
warning (0, "increment of read-only location %qE", arg);
break;
case lv_decrement:
warning (0, "decrement of read-only location %qE", arg);
break;
default:
gcc_unreachable ();
}
return;
}
/* Return nonzero if REF is an lvalue valid for this language;
otherwise, print an error message and return zero. USE says
how the lvalue is being used and so selects the error message.
LOCATION is the location at which any error should be reported. */
static int
lvalue_or_else (location_t loc, const_tree ref, enum lvalue_use use)
{
int win = lvalue_p (ref);
if (!win)
lvalue_error (loc, use);
return win;
}
/* Mark EXP saying that we need to be able to take the
address of it; it should not be allocated in a register.
Returns true if successful. */
bool
c_mark_addressable (tree exp)
{
tree x = exp;
while (1)
switch (TREE_CODE (x))
{
case COMPONENT_REF:
if (DECL_C_BIT_FIELD (TREE_OPERAND (x, 1)))
{
error
("cannot take address of bit-field %qD", TREE_OPERAND (x, 1));
return false;
}
/* ... fall through ... */
case ADDR_EXPR:
case ARRAY_REF:
case REALPART_EXPR:
case IMAGPART_EXPR:
x = TREE_OPERAND (x, 0);
break;
case COMPOUND_LITERAL_EXPR:
case CONSTRUCTOR:
TREE_ADDRESSABLE (x) = 1;
return true;
case VAR_DECL:
case CONST_DECL:
case PARM_DECL:
case RESULT_DECL:
if (C_DECL_REGISTER (x)
&& DECL_NONLOCAL (x))
{
if (TREE_PUBLIC (x) || TREE_STATIC (x) || DECL_EXTERNAL (x))
{
error
("global register variable %qD used in nested function", x);
return false;
}
pedwarn (input_location, 0, "register variable %qD used in nested function", x);
}
else if (C_DECL_REGISTER (x))
{
if (TREE_PUBLIC (x) || TREE_STATIC (x) || DECL_EXTERNAL (x))
error ("address of global register variable %qD requested", x);
else
error ("address of register variable %qD requested", x);
return false;
}
/* drops in */
case FUNCTION_DECL:
TREE_ADDRESSABLE (x) = 1;
/* drops out */
default:
return true;
}
}
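/* Example of the register-variable errors above (sketch):
     void f (void)
     {
       register int r;
       int *p = &r;
     }
   Taking the address of r is rejected with "address of register
   variable ... requested"; a global register variable gets the
   corresponding "global register variable" diagnostics instead.  */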
/* Convert EXPR to TYPE, warning about conversion problems with
constants. SEMANTIC_TYPE is the type this conversion would use
without excess precision. If SEMANTIC_TYPE is NULL, this function
is equivalent to convert_and_check. This function is a wrapper that
handles conversions that may be different than
the usual ones because of excess precision. */
static tree
ep_convert_and_check (tree type, tree expr, tree semantic_type)
{
if (TREE_TYPE (expr) == type)
return expr;
if (!semantic_type)
return convert_and_check (type, expr);
if (TREE_CODE (TREE_TYPE (expr)) == INTEGER_TYPE
&& TREE_TYPE (expr) != semantic_type)
{
/* For integers, we need to check the real conversion, not
the conversion to the excess precision type. */
expr = convert_and_check (semantic_type, expr);
}
/* Result type is the excess precision type, which should be
large enough, so do not check. */
return convert (type, expr);
}
/* Build and return a conditional expression IFEXP ? OP1 : OP2. If
IFEXP_BCP then the condition is a call to __builtin_constant_p, and
if folded to an integer constant then the unselected half may
contain arbitrary operations not normally permitted in constant
expressions. Set the location of the expression to COLON_LOC. */
tree
build_conditional_expr (location_t colon_loc, tree ifexp, bool ifexp_bcp,
tree op1, tree op1_original_type, tree op2,
tree op2_original_type)
{
tree type1;
tree type2;
enum tree_code code1;
enum tree_code code2;
tree result_type = NULL;
tree semantic_result_type = NULL;
tree orig_op1 = op1, orig_op2 = op2;
bool int_const, op1_int_operands, op2_int_operands, int_operands;
bool ifexp_int_operands;
tree ret;
op1_int_operands = EXPR_INT_CONST_OPERANDS (orig_op1);
if (op1_int_operands)
op1 = remove_c_maybe_const_expr (op1);
op2_int_operands = EXPR_INT_CONST_OPERANDS (orig_op2);
if (op2_int_operands)
op2 = remove_c_maybe_const_expr (op2);
ifexp_int_operands = EXPR_INT_CONST_OPERANDS (ifexp);
if (ifexp_int_operands)
ifexp = remove_c_maybe_const_expr (ifexp);
/* Promote both alternatives. */
if (TREE_CODE (TREE_TYPE (op1)) != VOID_TYPE)
op1 = default_conversion (op1);
if (TREE_CODE (TREE_TYPE (op2)) != VOID_TYPE)
op2 = default_conversion (op2);
if (TREE_CODE (ifexp) == ERROR_MARK
|| TREE_CODE (TREE_TYPE (op1)) == ERROR_MARK
|| TREE_CODE (TREE_TYPE (op2)) == ERROR_MARK)
return error_mark_node;
type1 = TREE_TYPE (op1);
code1 = TREE_CODE (type1);
type2 = TREE_TYPE (op2);
code2 = TREE_CODE (type2);
/* C90 does not permit non-lvalue arrays in conditional expressions.
In C99 they will be pointers by now. */
if (code1 == ARRAY_TYPE || code2 == ARRAY_TYPE)
{
error_at (colon_loc, "non-lvalue array in conditional expression");
return error_mark_node;
}
if ((TREE_CODE (op1) == EXCESS_PRECISION_EXPR
|| TREE_CODE (op2) == EXCESS_PRECISION_EXPR)
&& (code1 == INTEGER_TYPE || code1 == REAL_TYPE
|| code1 == COMPLEX_TYPE)
&& (code2 == INTEGER_TYPE || code2 == REAL_TYPE
|| code2 == COMPLEX_TYPE))
{
semantic_result_type = c_common_type (type1, type2);
if (TREE_CODE (op1) == EXCESS_PRECISION_EXPR)
{
op1 = TREE_OPERAND (op1, 0);
type1 = TREE_TYPE (op1);
gcc_assert (TREE_CODE (type1) == code1);
}
if (TREE_CODE (op2) == EXCESS_PRECISION_EXPR)
{
op2 = TREE_OPERAND (op2, 0);
type2 = TREE_TYPE (op2);
gcc_assert (TREE_CODE (type2) == code2);
}
}
if (warn_cxx_compat)
{
tree t1 = op1_original_type ? op1_original_type : TREE_TYPE (orig_op1);
tree t2 = op2_original_type ? op2_original_type : TREE_TYPE (orig_op2);
if (TREE_CODE (t1) == ENUMERAL_TYPE
&& TREE_CODE (t2) == ENUMERAL_TYPE
&& TYPE_MAIN_VARIANT (t1) != TYPE_MAIN_VARIANT (t2))
warning_at (colon_loc, OPT_Wc___compat,
("different enum types in conditional is "
"invalid in C++: %qT vs %qT"),
t1, t2);
}
/* Quickly detect the usual case where op1 and op2 have the same type
after promotion. */
if (TYPE_MAIN_VARIANT (type1) == TYPE_MAIN_VARIANT (type2))
{
if (type1 == type2)
result_type = type1;
else
result_type = TYPE_MAIN_VARIANT (type1);
}
else if ((code1 == INTEGER_TYPE || code1 == REAL_TYPE
|| code1 == COMPLEX_TYPE)
&& (code2 == INTEGER_TYPE || code2 == REAL_TYPE
|| code2 == COMPLEX_TYPE))
{
result_type = c_common_type (type1, type2);
do_warn_double_promotion (result_type, type1, type2,
"implicit conversion from %qT to %qT to "
"match other result of conditional",
colon_loc);
/* If -Wsign-compare, warn here if type1 and type2 have
different signedness. We'll promote the signed to unsigned
and later code won't know it used to be different.
Do this check on the original types, so that explicit casts
will be considered, but default promotions won't. */
if (c_inhibit_evaluation_warnings == 0)
{
int unsigned_op1 = TYPE_UNSIGNED (TREE_TYPE (orig_op1));
int unsigned_op2 = TYPE_UNSIGNED (TREE_TYPE (orig_op2));
if (unsigned_op1 ^ unsigned_op2)
{
bool ovf;
/* Do not warn if the result type is signed, since the
signed type will only be chosen if it can represent
all the values of the unsigned type. */
if (!TYPE_UNSIGNED (result_type))
/* OK */;
else
{
bool op1_maybe_const = true;
bool op2_maybe_const = true;
/* Do not warn if the signed quantity is an
unsuffixed integer literal (or some static
constant expression involving such literals) and
it is non-negative. This warning requires the
operands to be folded for best results, so do
that folding in this case even without
warn_sign_compare to avoid warning options
possibly affecting code generation. */
c_inhibit_evaluation_warnings
+= (ifexp == truthvalue_false_node);
op1 = c_fully_fold (op1, require_constant_value,
&op1_maybe_const);
c_inhibit_evaluation_warnings
-= (ifexp == truthvalue_false_node);
c_inhibit_evaluation_warnings
+= (ifexp == truthvalue_true_node);
op2 = c_fully_fold (op2, require_constant_value,
&op2_maybe_const);
c_inhibit_evaluation_warnings
-= (ifexp == truthvalue_true_node);
if (warn_sign_compare)
{
if ((unsigned_op2
&& tree_expr_nonnegative_warnv_p (op1, &ovf))
|| (unsigned_op1
&& tree_expr_nonnegative_warnv_p (op2, &ovf)))
/* OK */;
else
warning_at (colon_loc, OPT_Wsign_compare,
("signed and unsigned type in "
"conditional expression"));
}
if (!op1_maybe_const || TREE_CODE (op1) != INTEGER_CST)
op1 = c_wrap_maybe_const (op1, !op1_maybe_const);
if (!op2_maybe_const || TREE_CODE (op2) != INTEGER_CST)
op2 = c_wrap_maybe_const (op2, !op2_maybe_const);
}
}
}
}
else if (code1 == VOID_TYPE || code2 == VOID_TYPE)
{
if (code1 != VOID_TYPE || code2 != VOID_TYPE)
pedwarn (colon_loc, OPT_pedantic,
"ISO C forbids conditional expr with only one void side");
result_type = void_type_node;
}
else if (code1 == POINTER_TYPE && code2 == POINTER_TYPE)
{
addr_space_t as1 = TYPE_ADDR_SPACE (TREE_TYPE (type1));
addr_space_t as2 = TYPE_ADDR_SPACE (TREE_TYPE (type2));
addr_space_t as_common;
if (comp_target_types (colon_loc, type1, type2))
result_type = common_pointer_type (type1, type2);
else if (null_pointer_constant_p (orig_op1))
result_type = type2;
else if (null_pointer_constant_p (orig_op2))
result_type = type1;
else if (!addr_space_superset (as1, as2, &as_common))
{
error_at (colon_loc, "pointers to disjoint address spaces "
"used in conditional expression");
return error_mark_node;
}
else if (VOID_TYPE_P (TREE_TYPE (type1)))
{
if (TREE_CODE (TREE_TYPE (type2)) == FUNCTION_TYPE)
pedwarn (colon_loc, OPT_pedantic,
"ISO C forbids conditional expr between "
"%<void *%> and function pointer");
result_type = build_pointer_type (qualify_type (TREE_TYPE (type1),
TREE_TYPE (type2)));
}
else if (VOID_TYPE_P (TREE_TYPE (type2)))
{
if (TREE_CODE (TREE_TYPE (type1)) == FUNCTION_TYPE)
pedwarn (colon_loc, OPT_pedantic,
"ISO C forbids conditional expr between "
"%<void *%> and function pointer");
result_type = build_pointer_type (qualify_type (TREE_TYPE (type2),
TREE_TYPE (type1)));
}
/* Objective-C pointer comparisons are a bit more lenient. */
else if (objc_have_common_type (type1, type2, -3, NULL_TREE))
result_type = objc_common_type (type1, type2);
else
{
int qual = ENCODE_QUAL_ADDR_SPACE (as_common);
pedwarn (colon_loc, 0,
"pointer type mismatch in conditional expression");
result_type = build_pointer_type
(build_qualified_type (void_type_node, qual));
}
}
else if (code1 == POINTER_TYPE && code2 == INTEGER_TYPE)
{
if (!null_pointer_constant_p (orig_op2))
pedwarn (colon_loc, 0,
"pointer/integer type mismatch in conditional expression");
else
{
op2 = null_pointer_node;
}
result_type = type1;
}
else if (code2 == POINTER_TYPE && code1 == INTEGER_TYPE)
{
if (!null_pointer_constant_p (orig_op1))
pedwarn (colon_loc, 0,
"pointer/integer type mismatch in conditional expression");
else
{
op1 = null_pointer_node;
}
result_type = type2;
}
if (!result_type)
{
if (flag_cond_mismatch)
result_type = void_type_node;
else
{
error_at (colon_loc, "type mismatch in conditional expression");
return error_mark_node;
}
}
/* Merge const and volatile flags of the incoming types. */
result_type
= build_type_variant (result_type,
TYPE_READONLY (type1) || TYPE_READONLY (type2),
TYPE_VOLATILE (type1) || TYPE_VOLATILE (type2));
op1 = ep_convert_and_check (result_type, op1, semantic_result_type);
op2 = ep_convert_and_check (result_type, op2, semantic_result_type);
if (ifexp_bcp && ifexp == truthvalue_true_node)
{
op2_int_operands = true;
op1 = c_fully_fold (op1, require_constant_value, NULL);
}
if (ifexp_bcp && ifexp == truthvalue_false_node)
{
op1_int_operands = true;
op2 = c_fully_fold (op2, require_constant_value, NULL);
}
int_const = int_operands = (ifexp_int_operands
&& op1_int_operands
&& op2_int_operands);
if (int_operands)
{
int_const = ((ifexp == truthvalue_true_node
&& TREE_CODE (orig_op1) == INTEGER_CST
&& !TREE_OVERFLOW (orig_op1))
|| (ifexp == truthvalue_false_node
&& TREE_CODE (orig_op2) == INTEGER_CST
&& !TREE_OVERFLOW (orig_op2)));
}
if (int_const || (ifexp_bcp && TREE_CODE (ifexp) == INTEGER_CST))
ret = fold_build3_loc (colon_loc, COND_EXPR, result_type, ifexp, op1, op2);
else
{
ret = build3 (COND_EXPR, result_type, ifexp, op1, op2);
if (int_operands)
ret = note_integer_operands (ret);
}
if (semantic_result_type)
ret = build1 (EXCESS_PRECISION_EXPR, semantic_result_type, ret);
protected_set_expr_location (ret, colon_loc);
return ret;
}
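/* Example of the sign-compare case above (sketch):
     unsigned pick (int cond, int i, unsigned u)
     {
       return cond ? i : u;
     }
   The common type of int and unsigned is unsigned, so with
   -Wsign-compare this triggers "signed and unsigned type in conditional
   expression" unless the signed operand is a provably non-negative
   constant.  */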
/* Return a compound expression that performs two expressions and
returns the value of the second of them.
LOC is the location of the COMPOUND_EXPR. */
tree
build_compound_expr (location_t loc, tree expr1, tree expr2)
{
bool expr1_int_operands, expr2_int_operands;
tree eptype = NULL_TREE;
tree ret;
expr1_int_operands = EXPR_INT_CONST_OPERANDS (expr1);
if (expr1_int_operands)
expr1 = remove_c_maybe_const_expr (expr1);
expr2_int_operands = EXPR_INT_CONST_OPERANDS (expr2);
if (expr2_int_operands)
expr2 = remove_c_maybe_const_expr (expr2);
if (TREE_CODE (expr1) == EXCESS_PRECISION_EXPR)
expr1 = TREE_OPERAND (expr1, 0);
if (TREE_CODE (expr2) == EXCESS_PRECISION_EXPR)
{
eptype = TREE_TYPE (expr2);
expr2 = TREE_OPERAND (expr2, 0);
}
if (!TREE_SIDE_EFFECTS (expr1))
{
/* The left-hand operand of a comma expression is like an expression
statement: with -Wunused, we should warn if it doesn't have
any side-effects, unless it was explicitly cast to (void). */
if (warn_unused_value)
{
if (VOID_TYPE_P (TREE_TYPE (expr1))
&& CONVERT_EXPR_P (expr1))
; /* (void) a, b */
else if (VOID_TYPE_P (TREE_TYPE (expr1))
&& TREE_CODE (expr1) == COMPOUND_EXPR
&& CONVERT_EXPR_P (TREE_OPERAND (expr1, 1)))
; /* (void) a, (void) b, c */
else
warning_at (loc, OPT_Wunused_value,
"left-hand operand of comma expression has no effect");
}
}
/* With -Wunused, we should also warn if the left-hand operand does have
side-effects, but computes a value which is not used. For example, in
`foo() + bar(), baz()' the result of the `+' operator is not used,
so we should issue a warning. */
else if (warn_unused_value)
warn_if_unused_value (expr1, loc);
if (expr2 == error_mark_node)
return error_mark_node;
ret = build2 (COMPOUND_EXPR, TREE_TYPE (expr2), expr1, expr2);
if (flag_isoc99
&& expr1_int_operands
&& expr2_int_operands)
ret = note_integer_operands (ret);
if (eptype)
ret = build1 (EXCESS_PRECISION_EXPR, eptype, ret);
protected_set_expr_location (ret, loc);
return ret;
}
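/* Example of the -Wunused-value cases above (sketch):
     void f (void);
     void g (int x)
     {
       x + 1, f ();
       (void) x, f ();
     }
   The first comma expression warns that its left-hand operand has no
   effect; the second does not, because the left-hand operand is
   explicitly cast to void.  */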
/* Issue -Wcast-qual warnings when appropriate. TYPE is the type to
which we are casting. OTYPE is the type of the expression being
cast. Both TYPE and OTYPE are pointer types. LOC is the location
of the cast. -Wcast-qual appeared on the command line. Named
address space qualifiers are not handled here, because they result
in different warnings. */
static void
handle_warn_cast_qual (location_t loc, tree type, tree otype)
{
tree in_type = type;
tree in_otype = otype;
int added = 0;
int discarded = 0;
bool is_const;
/* Check that the qualifiers on IN_TYPE are a superset of the
qualifiers of IN_OTYPE. The outermost level of POINTER_TYPE
nodes is uninteresting and we stop as soon as we hit a
non-POINTER_TYPE node on either type. */
do
{
in_otype = TREE_TYPE (in_otype);
in_type = TREE_TYPE (in_type);
/* GNU C allows cv-qualified function types. 'const' means the
function is very pure, 'volatile' means it can't return. We
need to warn when such qualifiers are added, not when they're
taken away. */
if (TREE_CODE (in_otype) == FUNCTION_TYPE
&& TREE_CODE (in_type) == FUNCTION_TYPE)
added |= (TYPE_QUALS_NO_ADDR_SPACE (in_type)
& ~TYPE_QUALS_NO_ADDR_SPACE (in_otype));
else
discarded |= (TYPE_QUALS_NO_ADDR_SPACE (in_otype)
& ~TYPE_QUALS_NO_ADDR_SPACE (in_type));
}
while (TREE_CODE (in_type) == POINTER_TYPE
&& TREE_CODE (in_otype) == POINTER_TYPE);
if (added)
warning_at (loc, OPT_Wcast_qual,
"cast adds %q#v qualifier to function type", added);
if (discarded)
/* There are qualifiers present in IN_OTYPE that are not present
in IN_TYPE. */
warning_at (loc, OPT_Wcast_qual,
"cast discards %q#v qualifier from pointer target type",
discarded);
if (added || discarded)
return;
/* A cast from **T to const **T is unsafe, because it can cause a
const value to be changed with no additional warning. We only
issue this warning if T is the same on both sides, and we only
issue the warning if there are the same number of pointers on
both sides, as otherwise the cast is clearly unsafe anyhow. A
cast is unsafe when a qualifier is added at one level and const
is not present at all outer levels.
To issue this warning, we check at each level whether the cast
adds new qualifiers not already seen. We don't need to special
case function types, as they won't have the same
TYPE_MAIN_VARIANT. */
if (TYPE_MAIN_VARIANT (in_type) != TYPE_MAIN_VARIANT (in_otype))
return;
if (TREE_CODE (TREE_TYPE (type)) != POINTER_TYPE)
return;
in_type = type;
in_otype = otype;
is_const = TYPE_READONLY (TREE_TYPE (in_type));
do
{
in_type = TREE_TYPE (in_type);
in_otype = TREE_TYPE (in_otype);
if ((TYPE_QUALS (in_type) &~ TYPE_QUALS (in_otype)) != 0
&& !is_const)
{
warning_at (loc, OPT_Wcast_qual,
"to be safe all intermediate pointers in cast from "
"%qT to %qT must be %<const%> qualified",
otype, type);
break;
}
if (is_const)
is_const = TYPE_READONLY (in_type);
}
while (TREE_CODE (in_type) == POINTER_TYPE);
}
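/* Illustrative casts diagnosed by handle_warn_cast_qual under
-Wcast-qual (user code, not part of this file): '(char *) p' for
'const char *p' discards the 'const' qualifier; '(const char **) q'
for 'char **q' gets the intermediate-pointer warning above, while
'(const char *const *) q' is accepted.  */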
/* Build an expression representing a cast to type TYPE of expression EXPR.
LOC is the location of the cast, typically the open paren of the cast. */
tree
build_c_cast (location_t loc, tree type, tree expr)
{
tree value;
if (TREE_CODE (expr) == EXCESS_PRECISION_EXPR)
expr = TREE_OPERAND (expr, 0);
value = expr;
if (type == error_mark_node || expr == error_mark_node)
return error_mark_node;
/* The ObjC front-end uses TYPE_MAIN_VARIANT to tie together types differing
only in <protocol> qualifications. But when constructing cast expressions,
the protocols do matter and must be kept around. */
if (objc_is_object_ptr (type) && objc_is_object_ptr (TREE_TYPE (expr)))
return build1 (NOP_EXPR, type, expr);
type = TYPE_MAIN_VARIANT (type);
if (TREE_CODE (type) == ARRAY_TYPE)
{
error_at (loc, "cast specifies array type");
return error_mark_node;
}
if (TREE_CODE (type) == FUNCTION_TYPE)
{
error_at (loc, "cast specifies function type");
return error_mark_node;
}
if (!VOID_TYPE_P (type))
{
value = require_complete_type (value);
if (value == error_mark_node)
return error_mark_node;
}
if (type == TYPE_MAIN_VARIANT (TREE_TYPE (value)))
{
if (TREE_CODE (type) == RECORD_TYPE
|| TREE_CODE (type) == UNION_TYPE)
pedwarn (loc, OPT_pedantic,
"ISO C forbids casting nonscalar to the same type");
}
else if (TREE_CODE (type) == UNION_TYPE)
{
tree field;
for (field = TYPE_FIELDS (type); field; field = DECL_CHAIN (field))
if (TREE_TYPE (field) != error_mark_node
&& comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (field)),
TYPE_MAIN_VARIANT (TREE_TYPE (value))))
break;
if (field)
{
tree t;
bool maybe_const = true;
pedwarn (loc, OPT_pedantic, "ISO C forbids casts to union type");
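/* E.g. (illustrative): given 'union u { int i; double d; };', the
GNU extension '(union u) 3' is accepted here and represented as a
CONSTRUCTOR initializing the 'int' member.  */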
t = c_fully_fold (value, false, &maybe_const);
t = build_constructor_single (type, field, t);
if (!maybe_const)
t = c_wrap_maybe_const (t, true);
t = digest_init (loc, type, t,
NULL_TREE, false, true, 0);
TREE_CONSTANT (t) = TREE_CONSTANT (value);
return t;
}
error_at (loc, "cast to union type from type not present in union");
return error_mark_node;
}
else
{
tree otype, ovalue;
if (type == void_type_node)
{
tree t = build1 (CONVERT_EXPR, type, value);
SET_EXPR_LOCATION (t, loc);
return t;
}
otype = TREE_TYPE (value);
/* Optionally warn about potentially worrisome casts. */
if (warn_cast_qual
&& TREE_CODE (type) == POINTER_TYPE
&& TREE_CODE (otype) == POINTER_TYPE)
handle_warn_cast_qual (loc, type, otype);
/* Warn about conversions between pointers to disjoint
address spaces. */
if (TREE_CODE (type) == POINTER_TYPE
&& TREE_CODE (otype) == POINTER_TYPE
&& !null_pointer_constant_p (value))
{
addr_space_t as_to = TYPE_ADDR_SPACE (TREE_TYPE (type));
addr_space_t as_from = TYPE_ADDR_SPACE (TREE_TYPE (otype));
addr_space_t as_common;
if (!addr_space_superset (as_to, as_from, &as_common))
{
if (ADDR_SPACE_GENERIC_P (as_from))
warning_at (loc, 0, "cast to %s address space pointer "
"from disjoint generic address space pointer",
c_addr_space_name (as_to));
else if (ADDR_SPACE_GENERIC_P (as_to))
warning_at (loc, 0, "cast to generic address space pointer "
"from disjoint %s address space pointer",
c_addr_space_name (as_from));
else
warning_at (loc, 0, "cast to %s address space pointer "
"from disjoint %s address space pointer",
c_addr_space_name (as_to),
c_addr_space_name (as_from));
}
}
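/* Illustrative only, on a target providing named address spaces
(e.g. AVR): casting a generic 'char *' to a '__flash char *' pointer,
where neither space is a superset of the other, is diagnosed here.  */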
/* Warn about possible alignment problems. */
if (STRICT_ALIGNMENT
&& TREE_CODE (type) == POINTER_TYPE
&& TREE_CODE (otype) == POINTER_TYPE
&& TREE_CODE (TREE_TYPE (otype)) != VOID_TYPE
&& TREE_CODE (TREE_TYPE (otype)) != FUNCTION_TYPE
/* Don't warn about opaque types, where the actual alignment
restriction is unknown. */
&& !((TREE_CODE (TREE_TYPE (otype)) == UNION_TYPE
|| TREE_CODE (TREE_TYPE (otype)) == RECORD_TYPE)
&& TYPE_MODE (TREE_TYPE (otype)) == VOIDmode)
&& TYPE_ALIGN (TREE_TYPE (type)) > TYPE_ALIGN (TREE_TYPE (otype)))
warning_at (loc, OPT_Wcast_align,
"cast increases required alignment of target type");
if (TREE_CODE (type) == INTEGER_TYPE
&& TREE_CODE (otype) == POINTER_TYPE
&& TYPE_PRECISION (type) != TYPE_PRECISION (otype))
/* Unlike conversion of integers to pointers, where the
warning is disabled for converting constants because
of cases such as SIG_*, warn about converting constant
pointers to integers. In some cases it may cause unwanted
sign extension, and a warning is appropriate. */
warning_at (loc, OPT_Wpointer_to_int_cast,
"cast from pointer to integer of different size");
if (TREE_CODE (value) == CALL_EXPR
&& TREE_CODE (type) != TREE_CODE (otype))
warning_at (loc, OPT_Wbad_function_cast,
"cast from function call of type %qT "
"to non-matching type %qT", otype, type);
if (TREE_CODE (type) == POINTER_TYPE
&& TREE_CODE (otype) == INTEGER_TYPE
&& TYPE_PRECISION (type) != TYPE_PRECISION (otype)
/* Don't warn about converting any constant. */
&& !TREE_CONSTANT (value))
warning_at (loc,
OPT_Wint_to_pointer_cast, "cast to pointer from integer "
"of different size");
if (warn_strict_aliasing <= 2)
strict_aliasing_warning (otype, type, expr);
/* If pedantic, warn for conversions between function and object
pointer types, except for converting a null pointer constant
to function pointer type. */
if (pedantic
&& TREE_CODE (type) == POINTER_TYPE
&& TREE_CODE (otype) == POINTER_TYPE
&& TREE_CODE (TREE_TYPE (otype)) == FUNCTION_TYPE
&& TREE_CODE (TREE_TYPE (type)) != FUNCTION_TYPE)
pedwarn (loc, OPT_pedantic, "ISO C forbids "
"conversion of function pointer to object pointer type");
if (pedantic
&& TREE_CODE (type) == POINTER_TYPE
&& TREE_CODE (otype) == POINTER_TYPE
&& TREE_CODE (TREE_TYPE (type)) == FUNCTION_TYPE
&& TREE_CODE (TREE_TYPE (otype)) != FUNCTION_TYPE
&& !null_pointer_constant_p (value))
pedwarn (loc, OPT_pedantic, "ISO C forbids "
"conversion of object pointer to function pointer type");
ovalue = value;
value = convert (type, value);
/* Ignore any integer overflow caused by the cast. */
if (TREE_CODE (value) == INTEGER_CST && !FLOAT_TYPE_P (otype))
{
if (CONSTANT_CLASS_P (ovalue) && TREE_OVERFLOW (ovalue))
{
if (!TREE_OVERFLOW (value))
{
/* Avoid clobbering a shared constant. */
value = copy_node (value);
TREE_OVERFLOW (value) = TREE_OVERFLOW (ovalue);
}
}
else if (TREE_OVERFLOW (value))
/* Reset VALUE's overflow flags, ensuring constant sharing. */
value = build_int_cst_wide (TREE_TYPE (value),
TREE_INT_CST_LOW (value),
TREE_INT_CST_HIGH (value));
}
}
/* Don't let a cast be an lvalue. */
if (value == expr)
value = non_lvalue_loc (loc, value);
/* Don't allow the results of casting to floating-point or complex
types be confused with actual constants, or casts involving
integer and pointer types other than direct integer-to-integer
and integer-to-pointer be confused with integer constant
expressions and null pointer constants. */
if (TREE_CODE (value) == REAL_CST
|| TREE_CODE (value) == COMPLEX_CST
|| (TREE_CODE (value) == INTEGER_CST
&& !((TREE_CODE (expr) == INTEGER_CST
&& INTEGRAL_TYPE_P (TREE_TYPE (expr)))
|| TREE_CODE (expr) == REAL_CST
|| TREE_CODE (expr) == COMPLEX_CST)))
value = build1 (NOP_EXPR, type, value);
if (CAN_HAVE_LOCATION_P (value))
SET_EXPR_LOCATION (value, loc);
return value;
}
/* Interpret a cast of expression EXPR to type TYPE. LOC is the
location of the open paren of the cast, or the position of the cast
expr. */
tree
c_cast_expr (location_t loc, struct c_type_name *type_name, tree expr)
{
tree type;
tree type_expr = NULL_TREE;
bool type_expr_const = true;
tree ret;
int saved_wsp = warn_strict_prototypes;
/* This avoids warnings about unprototyped casts on
integers. E.g. "#define SIG_DFL (void(*)())0". */
if (TREE_CODE (expr) == INTEGER_CST)
warn_strict_prototypes = 0;
type = groktypename (type_name, &type_expr, &type_expr_const);
warn_strict_prototypes = saved_wsp;
ret = build_c_cast (loc, type, expr);
if (type_expr)
{
ret = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (ret), type_expr, ret);
C_MAYBE_CONST_EXPR_NON_CONST (ret) = !type_expr_const;
SET_EXPR_LOCATION (ret, loc);
}
if (CAN_HAVE_LOCATION_P (ret) && !EXPR_HAS_LOCATION (ret))
SET_EXPR_LOCATION (ret, loc);
/* C++ does not permit types to be defined in a cast, but it
allows references to incomplete types. */
if (warn_cxx_compat && type_name->specs->typespec_kind == ctsk_tagdef)
warning_at (loc, OPT_Wc___compat,
"defining a type in a cast is invalid in C++");
return ret;
}
/* Build an assignment expression of lvalue LHS from value RHS.
If LHS_ORIGTYPE is not NULL, it is the original type of LHS, which
may differ from TREE_TYPE (LHS) for an enum bitfield.
MODIFYCODE is the code for a binary operator that we use
to combine the old value of LHS with RHS to get the new value.
Or else MODIFYCODE is NOP_EXPR meaning do a simple assignment.
If RHS_ORIGTYPE is not NULL_TREE, it is the original type of RHS,
which may differ from TREE_TYPE (RHS) for an enum value.
LOCATION is the location of the MODIFYCODE operator.
RHS_LOC is the location of the RHS. */
tree
build_modify_expr (location_t location, tree lhs, tree lhs_origtype,
enum tree_code modifycode,
location_t rhs_loc, tree rhs, tree rhs_origtype)
{
tree result;
tree newrhs;
tree rhs_semantic_type = NULL_TREE;
tree lhstype = TREE_TYPE (lhs);
tree olhstype = lhstype;
bool npc;
/* Types that aren't fully specified cannot be used in assignments. */
lhs = require_complete_type (lhs);
/* Avoid duplicate error messages from operands that had errors. */
if (TREE_CODE (lhs) == ERROR_MARK || TREE_CODE (rhs) == ERROR_MARK)
return error_mark_node;
/* For ObjC properties, defer this check. */
if (!objc_is_property_ref (lhs) && !lvalue_or_else (location, lhs, lv_assign))
return error_mark_node;
if (TREE_CODE (rhs) == EXCESS_PRECISION_EXPR)
{
rhs_semantic_type = TREE_TYPE (rhs);
rhs = TREE_OPERAND (rhs, 0);
}
newrhs = rhs;
if (TREE_CODE (lhs) == C_MAYBE_CONST_EXPR)
{
tree inner = build_modify_expr (location, C_MAYBE_CONST_EXPR_EXPR (lhs),
lhs_origtype, modifycode, rhs_loc, rhs,
rhs_origtype);
if (inner == error_mark_node)
return error_mark_node;
result = build2 (C_MAYBE_CONST_EXPR, TREE_TYPE (inner),
C_MAYBE_CONST_EXPR_PRE (lhs), inner);
gcc_assert (!C_MAYBE_CONST_EXPR_INT_OPERANDS (lhs));
C_MAYBE_CONST_EXPR_NON_CONST (result) = 1;
protected_set_expr_location (result, location);
return result;
}
/* If a binary op has been requested, combine the old LHS value with the RHS
producing the value we should actually store into the LHS. */
if (modifycode != NOP_EXPR)
{
lhs = c_fully_fold (lhs, false, NULL);
lhs = stabilize_reference (lhs);
newrhs = build_binary_op (location,
modifycode, lhs, rhs, 1);
/* The original type of the right hand side is no longer
meaningful. */
rhs_origtype = NULL_TREE;
}
if (c_dialect_objc ())
{
/* Check if we are modifying an Objective-C property reference;
if so, we need to generate setter calls. */
result = objc_maybe_build_modify_expr (lhs, newrhs);
if (result)
return result;
/* Else, do the check that we postponed for Objective-C. */
if (!lvalue_or_else (location, lhs, lv_assign))
return error_mark_node;
}
/* Give an error for storing in something that is 'const'. */
if (TYPE_READONLY (lhstype)
|| ((TREE_CODE (lhstype) == RECORD_TYPE
|| TREE_CODE (lhstype) == UNION_TYPE)
&& C_TYPE_FIELDS_READONLY (lhstype)))
{
readonly_error (lhs, lv_assign);
return error_mark_node;
}
else if (TREE_READONLY (lhs))
readonly_warning (lhs, lv_assign);
/* If storing into a structure or union member,
it has probably been given type `int'.
Compute the type that would go with
the actual amount of storage the member occupies. */
if (TREE_CODE (lhs) == COMPONENT_REF
&& (TREE_CODE (lhstype) == INTEGER_TYPE
|| TREE_CODE (lhstype) == BOOLEAN_TYPE
|| TREE_CODE (lhstype) == REAL_TYPE
|| TREE_CODE (lhstype) == ENUMERAL_TYPE))
lhstype = TREE_TYPE (get_unwidened (lhs, 0));
/* If storing in a field whose actual type is a short or something
narrower than that, we must store in the field in its actual type. */
if (lhstype != TREE_TYPE (lhs))
{
lhs = copy_node (lhs);
TREE_TYPE (lhs) = lhstype;
}
/* Issue -Wc++-compat warnings about an assignment to an enum type
when LHS does not have its original type. This happens for,
e.g., an enum bitfield in a struct. */
if (warn_cxx_compat
&& lhs_origtype != NULL_TREE
&& lhs_origtype != lhstype
&& TREE_CODE (lhs_origtype) == ENUMERAL_TYPE)
{
tree checktype = (rhs_origtype != NULL_TREE
? rhs_origtype
: TREE_TYPE (rhs));
if (checktype != error_mark_node
&& TYPE_MAIN_VARIANT (checktype) != TYPE_MAIN_VARIANT (lhs_origtype))
warning_at (location, OPT_Wc___compat,
"enum conversion in assignment is invalid in C++");
}
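/* Illustrative: for 'struct s { enum e { A, B } f : 2; } x;', the type
of 'x.f' is no longer the enum, so LHS_ORIGTYPE is consulted: 'x.f = 1'
gets the warning above, while 'x.f = B' does not.  */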
/* Convert new value to destination type. Fold it first, then
restore any excess precision information, for the sake of
conversion warnings. */
npc = null_pointer_constant_p (newrhs);
newrhs = c_fully_fold (newrhs, false, NULL);
if (rhs_semantic_type)
newrhs = build1 (EXCESS_PRECISION_EXPR, rhs_semantic_type, newrhs);
newrhs = convert_for_assignment (location, lhstype, newrhs, rhs_origtype,
ic_assign, npc, NULL_TREE, NULL_TREE, 0);
if (TREE_CODE (newrhs) == ERROR_MARK)
return error_mark_node;
/* Emit ObjC write barrier, if necessary. */
if (c_dialect_objc () && flag_objc_gc)
{
result = objc_generate_write_barrier (lhs, modifycode, newrhs);
if (result)
{
protected_set_expr_location (result, location);
return result;
}
}
/* Scan operands. */
result = build2 (MODIFY_EXPR, lhstype, lhs, newrhs);
TREE_SIDE_EFFECTS (result) = 1;
protected_set_expr_location (result, location);
/* If we got the LHS in a different type for storing in,
convert the result back to the nominal type of LHS
so that the value we return always has the same type
as the LHS argument. */
if (olhstype == TREE_TYPE (result))
return result;
result = convert_for_assignment (location, olhstype, result, rhs_origtype,
ic_assign, false, NULL_TREE, NULL_TREE, 0);
protected_set_expr_location (result, location);
return result;
}
/* Return whether STRUCT_TYPE has an anonymous field with type TYPE.
This is used to implement -fplan9-extensions. */
static bool
find_anonymous_field_with_type (tree struct_type, tree type)
{
tree field;
bool found;
gcc_assert (TREE_CODE (struct_type) == RECORD_TYPE
|| TREE_CODE (struct_type) == UNION_TYPE);
found = false;
for (field = TYPE_FIELDS (struct_type);
field != NULL_TREE;
field = TREE_CHAIN (field))
{
if (DECL_NAME (field) == NULL
&& comptypes (type, TYPE_MAIN_VARIANT (TREE_TYPE (field))))
{
if (found)
return false;
found = true;
}
else if (DECL_NAME (field) == NULL
&& (TREE_CODE (TREE_TYPE (field)) == RECORD_TYPE
|| TREE_CODE (TREE_TYPE (field)) == UNION_TYPE)
&& find_anonymous_field_with_type (TREE_TYPE (field), type))
{
if (found)
return false;
found = true;
}
}
return found;
}
/* RHS is an expression whose type is pointer to struct. If there is
an anonymous field in RHS with type TYPE, then return a pointer to
that field in RHS. This is used with -fplan9-extensions. This
returns NULL if no conversion could be found. */
static tree
convert_to_anonymous_field (location_t location, tree type, tree rhs)
{
tree rhs_struct_type, lhs_main_type;
tree field, found_field;
bool found_sub_field;
tree ret;
gcc_assert (POINTER_TYPE_P (TREE_TYPE (rhs)));
rhs_struct_type = TREE_TYPE (TREE_TYPE (rhs));
gcc_assert (TREE_CODE (rhs_struct_type) == RECORD_TYPE
|| TREE_CODE (rhs_struct_type) == UNION_TYPE);
gcc_assert (POINTER_TYPE_P (type));
lhs_main_type = TYPE_MAIN_VARIANT (TREE_TYPE (type));
found_field = NULL_TREE;
found_sub_field = false;
for (field = TYPE_FIELDS (rhs_struct_type);
field != NULL_TREE;
field = TREE_CHAIN (field))
{
if (DECL_NAME (field) != NULL_TREE
|| (TREE_CODE (TREE_TYPE (field)) != RECORD_TYPE
&& TREE_CODE (TREE_TYPE (field)) != UNION_TYPE))
continue;
if (comptypes (lhs_main_type, TYPE_MAIN_VARIANT (TREE_TYPE (field))))
{
if (found_field != NULL_TREE)
return NULL_TREE;
found_field = field;
}
else if (find_anonymous_field_with_type (TREE_TYPE (field),
lhs_main_type))
{
if (found_field != NULL_TREE)
return NULL_TREE;
found_field = field;
found_sub_field = true;
}
}
if (found_field == NULL_TREE)
return NULL_TREE;
ret = fold_build3_loc (location, COMPONENT_REF, TREE_TYPE (found_field),
build_fold_indirect_ref (rhs), found_field,
NULL_TREE);
ret = build_fold_addr_expr_loc (location, ret);
if (found_sub_field)
{
ret = convert_to_anonymous_field (location, type, ret);
gcc_assert (ret != NULL_TREE);
}
return ret;
}
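/* Illustrative use of the -fplan9-extensions conversion above:
struct inner { int i; };
struct outer { struct inner; int j; };
extern void f (struct inner *);
void g (struct outer *p) { f (p); }
Here the argument 'p' is implicitly converted to a pointer to the
anonymous 'struct inner' field of '*p'.  */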
/* Convert value RHS to type TYPE as preparation for an assignment to
an lvalue of type TYPE. If ORIGTYPE is not NULL_TREE, it is the
original type of RHS; this differs from TREE_TYPE (RHS) for enum
types. NULL_POINTER_CONSTANT says whether RHS was a null pointer
constant before any folding.
The real work of conversion is done by `convert'.
The purpose of this function is to generate error messages
for assignments that are not allowed in C.
ERRTYPE says whether it is argument passing, assignment,
initialization or return.
LOCATION is the location of the RHS.
FUNCTION is a tree for the function being called.
PARMNUM is the number of the argument, for printing in error messages. */
static tree
convert_for_assignment (location_t location, tree type, tree rhs,
tree origtype, enum impl_conv errtype,
bool null_pointer_constant, tree fundecl,
tree function, int parmnum)
{
enum tree_code codel = TREE_CODE (type);
tree orig_rhs = rhs;
tree rhstype;
enum tree_code coder;
tree rname = NULL_TREE;
bool objc_ok = false;
if (errtype == ic_argpass)
{
tree selector;
/* Change pointer to function to the function itself for
diagnostics. */
if (TREE_CODE (function) == ADDR_EXPR
&& TREE_CODE (TREE_OPERAND (function, 0)) == FUNCTION_DECL)
function = TREE_OPERAND (function, 0);
/* Handle an ObjC selector specially for diagnostics. */
selector = objc_message_selector ();
rname = function;
if (selector && parmnum > 2)
{
rname = selector;
parmnum -= 2;
}
}
/* This macro is used to emit diagnostics to ensure that all format
strings are complete sentences, visible to gettext and checked at
compile time. */
#define WARN_FOR_ASSIGNMENT(LOCATION, OPT, AR, AS, IN, RE) \
do { \
switch (errtype) \
{ \
case ic_argpass: \
if (pedwarn (LOCATION, OPT, AR, parmnum, rname)) \
inform ((fundecl && !DECL_IS_BUILTIN (fundecl)) \
? DECL_SOURCE_LOCATION (fundecl) : LOCATION, \
"expected %qT but argument is of type %qT", \
type, rhstype); \
break; \
case ic_assign: \
pedwarn (LOCATION, OPT, AS); \
break; \
case ic_init: \
pedwarn_init (LOCATION, OPT, IN); \
break; \
case ic_return: \
pedwarn (LOCATION, OPT, RE); \
break; \
default: \
gcc_unreachable (); \
} \
} while (0)
/* This macro is used to emit diagnostics to ensure that all format
strings are complete sentences, visible to gettext and checked at
compile time. It is the same as WARN_FOR_ASSIGNMENT but with an
extra parameter to enumerate qualifiers. */
#define WARN_FOR_QUALIFIERS(LOCATION, OPT, AR, AS, IN, RE, QUALS) \
do { \
switch (errtype) \
{ \
case ic_argpass: \
if (pedwarn (LOCATION, OPT, AR, parmnum, rname, QUALS)) \
inform ((fundecl && !DECL_IS_BUILTIN (fundecl)) \
? DECL_SOURCE_LOCATION (fundecl) : LOCATION, \
"expected %qT but argument is of type %qT", \
type, rhstype); \
break; \
case ic_assign: \
pedwarn (LOCATION, OPT, AS, QUALS); \
break; \
case ic_init: \
pedwarn (LOCATION, OPT, IN, QUALS); \
break; \
case ic_return: \
pedwarn (LOCATION, OPT, RE, QUALS); \
break; \
default: \
gcc_unreachable (); \
} \
} while (0)
if (TREE_CODE (rhs) == EXCESS_PRECISION_EXPR)
rhs = TREE_OPERAND (rhs, 0);
rhstype = TREE_TYPE (rhs);
coder = TREE_CODE (rhstype);
if (coder == ERROR_MARK)
return error_mark_node;
if (c_dialect_objc ())
{
int parmno;
switch (errtype)
{
case ic_return:
parmno = 0;
break;
case ic_assign:
parmno = -1;
break;
case ic_init:
parmno = -2;
break;
default:
parmno = parmnum;
break;
}
objc_ok = objc_compare_types (type, rhstype, parmno, rname);
}
if (warn_cxx_compat)
{
tree checktype = origtype != NULL_TREE ? origtype : rhstype;
if (checktype != error_mark_node
&& TREE_CODE (type) == ENUMERAL_TYPE
&& TYPE_MAIN_VARIANT (checktype) != TYPE_MAIN_VARIANT (type))
{
WARN_FOR_ASSIGNMENT (input_location, OPT_Wc___compat,
G_("enum conversion when passing argument "
"%d of %qE is invalid in C++"),
G_("enum conversion in assignment is "
"invalid in C++"),
G_("enum conversion in initialization is "
"invalid in C++"),
G_("enum conversion in return is "
"invalid in C++"));
}
}
if (TYPE_MAIN_VARIANT (type) == TYPE_MAIN_VARIANT (rhstype))
return rhs;
if (coder == VOID_TYPE)
{
/* Except for passing an argument to an unprototyped function,
this is a constraint violation. When passing an argument to
an unprototyped function, it is compile-time undefined;
making it a constraint in that case was rejected in
DR#252. */
error_at (location, "void value not ignored as it ought to be");
return error_mark_node;
}
rhs = require_complete_type (rhs);
if (rhs == error_mark_node)
return error_mark_node;
/* A type converts to a reference to it.
This code doesn't fully support references, it's just for the
special case of va_start and va_copy. */
if (codel == REFERENCE_TYPE
&& comptypes (TREE_TYPE (type), TREE_TYPE (rhs)) == 1)
{
if (!lvalue_p (rhs))
{
error_at (location, "cannot pass rvalue to reference parameter");
return error_mark_node;
}
if (!c_mark_addressable (rhs))
return error_mark_node;
rhs = build1 (ADDR_EXPR, build_pointer_type (TREE_TYPE (rhs)), rhs);
SET_EXPR_LOCATION (rhs, location);
/* We already know that these two types are compatible, but they
may not be exactly identical. In fact, `TREE_TYPE (type)' is
likely to be __builtin_va_list and `TREE_TYPE (rhs)' is
likely to be va_list, a typedef to __builtin_va_list, which
is different enough that it will cause problems later. */
if (TREE_TYPE (TREE_TYPE (rhs)) != TREE_TYPE (type))
{
rhs = build1 (NOP_EXPR, build_pointer_type (TREE_TYPE (type)), rhs);
SET_EXPR_LOCATION (rhs, location);
}
rhs = build1 (NOP_EXPR, type, rhs);
SET_EXPR_LOCATION (rhs, location);
return rhs;
}
/* Some types can interconvert without explicit casts. */
else if (codel == VECTOR_TYPE && coder == VECTOR_TYPE
&& vector_types_convertible_p (type, TREE_TYPE (rhs), true))
return convert (type, rhs);
/* Arithmetic types all interconvert, and enum is treated like int. */
else if ((codel == INTEGER_TYPE || codel == REAL_TYPE
|| codel == FIXED_POINT_TYPE
|| codel == ENUMERAL_TYPE || codel == COMPLEX_TYPE
|| codel == BOOLEAN_TYPE)
&& (coder == INTEGER_TYPE || coder == REAL_TYPE
|| coder == FIXED_POINT_TYPE
|| coder == ENUMERAL_TYPE || coder == COMPLEX_TYPE
|| coder == BOOLEAN_TYPE))
{
tree ret;
bool save = in_late_binary_op;
if (codel == BOOLEAN_TYPE || codel == COMPLEX_TYPE)
in_late_binary_op = true;
ret = convert_and_check (type, orig_rhs);
if (codel == BOOLEAN_TYPE || codel == COMPLEX_TYPE)
in_late_binary_op = save;
return ret;
}
/* Aggregates in different TUs might need conversion. */
if ((codel == RECORD_TYPE || codel == UNION_TYPE)
&& codel == coder
&& comptypes (type, rhstype))
return convert_and_check (type, rhs);
/* Conversion to a transparent union or record from its member types.
This applies only to function arguments. */
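/* Illustrative: given
union arg { int *ip; const char *cp; } __attribute__ ((transparent_union));
extern void f (union arg);
a call 'f (&n)' for 'int n' converts the pointer to the matching
member below rather than requiring a union-typed argument.  */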
if (((codel == UNION_TYPE || codel == RECORD_TYPE)
&& TYPE_TRANSPARENT_AGGR (type))
&& errtype == ic_argpass)
{
tree memb, marginal_memb = NULL_TREE;
for (memb = TYPE_FIELDS (type); memb ; memb = DECL_CHAIN (memb))
{
tree memb_type = TREE_TYPE (memb);
if (comptypes (TYPE_MAIN_VARIANT (memb_type),
TYPE_MAIN_VARIANT (rhstype)))
break;
if (TREE_CODE (memb_type) != POINTER_TYPE)
continue;
if (coder == POINTER_TYPE)
{
tree ttl = TREE_TYPE (memb_type);
tree ttr = TREE_TYPE (rhstype);
/* Any non-function converts to a [const][volatile] void *
and vice versa; otherwise, targets must be the same.
Meanwhile, the lhs target must have all the qualifiers of
the rhs. */
if (VOID_TYPE_P (ttl) || VOID_TYPE_P (ttr)
|| comp_target_types (location, memb_type, rhstype))
{
/* If this type won't generate any warnings, use it. */
if (TYPE_QUALS (ttl) == TYPE_QUALS (ttr)
|| ((TREE_CODE (ttr) == FUNCTION_TYPE
&& TREE_CODE (ttl) == FUNCTION_TYPE)
? ((TYPE_QUALS (ttl) | TYPE_QUALS (ttr))
== TYPE_QUALS (ttr))
: ((TYPE_QUALS (ttl) | TYPE_QUALS (ttr))
== TYPE_QUALS (ttl))))
break;
/* Keep looking for a better type, but remember this one. */
if (!marginal_memb)
marginal_memb = memb;
}
}
/* Can convert integer zero to any pointer type. */
if (null_pointer_constant)
{
rhs = null_pointer_node;
break;
}
}
if (memb || marginal_memb)
{
if (!memb)
{
/* We have only a marginally acceptable member type;
it needs a warning. */
tree ttl = TREE_TYPE (TREE_TYPE (marginal_memb));
tree ttr = TREE_TYPE (rhstype);
/* Const and volatile mean something different for function
types, so the usual warnings are not appropriate. */
if (TREE_CODE (ttr) == FUNCTION_TYPE
&& TREE_CODE (ttl) == FUNCTION_TYPE)
{
/* Because const and volatile on functions are
restrictions that say the function will not do
certain things, it is okay to use a const or volatile
function where an ordinary one is wanted, but not
vice-versa. */
if (TYPE_QUALS_NO_ADDR_SPACE (ttl)
& ~TYPE_QUALS_NO_ADDR_SPACE (ttr))
WARN_FOR_QUALIFIERS (location, 0,
G_("passing argument %d of %qE "
"makes %q#v qualified function "
"pointer from unqualified"),
G_("assignment makes %q#v qualified "
"function pointer from "
"unqualified"),
G_("initialization makes %q#v qualified "
"function pointer from "
"unqualified"),
G_("return makes %q#v qualified function "
"pointer from unqualified"),
TYPE_QUALS (ttl) & ~TYPE_QUALS (ttr));
}
else if (TYPE_QUALS_NO_ADDR_SPACE (ttr)
& ~TYPE_QUALS_NO_ADDR_SPACE (ttl))
WARN_FOR_QUALIFIERS (location, 0,
G_("passing argument %d of %qE discards "
"%qv qualifier from pointer target type"),
G_("assignment discards %qv qualifier "
"from pointer target type"),
G_("initialization discards %qv qualifier "
"from pointer target type"),
G_("return discards %qv qualifier from "
"pointer target type"),
TYPE_QUALS (ttr) & ~TYPE_QUALS (ttl));
memb = marginal_memb;
}
if (!fundecl || !DECL_IN_SYSTEM_HEADER (fundecl))
pedwarn (location, OPT_pedantic,
"ISO C prohibits argument conversion to union type");
rhs = fold_convert_loc (location, TREE_TYPE (memb), rhs);
return build_constructor_single (type, memb, rhs);
}
}
/* Conversions among pointers. */
else if ((codel == POINTER_TYPE || codel == REFERENCE_TYPE)
&& (coder == codel))
{
tree ttl = TREE_TYPE (type);
tree ttr = TREE_TYPE (rhstype);
tree mvl = ttl;
tree mvr = ttr;
bool is_opaque_pointer;
int target_cmp = 0; /* Cache comp_target_types () result. */
addr_space_t asl;
addr_space_t asr;
if (TREE_CODE (mvl) != ARRAY_TYPE)
mvl = TYPE_MAIN_VARIANT (mvl);
if (TREE_CODE (mvr) != ARRAY_TYPE)
mvr = TYPE_MAIN_VARIANT (mvr);
/* Opaque pointers are treated like void pointers. */
is_opaque_pointer = vector_targets_convertible_p (ttl, ttr);
/* The Plan 9 compiler permits a pointer to a struct to be
automatically converted into a pointer to an anonymous field
within the struct. */
if (flag_plan9_extensions
&& (TREE_CODE (mvl) == RECORD_TYPE || TREE_CODE(mvl) == UNION_TYPE)
&& (TREE_CODE (mvr) == RECORD_TYPE || TREE_CODE(mvr) == UNION_TYPE)
&& mvl != mvr)
{
tree new_rhs = convert_to_anonymous_field (location, type, rhs);
if (new_rhs != NULL_TREE)
{
rhs = new_rhs;
rhstype = TREE_TYPE (rhs);
coder = TREE_CODE (rhstype);
ttr = TREE_TYPE (rhstype);
mvr = TYPE_MAIN_VARIANT (ttr);
}
}
/* C++ does not allow the implicit conversion void* -> T*. However,
for the purpose of reducing the number of false positives, we
tolerate the special case of
int *p = NULL;
where NULL is typically defined in C to be '(void *) 0'. */
if (VOID_TYPE_P (ttr) && rhs != null_pointer_node && !VOID_TYPE_P (ttl))
warning_at (location, OPT_Wc___compat,
"request for implicit conversion "
"from %qT to %qT not permitted in C++", rhstype, type);
/* See if the pointers point to incompatible address spaces. */
asl = TYPE_ADDR_SPACE (ttl);
asr = TYPE_ADDR_SPACE (ttr);
if (!null_pointer_constant_p (rhs)
&& asr != asl && !targetm.addr_space.subset_p (asr, asl))
{
switch (errtype)
{
case ic_argpass:
error_at (location, "passing argument %d of %qE from pointer to "
"non-enclosed address space", parmnum, rname);
break;
case ic_assign:
error_at (location, "assignment from pointer to "
"non-enclosed address space");
break;
case ic_init:
error_at (location, "initialization from pointer to "
"non-enclosed address space");
break;
case ic_return:
error_at (location, "return from pointer to "
"non-enclosed address space");
break;
default:
gcc_unreachable ();
}
return error_mark_node;
}
/* Check if the right-hand side has a format attribute but the
left-hand side doesn't. */
if (warn_missing_format_attribute
&& check_missing_format_attribute (type, rhstype))
{
switch (errtype)
{
case ic_argpass:
warning_at (location, OPT_Wmissing_format_attribute,
"argument %d of %qE might be "
"a candidate for a format attribute",
parmnum, rname);
break;
case ic_assign:
warning_at (location, OPT_Wmissing_format_attribute,
"assignment left-hand side might be "
"a candidate for a format attribute");
break;
case ic_init:
warning_at (location, OPT_Wmissing_format_attribute,
"initialization left-hand side might be "
"a candidate for a format attribute");
break;
case ic_return:
warning_at (location, OPT_Wmissing_format_attribute,
"return type might be "
"a candidate for a format attribute");
break;
default:
gcc_unreachable ();
}
}
/* Any non-function converts to a [const][volatile] void *
and vice versa; otherwise, targets must be the same.
Meanwhile, the lhs target must have all the qualifiers of the rhs. */
if (VOID_TYPE_P (ttl) || VOID_TYPE_P (ttr)
|| (target_cmp = comp_target_types (location, type, rhstype))
|| is_opaque_pointer
|| (c_common_unsigned_type (mvl)
== c_common_unsigned_type (mvr)))
{
if (pedantic
&& ((VOID_TYPE_P (ttl) && TREE_CODE (ttr) == FUNCTION_TYPE)
||
(VOID_TYPE_P (ttr)
&& !null_pointer_constant
&& TREE_CODE (ttl) == FUNCTION_TYPE)))
WARN_FOR_ASSIGNMENT (location, OPT_pedantic,
G_("ISO C forbids passing argument %d of "
"%qE between function pointer "
"and %<void *%>"),
G_("ISO C forbids assignment between "
"function pointer and %<void *%>"),
G_("ISO C forbids initialization between "
"function pointer and %<void *%>"),
G_("ISO C forbids return between function "
"pointer and %<void *%>"));
/* Const and volatile mean something different for function types,
so the usual warnings are not appropriate. */
else if (TREE_CODE (ttr) != FUNCTION_TYPE
&& TREE_CODE (ttl) != FUNCTION_TYPE)
{
if (TYPE_QUALS_NO_ADDR_SPACE (ttr)
& ~TYPE_QUALS_NO_ADDR_SPACE (ttl))
{
WARN_FOR_QUALIFIERS (location, 0,
G_("passing argument %d of %qE discards "
"%qv qualifier from pointer target type"),
G_("assignment discards %qv qualifier "
"from pointer target type"),
G_("initialization discards %qv qualifier "
"from pointer target type"),
G_("return discards %qv qualifier from "
"pointer target type"),
TYPE_QUALS (ttr) & ~TYPE_QUALS (ttl));
}
/* If this is not a case of ignoring a mismatch in signedness,
no warning. */
else if (VOID_TYPE_P (ttl) || VOID_TYPE_P (ttr)
|| target_cmp)
;
/* If there is a mismatch, do warn. */
else if (warn_pointer_sign)
WARN_FOR_ASSIGNMENT (location, OPT_Wpointer_sign,
G_("pointer targets in passing argument "
"%d of %qE differ in signedness"),
G_("pointer targets in assignment "
"differ in signedness"),
G_("pointer targets in initialization "
"differ in signedness"),
G_("pointer targets in return differ "
"in signedness"));
}
else if (TREE_CODE (ttl) == FUNCTION_TYPE
&& TREE_CODE (ttr) == FUNCTION_TYPE)
{
/* Because const and volatile on functions are restrictions
that say the function will not do certain things,
it is okay to use a const or volatile function
where an ordinary one is wanted, but not vice-versa. */
if (TYPE_QUALS_NO_ADDR_SPACE (ttl)
& ~TYPE_QUALS_NO_ADDR_SPACE (ttr))
WARN_FOR_QUALIFIERS (location, 0,
G_("passing argument %d of %qE makes "
"%q#v qualified function pointer "
"from unqualified"),
G_("assignment makes %q#v qualified function "
"pointer from unqualified"),
G_("initialization makes %q#v qualified "
"function pointer from unqualified"),
G_("return makes %q#v qualified function "
"pointer from unqualified"),
TYPE_QUALS (ttl) & ~TYPE_QUALS (ttr));
}
}
else
/* Avoid warning about the volatile ObjC EH puts on decls. */
if (!objc_ok)
WARN_FOR_ASSIGNMENT (location, 0,
G_("passing argument %d of %qE from "
"incompatible pointer type"),
G_("assignment from incompatible pointer type"),
G_("initialization from incompatible "
"pointer type"),
G_("return from incompatible pointer type"));
return convert (type, rhs);
}
else if (codel == POINTER_TYPE && coder == ARRAY_TYPE)
{
/* ??? This should not be an error when inlining calls to
unprototyped functions. */
error_at (location, "invalid use of non-lvalue array");
return error_mark_node;
}
else if (codel == POINTER_TYPE && coder == INTEGER_TYPE)
{
/* An explicit constant 0 can convert to a pointer,
or one that results from arithmetic, even including
a cast to integer type. */
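/* E.g. (illustrative): 'int *p; p = 42;' triggers the warning below,
while 'p = 0;' does not, 0 being a null pointer constant.  */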
if (!null_pointer_constant)
WARN_FOR_ASSIGNMENT (location, 0,
G_("passing argument %d of %qE makes "
"pointer from integer without a cast"),
G_("assignment makes pointer from integer "
"without a cast"),
G_("initialization makes pointer from "
"integer without a cast"),
G_("return makes pointer from integer "
"without a cast"));
return convert (type, rhs);
}
else if (codel == INTEGER_TYPE && coder == POINTER_TYPE)
{
WARN_FOR_ASSIGNMENT (location, 0,
G_("passing argument %d of %qE makes integer "
"from pointer without a cast"),
G_("assignment makes integer from pointer "
"without a cast"),
G_("initialization makes integer from pointer "
"without a cast"),
G_("return makes integer from pointer "
"without a cast"));
return convert (type, rhs);
}
else if (codel == BOOLEAN_TYPE && coder == POINTER_TYPE)
{
tree ret;
bool save = in_late_binary_op;
in_late_binary_op = true;
ret = convert (type, rhs);
in_late_binary_op = save;
return ret;
}
switch (errtype)
{
case ic_argpass:
error_at (location, "incompatible type for argument %d of %qE", parmnum, rname);
inform ((fundecl && !DECL_IS_BUILTIN (fundecl))
? DECL_SOURCE_LOCATION (fundecl) : input_location,
"expected %qT but argument is of type %qT", type, rhstype);
break;
case ic_assign:
error_at (location, "incompatible types when assigning to type %qT from "
"type %qT", type, rhstype);
break;
case ic_init:
error_at (location,
"incompatible types when initializing type %qT using type %qT",
type, rhstype);
break;
case ic_return:
error_at (location,
"incompatible types when returning type %qT but %qT was "
"expected", rhstype, type);
break;
default:
gcc_unreachable ();
}
return error_mark_node;
}
/* If VALUE is a compound expr all of whose expressions are constant, then
return its value. Otherwise, return error_mark_node.
This is for handling COMPOUND_EXPRs as initializer elements,
which are allowed with a warning when -pedantic is specified. */
static tree
valid_compound_expr_initializer (tree value, tree endtype)
{
if (TREE_CODE (value) == COMPOUND_EXPR)
{
if (valid_compound_expr_initializer (TREE_OPERAND (value, 0), endtype)
== error_mark_node)
return error_mark_node;
return valid_compound_expr_initializer (TREE_OPERAND (value, 1),
endtype);
}
else if (!initializer_constant_valid_p (value, endtype))
return error_mark_node;
else
return value;
}
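/* Illustrative: with -pedantic, 'static int i = (1, 2);' reaches
digest_init as a COMPOUND_EXPR; both operands are constant, so the
initializer is accepted with a pedwarn (or rejected under
-pedantic-errors).  */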
/* Perform appropriate conversions on the initial value of a variable,
store it in the declaration DECL,
and print any error messages that are appropriate.
If ORIGTYPE is not NULL_TREE, it is the original type of INIT.
If the init is invalid, store an ERROR_MARK.
INIT_LOC is the location of the initial value. */
void
store_init_value (location_t init_loc, tree decl, tree init, tree origtype)
{
tree value, type;
bool npc = false;
/* If variable's type was invalidly declared, just ignore it. */
type = TREE_TYPE (decl);
if (TREE_CODE (type) == ERROR_MARK)
return;
/* Digest the specified initializer into an expression. */
if (init)
npc = null_pointer_constant_p (init);
value = digest_init (init_loc, type, init, origtype, npc,
true, TREE_STATIC (decl));
/* Store the expression if valid; else report error. */
if (!in_system_header
&& AGGREGATE_TYPE_P (TREE_TYPE (decl)) && !TREE_STATIC (decl))
warning (OPT_Wtraditional, "traditional C rejects automatic "
"aggregate initialization");
DECL_INITIAL (decl) = value;
/* ANSI wants warnings about out-of-range constant initializers. */
STRIP_TYPE_NOPS (value);
if (TREE_STATIC (decl))
constant_expression_warning (value);
/* Check if we need to set array size from compound literal size. */
if (TREE_CODE (type) == ARRAY_TYPE
&& TYPE_DOMAIN (type) == 0
&& value != error_mark_node)
{
tree inside_init = init;
STRIP_TYPE_NOPS (inside_init);
inside_init = fold (inside_init);
if (TREE_CODE (inside_init) == COMPOUND_LITERAL_EXPR)
{
tree cldecl = COMPOUND_LITERAL_EXPR_DECL (inside_init);
if (TYPE_DOMAIN (TREE_TYPE (cldecl)))
{
/* For int foo[] = (int [3]){1}; we need to set array size
now since later on array initializer will be just the
brace enclosed list of the compound literal. */
tree etype = strip_array_types (TREE_TYPE (decl));
type = build_distinct_type_copy (TYPE_MAIN_VARIANT (type));
TYPE_DOMAIN (type) = TYPE_DOMAIN (TREE_TYPE (cldecl));
layout_type (type);
layout_decl (cldecl, 0);
TREE_TYPE (decl)
= c_build_qualified_type (type, TYPE_QUALS (etype));
}
}
}
}
/* Methods for storing and printing names for error messages. */
/* Implement a spelling stack that allows components of a name to be pushed
and popped. Each element on the stack is this structure. */
struct spelling
{
int kind;
union
{
unsigned HOST_WIDE_INT i;
const char *s;
} u;
};
#define SPELLING_STRING 1
#define SPELLING_MEMBER 2
#define SPELLING_BOUNDS 3
static struct spelling *spelling; /* Next stack element (unused). */
static struct spelling *spelling_base; /* Spelling stack base. */
static int spelling_size; /* Size of the spelling stack. */
/* Macros to save and restore the spelling stack around push_... functions.
Alternative to SAVE_SPELLING_STACK. */
#define SPELLING_DEPTH() (spelling - spelling_base)
#define RESTORE_SPELLING_DEPTH(DEPTH) (spelling = spelling_base + (DEPTH))
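/* For example (illustrative), while diagnosing the initializer of
'struct s { int n; } a[3];', the stack might hold the string "a", the
bounds 1 and the member "n", which print_spelling renders as "a[1].n"
for use in "(near initialization for %qs)".  */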
/* Push an element on the spelling stack with type KIND and assign VALUE
to MEMBER. */
#define PUSH_SPELLING(KIND, VALUE, MEMBER) \
{ \
int depth = SPELLING_DEPTH (); \
\
if (depth >= spelling_size) \
{ \
spelling_size += 10; \
spelling_base = XRESIZEVEC (struct spelling, spelling_base, \
spelling_size); \
RESTORE_SPELLING_DEPTH (depth); \
} \
\
spelling->kind = (KIND); \
spelling->MEMBER = (VALUE); \
spelling++; \
}
/* Push STRING on the stack. Printed literally. */
static void
push_string (const char *string)
{
PUSH_SPELLING (SPELLING_STRING, string, u.s);
}
/* Push a member name on the stack. Printed as '.' STRING. */
static void
push_member_name (tree decl)
{
const char *const string
= (DECL_NAME (decl)
? identifier_to_locale (IDENTIFIER_POINTER (DECL_NAME (decl)))
: _("<anonymous>"));
PUSH_SPELLING (SPELLING_MEMBER, string, u.s);
}
/* Push array bounds on the stack. Printed as [BOUNDS]. */
static void
push_array_bounds (unsigned HOST_WIDE_INT bounds)
{
PUSH_SPELLING (SPELLING_BOUNDS, bounds, u.i);
}
/* Compute the maximum size in bytes of the printed spelling. */
static int
spelling_length (void)
{
int size = 0;
struct spelling *p;
for (p = spelling_base; p < spelling; p++)
{
if (p->kind == SPELLING_BOUNDS)
size += 25;
else
size += strlen (p->u.s) + 1;
}
return size;
}
/* Print the spelling to BUFFER and return it. */
static char *
print_spelling (char *buffer)
{
char *d = buffer;
struct spelling *p;
for (p = spelling_base; p < spelling; p++)
if (p->kind == SPELLING_BOUNDS)
{
sprintf (d, "[" HOST_WIDE_INT_PRINT_UNSIGNED "]", p->u.i);
d += strlen (d);
}
else
{
const char *s;
if (p->kind == SPELLING_MEMBER)
*d++ = '.';
for (s = p->u.s; (*d = *s++); d++)
;
}
*d++ = '\0';
return buffer;
}
/* Issue an error message for a bad initializer component.
GMSGID identifies the message.
The component name is taken from the spelling stack. */
void
error_init (const char *gmsgid)
{
char *ofwhat;
/* The gmsgid may be a format string with %< and %>. */
error (gmsgid);
ofwhat = print_spelling ((char *) alloca (spelling_length () + 1));
if (*ofwhat)
error ("(near initialization for %qs)", ofwhat);
}
/* Issue a pedantic warning for a bad initializer component. OPT is
the option OPT_* (from options.h) controlling this warning or 0 if
it is unconditionally given. GMSGID identifies the message. The
component name is taken from the spelling stack. */
void
pedwarn_init (location_t location, int opt, const char *gmsgid)
{
char *ofwhat;
/* The gmsgid may be a format string with %< and %>. */
pedwarn (location, opt, gmsgid);
ofwhat = print_spelling ((char *) alloca (spelling_length () + 1));
if (*ofwhat)
pedwarn (location, opt, "(near initialization for %qs)", ofwhat);
}
/* Issue a warning for a bad initializer component.
OPT is the OPT_W* value corresponding to the warning option that
controls this warning. GMSGID identifies the message. The
component name is taken from the spelling stack. */
static void
warning_init (int opt, const char *gmsgid)
{
char *ofwhat;
/* The gmsgid may be a format string with %< and %>. */
warning (opt, gmsgid);
ofwhat = print_spelling ((char *) alloca (spelling_length () + 1));
if (*ofwhat)
warning (opt, "(near initialization for %qs)", ofwhat);
}
/* If TYPE is an array type and EXPR is a parenthesized string
constant, warn if pedantic that EXPR is being used to initialize an
object of type TYPE. */
void
maybe_warn_string_init (tree type, struct c_expr expr)
{
if (pedantic
&& TREE_CODE (type) == ARRAY_TYPE
&& TREE_CODE (expr.value) == STRING_CST
&& expr.original_code != STRING_CST)
pedwarn_init (input_location, OPT_pedantic,
"array initialized from parenthesized string constant");
}
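/* E.g. (illustrative): 'char a[] = ("hi");' draws this pedwarn under
-pedantic, whereas 'char a[] = "hi";' does not.  */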
/* Digest the parser output INIT as an initializer for type TYPE.
Return a C expression of type TYPE to represent the initial value.
If ORIGTYPE is not NULL_TREE, it is the original type of INIT.
NULL_POINTER_CONSTANT is true if INIT is a null pointer constant.
If INIT is a string constant, STRICT_STRING is true if it is
unparenthesized or we should not warn here for it being parenthesized.
For other types of INIT, STRICT_STRING is not used.
INIT_LOC is the location of the INIT.
REQUIRE_CONSTANT requests an error if non-constant initializers or
elements are seen. */
static tree
digest_init (location_t init_loc, tree type, tree init, tree origtype,
bool null_pointer_constant, bool strict_string,
int require_constant)
{
enum tree_code code = TREE_CODE (type);
tree inside_init = init;
tree semantic_type = NULL_TREE;
bool maybe_const = true;
if (type == error_mark_node
|| !init
|| init == error_mark_node
|| TREE_TYPE (init) == error_mark_node)
return error_mark_node;
STRIP_TYPE_NOPS (inside_init);
if (TREE_CODE (inside_init) == EXCESS_PRECISION_EXPR)
{
semantic_type = TREE_TYPE (inside_init);
inside_init = TREE_OPERAND (inside_init, 0);
}
inside_init = c_fully_fold (inside_init, require_constant, &maybe_const);
inside_init = decl_constant_value_for_optimization (inside_init);
/* Initialization of an array of chars from a string constant
optionally enclosed in braces. */
if (code == ARRAY_TYPE && inside_init
&& TREE_CODE (inside_init) == STRING_CST)
{
tree typ1 = TYPE_MAIN_VARIANT (TREE_TYPE (type));
/* Note that an array could be both an array of character type
and an array of wchar_t if wchar_t is signed char or unsigned
char. */
bool char_array = (typ1 == char_type_node
|| typ1 == signed_char_type_node
|| typ1 == unsigned_char_type_node);
bool wchar_array = !!comptypes (typ1, wchar_type_node);
bool char16_array = !!comptypes (typ1, char16_type_node);
bool char32_array = !!comptypes (typ1, char32_type_node);
if (char_array || wchar_array || char16_array || char32_array)
{
struct c_expr expr;
tree typ2 = TYPE_MAIN_VARIANT (TREE_TYPE (TREE_TYPE (inside_init)));
expr.value = inside_init;
expr.original_code = (strict_string ? STRING_CST : ERROR_MARK);
expr.original_type = NULL;
maybe_warn_string_init (type, expr);
if (TYPE_DOMAIN (type) && !TYPE_MAX_VALUE (TYPE_DOMAIN (type)))
pedwarn_init (init_loc, OPT_pedantic,
"initialization of a flexible array member");
if (comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (inside_init)),
TYPE_MAIN_VARIANT (type)))
return inside_init;
if (char_array)
{
if (typ2 != char_type_node)
{
error_init ("char-array initialized from wide string");
return error_mark_node;
}
}
else
{
if (typ2 == char_type_node)
{
error_init ("wide character array initialized from non-wide "
"string");
return error_mark_node;
}
else if (!comptypes (typ1, typ2))
{
error_init ("wide character array initialized from "
"incompatible wide string");
return error_mark_node;
}
}
TREE_TYPE (inside_init) = type;
if (TYPE_DOMAIN (type) != 0
&& TYPE_SIZE (type) != 0
&& TREE_CODE (TYPE_SIZE (type)) == INTEGER_CST)
{
unsigned HOST_WIDE_INT len = TREE_STRING_LENGTH (inside_init);
/* Subtract the size of a single (possibly wide) character
because it's ok to ignore the terminating null char
that is counted in the length of the constant. */
if (0 > compare_tree_int (TYPE_SIZE_UNIT (type),
(len
- (TYPE_PRECISION (typ1)
/ BITS_PER_UNIT))))
pedwarn_init (init_loc, 0,
("initializer-string for array of chars "
"is too long"));
else if (warn_cxx_compat
&& 0 > compare_tree_int (TYPE_SIZE_UNIT (type), len))
warning_at (init_loc, OPT_Wc___compat,
("initializer-string for array chars "
"is too long for C++"));
}
return inside_init;
}
else if (INTEGRAL_TYPE_P (typ1))
{
error_init ("array of inappropriate type initialized "
"from string constant");
return error_mark_node;
}
}
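/* Illustrative initializers handled above (user code, not part of
this file):
char a[4] = "abc";    accepted;
char b[3] = "abc";    accepted in C, -Wc++-compat warns (no room for NUL);
char c[2] = "abc";    pedwarn: initializer-string too long;
wchar_t w[2] = "ab";  error: wide character array from non-wide string.  */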
/* Build a VECTOR_CST from a *constant* vector constructor. If the
vector constructor is not constant (e.g. {1,2,3,foo()}) then punt
below and handle as a constructor. */
if (code == VECTOR_TYPE
&& TREE_CODE (TREE_TYPE (inside_init)) == VECTOR_TYPE
&& vector_types_convertible_p (TREE_TYPE (inside_init), type, true)
&& TREE_CONSTANT (inside_init))
{
if (TREE_CODE (inside_init) == VECTOR_CST
&& comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (inside_init)),
TYPE_MAIN_VARIANT (type)))
return inside_init;
if (TREE_CODE (inside_init) == CONSTRUCTOR)
{
unsigned HOST_WIDE_INT ix;
tree value;
bool constant_p = true;
/* Iterate through elements and check if all constructor
elements are *_CSTs. */
FOR_EACH_CONSTRUCTOR_VALUE (CONSTRUCTOR_ELTS (inside_init), ix, value)
if (!CONSTANT_CLASS_P (value))
{
constant_p = false;
break;
}
if (constant_p)
return build_vector_from_ctor (type,
CONSTRUCTOR_ELTS (inside_init));
}
}
if (warn_sequence_point)
verify_sequence_points (inside_init);
/* Any type can be initialized
from an expression of the same type, optionally with braces. */
if (inside_init && TREE_TYPE (inside_init) != 0
&& (comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (inside_init)),
TYPE_MAIN_VARIANT (type))
|| (code == ARRAY_TYPE
&& comptypes (TREE_TYPE (inside_init), type))
|| (code == VECTOR_TYPE
&& comptypes (TREE_TYPE (inside_init), type))
|| (code == POINTER_TYPE
&& TREE_CODE (TREE_TYPE (inside_init)) == ARRAY_TYPE
&& comptypes (TREE_TYPE (TREE_TYPE (inside_init)),
TREE_TYPE (type)))))
{
if (code == POINTER_TYPE)
{
if (TREE_CODE (TREE_TYPE (inside_init)) == ARRAY_TYPE)
{
if (TREE_CODE (inside_init) == STRING_CST
|| TREE_CODE (inside_init) == COMPOUND_LITERAL_EXPR)
inside_init = array_to_pointer_conversion
(init_loc, inside_init);
else
{
error_init ("invalid use of non-lvalue array");
return error_mark_node;
}
}
}
if (code == VECTOR_TYPE)
/* Although the types are compatible, we may require a
conversion. */
inside_init = convert (type, inside_init);
if (require_constant
&& (code == VECTOR_TYPE || !flag_isoc99)
&& TREE_CODE (inside_init) == COMPOUND_LITERAL_EXPR)
{
/* As an extension, allow initializing objects with static storage
duration with compound literals (which are then treated just as
the brace enclosed list they contain). Also allow this for
vectors, as we can only assign them with compound literals. */
tree decl = COMPOUND_LITERAL_EXPR_DECL (inside_init);
inside_init = DECL_INITIAL (decl);
}
if (code == ARRAY_TYPE && TREE_CODE (inside_init) != STRING_CST
&& TREE_CODE (inside_init) != CONSTRUCTOR)
{
error_init ("array initialized from non-constant array expression");
return error_mark_node;
}
/* Compound expressions can only occur here if -pedantic or
-pedantic-errors is specified. In the latter case, we always want
an error. In the former case, we simply want a warning. */
if (require_constant && pedantic
&& TREE_CODE (inside_init) == COMPOUND_EXPR)
{
inside_init
= valid_compound_expr_initializer (inside_init,
TREE_TYPE (inside_init));
if (inside_init == error_mark_node)
error_init ("initializer element is not constant");
else
pedwarn_init (init_loc, OPT_pedantic,
"initializer element is not constant");
if (flag_pedantic_errors)
inside_init = error_mark_node;
}
else if (require_constant
&& !initializer_constant_valid_p (inside_init,
TREE_TYPE (inside_init)))
{
error_init ("initializer element is not constant");
inside_init = error_mark_node;
}
else if (require_constant && !maybe_const)
pedwarn_init (init_loc, 0,
"initializer element is not a constant expression");
/* Added to enable additional -Wmissing-format-attribute warnings. */
if (TREE_CODE (TREE_TYPE (inside_init)) == POINTER_TYPE)
inside_init = convert_for_assignment (init_loc, type, inside_init,
origtype,
ic_init, null_pointer_constant,
NULL_TREE, NULL_TREE, 0);
return inside_init;
}
/* Handle scalar types, including conversions. */
if (code == INTEGER_TYPE || code == REAL_TYPE || code == FIXED_POINT_TYPE
|| code == POINTER_TYPE || code == ENUMERAL_TYPE || code == BOOLEAN_TYPE
|| code == COMPLEX_TYPE || code == VECTOR_TYPE)
{
if (TREE_CODE (TREE_TYPE (init)) == ARRAY_TYPE
&& (TREE_CODE (init) == STRING_CST
|| TREE_CODE (init) == COMPOUND_LITERAL_EXPR))
inside_init = init = array_to_pointer_conversion (init_loc, init);
if (semantic_type)
inside_init = build1 (EXCESS_PRECISION_EXPR, semantic_type,
inside_init);
inside_init
= convert_for_assignment (init_loc, type, inside_init, origtype,
ic_init, null_pointer_constant,
NULL_TREE, NULL_TREE, 0);
/* Check to see if we have already given an error message. */
if (inside_init == error_mark_node)
;
else if (require_constant && !TREE_CONSTANT (inside_init))
{
error_init ("initializer element is not constant");
inside_init = error_mark_node;
}
else if (require_constant
&& !initializer_constant_valid_p (inside_init,
TREE_TYPE (inside_init)))
{
error_init ("initializer element is not computable at load time");
inside_init = error_mark_node;
}
else if (require_constant && !maybe_const)
pedwarn_init (init_loc, 0,
"initializer element is not a constant expression");
return inside_init;
}
/* Come here only for records and arrays. */
if (COMPLETE_TYPE_P (type) && TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
{
error_init ("variable-sized object may not be initialized");
return error_mark_node;
}
error_init ("invalid initializer");
return error_mark_node;
}
/* Handle initializers that use braces. */
/* Type of object we are accumulating a constructor for.
This type is always a RECORD_TYPE, UNION_TYPE or ARRAY_TYPE. */
static tree constructor_type;
/* For a RECORD_TYPE or UNION_TYPE, this is the chain of fields
left to fill. */
static tree constructor_fields;
/* For an ARRAY_TYPE, this is the specified index
at which to store the next element we get. */
static tree constructor_index;
/* For an ARRAY_TYPE, this is the maximum index. */
static tree constructor_max_index;
/* For a RECORD_TYPE, this is the first field not yet written out. */
static tree constructor_unfilled_fields;
/* For an ARRAY_TYPE, this is the index of the first element
not yet written out. */
static tree constructor_unfilled_index;
/* In a RECORD_TYPE, the byte index of the next consecutive field.
This is so we can generate gaps between fields, when appropriate. */
static tree constructor_bit_index;
/* If we are saving up the elements rather than allocating them,
this is the list of elements so far (in reverse order,
most recent first). */
static VEC(constructor_elt,gc) *constructor_elements;
/* 1 if constructor should be incrementally stored into a constructor chain,
0 if all the elements should be kept in an AVL tree. */
static int constructor_incremental;
/* 1 if so far this constructor's elements are all compile-time constants. */
static int constructor_constant;
/* 1 if so far this constructor's elements are all valid address constants. */
static int constructor_simple;
/* 1 if this constructor has an element that cannot be part of a
constant expression. */
static int constructor_nonconst;
/* 1 if this constructor is erroneous so far. */
static int constructor_erroneous;
/* Structure for managing pending initializer elements, organized as an
AVL tree. */
struct init_node
{
struct init_node *left, *right;
struct init_node *parent;
int balance;
tree purpose;
tree value;
tree origtype;
};
/* Tree of pending elements at this constructor level.
These are elements encountered out of order
which belong at places we haven't reached yet in actually
writing the output.
Will never hold tree nodes across GC runs. */
static struct init_node *constructor_pending_elts;
/* The SPELLING_DEPTH of this constructor. */
static int constructor_depth;
/* DECL node for which an initializer is being read.
0 means we are reading a constructor expression
such as (struct foo) {...}. */
static tree constructor_decl;
/* Nonzero if this is an initializer for a top-level decl. */
static int constructor_top_level;
/* Nonzero if there were any member designators in this initializer. */
static int constructor_designated;
/* Nesting depth of designator list. */
static int designator_depth;
/* Nonzero if there were diagnosed errors in this designator list. */
static int designator_erroneous;
/* This stack has a level for each implicit or explicit level of
structuring in the initializer, including the outermost one. It
saves the values of most of the variables above. */
struct constructor_range_stack;
struct constructor_stack
{
struct constructor_stack *next;
tree type;
tree fields;
tree index;
tree max_index;
tree unfilled_index;
tree unfilled_fields;
tree bit_index;
VEC(constructor_elt,gc) *elements;
struct init_node *pending_elts;
int offset;
int depth;
/* If value nonzero, this value should replace the entire
constructor at this level. */
struct c_expr replacement_value;
struct constructor_range_stack *range_stack;
char constant;
char simple;
char nonconst;
char implicit;
char erroneous;
char outer;
char incremental;
char designated;
};
static struct constructor_stack *constructor_stack;
/* This stack represents designators from some range designator up to
the last designator in the list. */
struct constructor_range_stack
{
struct constructor_range_stack *next, *prev;
struct constructor_stack *stack;
tree range_start;
tree index;
tree range_end;
tree fields;
};
static struct constructor_range_stack *constructor_range_stack;
/* This stack records separate initializers that are nested.
Nested initializers can't happen in ANSI C, but GNU C allows them
in cases like { ... (struct foo) { ... } ... }. */
struct initializer_stack
{
struct initializer_stack *next;
tree decl;
struct constructor_stack *constructor_stack;
struct constructor_range_stack *constructor_range_stack;
VEC(constructor_elt,gc) *elements;
struct spelling *spelling;
struct spelling *spelling_base;
int spelling_size;
char top_level;
char require_constant_value;
char require_constant_elements;
};
static struct initializer_stack *initializer_stack;
/* Prepare to parse and output the initializer for variable DECL. */
void
start_init (tree decl, tree asmspec_tree ATTRIBUTE_UNUSED, int top_level)
{
const char *locus;
struct initializer_stack *p = XNEW (struct initializer_stack);
p->decl = constructor_decl;
p->require_constant_value = require_constant_value;
p->require_constant_elements = require_constant_elements;
p->constructor_stack = constructor_stack;
p->constructor_range_stack = constructor_range_stack;
p->elements = constructor_elements;
p->spelling = spelling;
p->spelling_base = spelling_base;
p->spelling_size = spelling_size;
p->top_level = constructor_top_level;
p->next = initializer_stack;
initializer_stack = p;
constructor_decl = decl;
constructor_designated = 0;
constructor_top_level = top_level;
if (decl != 0 && decl != error_mark_node)
{
require_constant_value = TREE_STATIC (decl);
require_constant_elements
= ((TREE_STATIC (decl) || (pedantic && !flag_isoc99))
/* For a scalar, you can always use any value to initialize,
even within braces. */
&& (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
|| TREE_CODE (TREE_TYPE (decl)) == RECORD_TYPE
|| TREE_CODE (TREE_TYPE (decl)) == UNION_TYPE
|| TREE_CODE (TREE_TYPE (decl)) == QUAL_UNION_TYPE));
locus = identifier_to_locale (IDENTIFIER_POINTER (DECL_NAME (decl)));
}
else
{
require_constant_value = 0;
require_constant_elements = 0;
locus = _("(anonymous)");
}
constructor_stack = 0;
constructor_range_stack = 0;
missing_braces_mentioned = 0;
spelling_base = 0;
spelling_size = 0;
RESTORE_SPELLING_DEPTH (0);
if (locus)
push_string (locus);
}
void
finish_init (void)
{
struct initializer_stack *p = initializer_stack;
/* Free the whole constructor stack of this initializer. */
while (constructor_stack)
{
struct constructor_stack *q = constructor_stack;
constructor_stack = q->next;
free (q);
}
gcc_assert (!constructor_range_stack);
/* Pop back to the data of the outer initializer (if any). */
free (spelling_base);
constructor_decl = p->decl;
require_constant_value = p->require_constant_value;
require_constant_elements = p->require_constant_elements;
constructor_stack = p->constructor_stack;
constructor_range_stack = p->constructor_range_stack;
constructor_elements = p->elements;
spelling = p->spelling;
spelling_base = p->spelling_base;
spelling_size = p->spelling_size;
constructor_top_level = p->top_level;
initializer_stack = p->next;
free (p);
}
/* Call here when we see the initializer is surrounded by braces.
This is instead of a call to push_init_level;
it is matched by a call to pop_init_level.
TYPE is the type to initialize, for a constructor expression.
For an initializer for a decl, TYPE is zero. */
void
really_start_incremental_init (tree type)
{
struct constructor_stack *p = XNEW (struct constructor_stack);
if (type == 0)
type = TREE_TYPE (constructor_decl);
if (TREE_CODE (type) == VECTOR_TYPE
&& TYPE_VECTOR_OPAQUE (type))
error ("opaque vector types cannot be initialized");
p->type = constructor_type;
p->fields = constructor_fields;
p->index = constructor_index;
p->max_index = constructor_max_index;
p->unfilled_index = constructor_unfilled_index;
p->unfilled_fields = constructor_unfilled_fields;
p->bit_index = constructor_bit_index;
p->elements = constructor_elements;
p->constant = constructor_constant;
p->simple = constructor_simple;
p->nonconst = constructor_nonconst;
p->erroneous = constructor_erroneous;
p->pending_elts = constructor_pending_elts;
p->depth = constructor_depth;
p->replacement_value.value = 0;
p->replacement_value.original_code = ERROR_MARK;
p->replacement_value.original_type = NULL;
p->implicit = 0;
p->range_stack = 0;
p->outer = 0;
p->incremental = constructor_incremental;
p->designated = constructor_designated;
p->next = 0;
constructor_stack = p;
constructor_constant = 1;
constructor_simple = 1;
constructor_nonconst = 0;
constructor_depth = SPELLING_DEPTH ();
constructor_elements = 0;
constructor_pending_elts = 0;
constructor_type = type;
constructor_incremental = 1;
constructor_designated = 0;
designator_depth = 0;
designator_erroneous = 0;
if (TREE_CODE (constructor_type) == RECORD_TYPE
|| TREE_CODE (constructor_type) == UNION_TYPE)
{
constructor_fields = TYPE_FIELDS (constructor_type);
/* Skip any nameless bit fields at the beginning. */
while (constructor_fields != 0 && DECL_C_BIT_FIELD (constructor_fields)
&& DECL_NAME (constructor_fields) == 0)
constructor_fields = DECL_CHAIN (constructor_fields);
constructor_unfilled_fields = constructor_fields;
constructor_bit_index = bitsize_zero_node;
}
else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
{
if (TYPE_DOMAIN (constructor_type))
{
constructor_max_index
= TYPE_MAX_VALUE (TYPE_DOMAIN (constructor_type));
/* Detect non-empty initializations of zero-length arrays. */
if (constructor_max_index == NULL_TREE
&& TYPE_SIZE (constructor_type))
constructor_max_index = integer_minus_one_node;
/* constructor_max_index needs to be an INTEGER_CST. Attempts
to initialize VLAs will cause a proper error; avoid tree
checking errors as well by setting a safe value. */
if (constructor_max_index
&& TREE_CODE (constructor_max_index) != INTEGER_CST)
constructor_max_index = integer_minus_one_node;
constructor_index
= convert (bitsizetype,
TYPE_MIN_VALUE (TYPE_DOMAIN (constructor_type)));
}
else
{
constructor_index = bitsize_zero_node;
constructor_max_index = NULL_TREE;
}
constructor_unfilled_index = constructor_index;
}
else if (TREE_CODE (constructor_type) == VECTOR_TYPE)
{
/* Vectors are like simple fixed-size arrays. */
constructor_max_index =
bitsize_int (TYPE_VECTOR_SUBPARTS (constructor_type) - 1);
constructor_index = bitsize_zero_node;
constructor_unfilled_index = constructor_index;
}
else
{
/* Handle the case of int x = {5}; */
constructor_fields = constructor_type;
constructor_unfilled_fields = constructor_type;
}
}
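/* Only as an illustration (the real call sequence lives in the C parser,
   and this sketch is a reconstruction, not a quotation of it), a braced
   initializer for a declaration such as

       struct pair { int a, b; };
       struct pair p = { 1, 2 };

   is expected to be driven roughly as

       start_init (<decl for p>, NULL_TREE, top_level);
       really_start_incremental_init (NULL_TREE);
       ... process_init_element () once per element ...
       <initializer> = pop_init_level (0, <obstack>).value;
       finish_init ();

   with push_init_level / pop_init_level used for any nested braces.  */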
/* Push down into a subobject, for initialization.
If this is for an explicit set of braces, IMPLICIT is 0.
If it is because the next element belongs at a lower level,
   IMPLICIT is 1 (or 2 if the push is because of a designator list).  */
void
push_init_level (int implicit, struct obstack * braced_init_obstack)
{
struct constructor_stack *p;
tree value = NULL_TREE;
/* If we've exhausted any levels that didn't have braces,
pop them now. If implicit == 1, this will have been done in
process_init_element; do not repeat it here because in the case
of excess initializers for an empty aggregate this leads to an
infinite cycle of popping a level and immediately recreating
it. */
if (implicit != 1)
{
while (constructor_stack->implicit)
{
if ((TREE_CODE (constructor_type) == RECORD_TYPE
|| TREE_CODE (constructor_type) == UNION_TYPE)
&& constructor_fields == 0)
process_init_element (pop_init_level (1, braced_init_obstack),
true, braced_init_obstack);
else if (TREE_CODE (constructor_type) == ARRAY_TYPE
&& constructor_max_index
&& tree_int_cst_lt (constructor_max_index,
constructor_index))
process_init_element (pop_init_level (1, braced_init_obstack),
true, braced_init_obstack);
else
break;
}
}
/* Unless this is an explicit brace, we need to preserve previous
content if any. */
if (implicit)
{
if ((TREE_CODE (constructor_type) == RECORD_TYPE
|| TREE_CODE (constructor_type) == UNION_TYPE)
&& constructor_fields)
value = find_init_member (constructor_fields, braced_init_obstack);
else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
value = find_init_member (constructor_index, braced_init_obstack);
}
p = XNEW (struct constructor_stack);
p->type = constructor_type;
p->fields = constructor_fields;
p->index = constructor_index;
p->max_index = constructor_max_index;
p->unfilled_index = constructor_unfilled_index;
p->unfilled_fields = constructor_unfilled_fields;
p->bit_index = constructor_bit_index;
p->elements = constructor_elements;
p->constant = constructor_constant;
p->simple = constructor_simple;
p->nonconst = constructor_nonconst;
p->erroneous = constructor_erroneous;
p->pending_elts = constructor_pending_elts;
p->depth = constructor_depth;
p->replacement_value.value = 0;
p->replacement_value.original_code = ERROR_MARK;
p->replacement_value.original_type = NULL;
p->implicit = implicit;
p->outer = 0;
p->incremental = constructor_incremental;
p->designated = constructor_designated;
p->next = constructor_stack;
p->range_stack = 0;
constructor_stack = p;
constructor_constant = 1;
constructor_simple = 1;
constructor_nonconst = 0;
constructor_depth = SPELLING_DEPTH ();
constructor_elements = 0;
constructor_incremental = 1;
constructor_designated = 0;
constructor_pending_elts = 0;
if (!implicit)
{
p->range_stack = constructor_range_stack;
constructor_range_stack = 0;
designator_depth = 0;
designator_erroneous = 0;
}
/* Don't die if an entire brace-pair level is superfluous
in the containing level. */
if (constructor_type == 0)
;
else if (TREE_CODE (constructor_type) == RECORD_TYPE
|| TREE_CODE (constructor_type) == UNION_TYPE)
{
/* Don't die if there are extra init elts at the end. */
if (constructor_fields == 0)
constructor_type = 0;
else
{
constructor_type = TREE_TYPE (constructor_fields);
push_member_name (constructor_fields);
constructor_depth++;
}
}
else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
{
constructor_type = TREE_TYPE (constructor_type);
push_array_bounds (tree_low_cst (constructor_index, 1));
constructor_depth++;
}
if (constructor_type == 0)
{
error_init ("extra brace group at end of initializer");
constructor_fields = 0;
constructor_unfilled_fields = 0;
return;
}
if (value && TREE_CODE (value) == CONSTRUCTOR)
{
constructor_constant = TREE_CONSTANT (value);
constructor_simple = TREE_STATIC (value);
constructor_nonconst = CONSTRUCTOR_NON_CONST (value);
constructor_elements = CONSTRUCTOR_ELTS (value);
if (!VEC_empty (constructor_elt, constructor_elements)
&& (TREE_CODE (constructor_type) == RECORD_TYPE
|| TREE_CODE (constructor_type) == ARRAY_TYPE))
set_nonincremental_init (braced_init_obstack);
}
if (implicit == 1 && warn_missing_braces && !missing_braces_mentioned)
{
missing_braces_mentioned = 1;
warning_init (OPT_Wmissing_braces, "missing braces around initializer");
}
if (TREE_CODE (constructor_type) == RECORD_TYPE
|| TREE_CODE (constructor_type) == UNION_TYPE)
{
constructor_fields = TYPE_FIELDS (constructor_type);
/* Skip any nameless bit fields at the beginning. */
while (constructor_fields != 0 && DECL_C_BIT_FIELD (constructor_fields)
&& DECL_NAME (constructor_fields) == 0)
constructor_fields = DECL_CHAIN (constructor_fields);
constructor_unfilled_fields = constructor_fields;
constructor_bit_index = bitsize_zero_node;
}
else if (TREE_CODE (constructor_type) == VECTOR_TYPE)
{
/* Vectors are like simple fixed-size arrays. */
constructor_max_index =
bitsize_int (TYPE_VECTOR_SUBPARTS (constructor_type) - 1);
constructor_index = bitsize_int (0);
constructor_unfilled_index = constructor_index;
}
else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
{
if (TYPE_DOMAIN (constructor_type))
{
constructor_max_index
= TYPE_MAX_VALUE (TYPE_DOMAIN (constructor_type));
/* Detect non-empty initializations of zero-length arrays. */
if (constructor_max_index == NULL_TREE
&& TYPE_SIZE (constructor_type))
constructor_max_index = integer_minus_one_node;
/* constructor_max_index needs to be an INTEGER_CST. Attempts
to initialize VLAs will cause a proper error; avoid tree
checking errors as well by setting a safe value. */
if (constructor_max_index
&& TREE_CODE (constructor_max_index) != INTEGER_CST)
constructor_max_index = integer_minus_one_node;
constructor_index
= convert (bitsizetype,
TYPE_MIN_VALUE (TYPE_DOMAIN (constructor_type)));
}
else
constructor_index = bitsize_zero_node;
constructor_unfilled_index = constructor_index;
if (value && TREE_CODE (value) == STRING_CST)
{
/* We need to split the char/wchar array into individual
characters, so that we don't have to special case it
everywhere. */
set_nonincremental_init_from_string (value, braced_init_obstack);
}
}
else
{
if (constructor_type != error_mark_node)
warning_init (0, "braces around scalar initializer");
constructor_fields = constructor_type;
constructor_unfilled_fields = constructor_type;
}
}
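/* For example (arbitrary identifiers), a nested aggregate initialized
   without inner braces reaches this function with IMPLICIT == 1:

       int m[2][2] = { 1, 2, 3, 4 };

   Reading the first value pushes an implicit level for m[0]; once that row
   is full, process_init_element pops it and another implicit level is
   pushed for m[1].  With -Wmissing-braces enabled, the warning above is
   what reports the omitted inner braces (at most once per initializer).  */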
/* At the end of an implicit or explicit brace level,
finish up that level of constructor. If a single expression
with redundant braces initialized that level, return the
c_expr structure for that expression. Otherwise, the original_code
element is set to ERROR_MARK.
If we were outputting the elements as they are read, return 0 as the value
from inner levels (process_init_element ignores that),
but return error_mark_node as the value from the outermost level
(that's what we want to put in DECL_INITIAL).
Otherwise, return a CONSTRUCTOR expression as the value. */
struct c_expr
pop_init_level (int implicit, struct obstack * braced_init_obstack)
{
struct constructor_stack *p;
struct c_expr ret;
ret.value = 0;
ret.original_code = ERROR_MARK;
ret.original_type = NULL;
if (implicit == 0)
{
/* When we come to an explicit close brace,
pop any inner levels that didn't have explicit braces. */
while (constructor_stack->implicit)
{
process_init_element (pop_init_level (1, braced_init_obstack),
true, braced_init_obstack);
}
gcc_assert (!constructor_range_stack);
}
/* Now output all pending elements. */
constructor_incremental = 1;
output_pending_init_elements (1, braced_init_obstack);
p = constructor_stack;
/* Error for initializing a flexible array member, or a zero-length
array member in an inappropriate context. */
if (constructor_type && constructor_fields
&& TREE_CODE (constructor_type) == ARRAY_TYPE
&& TYPE_DOMAIN (constructor_type)
&& !TYPE_MAX_VALUE (TYPE_DOMAIN (constructor_type)))
{
/* Silently discard empty initializations. The parser will
already have pedwarned for empty brackets. */
if (integer_zerop (constructor_unfilled_index))
constructor_type = NULL_TREE;
else
{
gcc_assert (!TYPE_SIZE (constructor_type));
if (constructor_depth > 2)
error_init ("initialization of flexible array member in a nested context");
else
pedwarn_init (input_location, OPT_pedantic,
"initialization of a flexible array member");
/* We have already issued an error message for the existence
of a flexible array member not at the end of the structure.
Discard the initializer so that we do not die later. */
if (DECL_CHAIN (constructor_fields) != NULL_TREE)
constructor_type = NULL_TREE;
}
}
/* Warn when some struct elements are implicitly initialized to zero. */
if (warn_missing_field_initializers
&& constructor_type
&& TREE_CODE (constructor_type) == RECORD_TYPE
&& constructor_unfilled_fields)
{
bool constructor_zeroinit =
(VEC_length (constructor_elt, constructor_elements) == 1
&& integer_zerop
(VEC_index (constructor_elt, constructor_elements, 0)->value));
/* Do not warn for flexible array members or zero-length arrays. */
while (constructor_unfilled_fields
&& (!DECL_SIZE (constructor_unfilled_fields)
|| integer_zerop (DECL_SIZE (constructor_unfilled_fields))))
constructor_unfilled_fields = DECL_CHAIN (constructor_unfilled_fields);
if (constructor_unfilled_fields
/* Do not warn if this level of the initializer uses member
designators; it is likely to be deliberate. */
&& !constructor_designated
/* Do not warn about initializing with ` = {0}'. */
&& !constructor_zeroinit)
{
push_member_name (constructor_unfilled_fields);
warning_init (OPT_Wmissing_field_initializers,
"missing initializer");
RESTORE_SPELLING_DEPTH (constructor_depth);
}
}
/* Pad out the end of the structure. */
if (p->replacement_value.value)
/* If this closes a superfluous brace pair,
just pass out the element between them. */
ret = p->replacement_value;
else if (constructor_type == 0)
;
else if (TREE_CODE (constructor_type) != RECORD_TYPE
&& TREE_CODE (constructor_type) != UNION_TYPE
&& TREE_CODE (constructor_type) != ARRAY_TYPE
&& TREE_CODE (constructor_type) != VECTOR_TYPE)
{
/* A nonincremental scalar initializer--just return
the element, after verifying there is just one. */
if (VEC_empty (constructor_elt,constructor_elements))
{
if (!constructor_erroneous)
error_init ("empty scalar initializer");
ret.value = error_mark_node;
}
else if (VEC_length (constructor_elt,constructor_elements) != 1)
{
error_init ("extra elements in scalar initializer");
ret.value = VEC_index (constructor_elt,constructor_elements,0)->value;
}
else
ret.value = VEC_index (constructor_elt,constructor_elements,0)->value;
}
else
{
if (constructor_erroneous)
ret.value = error_mark_node;
else
{
ret.value = build_constructor (constructor_type,
constructor_elements);
if (constructor_constant)
TREE_CONSTANT (ret.value) = 1;
if (constructor_constant && constructor_simple)
TREE_STATIC (ret.value) = 1;
if (constructor_nonconst)
CONSTRUCTOR_NON_CONST (ret.value) = 1;
}
}
if (ret.value && TREE_CODE (ret.value) != CONSTRUCTOR)
{
if (constructor_nonconst)
ret.original_code = C_MAYBE_CONST_EXPR;
else if (ret.original_code == C_MAYBE_CONST_EXPR)
ret.original_code = ERROR_MARK;
}
constructor_type = p->type;
constructor_fields = p->fields;
constructor_index = p->index;
constructor_max_index = p->max_index;
constructor_unfilled_index = p->unfilled_index;
constructor_unfilled_fields = p->unfilled_fields;
constructor_bit_index = p->bit_index;
constructor_elements = p->elements;
constructor_constant = p->constant;
constructor_simple = p->simple;
constructor_nonconst = p->nonconst;
constructor_erroneous = p->erroneous;
constructor_incremental = p->incremental;
constructor_designated = p->designated;
constructor_pending_elts = p->pending_elts;
constructor_depth = p->depth;
if (!p->implicit)
constructor_range_stack = p->range_stack;
RESTORE_SPELLING_DEPTH (constructor_depth);
constructor_stack = p->next;
free (p);
if (ret.value == 0 && constructor_stack == 0)
ret.value = error_mark_node;
return ret;
}
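/* Small examples of the diagnostics issued above (sketches with arbitrary
   identifiers):

       struct fam { int n; int tail[]; };
       struct fam f = { 1, { 2, 3 } };    initializing the flexible array
                                          member draws a pedwarn, or an
                                          error in a nested context

       struct rgb { int r, g, b; };
       struct rgb c1 = { 1 };             -Wmissing-field-initializers
                                          warns that g and b are left to
                                          implicit zero
       struct rgb c2 = { 0 };             no warning for the " = {0}"
                                          idiom  */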
/* Common handling for both array range and field name designators.
ARRAY argument is nonzero for array ranges. Returns zero for success. */
static int
set_designator (int array, struct obstack * braced_init_obstack)
{
tree subtype;
enum tree_code subcode;
/* Don't die if an entire brace-pair level is superfluous
in the containing level. */
if (constructor_type == 0)
return 1;
/* If there were errors in this designator list already, bail out
silently. */
if (designator_erroneous)
return 1;
if (!designator_depth)
{
gcc_assert (!constructor_range_stack);
/* Designator list starts at the level of closest explicit
braces. */
while (constructor_stack->implicit)
{
process_init_element (pop_init_level (1, braced_init_obstack),
true, braced_init_obstack);
}
constructor_designated = 1;
return 0;
}
switch (TREE_CODE (constructor_type))
{
case RECORD_TYPE:
case UNION_TYPE:
subtype = TREE_TYPE (constructor_fields);
if (subtype != error_mark_node)
subtype = TYPE_MAIN_VARIANT (subtype);
break;
case ARRAY_TYPE:
subtype = TYPE_MAIN_VARIANT (TREE_TYPE (constructor_type));
break;
default:
gcc_unreachable ();
}
subcode = TREE_CODE (subtype);
if (array && subcode != ARRAY_TYPE)
{
error_init ("array index in non-array initializer");
return 1;
}
else if (!array && subcode != RECORD_TYPE && subcode != UNION_TYPE)
{
error_init ("field name not in record or union initializer");
return 1;
}
constructor_designated = 1;
push_init_level (2, braced_init_obstack);
return 0;
}
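/* For example (arbitrary identifiers), each component of a designator
   list passes through set_designator:

       struct inner { int x[4]; };
       struct outer { struct inner in; };
       struct outer o = { .in.x[2] = 5 };

   ".in" is handled at the level of the explicit braces; ".x" and "[2]"
   each push an extra implicit level (IMPLICIT == 2 in push_init_level) so
   that the value ends up in the designated subobject.  */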
/* If there are range designators in designator list, push a new designator
to constructor_range_stack. RANGE_END is end of such stack range or
NULL_TREE if there is no range designator at this level. */
static void
push_range_stack (tree range_end, struct obstack * braced_init_obstack)
{
struct constructor_range_stack *p;
p = (struct constructor_range_stack *)
obstack_alloc (braced_init_obstack,
sizeof (struct constructor_range_stack));
p->prev = constructor_range_stack;
p->next = 0;
p->fields = constructor_fields;
p->range_start = constructor_index;
p->index = constructor_index;
p->stack = constructor_stack;
p->range_end = range_end;
if (constructor_range_stack)
constructor_range_stack->next = p;
constructor_range_stack = p;
}
/* Within an array initializer, specify the next index to be initialized.
FIRST is that index. If LAST is nonzero, then initialize a range
of indices, running from FIRST through LAST. */
void
set_init_index (tree first, tree last,
struct obstack * braced_init_obstack)
{
if (set_designator (1, braced_init_obstack))
return;
designator_erroneous = 1;
if (!INTEGRAL_TYPE_P (TREE_TYPE (first))
|| (last && !INTEGRAL_TYPE_P (TREE_TYPE (last))))
{
error_init ("array index in initializer not of integer type");
return;
}
if (TREE_CODE (first) != INTEGER_CST)
{
first = c_fully_fold (first, false, NULL);
if (TREE_CODE (first) == INTEGER_CST)
pedwarn_init (input_location, OPT_pedantic,
"array index in initializer is not "
"an integer constant expression");
}
if (last && TREE_CODE (last) != INTEGER_CST)
{
last = c_fully_fold (last, false, NULL);
if (TREE_CODE (last) == INTEGER_CST)
pedwarn_init (input_location, OPT_pedantic,
"array index in initializer is not "
"an integer constant expression");
}
if (TREE_CODE (first) != INTEGER_CST)
error_init ("nonconstant array index in initializer");
else if (last != 0 && TREE_CODE (last) != INTEGER_CST)
error_init ("nonconstant array index in initializer");
else if (TREE_CODE (constructor_type) != ARRAY_TYPE)
error_init ("array index in non-array initializer");
else if (tree_int_cst_sgn (first) == -1)
error_init ("array index in initializer exceeds array bounds");
else if (constructor_max_index
&& tree_int_cst_lt (constructor_max_index, first))
error_init ("array index in initializer exceeds array bounds");
else
{
constant_expression_warning (first);
if (last)
constant_expression_warning (last);
constructor_index = convert (bitsizetype, first);
if (last)
{
if (tree_int_cst_equal (first, last))
last = 0;
else if (tree_int_cst_lt (last, first))
{
error_init ("empty index range in initializer");
last = 0;
}
else
{
last = convert (bitsizetype, last);
if (constructor_max_index != 0
&& tree_int_cst_lt (constructor_max_index, last))
{
error_init ("array index range in initializer exceeds array bounds");
last = 0;
}
}
}
designator_depth++;
designator_erroneous = 0;
if (constructor_range_stack || last)
push_range_stack (last, braced_init_obstack);
}
}
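/* For example (arbitrary identifiers):

       int a[10] = { [3] = 1, [6 ... 8] = 2 };

   Both "[3]" and "[6 ... 8]" come through set_init_index.  The range form
   also records the bounds on constructor_range_stack via push_range_stack
   so that the value 2 is repeated for indices 6, 7 and 8; negative and
   out-of-bounds indices are rejected above.  */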
/* Within a struct initializer, specify the next field to be initialized. */
void
set_init_label (tree fieldname, struct obstack * braced_init_obstack)
{
tree field;
if (set_designator (0, braced_init_obstack))
return;
designator_erroneous = 1;
if (TREE_CODE (constructor_type) != RECORD_TYPE
&& TREE_CODE (constructor_type) != UNION_TYPE)
{
error_init ("field name not in record or union initializer");
return;
}
field = lookup_field (constructor_type, fieldname);
if (field == 0)
error ("unknown field %qE specified in initializer", fieldname);
else
do
{
constructor_fields = TREE_VALUE (field);
designator_depth++;
designator_erroneous = 0;
if (constructor_range_stack)
push_range_stack (NULL_TREE, braced_init_obstack);
field = TREE_CHAIN (field);
if (field)
{
if (set_designator (0, braced_init_obstack))
return;
}
}
while (field != NULL_TREE);
}
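/* For example (a sketch relying on the anonymous struct/union extension;
   identifiers are arbitrary), lookup_field can return a chain of fields
   when the named member sits inside an unnamed member, and the loop above
   applies one designator per link:

       struct s { union { int i; float f; }; int tag; };
       struct s v = { .i = 3 };

   Here ".i" first designates the unnamed union member and then the "i"
   member inside it.  */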
/* Add a new initializer to the tree of pending initializers. PURPOSE
identifies the initializer, either array index or field in a structure.
VALUE is the value of that index or field. If ORIGTYPE is not
NULL_TREE, it is the original type of VALUE.
   IMPLICIT is true if VALUE comes from pop_init_level (1), i.e. the new
   initializer has already been merged with the existing one, so no
   warnings should be emitted about overriding an existing initializer.  */
static void
add_pending_init (tree purpose, tree value, tree origtype, bool implicit,
struct obstack * braced_init_obstack)
{
struct init_node *p, **q, *r;
q = &constructor_pending_elts;
p = 0;
if (TREE_CODE (constructor_type) == ARRAY_TYPE)
{
while (*q != 0)
{
p = *q;
if (tree_int_cst_lt (purpose, p->purpose))
q = &p->left;
else if (tree_int_cst_lt (p->purpose, purpose))
q = &p->right;
else
{
if (!implicit)
{
if (TREE_SIDE_EFFECTS (p->value))
warning_init (0, "initialized field with side-effects overwritten");
else if (warn_override_init)
warning_init (OPT_Woverride_init, "initialized field overwritten");
}
p->value = value;
p->origtype = origtype;
return;
}
}
}
else
{
tree bitpos;
bitpos = bit_position (purpose);
while (*q != NULL)
{
p = *q;
if (tree_int_cst_lt (bitpos, bit_position (p->purpose)))
q = &p->left;
else if (p->purpose != purpose)
q = &p->right;
else
{
if (!implicit)
{
if (TREE_SIDE_EFFECTS (p->value))
warning_init (0, "initialized field with side-effects overwritten");
else if (warn_override_init)
warning_init (OPT_Woverride_init, "initialized field overwritten");
}
p->value = value;
p->origtype = origtype;
return;
}
}
}
r = (struct init_node *) obstack_alloc (braced_init_obstack,
sizeof (struct init_node));
r->purpose = purpose;
r->value = value;
r->origtype = origtype;
*q = r;
r->parent = p;
r->left = 0;
r->right = 0;
r->balance = 0;
while (p)
{
struct init_node *s;
if (r == p->left)
{
if (p->balance == 0)
p->balance = -1;
else if (p->balance < 0)
{
if (r->balance < 0)
{
/* L rotation. */
p->left = r->right;
if (p->left)
p->left->parent = p;
r->right = p;
p->balance = 0;
r->balance = 0;
s = p->parent;
p->parent = r;
r->parent = s;
if (s)
{
if (s->left == p)
s->left = r;
else
s->right = r;
}
else
constructor_pending_elts = r;
}
else
{
/* LR rotation. */
struct init_node *t = r->right;
r->right = t->left;
if (r->right)
r->right->parent = r;
t->left = r;
p->left = t->right;
if (p->left)
p->left->parent = p;
t->right = p;
p->balance = t->balance < 0;
r->balance = -(t->balance > 0);
t->balance = 0;
s = p->parent;
p->parent = t;
r->parent = t;
t->parent = s;
if (s)
{
if (s->left == p)
s->left = t;
else
s->right = t;
}
else
constructor_pending_elts = t;
}
break;
}
else
{
/* p->balance == +1; growth of left side balances the node. */
p->balance = 0;
break;
}
}
else /* r == p->right */
{
if (p->balance == 0)
/* Growth propagation from right side. */
p->balance++;
else if (p->balance > 0)
{
if (r->balance > 0)
{
/* R rotation. */
p->right = r->left;
if (p->right)
p->right->parent = p;
r->left = p;
p->balance = 0;
r->balance = 0;
s = p->parent;
p->parent = r;
r->parent = s;
if (s)
{
if (s->left == p)
s->left = r;
else
s->right = r;
}
else
constructor_pending_elts = r;
}
else /* r->balance == -1 */
{
/* RL rotation */
struct init_node *t = r->left;
r->left = t->right;
if (r->left)
r->left->parent = r;
t->right = r;
p->right = t->left;
if (p->right)
p->right->parent = p;
t->left = p;
r->balance = (t->balance < 0);
p->balance = -(t->balance > 0);
t->balance = 0;
s = p->parent;
p->parent = t;
r->parent = t;
t->parent = s;
if (s)
{
if (s->left == p)
s->left = t;
else
s->right = t;
}
else
constructor_pending_elts = t;
}
break;
}
else
{
/* p->balance == -1; growth of right side balances the node. */
p->balance = 0;
break;
}
}
r = p;
p = p->parent;
}
}
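/* The balanced-tree insertion above only matters when initializers arrive
   out of order, e.g. (arbitrary identifiers):

       int a[5] = { [4] = 40, [1] = 10, [3] = 30 };

   Each element is keyed by its array index (or by bit position for a
   struct field) and kept in constructor_pending_elts; the tree is later
   walked in order by output_pending_init_elements.  */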
/* Build AVL tree from a sorted chain. */
static void
set_nonincremental_init (struct obstack * braced_init_obstack)
{
unsigned HOST_WIDE_INT ix;
tree index, value;
if (TREE_CODE (constructor_type) != RECORD_TYPE
&& TREE_CODE (constructor_type) != ARRAY_TYPE)
return;
FOR_EACH_CONSTRUCTOR_ELT (constructor_elements, ix, index, value)
{
add_pending_init (index, value, NULL_TREE, false,
braced_init_obstack);
}
constructor_elements = 0;
if (TREE_CODE (constructor_type) == RECORD_TYPE)
{
constructor_unfilled_fields = TYPE_FIELDS (constructor_type);
/* Skip any nameless bit fields at the beginning. */
while (constructor_unfilled_fields != 0
&& DECL_C_BIT_FIELD (constructor_unfilled_fields)
&& DECL_NAME (constructor_unfilled_fields) == 0)
constructor_unfilled_fields = TREE_CHAIN (constructor_unfilled_fields);
}
else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
{
if (TYPE_DOMAIN (constructor_type))
constructor_unfilled_index
= convert (bitsizetype,
TYPE_MIN_VALUE (TYPE_DOMAIN (constructor_type)));
else
constructor_unfilled_index = bitsize_zero_node;
}
constructor_incremental = 0;
}
/* Build AVL tree from a string constant. */
static void
set_nonincremental_init_from_string (tree str,
struct obstack * braced_init_obstack)
{
tree value, purpose, type;
HOST_WIDE_INT val[2];
const char *p, *end;
int byte, wchar_bytes, charwidth, bitpos;
gcc_assert (TREE_CODE (constructor_type) == ARRAY_TYPE);
wchar_bytes = TYPE_PRECISION (TREE_TYPE (TREE_TYPE (str))) / BITS_PER_UNIT;
charwidth = TYPE_PRECISION (char_type_node);
type = TREE_TYPE (constructor_type);
p = TREE_STRING_POINTER (str);
end = p + TREE_STRING_LENGTH (str);
for (purpose = bitsize_zero_node;
p < end && !tree_int_cst_lt (constructor_max_index, purpose);
purpose = size_binop (PLUS_EXPR, purpose, bitsize_one_node))
{
if (wchar_bytes == 1)
{
val[1] = (unsigned char) *p++;
val[0] = 0;
}
else
{
val[0] = 0;
val[1] = 0;
for (byte = 0; byte < wchar_bytes; byte++)
{
if (BYTES_BIG_ENDIAN)
bitpos = (wchar_bytes - byte - 1) * charwidth;
else
bitpos = byte * charwidth;
val[bitpos < HOST_BITS_PER_WIDE_INT]
|= ((unsigned HOST_WIDE_INT) ((unsigned char) *p++))
<< (bitpos % HOST_BITS_PER_WIDE_INT);
}
}
if (!TYPE_UNSIGNED (type))
{
bitpos = ((wchar_bytes - 1) * charwidth) + HOST_BITS_PER_CHAR;
if (bitpos < HOST_BITS_PER_WIDE_INT)
{
if (val[1] & (((HOST_WIDE_INT) 1) << (bitpos - 1)))
{
val[1] |= ((HOST_WIDE_INT) -1) << bitpos;
val[0] = -1;
}
}
else if (bitpos == HOST_BITS_PER_WIDE_INT)
{
if (val[1] < 0)
val[0] = -1;
}
else if (val[0] & (((HOST_WIDE_INT) 1)
<< (bitpos - 1 - HOST_BITS_PER_WIDE_INT)))
val[0] |= ((HOST_WIDE_INT) -1)
<< (bitpos - HOST_BITS_PER_WIDE_INT);
}
value = build_int_cst_wide (type, val[1], val[0]);
add_pending_init (purpose, value, NULL_TREE, false,
braced_init_obstack);
}
constructor_incremental = 0;
}
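/* For example (a sketch with arbitrary identifiers), splitting a string
   is needed when a member initialized with a string constant is later
   entered again through a designator:

       struct s { char name[8]; int n; };
       struct s v = { .name = "hello", .name[1] = 'E' };

   Pushing back into "name" finds the pending STRING_CST, and this
   function turns it into one pending INTEGER_CST per character so that
   the single-character override can be merged in.  */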
/* Return value of FIELD in pending initializer or zero if the field was
not initialized yet. */
static tree
find_init_member (tree field, struct obstack * braced_init_obstack)
{
struct init_node *p;
if (TREE_CODE (constructor_type) == ARRAY_TYPE)
{
if (constructor_incremental
&& tree_int_cst_lt (field, constructor_unfilled_index))
set_nonincremental_init (braced_init_obstack);
p = constructor_pending_elts;
while (p)
{
if (tree_int_cst_lt (field, p->purpose))
p = p->left;
else if (tree_int_cst_lt (p->purpose, field))
p = p->right;
else
return p->value;
}
}
else if (TREE_CODE (constructor_type) == RECORD_TYPE)
{
tree bitpos = bit_position (field);
if (constructor_incremental
&& (!constructor_unfilled_fields
|| tree_int_cst_lt (bitpos,
bit_position (constructor_unfilled_fields))))
set_nonincremental_init (braced_init_obstack);
p = constructor_pending_elts;
while (p)
{
if (field == p->purpose)
return p->value;
else if (tree_int_cst_lt (bitpos, bit_position (p->purpose)))
p = p->left;
else
p = p->right;
}
}
else if (TREE_CODE (constructor_type) == UNION_TYPE)
{
if (!VEC_empty (constructor_elt, constructor_elements)
&& (VEC_last (constructor_elt, constructor_elements)->index
== field))
return VEC_last (constructor_elt, constructor_elements)->value;
}
return 0;
}
/* "Output" the next constructor element.
At top level, really output it to assembler code now.
Otherwise, collect it in a list from which we will make a CONSTRUCTOR.
If ORIGTYPE is not NULL_TREE, it is the original type of VALUE.
TYPE is the data type that the containing data type wants here.
FIELD is the field (a FIELD_DECL) or the index that this element fills.
If VALUE is a string constant, STRICT_STRING is true if it is
unparenthesized or we should not warn here for it being parenthesized.
For other types of VALUE, STRICT_STRING is not used.
   PENDING if nonzero means output pending elements that belong
   right after this element.  (PENDING is normally 1;
   it is 0 while outputting pending elements, to avoid recursion.)
   IMPLICIT is true if VALUE comes from pop_init_level (1), i.e. the new
   initializer has already been merged with the existing one, so no
   warnings should be emitted about overriding an existing initializer.  */
static void
output_init_element (tree value, tree origtype, bool strict_string, tree type,
tree field, int pending, bool implicit,
struct obstack * braced_init_obstack)
{
tree semantic_type = NULL_TREE;
constructor_elt *celt;
bool maybe_const = true;
bool npc;
if (type == error_mark_node || value == error_mark_node)
{
constructor_erroneous = 1;
return;
}
if (TREE_CODE (TREE_TYPE (value)) == ARRAY_TYPE
&& (TREE_CODE (value) == STRING_CST
|| TREE_CODE (value) == COMPOUND_LITERAL_EXPR)
&& !(TREE_CODE (value) == STRING_CST
&& TREE_CODE (type) == ARRAY_TYPE
&& INTEGRAL_TYPE_P (TREE_TYPE (type)))
&& !comptypes (TYPE_MAIN_VARIANT (TREE_TYPE (value)),
TYPE_MAIN_VARIANT (type)))
value = array_to_pointer_conversion (input_location, value);
if (TREE_CODE (value) == COMPOUND_LITERAL_EXPR
&& require_constant_value && !flag_isoc99 && pending)
{
/* As an extension, allow initializing objects with static storage
duration with compound literals (which are then treated just as
the brace enclosed list they contain). */
tree decl = COMPOUND_LITERAL_EXPR_DECL (value);
value = DECL_INITIAL (decl);
}
npc = null_pointer_constant_p (value);
if (TREE_CODE (value) == EXCESS_PRECISION_EXPR)
{
semantic_type = TREE_TYPE (value);
value = TREE_OPERAND (value, 0);
}
value = c_fully_fold (value, require_constant_value, &maybe_const);
if (value == error_mark_node)
constructor_erroneous = 1;
else if (!TREE_CONSTANT (value))
constructor_constant = 0;
else if (!initializer_constant_valid_p (value, TREE_TYPE (value))
|| ((TREE_CODE (constructor_type) == RECORD_TYPE
|| TREE_CODE (constructor_type) == UNION_TYPE)
&& DECL_C_BIT_FIELD (field)
&& TREE_CODE (value) != INTEGER_CST))
constructor_simple = 0;
if (!maybe_const)
constructor_nonconst = 1;
if (!initializer_constant_valid_p (value, TREE_TYPE (value)))
{
if (require_constant_value)
{
error_init ("initializer element is not constant");
value = error_mark_node;
}
else if (require_constant_elements)
pedwarn (input_location, 0,
"initializer element is not computable at load time");
}
else if (!maybe_const
&& (require_constant_value || require_constant_elements))
pedwarn_init (input_location, 0,
"initializer element is not a constant expression");
/* Issue -Wc++-compat warnings about initializing a bitfield with
enum type. */
if (warn_cxx_compat
&& field != NULL_TREE
&& TREE_CODE (field) == FIELD_DECL
&& DECL_BIT_FIELD_TYPE (field) != NULL_TREE
&& (TYPE_MAIN_VARIANT (DECL_BIT_FIELD_TYPE (field))
!= TYPE_MAIN_VARIANT (type))
&& TREE_CODE (DECL_BIT_FIELD_TYPE (field)) == ENUMERAL_TYPE)
{
tree checktype = origtype != NULL_TREE ? origtype : TREE_TYPE (value);
if (checktype != error_mark_node
&& (TYPE_MAIN_VARIANT (checktype)
!= TYPE_MAIN_VARIANT (DECL_BIT_FIELD_TYPE (field))))
warning_init (OPT_Wc___compat,
"enum conversion in initialization is invalid in C++");
}
/* If this field is empty (and not at the end of structure),
don't do anything other than checking the initializer. */
if (field
&& (TREE_TYPE (field) == error_mark_node
|| (COMPLETE_TYPE_P (TREE_TYPE (field))
&& integer_zerop (TYPE_SIZE (TREE_TYPE (field)))
&& (TREE_CODE (constructor_type) == ARRAY_TYPE
|| DECL_CHAIN (field)))))
return;
if (semantic_type)
value = build1 (EXCESS_PRECISION_EXPR, semantic_type, value);
value = digest_init (input_location, type, value, origtype, npc,
strict_string, require_constant_value);
if (value == error_mark_node)
{
constructor_erroneous = 1;
return;
}
if (require_constant_value || require_constant_elements)
constant_expression_warning (value);
/* If this element doesn't come next in sequence,
put it on constructor_pending_elts. */
if (TREE_CODE (constructor_type) == ARRAY_TYPE
&& (!constructor_incremental
|| !tree_int_cst_equal (field, constructor_unfilled_index)))
{
if (constructor_incremental
&& tree_int_cst_lt (field, constructor_unfilled_index))
set_nonincremental_init (braced_init_obstack);
add_pending_init (field, value, origtype, implicit,
braced_init_obstack);
return;
}
else if (TREE_CODE (constructor_type) == RECORD_TYPE
&& (!constructor_incremental
|| field != constructor_unfilled_fields))
{
/* We do this for records but not for unions. In a union,
no matter which field is specified, it can be initialized
right away since it starts at the beginning of the union. */
if (constructor_incremental)
{
if (!constructor_unfilled_fields)
set_nonincremental_init (braced_init_obstack);
else
{
tree bitpos, unfillpos;
bitpos = bit_position (field);
unfillpos = bit_position (constructor_unfilled_fields);
if (tree_int_cst_lt (bitpos, unfillpos))
set_nonincremental_init (braced_init_obstack);
}
}
add_pending_init (field, value, origtype, implicit,
braced_init_obstack);
return;
}
else if (TREE_CODE (constructor_type) == UNION_TYPE
&& !VEC_empty (constructor_elt, constructor_elements))
{
if (!implicit)
{
if (TREE_SIDE_EFFECTS (VEC_last (constructor_elt,
constructor_elements)->value))
warning_init (0,
"initialized field with side-effects overwritten");
else if (warn_override_init)
warning_init (OPT_Woverride_init, "initialized field overwritten");
}
/* We can have just one union field set. */
constructor_elements = 0;
}
/* Otherwise, output this element either to
constructor_elements or to the assembler file. */
celt = VEC_safe_push (constructor_elt, gc, constructor_elements, NULL);
celt->index = field;
celt->value = value;
/* Advance the variable that indicates sequential elements output. */
if (TREE_CODE (constructor_type) == ARRAY_TYPE)
constructor_unfilled_index
= size_binop_loc (input_location, PLUS_EXPR, constructor_unfilled_index,
bitsize_one_node);
else if (TREE_CODE (constructor_type) == RECORD_TYPE)
{
constructor_unfilled_fields
= DECL_CHAIN (constructor_unfilled_fields);
/* Skip any nameless bit fields. */
while (constructor_unfilled_fields != 0
&& DECL_C_BIT_FIELD (constructor_unfilled_fields)
&& DECL_NAME (constructor_unfilled_fields) == 0)
constructor_unfilled_fields =
DECL_CHAIN (constructor_unfilled_fields);
}
else if (TREE_CODE (constructor_type) == UNION_TYPE)
constructor_unfilled_fields = 0;
/* Now output any pending elements which have become next. */
if (pending)
output_pending_init_elements (0, braced_init_obstack);
}
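/* For example (arbitrary identifiers), the -Wc++-compat check above warns
   about initializing an enum-typed bit-field from a value of a different
   type, which C accepts but C++ rejects as an implicit enum conversion:

       enum e { RED, GREEN };
       struct s { enum e kind : 2; };
       struct s v = { 1 };

   Initializing "kind" from the int constant 1 triggers "enum conversion
   in initialization is invalid in C++".  */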
/* Output any pending elements which have become next.
As we output elements, constructor_unfilled_{fields,index}
advances, which may cause other elements to become next;
if so, they too are output.
If ALL is 0, we return when there are
no more pending elements to output now.
If ALL is 1, we output space as necessary so that
we can output all the pending elements. */
static void
output_pending_init_elements (int all, struct obstack * braced_init_obstack)
{
struct init_node *elt = constructor_pending_elts;
tree next;
retry:
/* Look through the whole pending tree.
If we find an element that should be output now,
output it. Otherwise, set NEXT to the element
that comes first among those still pending. */
next = 0;
while (elt)
{
if (TREE_CODE (constructor_type) == ARRAY_TYPE)
{
if (tree_int_cst_equal (elt->purpose,
constructor_unfilled_index))
output_init_element (elt->value, elt->origtype, true,
TREE_TYPE (constructor_type),
constructor_unfilled_index, 0, false,
braced_init_obstack);
else if (tree_int_cst_lt (constructor_unfilled_index,
elt->purpose))
{
/* Advance to the next smaller node. */
if (elt->left)
elt = elt->left;
else
{
/* We have reached the smallest node bigger than the
current unfilled index. Fill the space first. */
next = elt->purpose;
break;
}
}
else
{
/* Advance to the next bigger node. */
if (elt->right)
elt = elt->right;
else
{
/* We have reached the biggest node in a subtree. Find
the parent of it, which is the next bigger node. */
while (elt->parent && elt->parent->right == elt)
elt = elt->parent;
elt = elt->parent;
if (elt && tree_int_cst_lt (constructor_unfilled_index,
elt->purpose))
{
next = elt->purpose;
break;
}
}
}
}
else if (TREE_CODE (constructor_type) == RECORD_TYPE
|| TREE_CODE (constructor_type) == UNION_TYPE)
{
tree ctor_unfilled_bitpos, elt_bitpos;
/* If the current record is complete we are done. */
if (constructor_unfilled_fields == 0)
break;
ctor_unfilled_bitpos = bit_position (constructor_unfilled_fields);
elt_bitpos = bit_position (elt->purpose);
/* We can't compare fields here because there might be empty
fields in between. */
if (tree_int_cst_equal (elt_bitpos, ctor_unfilled_bitpos))
{
constructor_unfilled_fields = elt->purpose;
output_init_element (elt->value, elt->origtype, true,
TREE_TYPE (elt->purpose),
elt->purpose, 0, false,
braced_init_obstack);
}
else if (tree_int_cst_lt (ctor_unfilled_bitpos, elt_bitpos))
{
/* Advance to the next smaller node. */
if (elt->left)
elt = elt->left;
else
{
/* We have reached the smallest node bigger than the
current unfilled field. Fill the space first. */
next = elt->purpose;
break;
}
}
else
{
/* Advance to the next bigger node. */
if (elt->right)
elt = elt->right;
else
{
/* We have reached the biggest node in a subtree. Find
the parent of it, which is the next bigger node. */
while (elt->parent && elt->parent->right == elt)
elt = elt->parent;
elt = elt->parent;
if (elt
&& (tree_int_cst_lt (ctor_unfilled_bitpos,
bit_position (elt->purpose))))
{
next = elt->purpose;
break;
}
}
}
}
}
/* Ordinarily return, but not if we want to output all
and there are elements left. */
if (!(all && next != 0))
return;
/* If it's not incremental, just skip over the gap, so that after
jumping to retry we will output the next successive element. */
if (TREE_CODE (constructor_type) == RECORD_TYPE
|| TREE_CODE (constructor_type) == UNION_TYPE)
constructor_unfilled_fields = next;
else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
constructor_unfilled_index = next;
/* ELT now points to the node in the pending tree with the next
initializer to output. */
goto retry;
}
/* Add one non-braced element to the current constructor level.
This adjusts the current position within the constructor's type.
This may also start or terminate implicit levels
to handle a partly-braced initializer.
Once this has found the correct level for the new element,
it calls output_init_element.
   IMPLICIT is true if VALUE comes from pop_init_level (1), i.e. the new
   initializer has already been merged with the existing one, so no
   warnings should be emitted about overriding an existing initializer.  */
void
process_init_element (struct c_expr value, bool implicit,
struct obstack * braced_init_obstack)
{
tree orig_value = value.value;
int string_flag = orig_value != 0 && TREE_CODE (orig_value) == STRING_CST;
bool strict_string = value.original_code == STRING_CST;
designator_depth = 0;
designator_erroneous = 0;
/* Handle superfluous braces around string cst as in
char x[] = {"foo"}; */
if (string_flag
&& constructor_type
&& TREE_CODE (constructor_type) == ARRAY_TYPE
&& INTEGRAL_TYPE_P (TREE_TYPE (constructor_type))
&& integer_zerop (constructor_unfilled_index))
{
if (constructor_stack->replacement_value.value)
error_init ("excess elements in char array initializer");
constructor_stack->replacement_value = value;
return;
}
if (constructor_stack->replacement_value.value != 0)
{
error_init ("excess elements in struct initializer");
return;
}
/* Ignore elements of a brace group if it is entirely superfluous
and has already been diagnosed. */
if (constructor_type == 0)
return;
/* If we've exhausted any levels that didn't have braces,
pop them now. */
while (constructor_stack->implicit)
{
if ((TREE_CODE (constructor_type) == RECORD_TYPE
|| TREE_CODE (constructor_type) == UNION_TYPE)
&& constructor_fields == 0)
process_init_element (pop_init_level (1, braced_init_obstack),
true, braced_init_obstack);
else if ((TREE_CODE (constructor_type) == ARRAY_TYPE
|| TREE_CODE (constructor_type) == VECTOR_TYPE)
&& (constructor_max_index == 0
|| tree_int_cst_lt (constructor_max_index,
constructor_index)))
process_init_element (pop_init_level (1, braced_init_obstack),
true, braced_init_obstack);
else
break;
}
/* In the case of [LO ... HI] = VALUE, only evaluate VALUE once. */
if (constructor_range_stack)
{
/* If value is a compound literal and we'll be just using its
content, don't put it into a SAVE_EXPR. */
if (TREE_CODE (value.value) != COMPOUND_LITERAL_EXPR
|| !require_constant_value
|| flag_isoc99)
{
tree semantic_type = NULL_TREE;
if (TREE_CODE (value.value) == EXCESS_PRECISION_EXPR)
{
semantic_type = TREE_TYPE (value.value);
value.value = TREE_OPERAND (value.value, 0);
}
value.value = c_save_expr (value.value);
if (semantic_type)
value.value = build1 (EXCESS_PRECISION_EXPR, semantic_type,
value.value);
}
}
while (1)
{
if (TREE_CODE (constructor_type) == RECORD_TYPE)
{
tree fieldtype;
enum tree_code fieldcode;
if (constructor_fields == 0)
{
pedwarn_init (input_location, 0,
"excess elements in struct initializer");
break;
}
fieldtype = TREE_TYPE (constructor_fields);
if (fieldtype != error_mark_node)
fieldtype = TYPE_MAIN_VARIANT (fieldtype);
fieldcode = TREE_CODE (fieldtype);
/* Error for non-static initialization of a flexible array member. */
if (fieldcode == ARRAY_TYPE
&& !require_constant_value
&& TYPE_SIZE (fieldtype) == NULL_TREE
&& DECL_CHAIN (constructor_fields) == NULL_TREE)
{
error_init ("non-static initialization of a flexible array member");
break;
}
/* Accept a string constant to initialize a subarray. */
if (value.value != 0
&& fieldcode == ARRAY_TYPE
&& INTEGRAL_TYPE_P (TREE_TYPE (fieldtype))
&& string_flag)
value.value = orig_value;
/* Otherwise, if we have come to a subaggregate,
and we don't have an element of its type, push into it. */
else if (value.value != 0
&& value.value != error_mark_node
&& TYPE_MAIN_VARIANT (TREE_TYPE (value.value)) != fieldtype
&& (fieldcode == RECORD_TYPE || fieldcode == ARRAY_TYPE
|| fieldcode == UNION_TYPE || fieldcode == VECTOR_TYPE))
{
push_init_level (1, braced_init_obstack);
continue;
}
if (value.value)
{
push_member_name (constructor_fields);
output_init_element (value.value, value.original_type,
strict_string, fieldtype,
constructor_fields, 1, implicit,
braced_init_obstack);
RESTORE_SPELLING_DEPTH (constructor_depth);
}
else
/* Do the bookkeeping for an element that was
directly output as a constructor. */
{
/* For a record, keep track of end position of last field. */
if (DECL_SIZE (constructor_fields))
constructor_bit_index
= size_binop_loc (input_location, PLUS_EXPR,
bit_position (constructor_fields),
DECL_SIZE (constructor_fields));
/* If the current field was the first one not yet written out,
it isn't now, so update. */
if (constructor_unfilled_fields == constructor_fields)
{
constructor_unfilled_fields = DECL_CHAIN (constructor_fields);
/* Skip any nameless bit fields. */
while (constructor_unfilled_fields != 0
&& DECL_C_BIT_FIELD (constructor_unfilled_fields)
&& DECL_NAME (constructor_unfilled_fields) == 0)
constructor_unfilled_fields =
DECL_CHAIN (constructor_unfilled_fields);
}
}
constructor_fields = DECL_CHAIN (constructor_fields);
/* Skip any nameless bit fields at the beginning. */
while (constructor_fields != 0
&& DECL_C_BIT_FIELD (constructor_fields)
&& DECL_NAME (constructor_fields) == 0)
constructor_fields = DECL_CHAIN (constructor_fields);
}
else if (TREE_CODE (constructor_type) == UNION_TYPE)
{
tree fieldtype;
enum tree_code fieldcode;
if (constructor_fields == 0)
{
pedwarn_init (input_location, 0,
"excess elements in union initializer");
break;
}
fieldtype = TREE_TYPE (constructor_fields);
if (fieldtype != error_mark_node)
fieldtype = TYPE_MAIN_VARIANT (fieldtype);
fieldcode = TREE_CODE (fieldtype);
/* Warn that traditional C rejects initialization of unions.
We skip the warning if the value is zero. This is done
under the assumption that the zero initializer in user
code appears conditioned on e.g. __STDC__ to avoid
"missing initializer" warnings and relies on default
initialization to zero in the traditional C case.
We also skip the warning if the initializer is designated,
again on the assumption that this must be conditional on
     __STDC__ anyway (and we have already complained about the
     member-designator).  */
if (!in_system_header && !constructor_designated
&& !(value.value && (integer_zerop (value.value)
|| real_zerop (value.value))))
warning (OPT_Wtraditional, "traditional C rejects initialization "
"of unions");
/* Accept a string constant to initialize a subarray. */
if (value.value != 0
&& fieldcode == ARRAY_TYPE
&& INTEGRAL_TYPE_P (TREE_TYPE (fieldtype))
&& string_flag)
value.value = orig_value;
/* Otherwise, if we have come to a subaggregate,
and we don't have an element of its type, push into it. */
else if (value.value != 0
&& value.value != error_mark_node
&& TYPE_MAIN_VARIANT (TREE_TYPE (value.value)) != fieldtype
&& (fieldcode == RECORD_TYPE || fieldcode == ARRAY_TYPE
|| fieldcode == UNION_TYPE || fieldcode == VECTOR_TYPE))
{
push_init_level (1, braced_init_obstack);
continue;
}
if (value.value)
{
push_member_name (constructor_fields);
output_init_element (value.value, value.original_type,
strict_string, fieldtype,
constructor_fields, 1, implicit,
braced_init_obstack);
RESTORE_SPELLING_DEPTH (constructor_depth);
}
else
/* Do the bookkeeping for an element that was
directly output as a constructor. */
{
constructor_bit_index = DECL_SIZE (constructor_fields);
constructor_unfilled_fields = DECL_CHAIN (constructor_fields);
}
constructor_fields = 0;
}
else if (TREE_CODE (constructor_type) == ARRAY_TYPE)
{
tree elttype = TYPE_MAIN_VARIANT (TREE_TYPE (constructor_type));
enum tree_code eltcode = TREE_CODE (elttype);
/* Accept a string constant to initialize a subarray. */
if (value.value != 0
&& eltcode == ARRAY_TYPE
&& INTEGRAL_TYPE_P (TREE_TYPE (elttype))
&& string_flag)
value.value = orig_value;
/* Otherwise, if we have come to a subaggregate,
and we don't have an element of its type, push into it. */
else if (value.value != 0
&& value.value != error_mark_node
&& TYPE_MAIN_VARIANT (TREE_TYPE (value.value)) != elttype
&& (eltcode == RECORD_TYPE || eltcode == ARRAY_TYPE
|| eltcode == UNION_TYPE || eltcode == VECTOR_TYPE))
{
push_init_level (1, braced_init_obstack);
continue;
}
if (constructor_max_index != 0
&& (tree_int_cst_lt (constructor_max_index, constructor_index)
|| integer_all_onesp (constructor_max_index)))
{
pedwarn_init (input_location, 0,
"excess elements in array initializer");
break;
}
/* Now output the actual element. */
if (value.value)
{
push_array_bounds (tree_low_cst (constructor_index, 1));
output_init_element (value.value, value.original_type,
strict_string, elttype,
constructor_index, 1, implicit,
braced_init_obstack);
RESTORE_SPELLING_DEPTH (constructor_depth);
}
constructor_index
= size_binop_loc (input_location, PLUS_EXPR,
constructor_index, bitsize_one_node);
if (!value.value)
/* If we are doing the bookkeeping for an element that was
directly output as a constructor, we must update
constructor_unfilled_index. */
constructor_unfilled_index = constructor_index;
}
else if (TREE_CODE (constructor_type) == VECTOR_TYPE)
{
tree elttype = TYPE_MAIN_VARIANT (TREE_TYPE (constructor_type));
/* Do a basic check of initializer size. Note that vectors
always have a fixed size derived from their type. */
if (tree_int_cst_lt (constructor_max_index, constructor_index))
{
pedwarn_init (input_location, 0,
"excess elements in vector initializer");
break;
}
/* Now output the actual element. */
if (value.value)
{
if (TREE_CODE (value.value) == VECTOR_CST)
elttype = TYPE_MAIN_VARIANT (constructor_type);
output_init_element (value.value, value.original_type,
strict_string, elttype,
constructor_index, 1, implicit,
braced_init_obstack);
}
constructor_index
= size_binop_loc (input_location,
PLUS_EXPR, constructor_index, bitsize_one_node);
if (!value.value)
/* If we are doing the bookkeeping for an element that was
directly output as a constructor, we must update
constructor_unfilled_index. */
constructor_unfilled_index = constructor_index;
}
/* Handle the sole element allowed in a braced initializer
for a scalar variable. */
else if (constructor_type != error_mark_node
&& constructor_fields == 0)
{
pedwarn_init (input_location, 0,
"excess elements in scalar initializer");
break;
}
else
{
if (value.value)
output_init_element (value.value, value.original_type,
strict_string, constructor_type,
NULL_TREE, 1, implicit,
braced_init_obstack);
constructor_fields = 0;
}
/* Handle range initializers either at this level or anywhere higher
in the designator stack. */
if (constructor_range_stack)
{
struct constructor_range_stack *p, *range_stack;
int finish = 0;
range_stack = constructor_range_stack;
constructor_range_stack = 0;
while (constructor_stack != range_stack->stack)
{
gcc_assert (constructor_stack->implicit);
process_init_element (pop_init_level (1,
braced_init_obstack),
true, braced_init_obstack);
}
for (p = range_stack;
!p->range_end || tree_int_cst_equal (p->index, p->range_end);
p = p->prev)
{
gcc_assert (constructor_stack->implicit);
process_init_element (pop_init_level (1, braced_init_obstack),
true, braced_init_obstack);
}
p->index = size_binop_loc (input_location,
PLUS_EXPR, p->index, bitsize_one_node);
if (tree_int_cst_equal (p->index, p->range_end) && !p->prev)
finish = 1;
while (1)
{
constructor_index = p->index;
constructor_fields = p->fields;
if (finish && p->range_end && p->index == p->range_start)
{
finish = 0;
p->prev = 0;
}
p = p->next;
if (!p)
break;
push_init_level (2, braced_init_obstack);
p->stack = constructor_stack;
if (p->range_end && tree_int_cst_equal (p->index, p->range_end))
p->index = p->range_start;
}
if (!finish)
constructor_range_stack = range_stack;
continue;
}
break;
}
constructor_range_stack = 0;
}
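/* Two of the special cases handled here, as an illustration (arbitrary
   identifiers):

       char greeting[] = { "hi" };

   the braced string becomes the replacement value for the whole level
   instead of being treated as element-by-element initialization, and

       int fill[16] = { [0 ... 15] = -1 };

   the single value is re-output for every index recorded on
   constructor_range_stack by the loop at the end of this function.  */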
/* Build a complete asm-statement, whose components are a CV_QUALIFIER
(guaranteed to be 'volatile' or null) and ARGS (represented using
an ASM_EXPR node). */
tree
build_asm_stmt (tree cv_qualifier, tree args)
{
if (!ASM_VOLATILE_P (args) && cv_qualifier)
ASM_VOLATILE_P (args) = 1;
return add_stmt (args);
}
/* Build an asm-expr, whose components are a STRING, some OUTPUTS,
some INPUTS, and some CLOBBERS. The latter three may be NULL.
SIMPLE indicates whether there was anything at all after the
string in the asm expression -- asm("blah") and asm("blah" : )
   are subtly different.  We use an ASM_EXPR node to represent this.  */
tree
build_asm_expr (location_t loc, tree string, tree outputs, tree inputs,
tree clobbers, tree labels, bool simple)
{
tree tail;
tree args;
int i;
const char *constraint;
const char **oconstraints;
bool allows_mem, allows_reg, is_inout;
int ninputs, noutputs;
ninputs = list_length (inputs);
noutputs = list_length (outputs);
oconstraints = (const char **) alloca (noutputs * sizeof (const char *));
string = resolve_asm_operand_names (string, outputs, inputs, labels);
/* Remove output conversions that change the type but not the mode. */
for (i = 0, tail = outputs; tail; ++i, tail = TREE_CHAIN (tail))
{
tree output = TREE_VALUE (tail);
/* ??? Really, this should not be here. Users should be using a
proper lvalue, dammit. But there's a long history of using casts
in the output operands. In cases like longlong.h, this becomes a
primitive form of typechecking -- if the cast can be removed, then
the output operand had a type of the proper width; otherwise we'll
get an error. Gross, but ... */
STRIP_NOPS (output);
if (!lvalue_or_else (loc, output, lv_asm))
output = error_mark_node;
if (output != error_mark_node
&& (TREE_READONLY (output)
|| TYPE_READONLY (TREE_TYPE (output))
|| ((TREE_CODE (TREE_TYPE (output)) == RECORD_TYPE
|| TREE_CODE (TREE_TYPE (output)) == UNION_TYPE)
&& C_TYPE_FIELDS_READONLY (TREE_TYPE (output)))))
readonly_error (output, lv_asm);
constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (tail)));
oconstraints[i] = constraint;
if (parse_output_constraint (&constraint, i, ninputs, noutputs,
&allows_mem, &allows_reg, &is_inout))
{
/* If the operand is going to end up in memory,
mark it addressable. */
if (!allows_reg && !c_mark_addressable (output))
output = error_mark_node;
if (!(!allows_reg && allows_mem)
&& output != error_mark_node
&& VOID_TYPE_P (TREE_TYPE (output)))
{
error_at (loc, "invalid use of void expression");
output = error_mark_node;
}
}
else
output = error_mark_node;
TREE_VALUE (tail) = output;
}
for (i = 0, tail = inputs; tail; ++i, tail = TREE_CHAIN (tail))
{
tree input;
constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (tail)));
input = TREE_VALUE (tail);
if (parse_input_constraint (&constraint, i, ninputs, noutputs, 0,
oconstraints, &allows_mem, &allows_reg))
{
/* If the operand is going to end up in memory,
mark it addressable. */
if (!allows_reg && allows_mem)
{
/* Strip the nops as we allow this case. FIXME, this really
should be rejected or made deprecated. */
STRIP_NOPS (input);
if (!c_mark_addressable (input))
input = error_mark_node;
}
else if (input != error_mark_node && VOID_TYPE_P (TREE_TYPE (input)))
{
error_at (loc, "invalid use of void expression");
input = error_mark_node;
}
}
else
input = error_mark_node;
TREE_VALUE (tail) = input;
}
/* ASMs with labels cannot have outputs. This should have been
enforced by the parser. */
gcc_assert (outputs == NULL || labels == NULL);
args = build_stmt (loc, ASM_EXPR, string, outputs, inputs, clobbers, labels);
/* asm statements without outputs, including simple ones, are treated
as volatile. */
ASM_INPUT_P (args) = simple;
ASM_VOLATILE_P (args) = (noutputs == 0);
return args;
}
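/* For example (a sketch; the template and constraints are target-specific
   and purely illustrative), an extended asm such as

       int dst, src = 42;
       __asm__ volatile ("mov %1, %0" : "=r" (dst) : "r" (src) : "memory");

   reaches build_asm_expr with one output, one input and one clobber; the
   output must be an lvalue, and operands whose constraints only allow
   memory are marked addressable above.  */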
/* Generate a goto statement to LABEL. LOC is the location of the
GOTO. */
tree
c_finish_goto_label (location_t loc, tree label)
{
tree decl = lookup_label_for_goto (loc, label);
if (!decl)
return NULL_TREE;
TREE_USED (decl) = 1;
{
tree t = build1 (GOTO_EXPR, void_type_node, decl);
SET_EXPR_LOCATION (t, loc);
return add_stmt (t);
}
}
/* Generate a computed goto statement to EXPR. LOC is the location of
the GOTO. */
tree
c_finish_goto_ptr (location_t loc, tree expr)
{
tree t;
pedwarn (loc, OPT_pedantic, "ISO C forbids %<goto *expr;%>");
expr = c_fully_fold (expr, false, NULL);
expr = convert (ptr_type_node, expr);
t = build1 (GOTO_EXPR, void_type_node, expr);
SET_EXPR_LOCATION (t, loc);
return add_stmt (t);
}
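/* For example (arbitrary labels), the computed goto handled here is the
   GNU extension

       void *tab[] = { &&l0, &&l1 };
       goto *tab[i];

   The target expression is folded and converted to ptr_type_node before
   the GOTO_EXPR is built, and -pedantic reports that ISO C forbids it.  */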
/* Generate a C `return' statement. RETVAL is the expression for what
to return, or a null pointer for `return;' with no value. LOC is
the location of the return statement. If ORIGTYPE is not NULL_TREE, it
is the original type of RETVAL. */
tree
c_finish_return (location_t loc, tree retval, tree origtype)
{
tree valtype = TREE_TYPE (TREE_TYPE (current_function_decl)), ret_stmt;
bool no_warning = false;
bool npc = false;
if (TREE_THIS_VOLATILE (current_function_decl))
warning_at (loc, 0,
"function declared %<noreturn%> has a %<return%> statement");
if (retval)
{
tree semantic_type = NULL_TREE;
npc = null_pointer_constant_p (retval);
if (TREE_CODE (retval) == EXCESS_PRECISION_EXPR)
{
semantic_type = TREE_TYPE (retval);
retval = TREE_OPERAND (retval, 0);
}
retval = c_fully_fold (retval, false, NULL);
if (semantic_type)
retval = build1 (EXCESS_PRECISION_EXPR, semantic_type, retval);
}
if (!retval)
{
current_function_returns_null = 1;
if ((warn_return_type || flag_isoc99)
&& valtype != 0 && TREE_CODE (valtype) != VOID_TYPE)
{
pedwarn_c99 (loc, flag_isoc99 ? 0 : OPT_Wreturn_type,
"%<return%> with no value, in "
"function returning non-void");
no_warning = true;
}
}
else if (valtype == 0 || TREE_CODE (valtype) == VOID_TYPE)
{
current_function_returns_null = 1;
if (TREE_CODE (TREE_TYPE (retval)) != VOID_TYPE)
pedwarn (loc, 0,
"%<return%> with a value, in function returning void");
else
pedwarn (loc, OPT_pedantic, "ISO C forbids "
"%<return%> with expression, in function returning void");
}
else
{
tree t = convert_for_assignment (loc, valtype, retval, origtype,
ic_return,
npc, NULL_TREE, NULL_TREE, 0);
tree res = DECL_RESULT (current_function_decl);
tree inner;
current_function_returns_value = 1;
if (t == error_mark_node)
return NULL_TREE;
inner = t = convert (TREE_TYPE (res), t);
/* Strip any conversions, additions, and subtractions, and see if
we are returning the address of a local variable. Warn if so. */
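/* Hypothetical inputs that this loop is meant to catch:
     int *f (void) { int x; return &x; }       -- warns
     int *g (void) { int x; return &x + 1; }   -- still warns; the
       PLUS_EXPR is stripped before the ADDR_EXPR is inspected.
   A MINUS_EXPR whose second operand is (converted from) a pointer is
   deliberately left alone; see the MINUS_EXPR case below.  */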
while (1)
{
switch (TREE_CODE (inner))
{
CASE_CONVERT:
case NON_LVALUE_EXPR:
case PLUS_EXPR:
case POINTER_PLUS_EXPR:
inner = TREE_OPERAND (inner, 0);
continue;
case MINUS_EXPR:
/* If the second operand of the MINUS_EXPR has a pointer
type (or is converted from it), this may be valid, so
don't give a warning. */
{
tree op1 = TREE_OPERAND (inner, 1);
while (!POINTER_TYPE_P (TREE_TYPE (op1))
&& (CONVERT_EXPR_P (op1)
|| TREE_CODE (op1) == NON_LVALUE_EXPR))
op1 = TREE_OPERAND (op1, 0);
if (POINTER_TYPE_P (TREE_TYPE (op1)))
break;
inner = TREE_OPERAND (inner, 0);
continue;
}
case ADDR_EXPR:
inner = TREE_OPERAND (inner, 0);
while (REFERENCE_CLASS_P (inner)
&& TREE_CODE (inner) != INDIRECT_REF)
inner = TREE_OPERAND (inner, 0);
if (DECL_P (inner)
&& !DECL_EXTERNAL (inner)
&& !TREE_STATIC (inner)
&& DECL_CONTEXT (inner) == current_function_decl)
warning_at (loc,
0, "function returns address of local variable");
break;
default:
break;
}
break;
}
retval = build2 (MODIFY_EXPR, TREE_TYPE (res), res, t);
SET_EXPR_LOCATION (retval, loc);
if (warn_sequence_point)
verify_sequence_points (retval);
}
ret_stmt = build_stmt (loc, RETURN_EXPR, retval);
TREE_NO_WARNING (ret_stmt) |= no_warning;
return add_stmt (ret_stmt);
}
struct c_switch {
/* The SWITCH_EXPR being built. */
tree switch_expr;
/* The original type of the testing expression, i.e. before the
default conversion is applied. */
tree orig_type;
/* A splay-tree mapping the low element of a case range to the high
element, or NULL_TREE if there is no high element. Used to
determine whether or not a new case label duplicates an old case
label. We need a tree, rather than simply a hash table, because
of the GNU case range extension. */
splay_tree cases;
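/* For example, with the GNU range extension
     switch (x) { case 1 ... 5: ...; case 3: ...; }
   the second label falls inside the stored [1, 5] range and is
   diagnosed as a duplicate when it is looked up here.  */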
/* The bindings at the point of the switch. This is used for
warnings crossing decls when branching to a case label. */
struct c_spot_bindings *bindings;
/* The next node on the stack. */
struct c_switch *next;
};
/* A stack of the currently active switch statements. The innermost
switch statement is on the top of the stack. There is no need to
mark the stack for garbage collection because it is only active
during the processing of the body of a function, and we never
collect at that point. */
struct c_switch *c_switch_stack;
/* Start a C switch statement, testing expression EXP. Return the new
SWITCH_EXPR. SWITCH_LOC is the location of the `switch'.
SWITCH_COND_LOC is the location of the switch's condition. */
tree
c_start_case (location_t switch_loc,
location_t switch_cond_loc,
tree exp)
{
tree orig_type = error_mark_node;
struct c_switch *cs;
if (exp != error_mark_node)
{
orig_type = TREE_TYPE (exp);
if (!INTEGRAL_TYPE_P (orig_type))
{
if (orig_type != error_mark_node)
{
error_at (switch_cond_loc, "switch quantity not an integer");
orig_type = error_mark_node;
}
exp = integer_zero_node;
}
else
{
tree type = TYPE_MAIN_VARIANT (orig_type);
if (!in_system_header
&& (type == long_integer_type_node
|| type == long_unsigned_type_node))
warning_at (switch_cond_loc,
OPT_Wtraditional, "%<long%> switch expression not "
"converted to %<int%> in ISO C");
exp = c_fully_fold (exp, false, NULL);
exp = default_conversion (exp);
if (warn_sequence_point)
verify_sequence_points (exp);
}
}
/* Add this new SWITCH_EXPR to the stack. */
cs = XNEW (struct c_switch);
cs->switch_expr = build3 (SWITCH_EXPR, orig_type, exp, NULL_TREE, NULL_TREE);
SET_EXPR_LOCATION (cs->switch_expr, switch_loc);
cs->orig_type = orig_type;
cs->cases = splay_tree_new (case_compare, NULL, NULL);
cs->bindings = c_get_switch_bindings ();
cs->next = c_switch_stack;
c_switch_stack = cs;
return add_stmt (cs->switch_expr);
}
/* Process a case label at location LOC. */
tree
do_case (location_t loc, tree low_value, tree high_value)
{
tree label = NULL_TREE;
if (low_value && TREE_CODE (low_value) != INTEGER_CST)
{
low_value = c_fully_fold (low_value, false, NULL);
if (TREE_CODE (low_value) == INTEGER_CST)
pedwarn (input_location, OPT_pedantic,
"case label is not an integer constant expression");
}
if (high_value && TREE_CODE (high_value) != INTEGER_CST)
{
high_value = c_fully_fold (high_value, false, NULL);
if (TREE_CODE (high_value) == INTEGER_CST)
pedwarn (input_location, OPT_pedantic,
"case label is not an integer constant expression");
}
if (c_switch_stack == NULL)
{
if (low_value)
error_at (loc, "case label not within a switch statement");
else
error_at (loc, "%<default%> label not within a switch statement");
return NULL_TREE;
}
if (c_check_switch_jump_warnings (c_switch_stack->bindings,
EXPR_LOCATION (c_switch_stack->switch_expr),
loc))
return NULL_TREE;
label = c_add_case_label (loc, c_switch_stack->cases,
SWITCH_COND (c_switch_stack->switch_expr),
c_switch_stack->orig_type,
low_value, high_value);
if (label == error_mark_node)
label = NULL_TREE;
return label;
}
/* Finish the switch statement. */
void
c_finish_case (tree body)
{
struct c_switch *cs = c_switch_stack;
location_t switch_location;
SWITCH_BODY (cs->switch_expr) = body;
/* Emit warnings as needed. */
switch_location = EXPR_LOCATION (cs->switch_expr);
c_do_switch_warnings (cs->cases, switch_location,
TREE_TYPE (cs->switch_expr),
SWITCH_COND (cs->switch_expr));
/* Pop the stack. */
c_switch_stack = cs->next;
splay_tree_delete (cs->cases);
c_release_switch_bindings (cs->bindings);
XDELETE (cs);
}
/* Emit an if statement. IF_LOCUS is the location of the 'if'. COND,
THEN_BLOCK and ELSE_BLOCK are expressions to be used; ELSE_BLOCK
may be null. NESTED_IF is true if THEN_BLOCK contains another IF
statement, and was not surrounded with parentheses. */
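/* A sketch of the -Wparentheses situation handled here (hypothetical
   input):
     if (a)
       if (b)
         f ();
       else
         g ();   -- the `else' binds to the inner `if'
   Adding braces around the inner `if' silences the warning.  */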
void
c_finish_if_stmt (location_t if_locus, tree cond, tree then_block,
tree else_block, bool nested_if)
{
tree stmt;
/* Diagnose an ambiguous `else' when an if-then-else is nested inside an if-then. */
if (warn_parentheses && nested_if && else_block == NULL)
{
tree inner_if = then_block;
/* We know from the grammar productions that there is an IF nested
within THEN_BLOCK.  Due to labels and C99 conditional declarations,
it might not be exactly THEN_BLOCK, but should be the last
non-container statement within. */
while (1)
switch (TREE_CODE (inner_if))
{
case COND_EXPR:
goto found;
case BIND_EXPR:
inner_if = BIND_EXPR_BODY (inner_if);
break;
case STATEMENT_LIST:
inner_if = expr_last (then_block);
break;
case TRY_FINALLY_EXPR:
case TRY_CATCH_EXPR:
inner_if = TREE_OPERAND (inner_if, 0);
break;
default:
gcc_unreachable ();
}
found:
if (COND_EXPR_ELSE (inner_if))
warning_at (if_locus, OPT_Wparentheses,
"suggest explicit braces to avoid ambiguous %<else%>");
}
stmt = build3 (COND_EXPR, void_type_node, cond, then_block, else_block);
SET_EXPR_LOCATION (stmt, if_locus);
add_stmt (stmt);
}
/* Emit a general-purpose loop construct. START_LOCUS is the location of
the beginning of the loop. COND is the loop condition. COND_IS_FIRST
is false for DO loops. INCR is the FOR increment expression. BODY is
the statement controlled by the loop. BLAB is the break label. CLAB is
the continue label. Everything is allowed to be NULL. */
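/* Roughly, for a `while'/`for' loop (COND_IS_FIRST true) the emitted
   shape is
     goto test;  top: BODY; CLAB: INCR;  test: if (COND) goto top;  BLAB:
   i.e. the condition is canonicalized to the end of the loop, while a
   `do'/`while' loop simply places the conditional jump after the body.  */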
void
c_finish_loop (location_t start_locus, tree cond, tree incr, tree body,
tree blab, tree clab, bool cond_is_first)
{
tree entry = NULL, exit = NULL, t;
/* If the condition is zero, don't generate a loop construct. */
if (cond && integer_zerop (cond))
{
if (cond_is_first)
{
t = build_and_jump (&blab);
SET_EXPR_LOCATION (t, start_locus);
add_stmt (t);
}
}
else
{
tree top = build1 (LABEL_EXPR, void_type_node, NULL_TREE);
/* If we have an exit condition, then we build an IF with gotos either
out of the loop, or to the top of it. If there's no exit condition,
then we just build a jump back to the top. */
exit = build_and_jump (&LABEL_EXPR_LABEL (top));
if (cond && !integer_nonzerop (cond))
{
/* Canonicalize the loop condition to the end. This means
generating a branch to the loop condition. Reuse the
continue label, if possible. */
if (cond_is_first)
{
if (incr || !clab)
{
entry = build1 (LABEL_EXPR, void_type_node, NULL_TREE);
t = build_and_jump (&LABEL_EXPR_LABEL (entry));
}
else
t = build1 (GOTO_EXPR, void_type_node, clab);
SET_EXPR_LOCATION (t, start_locus);
add_stmt (t);
}
t = build_and_jump (&blab);
if (cond_is_first)
exit = fold_build3_loc (start_locus,
COND_EXPR, void_type_node, cond, exit, t);
else
exit = fold_build3_loc (input_location,
COND_EXPR, void_type_node, cond, exit, t);
}
add_stmt (top);
}
if (body)
add_stmt (body);
if (clab)
add_stmt (build1 (LABEL_EXPR, void_type_node, clab));
if (incr)
add_stmt (incr);
if (entry)
add_stmt (entry);
if (exit)
add_stmt (exit);
if (blab)
add_stmt (build1 (LABEL_EXPR, void_type_node, blab));
}
tree
c_finish_bc_stmt (location_t loc, tree *label_p, bool is_break)
{
bool skip;
tree label = *label_p;
/* In switch statements break is sometimes stylistically used after
a return statement. This can lead to spurious warnings about
control reaching the end of a non-void function when it is
inlined. Note that we are calling block_may_fallthru with
language specific tree nodes; this works because
block_may_fallthru returns true when given something it does not
understand. */
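/* Typical input this matters for (hypothetical):
     switch (c) { case 0: return f (); break; }
   The `break' after `return' is unreachable, so no label or GOTO_EXPR is
   emitted for it and the spurious warning described above is avoided.  */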
skip = !block_may_fallthru (cur_stmt_list);
if (!label)
{
if (!skip)
*label_p = label = create_artificial_label (loc);
}
else if (TREE_CODE (label) == LABEL_DECL)
;
else switch (TREE_INT_CST_LOW (label))
{
case 0:
if (is_break)
error_at (loc, "break statement not within loop or switch");
else
error_at (loc, "continue statement not within a loop");
return NULL_TREE;
case 1:
gcc_assert (is_break);
error_at (loc, "break statement used with OpenMP for loop");
return NULL_TREE;
default:
gcc_unreachable ();
}
if (skip)
return NULL_TREE;
if (!is_break)
add_stmt (build_predict_expr (PRED_CONTINUE, NOT_TAKEN));
return add_stmt (build1 (GOTO_EXPR, void_type_node, label));
}
/* A helper routine for c_process_expr_stmt and c_finish_stmt_expr. */
static void
emit_side_effect_warnings (location_t loc, tree expr)
{
if (expr == error_mark_node)
;
else if (!TREE_SIDE_EFFECTS (expr))
{
if (!VOID_TYPE_P (TREE_TYPE (expr)) && !TREE_NO_WARNING (expr))
warning_at (loc, OPT_Wunused_value, "statement with no effect");
}
else
warn_if_unused_value (expr, loc);
}
/* Process an expression as if it were a complete statement. Emit
diagnostics, but do not call ADD_STMT. LOC is the location of the
statement. */
tree
c_process_expr_stmt (location_t loc, tree expr)
{
tree exprv;
if (!expr)
return NULL_TREE;
expr = c_fully_fold (expr, false, NULL);
if (warn_sequence_point)
verify_sequence_points (expr);
if (TREE_TYPE (expr) != error_mark_node
&& !COMPLETE_OR_VOID_TYPE_P (TREE_TYPE (expr))
&& TREE_CODE (TREE_TYPE (expr)) != ARRAY_TYPE)
error_at (loc, "expression statement has incomplete type");
/* If we're not processing a statement expression, warn about unused values.
Warnings for statement expressions will be emitted later, once we figure
out which is the result. */
if (!STATEMENT_LIST_STMT_EXPR (cur_stmt_list)
&& warn_unused_value)
emit_side_effect_warnings (loc, expr);
exprv = expr;
while (TREE_CODE (exprv) == COMPOUND_EXPR)
exprv = TREE_OPERAND (exprv, 1);
while (CONVERT_EXPR_P (exprv))
exprv = TREE_OPERAND (exprv, 0);
if (DECL_P (exprv)
|| handled_component_p (exprv)
|| TREE_CODE (exprv) == ADDR_EXPR)
mark_exp_read (exprv);
/* If the expression is a decl or a constant -- something to which we
cannot directly attach a source location -- wrap it in a no-op NOP_EXPR
so that a location can be recorded. */
if (DECL_P (expr) || CONSTANT_CLASS_P (expr))
{
expr = build1 (NOP_EXPR, TREE_TYPE (expr), expr);
SET_EXPR_LOCATION (expr, loc);
}
return expr;
}
/* Emit an expression as a statement. LOC is the location of the
expression. */
tree
c_finish_expr_stmt (location_t loc, tree expr)
{
if (expr)
return add_stmt (c_process_expr_stmt (loc, expr));
else
return NULL;
}
/* Do the opposite and emit a statement as an expression. To begin,
create a new binding level and return it. */
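/* These functions implement the GNU statement-expression extension,
   e.g. (hypothetical)
     int y = ({ int t = f (x); t * t; });
   where the value of the last statement inside the braces becomes the
   value of the whole expression.  */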
tree
c_begin_stmt_expr (void)
{
tree ret;
/* We must force a BLOCK for this level so that, if it is not expanded
later, there is a way to turn off the entire subtree of blocks that
are contained in it. */
keep_next_level ();
ret = c_begin_compound_stmt (true);
c_bindings_start_stmt_expr (c_switch_stack == NULL
? NULL
: c_switch_stack->bindings);
/* Mark the current statement list as belonging to a statement expression. */
STATEMENT_LIST_STMT_EXPR (ret) = 1;
return ret;
}
/* LOC is the location of the compound statement to which this body
belongs. */
tree
c_finish_stmt_expr (location_t loc, tree body)
{
tree last, type, tmp, val;
tree *last_p;
body = c_end_compound_stmt (loc, body, true);
c_bindings_end_stmt_expr (c_switch_stack == NULL
? NULL
: c_switch_stack->bindings);
/* Locate the last statement in BODY. See c_end_compound_stmt
about always returning a BIND_EXPR. */
last_p = &BIND_EXPR_BODY (body);
last = BIND_EXPR_BODY (body);
continue_searching:
if (TREE_CODE (last) == STATEMENT_LIST)
{
tree_stmt_iterator i;
/* This can happen with degenerate cases like ({ }). No value. */
if (!TREE_SIDE_EFFECTS (last))
return body;
/* If we're supposed to generate side effects warnings, process
all of the statements except the last. */
if (warn_unused_value)
{
for (i = tsi_start (last); !tsi_one_before_end_p (i); tsi_next (&i))
{
location_t tloc;
tree t = tsi_stmt (i);
tloc = EXPR_HAS_LOCATION (t) ? EXPR_LOCATION (t) : loc;
emit_side_effect_warnings (tloc, t);
}
}
else
i = tsi_last (last);
last_p = tsi_stmt_ptr (i);
last = *last_p;
}
/* If the end of the list is exception related, then the list was split
by a call to push_cleanup. Continue searching. */
if (TREE_CODE (last) == TRY_FINALLY_EXPR
|| TREE_CODE (last) == TRY_CATCH_EXPR)
{
last_p = &TREE_OPERAND (last, 0);
last = *last_p;
goto continue_searching;
}
if (last == error_mark_node)
return last;
/* In the case that the BIND_EXPR is not necessary, return the
expression out from inside it. */
if (last == BIND_EXPR_BODY (body)
&& BIND_EXPR_VARS (body) == NULL)
{
/* Even if this looks constant, do not allow it in a constant
expression. */
last = c_wrap_maybe_const (last, true);
/* Do not warn if the return value of a statement expression is
unused. */
TREE_NO_WARNING (last) = 1;
return last;
}
/* Extract the type of said expression. */
type = TREE_TYPE (last);
/* If we're not returning a value at all, then the BIND_EXPR that
we already have is a fine expression to return. */
if (!type || VOID_TYPE_P (type))
return body;
/* Now that we've located the expression containing the value, it seems
silly to make voidify_wrapper_expr repeat the process. Create a
temporary of the appropriate type and stick it in a TARGET_EXPR. */
tmp = create_tmp_var_raw (type, NULL);
/* Unwrap a no-op NOP_EXPR as added by c_finish_expr_stmt. This avoids
tree_expr_nonnegative_p giving up immediately. */
val = last;
if (TREE_CODE (val) == NOP_EXPR
&& TREE_TYPE (val) == TREE_TYPE (TREE_OPERAND (val, 0)))
val = TREE_OPERAND (val, 0);
*last_p = build2 (MODIFY_EXPR, void_type_node, tmp, val);
SET_EXPR_LOCATION (*last_p, EXPR_LOCATION (last));
{
tree t = build4 (TARGET_EXPR, type, tmp, body, NULL_TREE, NULL_TREE);
SET_EXPR_LOCATION (t, loc);
return t;
}
}
/* Begin and end compound statements. This is as simple as pushing
and popping new statement lists from the tree. */
tree
c_begin_compound_stmt (bool do_scope)
{
tree stmt = push_stmt_list ();
if (do_scope)
push_scope ();
return stmt;
}
/* End a compound statement. STMT is the statement. LOC is the
location of the compound statement -- this is usually the location
of the opening brace. */
tree
c_end_compound_stmt (location_t loc, tree stmt, bool do_scope)
{
tree block = NULL;
if (do_scope)
{
if (c_dialect_objc ())
objc_clear_super_receiver ();
block = pop_scope ();
}
stmt = pop_stmt_list (stmt);
stmt = c_build_bind_expr (loc, block, stmt);
/* If this compound statement is nested immediately inside a statement
expression, then force a BIND_EXPR to be created. Otherwise we'll
do the wrong thing for ({ { 1; } }) or ({ 1; { } }). In particular,
STATEMENT_LISTs merge, and thus we can lose track of what statement
was really last. */
if (building_stmt_list_p ()
&& STATEMENT_LIST_STMT_EXPR (cur_stmt_list)
&& TREE_CODE (stmt) != BIND_EXPR)
{
stmt = build3 (BIND_EXPR, void_type_node, NULL, stmt, NULL);
TREE_SIDE_EFFECTS (stmt) = 1;
SET_EXPR_LOCATION (stmt, loc);
}
return stmt;
}
/* Queue a cleanup. CLEANUP is an expression/statement to be executed
when the current scope is exited. EH_ONLY is true when this is not
meant to apply to normal control flow transfer. */
void
push_cleanup (tree decl, tree cleanup, bool eh_only)
{
enum tree_code code;
tree stmt, list;
bool stmt_expr;
code = eh_only ? TRY_CATCH_EXPR : TRY_FINALLY_EXPR;
stmt = build_stmt (DECL_SOURCE_LOCATION (decl), code, NULL, cleanup);
add_stmt (stmt);
stmt_expr = STATEMENT_LIST_STMT_EXPR (cur_stmt_list);
list = push_stmt_list ();
TREE_OPERAND (stmt, 0) = list;
STATEMENT_LIST_STMT_EXPR (list) = stmt_expr;
}
/* Convert scalar to vector for the range of operations. */
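/* For example, given a vector `v' declared with
     __attribute__ ((vector_size (16))) int v;
   the expression `v + 1' widens the scalar to a vector of 1s
   (stv_secondarg), while `1 + v' widens the first operand
   (stv_firstarg); a scalar whose value would be truncated by the
   element conversion is rejected with stv_error.  */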
static enum stv_conv
scalar_to_vector (location_t loc, enum tree_code code, tree op0, tree op1)
{
tree type0 = TREE_TYPE (op0);
tree type1 = TREE_TYPE (op1);
bool integer_only_op = false;
enum stv_conv ret = stv_firstarg;
gcc_assert (TREE_CODE (type0) == VECTOR_TYPE
|| TREE_CODE (type1) == VECTOR_TYPE);
switch (code)
{
case RSHIFT_EXPR:
case LSHIFT_EXPR:
if (TREE_CODE (type0) == INTEGER_TYPE
&& TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE)
{
if (unsafe_conversion_p (TREE_TYPE (type1), op0, false))
{
error_at (loc, "conversion of scalar to vector "
"involves truncation");
return stv_error;
}
else
return stv_firstarg;
}
break;
case BIT_IOR_EXPR:
case BIT_XOR_EXPR:
case BIT_AND_EXPR:
integer_only_op = true;
/* ... fall through ... */
case PLUS_EXPR:
case MINUS_EXPR:
case MULT_EXPR:
case TRUNC_DIV_EXPR:
case TRUNC_MOD_EXPR:
case RDIV_EXPR:
if (TREE_CODE (type0) == VECTOR_TYPE)
{
tree tmp;
ret = stv_secondarg;
/* Swap TYPE0 with TYPE1 and OP0 with OP1 */
tmp = type0; type0 = type1; type1 = tmp;
tmp = op0; op0 = op1; op1 = tmp;
}
if (TREE_CODE (type0) == INTEGER_TYPE
&& TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE)
{
if (unsafe_conversion_p (TREE_TYPE (type1), op0, false))
{
error_at (loc, "conversion of scalar to vector "
"involves truncation");
return stv_error;
}
return ret;
}
else if (!integer_only_op
/* Allow integer --> real conversion if safe. */
&& (TREE_CODE (type0) == REAL_TYPE
|| TREE_CODE (type0) == INTEGER_TYPE)
&& SCALAR_FLOAT_TYPE_P (TREE_TYPE (type1)))
{
if (unsafe_conversion_p (TREE_TYPE (type1), op0, false))
{
error_at (loc, "conversion of scalar to vector "
"involves truncation");
return stv_error;
}
return ret;
}
default:
break;
}
return stv_nothing;
}
/* Build a binary-operation expression without default conversions.
CODE is the kind of expression to build.
LOCATION is the operator's location.
This function differs from `build' in several ways:
the data type of the result is computed and recorded in it,
warnings are generated if arg data types are invalid,
special handling for addition and subtraction of pointers is known,
and some optimization is done (operations on narrow ints
are done in the narrower type when that gives the same result).
Constant folding is also done before the result is returned.
Note that the operands will never have enumeral types, or function
or array types, because either they will have the default conversions
performed or they have both just been converted to some other type in which
the arithmetic is to be done. */
tree
build_binary_op (location_t location, enum tree_code code,
tree orig_op0, tree orig_op1, int convert_p)
{
tree type0, type1, orig_type0, orig_type1;
tree eptype;
enum tree_code code0, code1;
tree op0, op1;
tree ret = error_mark_node;
const char *invalid_op_diag;
bool op0_int_operands, op1_int_operands;
bool int_const, int_const_or_overflow, int_operands;
/* Expression code to give to the expression when it is built.
Normally this is CODE, which is what the caller asked for,
but in some special cases we change it. */
enum tree_code resultcode = code;
/* Data type in which the computation is to be performed.
In the simplest cases this is the common type of the arguments. */
tree result_type = NULL;
/* When the computation is in excess precision, the type of the
final EXCESS_PRECISION_EXPR. */
tree semantic_result_type = NULL;
/* Nonzero means operands have already been type-converted
in whatever way is necessary.
Zero means they need to be converted to RESULT_TYPE. */
int converted = 0;
/* Nonzero means create the expression with this type, rather than
RESULT_TYPE. */
tree build_type = 0;
/* Nonzero means after finally constructing the expression
convert it to this type. */
tree final_type = 0;
/* Nonzero if this is an operation like MIN or MAX which can
safely be computed in short if both args are promoted shorts.
Also implies COMMON.
-1 indicates a bitwise operation; this makes a difference
in the exact conditions for when it is safe to do the operation
in a narrower mode. */
int shorten = 0;
/* Nonzero if this is a comparison operation;
if both args are promoted shorts, compare the original shorts.
Also implies COMMON. */
int short_compare = 0;
/* Nonzero if this is a right-shift operation, which can be computed on the
original short and then promoted if the operand is a promoted short. */
int short_shift = 0;
/* Nonzero means set RESULT_TYPE to the common type of the args. */
int common = 0;
/* True means types are compatible as far as ObjC is concerned. */
bool objc_ok;
/* True means this is an arithmetic operation that may need excess
precision. */
bool may_need_excess_precision;
/* True means this is a boolean operation that converts both its
operands to truth-values. */
bool boolean_op = false;
if (location == UNKNOWN_LOCATION)
location = input_location;
op0 = orig_op0;
op1 = orig_op1;
op0_int_operands = EXPR_INT_CONST_OPERANDS (orig_op0);
if (op0_int_operands)
op0 = remove_c_maybe_const_expr (op0);
op1_int_operands = EXPR_INT_CONST_OPERANDS (orig_op1);
if (op1_int_operands)
op1 = remove_c_maybe_const_expr (op1);
int_operands = (op0_int_operands && op1_int_operands);
if (int_operands)
{
int_const_or_overflow = (TREE_CODE (orig_op0) == INTEGER_CST
&& TREE_CODE (orig_op1) == INTEGER_CST);
int_const = (int_const_or_overflow
&& !TREE_OVERFLOW (orig_op0)
&& !TREE_OVERFLOW (orig_op1));
}
else
int_const = int_const_or_overflow = false;
/* Do not apply default conversion in a mixed vector/scalar expression. */
if (convert_p
&& !((TREE_CODE (TREE_TYPE (op0)) == VECTOR_TYPE)
!= (TREE_CODE (TREE_TYPE (op1)) == VECTOR_TYPE)))
{
op0 = default_conversion (op0);
op1 = default_conversion (op1);
}
orig_type0 = type0 = TREE_TYPE (op0);
orig_type1 = type1 = TREE_TYPE (op1);
/* The expression codes of the data types of the arguments tell us
whether the arguments are integers, floating, pointers, etc. */
code0 = TREE_CODE (type0);
code1 = TREE_CODE (type1);
/* Strip NON_LVALUE_EXPRs, etc., since we aren't using them as lvalues. */
STRIP_TYPE_NOPS (op0);
STRIP_TYPE_NOPS (op1);
/* If an error was already reported for one of the arguments,
avoid reporting another error. */
if (code0 == ERROR_MARK || code1 == ERROR_MARK)
return error_mark_node;
if ((invalid_op_diag
= targetm.invalid_binary_op (code, type0, type1)))
{
error_at (location, invalid_op_diag);
return error_mark_node;
}
switch (code)
{
case PLUS_EXPR:
case MINUS_EXPR:
case MULT_EXPR:
case TRUNC_DIV_EXPR:
case CEIL_DIV_EXPR:
case FLOOR_DIV_EXPR:
case ROUND_DIV_EXPR:
case EXACT_DIV_EXPR:
may_need_excess_precision = true;
break;
default:
may_need_excess_precision = false;
break;
}
if (TREE_CODE (op0) == EXCESS_PRECISION_EXPR)
{
op0 = TREE_OPERAND (op0, 0);
type0 = TREE_TYPE (op0);
}
else if (may_need_excess_precision
&& (eptype = excess_precision_type (type0)) != NULL_TREE)
{
type0 = eptype;
op0 = convert (eptype, op0);
}
if (TREE_CODE (op1) == EXCESS_PRECISION_EXPR)
{
op1 = TREE_OPERAND (op1, 0);
type1 = TREE_TYPE (op1);
}
else if (may_need_excess_precision
&& (eptype = excess_precision_type (type1)) != NULL_TREE)
{
type1 = eptype;
op1 = convert (eptype, op1);
}
objc_ok = objc_compare_types (type0, type1, -3, NULL_TREE);
/* If one operand of the binary operation is a vector and the other
is a scalar, convert the scalar to a vector. */
if ((code0 == VECTOR_TYPE) != (code1 == VECTOR_TYPE))
{
enum stv_conv convert_flag = scalar_to_vector (location, code, op0, op1);
switch (convert_flag)
{
case stv_error:
return error_mark_node;
case stv_firstarg:
{
bool maybe_const = true;
tree sc;
sc = c_fully_fold (op0, false, &maybe_const);
sc = save_expr (sc);
sc = convert (TREE_TYPE (type1), sc);
op0 = build_vector_from_val (type1, sc);
if (!maybe_const)
op0 = c_wrap_maybe_const (op0, true);
orig_type0 = type0 = TREE_TYPE (op0);
code0 = TREE_CODE (type0);
converted = 1;
break;
}
case stv_secondarg:
{
bool maybe_const = true;
tree sc;
sc = c_fully_fold (op1, false, &maybe_const);
sc = save_expr (sc);
sc = convert (TREE_TYPE (type0), sc);
op1 = build_vector_from_val (type0, sc);
if (!maybe_const)
op1 = c_wrap_maybe_const (op1, true);
orig_type1 = type1 = TREE_TYPE (op1);
code1 = TREE_CODE (type1);
converted = 1;
break;
}
default:
break;
}
}
switch (code)
{
case PLUS_EXPR:
/* Handle the pointer + int case. */
if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
{
ret = pointer_int_sum (location, PLUS_EXPR, op0, op1);
goto return_build_binary_op;
}
else if (code1 == POINTER_TYPE && code0 == INTEGER_TYPE)
{
ret = pointer_int_sum (location, PLUS_EXPR, op1, op0);
goto return_build_binary_op;
}
else
common = 1;
break;
case MINUS_EXPR:
/* Subtraction of two similar pointers.
We must subtract them as integers, then divide by object size. */
if (code0 == POINTER_TYPE && code1 == POINTER_TYPE
&& comp_target_types (location, type0, type1))
{
ret = pointer_diff (location, op0, op1);
goto return_build_binary_op;
}
/* Handle pointer minus int. Just like pointer plus int. */
else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
{
ret = pointer_int_sum (location, MINUS_EXPR, op0, op1);
goto return_build_binary_op;
}
else
common = 1;
break;
case MULT_EXPR:
common = 1;
break;
case TRUNC_DIV_EXPR:
case CEIL_DIV_EXPR:
case FLOOR_DIV_EXPR:
case ROUND_DIV_EXPR:
case EXACT_DIV_EXPR:
warn_for_div_by_zero (location, op1);
if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE
|| code0 == FIXED_POINT_TYPE
|| code0 == COMPLEX_TYPE || code0 == VECTOR_TYPE)
&& (code1 == INTEGER_TYPE || code1 == REAL_TYPE
|| code1 == FIXED_POINT_TYPE
|| code1 == COMPLEX_TYPE || code1 == VECTOR_TYPE))
{
enum tree_code tcode0 = code0, tcode1 = code1;
if (code0 == COMPLEX_TYPE || code0 == VECTOR_TYPE)
tcode0 = TREE_CODE (TREE_TYPE (TREE_TYPE (op0)));
if (code1 == COMPLEX_TYPE || code1 == VECTOR_TYPE)
tcode1 = TREE_CODE (TREE_TYPE (TREE_TYPE (op1)));
if (!((tcode0 == INTEGER_TYPE && tcode1 == INTEGER_TYPE)
|| (tcode0 == FIXED_POINT_TYPE && tcode1 == FIXED_POINT_TYPE)))
resultcode = RDIV_EXPR;
else
/* Although it would be tempting to shorten always here, that
loses on some targets, since the modulo instruction is
undefined if the quotient can't be represented in the
computation mode. We shorten only if unsigned or if
dividing by something we know != -1. */
shorten = (TYPE_UNSIGNED (TREE_TYPE (orig_op0))
|| (TREE_CODE (op1) == INTEGER_CST
&& !integer_all_onesp (op1)));
common = 1;
}
break;
case BIT_AND_EXPR:
case BIT_IOR_EXPR:
case BIT_XOR_EXPR:
if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE)
shorten = -1;
/* Allow vector types which are not floating point types. */
else if (code0 == VECTOR_TYPE
&& code1 == VECTOR_TYPE
&& !VECTOR_FLOAT_TYPE_P (type0)
&& !VECTOR_FLOAT_TYPE_P (type1))
common = 1;
break;
case TRUNC_MOD_EXPR:
case FLOOR_MOD_EXPR:
warn_for_div_by_zero (location, op1);
if (code0 == VECTOR_TYPE && code1 == VECTOR_TYPE
&& TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE
&& TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE)
common = 1;
else if (code0 == INTEGER_TYPE && code1 == INTEGER_TYPE)
{
/* Although it would be tempting to shorten always here, that loses
on some targets, since the modulo instruction is undefined if the
quotient can't be represented in the computation mode. We shorten
only if unsigned or if dividing by something we know != -1. */
shorten = (TYPE_UNSIGNED (TREE_TYPE (orig_op0))
|| (TREE_CODE (op1) == INTEGER_CST
&& !integer_all_onesp (op1)));
common = 1;
}
break;
case TRUTH_ANDIF_EXPR:
case TRUTH_ORIF_EXPR:
case TRUTH_AND_EXPR:
case TRUTH_OR_EXPR:
case TRUTH_XOR_EXPR:
if ((code0 == INTEGER_TYPE || code0 == POINTER_TYPE
|| code0 == REAL_TYPE || code0 == COMPLEX_TYPE
|| code0 == FIXED_POINT_TYPE)
&& (code1 == INTEGER_TYPE || code1 == POINTER_TYPE
|| code1 == REAL_TYPE || code1 == COMPLEX_TYPE
|| code1 == FIXED_POINT_TYPE))
{
/* Result of these operations is always an int,
but that does not mean the operands should be
converted to ints! */
result_type = integer_type_node;
op0 = c_common_truthvalue_conversion (location, op0);
op1 = c_common_truthvalue_conversion (location, op1);
converted = 1;
boolean_op = true;
}
if (code == TRUTH_ANDIF_EXPR)
{
int_const_or_overflow = (int_operands
&& TREE_CODE (orig_op0) == INTEGER_CST
&& (op0 == truthvalue_false_node
|| TREE_CODE (orig_op1) == INTEGER_CST));
int_const = (int_const_or_overflow
&& !TREE_OVERFLOW (orig_op0)
&& (op0 == truthvalue_false_node
|| !TREE_OVERFLOW (orig_op1)));
}
else if (code == TRUTH_ORIF_EXPR)
{
int_const_or_overflow = (int_operands
&& TREE_CODE (orig_op0) == INTEGER_CST
&& (op0 == truthvalue_true_node
|| TREE_CODE (orig_op1) == INTEGER_CST));
int_const = (int_const_or_overflow
&& !TREE_OVERFLOW (orig_op0)
&& (op0 == truthvalue_true_node
|| !TREE_OVERFLOW (orig_op1)));
}
break;
/* Shift operations: result has same type as first operand;
always convert second operand to int.
Also set SHORT_SHIFT if shifting rightward. */
case RSHIFT_EXPR:
if (code0 == VECTOR_TYPE && code1 == INTEGER_TYPE
&& TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE)
{
result_type = type0;
converted = 1;
}
else if (code0 == VECTOR_TYPE && code1 == VECTOR_TYPE
&& TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE
&& TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE
&& TYPE_VECTOR_SUBPARTS (type0) == TYPE_VECTOR_SUBPARTS (type1))
{
result_type = type0;
converted = 1;
}
else if ((code0 == INTEGER_TYPE || code0 == FIXED_POINT_TYPE)
&& code1 == INTEGER_TYPE)
{
if (TREE_CODE (op1) == INTEGER_CST)
{
if (tree_int_cst_sgn (op1) < 0)
{
int_const = false;
if (c_inhibit_evaluation_warnings == 0)
warning (0, "right shift count is negative");
}
else
{
if (!integer_zerop (op1))
short_shift = 1;
if (compare_tree_int (op1, TYPE_PRECISION (type0)) >= 0)
{
int_const = false;
if (c_inhibit_evaluation_warnings == 0)
warning (0, "right shift count >= width of type");
}
}
}
/* Use the type of the value to be shifted. */
result_type = type0;
/* Convert the non-vector shift-count to an integer, regardless
of size of value being shifted. */
if (TREE_CODE (TREE_TYPE (op1)) != VECTOR_TYPE
&& TYPE_MAIN_VARIANT (TREE_TYPE (op1)) != integer_type_node)
op1 = convert (integer_type_node, op1);
/* Avoid converting op1 to result_type later. */
converted = 1;
}
break;
case LSHIFT_EXPR:
if (code0 == VECTOR_TYPE && code1 == INTEGER_TYPE
&& TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE)
{
result_type = type0;
converted = 1;
}
else if (code0 == VECTOR_TYPE && code1 == VECTOR_TYPE
&& TREE_CODE (TREE_TYPE (type0)) == INTEGER_TYPE
&& TREE_CODE (TREE_TYPE (type1)) == INTEGER_TYPE
&& TYPE_VECTOR_SUBPARTS (type0) == TYPE_VECTOR_SUBPARTS (type1))
{
result_type = type0;
converted = 1;
}
else if ((code0 == INTEGER_TYPE || code0 == FIXED_POINT_TYPE)
&& code1 == INTEGER_TYPE)
{
if (TREE_CODE (op1) == INTEGER_CST)
{
if (tree_int_cst_sgn (op1) < 0)
{
int_const = false;
if (c_inhibit_evaluation_warnings == 0)
warning (0, "left shift count is negative");
}
else if (compare_tree_int (op1, TYPE_PRECISION (type0)) >= 0)
{
int_const = false;
if (c_inhibit_evaluation_warnings == 0)
warning (0, "left shift count >= width of type");
}
}
/* Use the type of the value to be shifted. */
result_type = type0;
/* Convert the non-vector shift-count to an integer, regardless
of size of value being shifted. */
if (TREE_CODE (TREE_TYPE (op1)) != VECTOR_TYPE
&& TYPE_MAIN_VARIANT (TREE_TYPE (op1)) != integer_type_node)
op1 = convert (integer_type_node, op1);
/* Avoid converting op1 to result_type later. */
converted = 1;
}
break;
case EQ_EXPR:
case NE_EXPR:
if (code0 == VECTOR_TYPE && code1 == VECTOR_TYPE)
{
tree intt;
if (TREE_TYPE (type0) != TREE_TYPE (type1))
{
error_at (location, "comparing vectors with different "
"element types");
return error_mark_node;
}
if (TYPE_VECTOR_SUBPARTS (type0) != TYPE_VECTOR_SUBPARTS (type1))
{
error_at (location, "comparing vectors with different "
"number of elements");
return error_mark_node;
}
/* Always construct signed integer vector type. */
intt = c_common_type_for_size (GET_MODE_BITSIZE
(TYPE_MODE (TREE_TYPE (type0))), 0);
result_type = build_opaque_vector_type (intt,
TYPE_VECTOR_SUBPARTS (type0));
converted = 1;
break;
}
if (FLOAT_TYPE_P (type0) || FLOAT_TYPE_P (type1))
warning_at (location,
OPT_Wfloat_equal,
"comparing floating point with == or != is unsafe");
/* Result of comparison is always int,
but don't convert the args to int! */
build_type = integer_type_node;
if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE
|| code0 == FIXED_POINT_TYPE || code0 == COMPLEX_TYPE)
&& (code1 == INTEGER_TYPE || code1 == REAL_TYPE
|| code1 == FIXED_POINT_TYPE || code1 == COMPLEX_TYPE))
short_compare = 1;
else if (code0 == POINTER_TYPE && null_pointer_constant_p (orig_op1))
{
if (TREE_CODE (op0) == ADDR_EXPR
&& decl_with_nonnull_addr_p (TREE_OPERAND (op0, 0)))
{
if (code == EQ_EXPR)
warning_at (location,
OPT_Waddress,
"the comparison will always evaluate as %<false%> "
"for the address of %qD will never be NULL",
TREE_OPERAND (op0, 0));
else
warning_at (location,
OPT_Waddress,
"the comparison will always evaluate as %<true%> "
"for the address of %qD will never be NULL",
TREE_OPERAND (op0, 0));
}
result_type = type0;
}
else if (code1 == POINTER_TYPE && null_pointer_constant_p (orig_op0))
{
if (TREE_CODE (op1) == ADDR_EXPR
&& decl_with_nonnull_addr_p (TREE_OPERAND (op1, 0)))
{
if (code == EQ_EXPR)
warning_at (location,
OPT_Waddress,
"the comparison will always evaluate as %<false%> "
"for the address of %qD will never be NULL",
TREE_OPERAND (op1, 0));
else
warning_at (location,
OPT_Waddress,
"the comparison will always evaluate as %<true%> "
"for the address of %qD will never be NULL",
TREE_OPERAND (op1, 0));
}
result_type = type1;
}
else if (code0 == POINTER_TYPE && code1 == POINTER_TYPE)
{
tree tt0 = TREE_TYPE (type0);
tree tt1 = TREE_TYPE (type1);
addr_space_t as0 = TYPE_ADDR_SPACE (tt0);
addr_space_t as1 = TYPE_ADDR_SPACE (tt1);
addr_space_t as_common = ADDR_SPACE_GENERIC;
/* Anything compares with void *. void * compares with anything.
Otherwise, the targets must be compatible
and both must be object or both incomplete. */
if (comp_target_types (location, type0, type1))
result_type = common_pointer_type (type0, type1);
else if (!addr_space_superset (as0, as1, &as_common))
{
error_at (location, "comparison of pointers to "
"disjoint address spaces");
return error_mark_node;
}
else if (VOID_TYPE_P (tt0))
{
if (pedantic && TREE_CODE (tt1) == FUNCTION_TYPE)
pedwarn (location, OPT_pedantic, "ISO C forbids "
"comparison of %<void *%> with function pointer");
}
else if (VOID_TYPE_P (tt1))
{
if (pedantic && TREE_CODE (tt0) == FUNCTION_TYPE)
pedwarn (location, OPT_pedantic, "ISO C forbids "
"comparison of %<void *%> with function pointer");
}
else
/* Avoid warning about the volatile ObjC EH puts on decls. */
if (!objc_ok)
pedwarn (location, 0,
"comparison of distinct pointer types lacks a cast");
if (result_type == NULL_TREE)
{
int qual = ENCODE_QUAL_ADDR_SPACE (as_common);
result_type = build_pointer_type
(build_qualified_type (void_type_node, qual));
}
}
else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
{
result_type = type0;
pedwarn (location, 0, "comparison between pointer and integer");
}
else if (code0 == INTEGER_TYPE && code1 == POINTER_TYPE)
{
result_type = type1;
pedwarn (location, 0, "comparison between pointer and integer");
}
break;
case LE_EXPR:
case GE_EXPR:
case LT_EXPR:
case GT_EXPR:
if (code0 == VECTOR_TYPE && code1 == VECTOR_TYPE)
{
tree intt;
if (TREE_TYPE (type0) != TREE_TYPE (type1))
{
error_at (location, "comparing vectors with different "
"element types");
return error_mark_node;
}
if (TYPE_VECTOR_SUBPARTS (type0) != TYPE_VECTOR_SUBPARTS (type1))
{
error_at (location, "comparing vectors with different "
"number of elements");
return error_mark_node;
}
/* Always construct signed integer vector type. */
intt = c_common_type_for_size (GET_MODE_BITSIZE
(TYPE_MODE (TREE_TYPE (type0))), 0);
result_type = build_opaque_vector_type (intt,
TYPE_VECTOR_SUBPARTS (type0));
converted = 1;
break;
}
build_type = integer_type_node;
if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE
|| code0 == FIXED_POINT_TYPE)
&& (code1 == INTEGER_TYPE || code1 == REAL_TYPE
|| code1 == FIXED_POINT_TYPE))
short_compare = 1;
else if (code0 == POINTER_TYPE && code1 == POINTER_TYPE)
{
addr_space_t as0 = TYPE_ADDR_SPACE (TREE_TYPE (type0));
addr_space_t as1 = TYPE_ADDR_SPACE (TREE_TYPE (type1));
addr_space_t as_common;
if (comp_target_types (location, type0, type1))
{
result_type = common_pointer_type (type0, type1);
if (!COMPLETE_TYPE_P (TREE_TYPE (type0))
!= !COMPLETE_TYPE_P (TREE_TYPE (type1)))
pedwarn (location, 0,
"comparison of complete and incomplete pointers");
else if (TREE_CODE (TREE_TYPE (type0)) == FUNCTION_TYPE)
pedwarn (location, OPT_pedantic, "ISO C forbids "
"ordered comparisons of pointers to functions");
else if (null_pointer_constant_p (orig_op0)
|| null_pointer_constant_p (orig_op1))
warning_at (location, OPT_Wextra,
"ordered comparison of pointer with null pointer");
}
else if (!addr_space_superset (as0, as1, &as_common))
{
error_at (location, "comparison of pointers to "
"disjoint address spaces");
return error_mark_node;
}
else
{
int qual = ENCODE_QUAL_ADDR_SPACE (as_common);
result_type = build_pointer_type
(build_qualified_type (void_type_node, qual));
pedwarn (location, 0,
"comparison of distinct pointer types lacks a cast");
}
}
else if (code0 == POINTER_TYPE && null_pointer_constant_p (orig_op1))
{
result_type = type0;
if (pedantic)
pedwarn (location, OPT_pedantic,
"ordered comparison of pointer with integer zero");
else if (extra_warnings)
warning_at (location, OPT_Wextra,
"ordered comparison of pointer with integer zero");
}
else if (code1 == POINTER_TYPE && null_pointer_constant_p (orig_op0))
{
result_type = type1;
if (pedantic)
pedwarn (location, OPT_pedantic,
"ordered comparison of pointer with integer zero");
else if (extra_warnings)
warning_at (location, OPT_Wextra,
"ordered comparison of pointer with integer zero");
}
else if (code0 == POINTER_TYPE && code1 == INTEGER_TYPE)
{
result_type = type0;
pedwarn (location, 0, "comparison between pointer and integer");
}
else if (code0 == INTEGER_TYPE && code1 == POINTER_TYPE)
{
result_type = type1;
pedwarn (location, 0, "comparison between pointer and integer");
}
break;
default:
gcc_unreachable ();
}
if (code0 == ERROR_MARK || code1 == ERROR_MARK)
return error_mark_node;
if (code0 == VECTOR_TYPE && code1 == VECTOR_TYPE
&& (!tree_int_cst_equal (TYPE_SIZE (type0), TYPE_SIZE (type1))
|| !same_scalar_type_ignoring_signedness (TREE_TYPE (type0),
TREE_TYPE (type1))))
{
binary_op_error (location, code, type0, type1);
return error_mark_node;
}
if ((code0 == INTEGER_TYPE || code0 == REAL_TYPE || code0 == COMPLEX_TYPE
|| code0 == FIXED_POINT_TYPE || code0 == VECTOR_TYPE)
&&
(code1 == INTEGER_TYPE || code1 == REAL_TYPE || code1 == COMPLEX_TYPE
|| code1 == FIXED_POINT_TYPE || code1 == VECTOR_TYPE))
{
bool first_complex = (code0 == COMPLEX_TYPE);
bool second_complex = (code1 == COMPLEX_TYPE);
int none_complex = (!first_complex && !second_complex);
if (shorten || common || short_compare)
{
result_type = c_common_type (type0, type1);
do_warn_double_promotion (result_type, type0, type1,
"implicit conversion from %qT to %qT "
"to match other operand of binary "
"expression",
location);
if (result_type == error_mark_node)
return error_mark_node;
}
if (first_complex != second_complex
&& (code == PLUS_EXPR
|| code == MINUS_EXPR
|| code == MULT_EXPR
|| (code == TRUNC_DIV_EXPR && first_complex))
&& TREE_CODE (TREE_TYPE (result_type)) == REAL_TYPE
&& flag_signed_zeros)
{
/* An operation on mixed real/complex operands must be
handled specially, but the language-independent code can
more easily optimize the plain complex arithmetic if
-fno-signed-zeros. */
tree real_type = TREE_TYPE (result_type);
tree real, imag;
if (type0 != orig_type0 || type1 != orig_type1)
{
gcc_assert (may_need_excess_precision && common);
semantic_result_type = c_common_type (orig_type0, orig_type1);
}
if (first_complex)
{
if (TREE_TYPE (op0) != result_type)
op0 = convert_and_check (result_type, op0);
if (TREE_TYPE (op1) != real_type)
op1 = convert_and_check (real_type, op1);
}
else
{
if (TREE_TYPE (op0) != real_type)
op0 = convert_and_check (real_type, op0);
if (TREE_TYPE (op1) != result_type)
op1 = convert_and_check (result_type, op1);
}
if (TREE_CODE (op0) == ERROR_MARK || TREE_CODE (op1) == ERROR_MARK)
return error_mark_node;
if (first_complex)
{
op0 = c_save_expr (op0);
real = build_unary_op (EXPR_LOCATION (orig_op0), REALPART_EXPR,
op0, 1);
imag = build_unary_op (EXPR_LOCATION (orig_op0), IMAGPART_EXPR,
op0, 1);
switch (code)
{
case MULT_EXPR:
case TRUNC_DIV_EXPR:
op1 = c_save_expr (op1);
imag = build2 (resultcode, real_type, imag, op1);
/* Fall through. */
case PLUS_EXPR:
case MINUS_EXPR:
real = build2 (resultcode, real_type, real, op1);
break;
default:
gcc_unreachable();
}
}
else
{
op1 = c_save_expr (op1);
real = build_unary_op (EXPR_LOCATION (orig_op1), REALPART_EXPR,
op1, 1);
imag = build_unary_op (EXPR_LOCATION (orig_op1), IMAGPART_EXPR,
op1, 1);
switch (code)
{
case MULT_EXPR:
op0 = c_save_expr (op0);
imag = build2 (resultcode, real_type, op0, imag);
/* Fall through. */
case PLUS_EXPR:
real = build2 (resultcode, real_type, op0, real);
break;
case MINUS_EXPR:
real = build2 (resultcode, real_type, op0, real);
imag = build1 (NEGATE_EXPR, real_type, imag);
break;
default:
gcc_unreachable();
}
}
ret = build2 (COMPLEX_EXPR, result_type, real, imag);
goto return_build_binary_op;
}
/* For certain operations (which identify themselves by shorten != 0)
if both args were extended from the same smaller type,
do the arithmetic in that type and then extend.
shorten !=0 and !=1 indicates a bitwise operation.
For them, this optimization is safe only if
both args are zero-extended or both are sign-extended.
Otherwise, we might change the result.
Eg, (short)-1 | (unsigned short)-1 is (int)-1
but calculated in (unsigned short) it would be (unsigned short)-1. */
if (shorten && none_complex)
{
final_type = result_type;
result_type = shorten_binary_op (result_type, op0, op1,
shorten == -1);
}
/* Shifts can be shortened if shifting right. */
if (short_shift)
{
int unsigned_arg;
tree arg0 = get_narrower (op0, &unsigned_arg);
final_type = result_type;
if (arg0 == op0 && final_type == TREE_TYPE (op0))
unsigned_arg = TYPE_UNSIGNED (TREE_TYPE (op0));
if (TYPE_PRECISION (TREE_TYPE (arg0)) < TYPE_PRECISION (result_type)
&& tree_int_cst_sgn (op1) > 0
/* We can shorten only if the shift count is less than the
number of bits in the smaller type size. */
&& compare_tree_int (op1, TYPE_PRECISION (TREE_TYPE (arg0))) < 0
/* We cannot drop an unsigned shift after sign-extension. */
&& (!TYPE_UNSIGNED (final_type) || unsigned_arg))
{
/* Do an unsigned shift if the operand was zero-extended. */
result_type
= c_common_signed_or_unsigned_type (unsigned_arg,
TREE_TYPE (arg0));
/* Convert value-to-be-shifted to that type. */
if (TREE_TYPE (op0) != result_type)
op0 = convert (result_type, op0);
converted = 1;
}
}
/* Comparison operations are shortened too but differently.
They identify themselves by setting short_compare = 1. */
if (short_compare)
{
/* Don't write &op0, etc., because that would prevent op0
from being kept in a register.
Instead, make copies of our local variables and
pass the copies by reference, then copy them back afterward. */
tree xop0 = op0, xop1 = op1, xresult_type = result_type;
enum tree_code xresultcode = resultcode;
tree val
= shorten_compare (&xop0, &xop1, &xresult_type, &xresultcode);
if (val != 0)
{
ret = val;
goto return_build_binary_op;
}
op0 = xop0, op1 = xop1;
converted = 1;
resultcode = xresultcode;
if (c_inhibit_evaluation_warnings == 0)
{
bool op0_maybe_const = true;
bool op1_maybe_const = true;
tree orig_op0_folded, orig_op1_folded;
if (in_late_binary_op)
{
orig_op0_folded = orig_op0;
orig_op1_folded = orig_op1;
}
else
{
/* Fold for the sake of possible warnings, as in
build_conditional_expr. This requires the
"original" values to be folded, not just op0 and
op1. */
c_inhibit_evaluation_warnings++;
op0 = c_fully_fold (op0, require_constant_value,
&op0_maybe_const);
op1 = c_fully_fold (op1, require_constant_value,
&op1_maybe_const);
c_inhibit_evaluation_warnings--;
orig_op0_folded = c_fully_fold (orig_op0,
require_constant_value,
NULL);
orig_op1_folded = c_fully_fold (orig_op1,
require_constant_value,
NULL);
}
if (warn_sign_compare)
warn_for_sign_compare (location, orig_op0_folded,
orig_op1_folded, op0, op1,
result_type, resultcode);
if (!in_late_binary_op && !int_operands)
{
if (!op0_maybe_const || TREE_CODE (op0) != INTEGER_CST)
op0 = c_wrap_maybe_const (op0, !op0_maybe_const);
if (!op1_maybe_const || TREE_CODE (op1) != INTEGER_CST)
op1 = c_wrap_maybe_const (op1, !op1_maybe_const);
}
}
}
}
/* At this point, RESULT_TYPE must be nonzero to avoid an error message.
If CONVERTED is zero, both args will be converted to type RESULT_TYPE.
Then the expression will be built.
It will be given type FINAL_TYPE if that is nonzero;
otherwise, it will be given type RESULT_TYPE. */
if (!result_type)
{
binary_op_error (location, code, TREE_TYPE (op0), TREE_TYPE (op1));
return error_mark_node;
}
if (build_type == NULL_TREE)
{
build_type = result_type;
if ((type0 != orig_type0 || type1 != orig_type1)
&& !boolean_op)
{
gcc_assert (may_need_excess_precision && common);
semantic_result_type = c_common_type (orig_type0, orig_type1);
}
}
if (!converted)
{
op0 = ep_convert_and_check (result_type, op0, semantic_result_type);
op1 = ep_convert_and_check (result_type, op1, semantic_result_type);
/* This can happen if one operand has a vector type, and the other
has a different type. */
if (TREE_CODE (op0) == ERROR_MARK || TREE_CODE (op1) == ERROR_MARK)
return error_mark_node;
}
/* Treat expressions in initializers specially as they can't trap. */
if (int_const_or_overflow)
ret = (require_constant_value
? fold_build2_initializer_loc (location, resultcode, build_type,
op0, op1)
: fold_build2_loc (location, resultcode, build_type, op0, op1));
else
ret = build2 (resultcode, build_type, op0, op1);
if (final_type != 0)
ret = convert (final_type, ret);
return_build_binary_op:
gcc_assert (ret != error_mark_node);
if (TREE_CODE (ret) == INTEGER_CST && !TREE_OVERFLOW (ret) && !int_const)
ret = (int_operands
? note_integer_operands (ret)
: build1 (NOP_EXPR, TREE_TYPE (ret), ret));
else if (TREE_CODE (ret) != INTEGER_CST && int_operands
&& !in_late_binary_op)
ret = note_integer_operands (ret);
if (semantic_result_type)
ret = build1 (EXCESS_PRECISION_EXPR, semantic_result_type, ret);
protected_set_expr_location (ret, location);
return ret;
}
/* Convert EXPR to be a truth-value, validating its type for this
purpose. LOCATION is the source location for the expression. */
tree
c_objc_common_truthvalue_conversion (location_t location, tree expr)
{
bool int_const, int_operands;
switch (TREE_CODE (TREE_TYPE (expr)))
{
case ARRAY_TYPE:
error_at (location, "used array that cannot be converted to pointer where scalar is required");
return error_mark_node;
case RECORD_TYPE:
error_at (location, "used struct type value where scalar is required");
return error_mark_node;
case UNION_TYPE:
error_at (location, "used union type value where scalar is required");
return error_mark_node;
case VOID_TYPE:
error_at (location, "void value not ignored as it ought to be");
return error_mark_node;
case FUNCTION_TYPE:
gcc_unreachable ();
case VECTOR_TYPE:
error_at (location, "used vector type where scalar is required");
return error_mark_node;
default:
break;
}
int_const = (TREE_CODE (expr) == INTEGER_CST && !TREE_OVERFLOW (expr));
int_operands = EXPR_INT_CONST_OPERANDS (expr);
if (int_operands)
expr = remove_c_maybe_const_expr (expr);
/* ??? Should we also give an error for vectors rather than leaving
those to give errors later? */
expr = c_common_truthvalue_conversion (location, expr);
if (TREE_CODE (expr) == INTEGER_CST && int_operands && !int_const)
{
if (TREE_OVERFLOW (expr))
return expr;
else
return note_integer_operands (expr);
}
if (TREE_CODE (expr) == INTEGER_CST && !int_const)
return build1 (NOP_EXPR, TREE_TYPE (expr), expr);
return expr;
}
/* Convert EXPR to a contained DECL, updating *TC and *SE as
required. */
tree
c_expr_to_decl (tree expr, bool *tc ATTRIBUTE_UNUSED, bool *se)
{
if (TREE_CODE (expr) == COMPOUND_LITERAL_EXPR)
{
tree decl = COMPOUND_LITERAL_EXPR_DECL (expr);
/* Executing a compound literal inside a function reinitializes
it. */
if (!TREE_STATIC (decl))
*se = true;
return decl;
}
else
return expr;
}
/* Like c_begin_compound_stmt, except force the retention of the BLOCK. */
tree
c_begin_omp_parallel (void)
{
tree block;
keep_next_level ();
block = c_begin_compound_stmt (true);
return block;
}
/* Generate OMP_PARALLEL, with CLAUSES and BLOCK as its compound
statement. LOC is the location of the OMP_PARALLEL. */
tree
c_finish_omp_parallel (location_t loc, tree clauses, tree block)
{
tree stmt;
block = c_end_compound_stmt (loc, block, true);
stmt = make_node (OMP_PARALLEL);
TREE_TYPE (stmt) = void_type_node;
OMP_PARALLEL_CLAUSES (stmt) = clauses;
OMP_PARALLEL_BODY (stmt) = block;
SET_EXPR_LOCATION (stmt, loc);
return add_stmt (stmt);
}
/* Like c_begin_compound_stmt, except force the retention of the BLOCK. */
tree
c_begin_omp_task (void)
{
tree block;
keep_next_level ();
block = c_begin_compound_stmt (true);
return block;
}
/* Generate OMP_TASK, with CLAUSES and BLOCK as its compound
statement. LOC is the location of the #pragma. */
tree
c_finish_omp_task (location_t loc, tree clauses, tree block)
{
tree stmt;
block = c_end_compound_stmt (loc, block, true);
stmt = make_node (OMP_TASK);
TREE_TYPE (stmt) = void_type_node;
OMP_TASK_CLAUSES (stmt) = clauses;
OMP_TASK_BODY (stmt) = block;
SET_EXPR_LOCATION (stmt, loc);
return add_stmt (stmt);
}
/* For all elements of CLAUSES, validate them vs OpenMP constraints.
Remove any elements from the list that are invalid. */
tree
c_finish_omp_clauses (tree clauses)
{
bitmap_head generic_head, firstprivate_head, lastprivate_head;
tree c, t, *pc = &clauses;
const char *name;
bitmap_obstack_initialize (NULL);
bitmap_initialize (&generic_head, &bitmap_default_obstack);
bitmap_initialize (&firstprivate_head, &bitmap_default_obstack);
bitmap_initialize (&lastprivate_head, &bitmap_default_obstack);
for (pc = &clauses, c = clauses; c ; c = *pc)
{
bool remove = false;
bool need_complete = false;
bool need_implicitly_determined = false;
switch (OMP_CLAUSE_CODE (c))
{
case OMP_CLAUSE_SHARED:
name = "shared";
need_implicitly_determined = true;
goto check_dup_generic;
case OMP_CLAUSE_PRIVATE:
name = "private";
need_complete = true;
need_implicitly_determined = true;
goto check_dup_generic;
case OMP_CLAUSE_REDUCTION:
name = "reduction";
need_implicitly_determined = true;
t = OMP_CLAUSE_DECL (c);
if (AGGREGATE_TYPE_P (TREE_TYPE (t))
|| POINTER_TYPE_P (TREE_TYPE (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE has invalid type for %<reduction%>", t);
remove = true;
}
else if (FLOAT_TYPE_P (TREE_TYPE (t)))
{
enum tree_code r_code = OMP_CLAUSE_REDUCTION_CODE (c);
const char *r_name = NULL;
switch (r_code)
{
case PLUS_EXPR:
case MULT_EXPR:
case MINUS_EXPR:
case MIN_EXPR:
case MAX_EXPR:
break;
case BIT_AND_EXPR:
r_name = "&";
break;
case BIT_XOR_EXPR:
r_name = "^";
break;
case BIT_IOR_EXPR:
r_name = "|";
break;
case TRUTH_ANDIF_EXPR:
r_name = "&&";
break;
case TRUTH_ORIF_EXPR:
r_name = "||";
break;
default:
gcc_unreachable ();
}
if (r_name)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE has invalid type for %<reduction(%s)%>",
t, r_name);
remove = true;
}
}
goto check_dup_generic;
case OMP_CLAUSE_COPYPRIVATE:
name = "copyprivate";
goto check_dup_generic;
case OMP_CLAUSE_COPYIN:
name = "copyin";
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) != VAR_DECL || !DECL_THREAD_LOCAL_P (t))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE must be %<threadprivate%> for %<copyin%>", t);
remove = true;
}
goto check_dup_generic;
check_dup_generic:
t = OMP_CLAUSE_DECL (c);
if (TREE_CODE (t) != VAR_DECL && TREE_CODE (t) != PARM_DECL)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in clause %qs", t, name);
remove = true;
}
else if (bitmap_bit_p (&generic_head, DECL_UID (t))
|| bitmap_bit_p (&firstprivate_head, DECL_UID (t))
|| bitmap_bit_p (&lastprivate_head, DECL_UID (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE appears more than once in data clauses", t);
remove = true;
}
else
bitmap_set_bit (&generic_head, DECL_UID (t));
break;
case OMP_CLAUSE_FIRSTPRIVATE:
name = "firstprivate";
t = OMP_CLAUSE_DECL (c);
need_complete = true;
need_implicitly_determined = true;
if (TREE_CODE (t) != VAR_DECL && TREE_CODE (t) != PARM_DECL)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in clause %<firstprivate%>", t);
remove = true;
}
else if (bitmap_bit_p (&generic_head, DECL_UID (t))
|| bitmap_bit_p (&firstprivate_head, DECL_UID (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE appears more than once in data clauses", t);
remove = true;
}
else
bitmap_set_bit (&firstprivate_head, DECL_UID (t));
break;
case OMP_CLAUSE_LASTPRIVATE:
name = "lastprivate";
t = OMP_CLAUSE_DECL (c);
need_complete = true;
need_implicitly_determined = true;
if (TREE_CODE (t) != VAR_DECL && TREE_CODE (t) != PARM_DECL)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is not a variable in clause %<lastprivate%>", t);
remove = true;
}
else if (bitmap_bit_p (&generic_head, DECL_UID (t))
|| bitmap_bit_p (&lastprivate_head, DECL_UID (t)))
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE appears more than once in data clauses", t);
remove = true;
}
else
bitmap_set_bit (&lastprivate_head, DECL_UID (t));
break;
case OMP_CLAUSE_IF:
case OMP_CLAUSE_NUM_THREADS:
case OMP_CLAUSE_SCHEDULE:
case OMP_CLAUSE_NOWAIT:
case OMP_CLAUSE_ORDERED:
case OMP_CLAUSE_DEFAULT:
case OMP_CLAUSE_UNTIED:
case OMP_CLAUSE_COLLAPSE:
case OMP_CLAUSE_FINAL:
case OMP_CLAUSE_MERGEABLE:
pc = &OMP_CLAUSE_CHAIN (c);
continue;
default:
gcc_unreachable ();
}
if (!remove)
{
t = OMP_CLAUSE_DECL (c);
if (need_complete)
{
t = require_complete_type (t);
if (t == error_mark_node)
remove = true;
}
if (need_implicitly_determined)
{
const char *share_name = NULL;
if (TREE_CODE (t) == VAR_DECL && DECL_THREAD_LOCAL_P (t))
share_name = "threadprivate";
else switch (c_omp_predetermined_sharing (t))
{
case OMP_CLAUSE_DEFAULT_UNSPECIFIED:
break;
case OMP_CLAUSE_DEFAULT_SHARED:
/* const vars may be specified in firstprivate clause. */
if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
&& TREE_READONLY (t))
break;
share_name = "shared";
break;
case OMP_CLAUSE_DEFAULT_PRIVATE:
share_name = "private";
break;
default:
gcc_unreachable ();
}
if (share_name)
{
error_at (OMP_CLAUSE_LOCATION (c),
"%qE is predetermined %qs for %qs",
t, share_name, name);
remove = true;
}
}
}
if (remove)
*pc = OMP_CLAUSE_CHAIN (c);
else
pc = &OMP_CLAUSE_CHAIN (c);
}
bitmap_obstack_release (NULL);
return clauses;
}
/* Create a transaction node. */
tree
c_finish_transaction (location_t loc, tree block, int flags)
{
tree stmt = build_stmt (loc, TRANSACTION_EXPR, block);
if (flags & TM_STMT_ATTR_OUTER)
TRANSACTION_EXPR_OUTER (stmt) = 1;
if (flags & TM_STMT_ATTR_RELAXED)
TRANSACTION_EXPR_RELAXED (stmt) = 1;
return add_stmt (stmt);
}
/* Make a variant type in the proper way for C/C++, propagating qualifiers
down to the element type of an array. */
tree
c_build_qualified_type (tree type, int type_quals)
{
if (type == error_mark_node)
return type;
if (TREE_CODE (type) == ARRAY_TYPE)
{
tree t;
tree element_type = c_build_qualified_type (TREE_TYPE (type),
type_quals);
/* See if we already have an identically qualified type. */
for (t = TYPE_MAIN_VARIANT (type); t; t = TYPE_NEXT_VARIANT (t))
{
if (TYPE_QUALS (strip_array_types (t)) == type_quals
&& TYPE_NAME (t) == TYPE_NAME (type)
&& TYPE_CONTEXT (t) == TYPE_CONTEXT (type)
&& attribute_list_equal (TYPE_ATTRIBUTES (t),
TYPE_ATTRIBUTES (type)))
break;
}
if (!t)
{
tree domain = TYPE_DOMAIN (type);
t = build_variant_type_copy (type);
TREE_TYPE (t) = element_type;
if (TYPE_STRUCTURAL_EQUALITY_P (element_type)
|| (domain && TYPE_STRUCTURAL_EQUALITY_P (domain)))
SET_TYPE_STRUCTURAL_EQUALITY (t);
else if (TYPE_CANONICAL (element_type) != element_type
|| (domain && TYPE_CANONICAL (domain) != domain))
{
tree unqualified_canon
= build_array_type (TYPE_CANONICAL (element_type),
domain? TYPE_CANONICAL (domain)
: NULL_TREE);
TYPE_CANONICAL (t)
= c_build_qualified_type (unqualified_canon, type_quals);
}
else
TYPE_CANONICAL (t) = t;
}
return t;
}
/* A restrict-qualified pointer type must be a pointer to object or
incomplete type. Note that the use of POINTER_TYPE_P also allows
REFERENCE_TYPEs, which is appropriate for C++. */
if ((type_quals & TYPE_QUAL_RESTRICT)
&& (!POINTER_TYPE_P (type)
|| !C_TYPE_OBJECT_OR_INCOMPLETE_P (TREE_TYPE (type))))
{
error ("invalid use of %<restrict%>");
type_quals &= ~TYPE_QUAL_RESTRICT;
}
return build_qualified_type (type, type_quals);
}
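/* Illustration only (not part of GCC): the qualifier propagation above is
   what makes a qualified array behave as an array of qualified elements at
   the source level.  For example, given
       typedef int A[5];
       const A x;
   the type of `x[0]` is `const int`, because applying `const` to `A` is
   implemented by qualifying the element type rather than the array type
   itself.  */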
/* Build a VA_ARG_EXPR for the C parser. */
tree
c_build_va_arg (location_t loc, tree expr, tree type)
{
if (warn_cxx_compat && TREE_CODE (type) == ENUMERAL_TYPE)
warning_at (loc, OPT_Wc___compat,
"C++ requires promoted type, not enum type, in %<va_arg%>");
return build_va_arg (loc, expr, type);
}
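/* Illustration only (not part of GCC): the -Wc++-compat warning above is
   triggered by code such as
       enum color { RED, GREEN };
       enum color c = va_arg (ap, enum color);
   because in C++ the corresponding argument is promoted (typically to int)
   when passed through "...", so C++ expects the promoted type in va_arg.  */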
|
ptriller/dcpu-gcc
|
gcc/c-typeck.c
|
C
|
gpl-2.0
| 340,349
|
# Portions Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2.
# Copyright Mercurial Contributors
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
from typing import TypeVar, Callable, List, Tuple, Optional
from . import mdiff
from .thirdparty import attr
F = TypeVar("F")
L = TypeVar("L")
def annotate(
base: F,
parents: Callable[[F], List[F]],
decorate: Callable[[F], Tuple[List[L], bytes]],
diffopts: mdiff.diffopts,
skip: Optional[Callable[[F], bool]] = None,
) -> Tuple[List[L], bytes]:
"""annotate algorithm
base: starting point, usually a fctx.
parents: get parents from F.
decorate: get (lines, text) from F.
Return (lines, text) for 'base'.
"""
# This algorithm would prefer to be recursive, but Python is a
# bit recursion-hostile. Instead we do an iterative
# depth-first search.
# 1st DFS pre-calculates pcache and needed
visit = [base]
pcache = {}
needed = {base: 1}
while visit:
f = visit.pop()
if f in pcache:
continue
pl = parents(f)
pcache[f] = pl
for p in pl:
needed[p] = needed.get(p, 0) + 1
if p not in pcache:
visit.append(p)
# 2nd DFS does the actual annotate
visit[:] = [base]
hist = {}
while visit:
f = visit[-1]
if f in hist:
visit.pop()
continue
ready = True
pl = pcache[f]
for p in pl:
if p not in hist:
ready = False
visit.append(p)
if ready:
visit.pop()
curr = decorate(f)
skipchild = False
if skip is not None:
skipchild = skip(f)
curr = _annotatepair([hist[p] for p in pl], f, curr, skipchild, diffopts)
for p in pl:
if needed[p] == 1:
del hist[p]
del needed[p]
else:
needed[p] -= 1
hist[f] = curr
del pcache[f]
return hist[base]
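# Usage sketch (illustrative only, not part of Mercurial): for a tiny linear
# history the callbacks might be wired up as below.  `parentmap` and `texts`
# are hypothetical dicts keyed by revision id; decorate() must return one
# annotation token per line plus the full text.
#
#   parentmap = {"r2": ["r1"], "r1": []}
#   texts = {"r1": b"a\nb\n", "r2": b"a\nb\nc\n"}
#   parents = lambda f: parentmap[f]
#   decorate = lambda f: ([(f, i) for i in range(texts[f].count(b"\n"))], texts[f])
#   lines, text = annotate("r2", parents, decorate, mdiff.diffopts())
#   # lines[0] and lines[1] are attributed to "r1", lines[2] to "r2"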
def _annotatepair(parents, childfctx, child, skipchild, diffopts):
r"""
Given parent and child fctxes and annotate data for parents, for all lines
in either parent that match the child, annotate the child with the parent's
data.
Additionally, if `skipchild` is True, replace all other lines with parent
annotate data as well such that child is never blamed for any lines.
See test-annotate.py for unit tests.
"""
pblocks = [
(parent, mdiff.allblocks(parent[1], child[1], opts=diffopts))
for parent in parents
]
if skipchild:
# Need to iterate over the blocks twice -- make it a list
pblocks = [(p, list(blocks)) for (p, blocks) in pblocks]
# Mercurial currently prefers p2 over p1 for annotate.
# TODO: change this?
for parent, blocks in pblocks:
for (a1, a2, b1, b2), t in blocks:
# Changed blocks ('!') or blocks made only of blank lines ('~')
# belong to the child.
if t == "=":
child[0][b1:b2] = parent[0][a1:a2]
if skipchild:
# Now try and match up anything that couldn't be matched,
# Reversing pblocks maintains bias towards p2, matching above
# behavior.
pblocks.reverse()
# The heuristics are:
# * Work on blocks of changed lines (effectively diff hunks with -U0).
# This could potentially be smarter but works well enough.
# * For a non-matching section, do a best-effort fit. Match lines in
# diff hunks 1:1, dropping lines as necessary.
# * Repeat the last line as a last resort.
# First, replace as much as possible without repeating the last line.
remaining = [(parent, []) for parent, _blocks in pblocks]
for idx, (parent, blocks) in enumerate(pblocks):
for (a1, a2, b1, b2), _t in blocks:
if a2 - a1 >= b2 - b1:
for bk in range(b1, b2):
if child[0][bk].fctx == childfctx:
ak = min(a1 + (bk - b1), a2 - 1)
child[0][bk] = attr.evolve(parent[0][ak], skip=True)
else:
remaining[idx][1].append((a1, a2, b1, b2))
# Then, look at anything left, which might involve repeating the last
# line.
for parent, blocks in remaining:
for a1, a2, b1, b2 in blocks:
for bk in range(b1, b2):
if child[0][bk].fctx == childfctx:
ak = min(a1 + (bk - b1), a2 - 1)
child[0][bk] = attr.evolve(parent[0][ak], skip=True)
return child
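# Worked example for the skipchild fallback above (illustrative only): suppose
# a changed block maps parent lines [10, 11) onto child lines [5, 8).  The
# parent side is shorter (1 line vs. 3), so the first pass defers the block to
# `remaining`; the second pass then assigns
#     bk=5 -> ak=10, bk=6 -> ak=min(11, 10)=10, bk=7 -> ak=10
# i.e. every line the child would otherwise be blamed for inherits the
# annotation of parent line 10, marked with skip=True.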
|
facebookexperimental/eden
|
eden/scm/edenscm/mercurial/annotate.py
|
Python
|
gpl-2.0
| 4,963
|
/*
* To change this license header, choose License Headers in Project Properties.
* To change this template file, choose Tools | Templates
* and open the template in the editor.
*/
package controladora.Movimiento;
import Util.Utils;
import bo.DocumentoBO;
import bo.MovimientoBO;
import bo.TipoMovimientoBO;
import dto.MovimientoDTO;
import dto.TipomovimientoDTO;
import java.util.ArrayList;
import java.util.List;
import javax.annotation.PostConstruct;
import javax.ejb.EJB;
import javax.faces.bean.ManagedBean;
import javax.faces.bean.SessionScoped;
import javax.faces.model.SelectItem;
import org.primefaces.context.RequestContext;
@ManagedBean
@SessionScoped
public class MovimientoMB {
@EJB
private MovimientoBO movimientoBO = new MovimientoBO();
@EJB
private TipoMovimientoBO tipoMovimientoBO = new TipoMovimientoBO();
@EJB
private DocumentoBO documentoBO = new DocumentoBO();
private SessionBeanMovimiento sessionBeanMovimiento = new SessionBeanMovimiento();
Utils ut = new Utils();
@PostConstruct
public void init(){
getSessionBeanMovimiento().setListaMovimiento(movimientoBO.getAllMovimiento());
getSessionBeanMovimiento().setListaTipoMovimiento(tipoMovimientoBO.getAllTipoMovimiento());
getSessionBeanMovimiento().setListaEstados(this.llenarEstados());
getSessionBeanMovimiento().setListaDocumento(documentoBO.getAllDocumentos());
}
public void selectRowTable(){
}
public void verItems(){
MovimientoDTO mov = new MovimientoDTO();
mov.setIdmovimiento(getSessionBeanMovimiento().getMovimientoSeleccionado().getIdmovimiento());
}
public void abrirEditMov(){
}
public ArrayList llenarEstados() {
ArrayList estados = new ArrayList();
estados.add(new SelectItem(0,"Inactivo"));
estados.add(new SelectItem(1,"Activo"));
estados.add(new SelectItem(2,"Usado"));
estados.add(new SelectItem(3,"Agotado"));
estados.add(new SelectItem(4,"Caducado"));
estados.add(new SelectItem(5,"Malogrado"));
return estados;
}
/**
* @return the sessionBeanMovimiento
*/
public SessionBeanMovimiento getSessionBeanMovimiento() {
return sessionBeanMovimiento;
}
/**
* @param sessionBeanMovimiento the sessionBeanMovimiento to set
*/
public void setSessionBeanMovimiento(SessionBeanMovimiento sessionBeanMovimiento) {
this.sessionBeanMovimiento = sessionBeanMovimiento;
}
}
|
jorgeevj/SICOTEC
|
sicotec-war/src/java/controladora/Movimiento/MovimientoMB.java
|
Java
|
gpl-2.0
| 2,565
|
/*
* Hotkey control
*
* Copyright 1998, 1999 Eric Kohl
* Copyright 2002 Gyorgy 'Nog' Jeney
* Copyright 2004 Robert Shearman
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA
*
* This code was audited for completeness against the documented features
* of Comctl32.dll version 6.0 on Sep. 21, 2004, by Robert Shearman.
*
* Unless otherwise noted, we believe this code to be complete, as per
* the specification mentioned above.
* If you discover missing features or bugs please note them below.
*
*/
#include "comctl32.h"
WINE_DEFAULT_DEBUG_CHANNEL(hotkey);
typedef struct tagHOTKEY_INFO
{
HWND hwndSelf;
HWND hwndNotify;
HFONT hFont;
BOOL bFocus;
INT nHeight;
WORD HotKey;
WORD InvComb;
WORD InvMod;
BYTE CurrMod;
INT CaretPos;
DWORD ScanCode;
WCHAR strNone[15]; /* hope it's long enough ... */
} HOTKEY_INFO;
static const WCHAR HOTKEY_plussep[] = { ' ', '+', ' ' };
static LRESULT HOTKEY_SetFont (HOTKEY_INFO *infoPtr, HFONT hFont, BOOL redraw);
#define IsOnlySet(flags) (infoPtr->CurrMod == (flags))
static BOOL
HOTKEY_IsCombInv(const HOTKEY_INFO *infoPtr)
{
TRACE("(infoPtr=%p)\n", infoPtr);
if((infoPtr->InvComb & HKCOMB_NONE) && !infoPtr->CurrMod)
return TRUE;
if((infoPtr->InvComb & HKCOMB_S) && IsOnlySet(HOTKEYF_SHIFT))
return TRUE;
if((infoPtr->InvComb & HKCOMB_C) && IsOnlySet(HOTKEYF_CONTROL))
return TRUE;
if((infoPtr->InvComb & HKCOMB_A) && IsOnlySet(HOTKEYF_ALT))
return TRUE;
if((infoPtr->InvComb & HKCOMB_SC) &&
IsOnlySet(HOTKEYF_SHIFT | HOTKEYF_CONTROL))
return TRUE;
if((infoPtr->InvComb & HKCOMB_SA) && IsOnlySet(HOTKEYF_SHIFT | HOTKEYF_ALT))
return TRUE;
if((infoPtr->InvComb & HKCOMB_CA) &&
IsOnlySet(HOTKEYF_CONTROL | HOTKEYF_ALT))
return TRUE;
if((infoPtr->InvComb & HKCOMB_SCA) &&
IsOnlySet(HOTKEYF_SHIFT | HOTKEYF_CONTROL | HOTKEYF_ALT))
return TRUE;
TRACE("() Modifiers are valid\n");
return FALSE;
}
#undef IsOnlySet
static void
HOTKEY_DrawHotKey(HOTKEY_INFO *infoPtr, HDC hdc, LPCWSTR KeyName, WORD NameLen)
{
SIZE TextSize;
INT nXStart, nYStart;
COLORREF clrOldText, clrOldBk;
HFONT hFontOld;
/* Make a gap from the frame */
nXStart = GetSystemMetrics(SM_CXBORDER);
nYStart = GetSystemMetrics(SM_CYBORDER);
hFontOld = SelectObject(hdc, infoPtr->hFont);
if (GetWindowLongW(infoPtr->hwndSelf, GWL_STYLE) & WS_DISABLED)
{
clrOldText = SetTextColor(hdc, comctl32_color.clrGrayText);
clrOldBk = SetBkColor(hdc, comctl32_color.clrBtnFace);
}
else
{
clrOldText = SetTextColor(hdc, comctl32_color.clrWindowText);
clrOldBk = SetBkColor(hdc, comctl32_color.clrWindow);
}
TextOutW(hdc, nXStart, nYStart, KeyName, NameLen);
/* Get the text width for the caret */
GetTextExtentPoint32W(hdc, KeyName, NameLen, &TextSize);
infoPtr->CaretPos = nXStart + TextSize.cx;
SetBkColor(hdc, clrOldBk);
SetTextColor(hdc, clrOldText);
SelectObject(hdc, hFontOld);
/* position the caret */
SetCaretPos(infoPtr->CaretPos, nYStart);
}
/* Draw the names of the keys in the control */
static void
HOTKEY_Refresh(HOTKEY_INFO *infoPtr, HDC hdc)
{
WCHAR KeyName[64];
WORD NameLen = 0;
BYTE Modifier;
TRACE("(infoPtr=%p hdc=%p)\n", infoPtr, hdc);
if(!infoPtr->CurrMod && !infoPtr->HotKey) {
HOTKEY_DrawHotKey (infoPtr, hdc, infoPtr->strNone, lstrlenW(infoPtr->strNone));
return;
}
if(infoPtr->HotKey)
Modifier = HIBYTE(infoPtr->HotKey);
else if(HOTKEY_IsCombInv(infoPtr))
Modifier = infoPtr->InvMod;
else
Modifier = infoPtr->CurrMod;
if(Modifier & HOTKEYF_CONTROL) {
GetKeyNameTextW(MAKELPARAM(0, MapVirtualKeyW(VK_CONTROL, 0)),
KeyName, 64);
NameLen = lstrlenW(KeyName);
memcpy(&KeyName[NameLen], HOTKEY_plussep, sizeof(HOTKEY_plussep));
NameLen += 3;
}
if(Modifier & HOTKEYF_SHIFT) {
GetKeyNameTextW(MAKELPARAM(0, MapVirtualKeyW(VK_SHIFT, 0)),
&KeyName[NameLen], 64 - NameLen);
NameLen = lstrlenW(KeyName);
memcpy(&KeyName[NameLen], HOTKEY_plussep, sizeof(HOTKEY_plussep));
NameLen += 3;
}
if(Modifier & HOTKEYF_ALT) {
GetKeyNameTextW(MAKELPARAM(0, MapVirtualKeyW(VK_MENU, 0)),
&KeyName[NameLen], 64 - NameLen);
NameLen = lstrlenW(KeyName);
memcpy(&KeyName[NameLen], HOTKEY_plussep, sizeof(HOTKEY_plussep));
NameLen += 3;
}
if(infoPtr->HotKey) {
GetKeyNameTextW(infoPtr->ScanCode, &KeyName[NameLen], 64 - NameLen);
NameLen = lstrlenW(KeyName);
}
else
KeyName[NameLen] = 0;
HOTKEY_DrawHotKey (infoPtr, hdc, KeyName, NameLen);
}
static void
HOTKEY_Paint(HOTKEY_INFO *infoPtr, HDC hdc)
{
if (hdc)
HOTKEY_Refresh(infoPtr, hdc);
else {
PAINTSTRUCT ps;
hdc = BeginPaint (infoPtr->hwndSelf, &ps);
HOTKEY_Refresh (infoPtr, hdc);
EndPaint (infoPtr->hwndSelf, &ps);
}
}
static LRESULT
HOTKEY_GetHotKey(const HOTKEY_INFO *infoPtr)
{
TRACE("(infoPtr=%p) Modifiers: 0x%x, Virtual Key: %d\n", infoPtr,
HIBYTE(infoPtr->HotKey), LOBYTE(infoPtr->HotKey));
return (LRESULT)infoPtr->HotKey;
}
static void
HOTKEY_SetHotKey(HOTKEY_INFO *infoPtr, WORD hotKey)
{
infoPtr->HotKey = hotKey;
infoPtr->ScanCode =
MAKELPARAM(0, MapVirtualKeyW(LOBYTE(infoPtr->HotKey), 0));
TRACE("(infoPtr=%p hotKey=%x) Modifiers: 0x%x, Virtual Key: %d\n", infoPtr,
hotKey, HIBYTE(infoPtr->HotKey), LOBYTE(infoPtr->HotKey));
InvalidateRect(infoPtr->hwndSelf, NULL, TRUE);
}
static void
HOTKEY_SetRules(HOTKEY_INFO *infoPtr, WORD invComb, WORD invMod)
{
infoPtr->InvComb = invComb;
infoPtr->InvMod = invMod;
TRACE("(infoPtr=%p) Invalid Modifers: 0x%x, If Invalid: 0x%x\n", infoPtr,
infoPtr->InvComb, infoPtr->InvMod);
}
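/* Usage sketch (not part of the control; the values are arbitrary): a dialog
   would typically configure these rules with the HKM_SETRULES message, e.g.
       SendMessageW(hwndHotkey, HKM_SETRULES,
                    HKCOMB_NONE | HKCOMB_S,
                    MAKELPARAM(HOTKEYF_CONTROL | HOTKEYF_ALT, 0));
   which forbids "no modifier" and "Shift only" and substitutes Ctrl+Alt,
   i.e. the two words end up in infoPtr->InvComb and infoPtr->InvMod above.  */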
static LRESULT
HOTKEY_Create (HOTKEY_INFO *infoPtr, const CREATESTRUCTW *lpcs)
{
infoPtr->hwndNotify = lpcs->hwndParent;
HOTKEY_SetFont(infoPtr, GetStockObject(SYSTEM_FONT), 0);
return 0;
}
static LRESULT
HOTKEY_Destroy (HOTKEY_INFO *infoPtr)
{
/* free hotkey info data */
SetWindowLongPtrW (infoPtr->hwndSelf, 0, 0);
Free (infoPtr);
return 0;
}
static LRESULT
HOTKEY_EraseBackground (const HOTKEY_INFO *infoPtr, HDC hdc)
{
HBRUSH hBrush, hSolidBrush = NULL;
RECT rc;
if (GetWindowLongW(infoPtr->hwndSelf, GWL_STYLE) & WS_DISABLED)
hBrush = hSolidBrush = CreateSolidBrush(comctl32_color.clrBtnFace);
else
{
hBrush = (HBRUSH)SendMessageW(infoPtr->hwndNotify, WM_CTLCOLOREDIT,
(WPARAM)hdc, (LPARAM)infoPtr->hwndSelf);
if (!hBrush)
hBrush = hSolidBrush = CreateSolidBrush(comctl32_color.clrWindow);
}
GetClientRect (infoPtr->hwndSelf, &rc);
FillRect (hdc, &rc, hBrush);
if (hSolidBrush)
DeleteObject(hSolidBrush);
return -1;
}
static inline LRESULT
HOTKEY_GetFont (const HOTKEY_INFO *infoPtr)
{
return (LRESULT)infoPtr->hFont;
}
static LRESULT
HOTKEY_KeyDown (HOTKEY_INFO *infoPtr, DWORD key, DWORD flags)
{
WORD wOldHotKey;
BYTE bOldMod;
if (GetWindowLongW(infoPtr->hwndSelf, GWL_STYLE) & WS_DISABLED)
return 0;
TRACE("() Key: %d\n", key);
wOldHotKey = infoPtr->HotKey;
bOldMod = infoPtr->CurrMod;
/* If any key is Pressed, we have to reset the hotkey in the control */
infoPtr->HotKey = 0;
switch (key)
{
case VK_RETURN:
case VK_TAB:
case VK_SPACE:
case VK_DELETE:
case VK_ESCAPE:
case VK_BACK:
InvalidateRect(infoPtr->hwndSelf, NULL, TRUE);
return DefWindowProcW (infoPtr->hwndSelf, WM_KEYDOWN, key, flags);
case VK_SHIFT:
infoPtr->CurrMod |= HOTKEYF_SHIFT;
break;
case VK_CONTROL:
infoPtr->CurrMod |= HOTKEYF_CONTROL;
break;
case VK_MENU:
infoPtr->CurrMod |= HOTKEYF_ALT;
break;
default:
if(HOTKEY_IsCombInv(infoPtr))
infoPtr->HotKey = MAKEWORD(key, infoPtr->InvMod);
else
infoPtr->HotKey = MAKEWORD(key, infoPtr->CurrMod);
infoPtr->ScanCode = flags;
break;
}
if ((wOldHotKey != infoPtr->HotKey) || (bOldMod != infoPtr->CurrMod))
{
InvalidateRect(infoPtr->hwndSelf, NULL, TRUE);
/* send EN_CHANGE notification */
SendMessageW(infoPtr->hwndNotify, WM_COMMAND,
MAKEWPARAM(GetDlgCtrlID(infoPtr->hwndSelf), EN_CHANGE),
(LPARAM)infoPtr->hwndSelf);
}
return 0;
}
static LRESULT
HOTKEY_KeyUp (HOTKEY_INFO *infoPtr, DWORD key)
{
BYTE bOldMod;
if (GetWindowLongW(infoPtr->hwndSelf, GWL_STYLE) & WS_DISABLED)
return 0;
TRACE("() Key: %d\n", key);
bOldMod = infoPtr->CurrMod;
switch (key)
{
case VK_SHIFT:
infoPtr->CurrMod &= ~HOTKEYF_SHIFT;
break;
case VK_CONTROL:
infoPtr->CurrMod &= ~HOTKEYF_CONTROL;
break;
case VK_MENU:
infoPtr->CurrMod &= ~HOTKEYF_ALT;
break;
default:
return 1;
}
if (bOldMod != infoPtr->CurrMod)
{
InvalidateRect(infoPtr->hwndSelf, NULL, TRUE);
/* send EN_CHANGE notification */
SendMessageW(infoPtr->hwndNotify, WM_COMMAND,
MAKEWPARAM(GetDlgCtrlID(infoPtr->hwndSelf), EN_CHANGE),
(LPARAM)infoPtr->hwndSelf);
}
return 0;
}
static LRESULT
HOTKEY_KillFocus (HOTKEY_INFO *infoPtr)
{
infoPtr->bFocus = FALSE;
DestroyCaret ();
return 0;
}
static LRESULT
HOTKEY_LButtonDown (const HOTKEY_INFO *infoPtr)
{
if (!(GetWindowLongW(infoPtr->hwndSelf, GWL_STYLE) & WS_DISABLED))
SetFocus (infoPtr->hwndSelf);
return 0;
}
static inline LRESULT
HOTKEY_NCCreate (HWND hwnd, const CREATESTRUCTW *lpcs)
{
HOTKEY_INFO *infoPtr;
DWORD dwExStyle = GetWindowLongW (hwnd, GWL_EXSTYLE);
SetWindowLongW (hwnd, GWL_EXSTYLE,
dwExStyle | WS_EX_CLIENTEDGE);
/* allocate memory for info structure */
infoPtr = Alloc (sizeof(HOTKEY_INFO));
SetWindowLongPtrW(hwnd, 0, (DWORD_PTR)infoPtr);
/* initialize info structure */
infoPtr->HotKey = infoPtr->InvComb = infoPtr->InvMod = infoPtr->CurrMod = 0;
infoPtr->CaretPos = GetSystemMetrics(SM_CXBORDER);
infoPtr->hwndSelf = hwnd;
LoadStringW(COMCTL32_hModule, HKY_NONE, infoPtr->strNone, 15);
return DefWindowProcW (infoPtr->hwndSelf, WM_NCCREATE, 0, (LPARAM)lpcs);
}
static LRESULT
HOTKEY_SetFocus (HOTKEY_INFO *infoPtr)
{
infoPtr->bFocus = TRUE;
CreateCaret (infoPtr->hwndSelf, NULL, 1, infoPtr->nHeight);
SetCaretPos (infoPtr->CaretPos, GetSystemMetrics(SM_CYBORDER));
ShowCaret (infoPtr->hwndSelf);
return 0;
}
static LRESULT
HOTKEY_SetFont (HOTKEY_INFO *infoPtr, HFONT hFont, BOOL redraw)
{
TEXTMETRICW tm;
HDC hdc;
HFONT hOldFont = 0;
infoPtr->hFont = hFont;
hdc = GetDC (infoPtr->hwndSelf);
if (infoPtr->hFont)
hOldFont = SelectObject (hdc, infoPtr->hFont);
GetTextMetricsW (hdc, &tm);
infoPtr->nHeight = tm.tmHeight;
if (infoPtr->hFont)
SelectObject (hdc, hOldFont);
ReleaseDC (infoPtr->hwndSelf, hdc);
if (redraw)
InvalidateRect (infoPtr->hwndSelf, NULL, TRUE);
return 0;
}
static LRESULT WINAPI
HOTKEY_WindowProc (HWND hwnd, UINT uMsg, WPARAM wParam, LPARAM lParam)
{
HOTKEY_INFO *infoPtr = (HOTKEY_INFO *)GetWindowLongPtrW (hwnd, 0);
TRACE("hwnd=%p msg=%x wparam=%lx lparam=%lx\n", hwnd, uMsg, wParam, lParam);
if (!infoPtr && (uMsg != WM_NCCREATE))
return DefWindowProcW (hwnd, uMsg, wParam, lParam);
switch (uMsg)
{
case HKM_GETHOTKEY:
return HOTKEY_GetHotKey (infoPtr);
case HKM_SETHOTKEY:
HOTKEY_SetHotKey (infoPtr, (WORD)wParam);
break;
case HKM_SETRULES:
HOTKEY_SetRules (infoPtr, (WORD)wParam, (WORD)lParam);
break;
case WM_CHAR:
case WM_SYSCHAR:
return HOTKEY_KeyDown (infoPtr, MapVirtualKeyW(LOBYTE(HIWORD(lParam)), 1), lParam);
case WM_CREATE:
return HOTKEY_Create (infoPtr, (LPCREATESTRUCTW)lParam);
case WM_DESTROY:
return HOTKEY_Destroy (infoPtr);
case WM_ERASEBKGND:
return HOTKEY_EraseBackground (infoPtr, (HDC)wParam);
case WM_GETDLGCODE:
return DLGC_WANTCHARS | DLGC_WANTARROWS;
case WM_GETFONT:
return HOTKEY_GetFont (infoPtr);
case WM_KEYDOWN:
case WM_SYSKEYDOWN:
return HOTKEY_KeyDown (infoPtr, wParam, lParam);
case WM_KEYUP:
case WM_SYSKEYUP:
return HOTKEY_KeyUp (infoPtr, wParam);
case WM_KILLFOCUS:
return HOTKEY_KillFocus (infoPtr);
case WM_LBUTTONDOWN:
return HOTKEY_LButtonDown (infoPtr);
case WM_NCCREATE:
return HOTKEY_NCCreate (hwnd, (LPCREATESTRUCTW)lParam);
case WM_PRINTCLIENT:
case WM_PAINT:
HOTKEY_Paint(infoPtr, (HDC)wParam);
return 0;
case WM_SETFOCUS:
return HOTKEY_SetFocus (infoPtr);
case WM_SETFONT:
return HOTKEY_SetFont (infoPtr, (HFONT)wParam, LOWORD(lParam));
default:
if ((uMsg >= WM_USER) && (uMsg < WM_APP) && !COMCTL32_IsReflectedMessage(uMsg))
ERR("unknown msg %04x wp=%08lx lp=%08lx\n",
uMsg, wParam, lParam);
return DefWindowProcW (hwnd, uMsg, wParam, lParam);
}
return 0;
}
void
HOTKEY_Register (void)
{
WNDCLASSW wndClass;
ZeroMemory (&wndClass, sizeof(WNDCLASSW));
wndClass.style = CS_GLOBALCLASS;
wndClass.lpfnWndProc = HOTKEY_WindowProc;
wndClass.cbClsExtra = 0;
wndClass.cbWndExtra = sizeof(HOTKEY_INFO *);
wndClass.hCursor = 0;
wndClass.hbrBackground = 0;
wndClass.lpszClassName = HOTKEY_CLASSW;
RegisterClassW (&wndClass);
}
void
HOTKEY_Unregister (void)
{
UnregisterClassW (HOTKEY_CLASSW, NULL);
}
|
sunnyden/reactos
|
dll/win32/comctl32/hotkey.c
|
C
|
gpl-2.0
| 14,333
|
// --------------------------------------------------------------------------
// United Business Technologies
// Copyright (c) 2000 - 2010 All Rights Reserved.
//
// Source in this file is released to the public under the following license:
// --------------------------------------------------------------------------
// This toolkit may be used free of charge for any purpose including corporate
// and academic use. For-profit and non-profit uses are permitted.
//
// This source code and any work derived from this source code must retain
// this copyright at the top of each source file.
// --------------------------------------------------------------------------
#include "GlobalInclude.h"
#include "GString.h"
#include <stdlib.h>
#ifndef _IOS
#include <malloc.h>
#endif
// You must #define new to NEWER_NEW in GlobalInclude.h for this code to be called
//
// char chUniqueOverload is not used; its purpose is to keep this memory overload from
// causing link conflicts with other memory tools, such as the one from MicroQuill or the
// overloaded new within MFC.
//
// This may be used to find memory leaks by entering every alloc in a list or hash and removing it when it is free'd;
// anything left over is technically a memory leak, although not all leaks are bad. Bad leaks grow
// during the course of the application. Good leaks are one-time allocations that never grow; they are never
// released when the application shuts down because there is no point in doing so: the process is ending and its
// memory will be released by the operating system.
//
//
// This was added to find performance errors, such as adding 1Meg into a GString that was never pre-allocated.
// Such an error will cause the GString to alloc many times over and over until it has grown to the needed size.
// Calling GString::PreAlloc(1024000) will be much faster, and this handy overload will help you find such errors.
#ifdef _WIN64
void * operator new( unsigned __int64 n, const char *pzFile, int nLine, char chUniqueOverload )
#elif defined(_IOS)
void * operator new( size_t n, const char *pzFile, int nLine, char chUniqueOverload )
#else
void * operator new( unsigned int n, const char *pzFile, int nLine, char chUniqueOverload )
#endif
{
void *p = malloc(n);
/*
GString strLeakDebug;
strLeakDebug.Format("Alloc % 10d bytes [",n);
strLeakDebug << pzFile << " line " << nLine << "]:" << (unsigned long)p << "\r\n";
strLeakDebug.ToFileAppend("d:\\mem.txt");
*/
return p;
}
void operator delete( void *p, const char *pzFile, int nLine, char chUniqueOverload )
{
/*
GString strLeakDebug("Free ");
strLeakDebug << "[" << pzFile << " line " << pzFile << "]" << (unsigned long)p << "\r\n";
strLeakDebug.ToFileAppend("d:\\mem.txt");
*/
free(p);
}
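// Sketch only (not part of XMLFoundation; all names below are hypothetical):
// one way to turn these overloads into the leak tracker described above is to
// record each allocation with its file and line in a map, erase the entry on
// delete, and dump whatever remains at shutdown, e.g.
//
//   #include <map>
//   #include <mutex>
//   struct GAllocRecord { const char *pzFile; int nLine; size_t nBytes; };
//   static std::map<void*, GAllocRecord> g_mapAllocs;
//   static std::mutex g_lockAllocs;
//   // in operator new, after malloc():
//   //   { std::lock_guard<std::mutex> l(g_lockAllocs);
//   //     g_mapAllocs[p] = GAllocRecord{ pzFile, nLine, n }; }
//   // in operator delete, before free():
//   //   { std::lock_guard<std::mutex> l(g_lockAllocs);
//   //     g_mapAllocs.erase(p); }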
|
ambilight-4-mediaportal/AtmoWin
|
XMLFoundation/Libraries/XMLFoundation/src/Utils/GMemory.cpp
|
C++
|
gpl-2.0
| 2,774
|
/*
* Java Terrain and Stellar System Ports
*
* Copyright (C) 2006 Martin H. Smith based on work by original
* authors.
*
* Released under the terms of the GNU General Public License
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
* MA 02110-1301, USA.
*
* Linking TerraJ statically or dynamically with other modules is making a
* combined work based on TerraJ. Thus, the terms and conditions of the
* GNU General Public License cover the whole combination.
*
* In addition, as a special exception, the copyright holders of TerraJ
* give you permission to combine this program with free software programs
* or libraries that are released under the GNU LGPL and with code included
* in the standard release of JOGL, Java Getopt and FreeMarker under the BSD
* license (or modified versions of such code, with unchanged license) and with
* Apache Commons and Log4J libraries under the Apache license (or modified versions
* of such code. You may copy and distribute such a system following the terms
* of the GNU GPL for TerraJ and the licenses of the other code concerned,
* provided that you include the source code of that other code when and as the
* GNU GPL requires distribution of source code.
*
* Note that people who make modified versions of TerraJ are not obligated to grant
* this special exception for their modified versions; it is their choice whether
* to do so. The GNU General Public License gives permission to release a modified
* version without this exception; this exception also makes it possible to release
* a modified version which carries forward this exception.
*/
/*
* CameraPosDialog.java
*
* Created on 21 January 2006, 13:02
*/
package com.alvermont.terraj.fracplanet.ui;
import com.alvermont.terraj.fracplanet.render.CameraPosition;
import com.alvermont.terraj.fracplanet.render.TriangleMeshViewerDisplay;
import java.awt.event.MouseAdapter;
import java.awt.event.MouseEvent;
import java.util.ArrayList;
import java.util.List;
import javax.swing.DefaultCellEditor;
import javax.swing.DefaultListSelectionModel;
import javax.swing.JButton;
import javax.swing.ListSelectionModel;
import javax.swing.event.ListSelectionEvent;
import javax.swing.event.ListSelectionListener;
/**
* A dialog that manages a list of saved camera positions
*
* @author martin
* @version $Id: CameraPosDialog.java,v 1.10 2006/07/06 06:58:34 martin Exp $
*/
public class CameraPosDialog extends javax.swing.JDialog
{
private List<CameraPositionAdapter> positions =
new ArrayList<CameraPositionAdapter>();
private TriangleMeshViewerDisplay display;
// NETBEANS SWING CODE USE RELAXED CHECKSTYLE SETTINGS
/** A listener class to update the dialog state */
private class MySelectionListener implements ListSelectionListener
{
/** Create a new instance of MySelectionListener */
public MySelectionListener()
{
}
public void valueChanged(ListSelectionEvent e)
{
final DefaultListSelectionModel dlsm =
(DefaultListSelectionModel) cameraTable.getSelectionModel();
final CameraTableModel model =
(CameraTableModel) cameraTable.getModel();
// can always delete or goto if something is selected
deleteButton.setEnabled(!dlsm.isSelectionEmpty());
gotoButton.setEnabled(!dlsm.isSelectionEmpty());
// can only move up / down if not already at top or bottom
if (dlsm.isSelectionEmpty())
{
upButton.setEnabled(false);
downButton.setEnabled(false);
}
else
{
upButton.setEnabled(dlsm.getMinSelectionIndex() > 0);
downButton.setEnabled(
dlsm.getMinSelectionIndex() < (model.getRowCount() - 1));
}
}
}
private class MyMouseListener extends MouseAdapter
{
public MyMouseListener()
{
}
public void mouseClicked(MouseEvent e)
{
super.mouseClicked(e);
// we look for a double click of button 1
if (
(e.getClickCount() == 2) &&
(e.getButton() == MouseEvent.BUTTON1))
{
final int row =
cameraTable.getSelectionModel()
.getMinSelectionIndex();
final CameraPositionAdapter adapt = positions.get(row);
display.setCameraPosition(adapt.getPos());
}
}
}
private class CameraPositionAdapter
{
private CameraPosition pos;
public CameraPositionAdapter(CameraPosition pos)
{
this.pos = pos;
}
public CameraPositionAdapter(CameraPosition pos, int number)
{
this.pos = pos;
this.number = number;
}
public Object[] toObjectArray()
{
final Object[] data = new Object[7];
int i = 0;
data[i] = new Integer(number);
data[++i] = this.pos.getName();
data[++i] = this.pos.getEye();
data[++i] = this.pos.getCentre();
data[++i] = this.pos.getUp();
data[++i] = this.pos.getEyeXRotation();
data[++i] = this.pos.getEyeYRotation();
return data;
}
/**
* Holds value of property number.
*/
private int number;
/**
* Getter for property number.
* @return Value of property number.
*/
public int getNumber()
{
return this.number;
}
/**
* Setter for property number.
* @param number New value of property number.
*/
public void setNumber(int number)
{
this.number = number;
}
/**
* Getter for property pos.
* @return Value of property pos.
*/
public CameraPosition getPos()
{
return this.pos;
}
/**
* Setter for property pos.
* @param pos New value of property pos.
*/
public void setPos(CameraPosition pos)
{
this.pos = pos;
}
}
// NETBEANS SWING CODE USE RELAXED CHECKSTYLE SETTINGS
/**
* Creates new form CameraPosDialog
*
* @param parent The parent object for this form
* @param modal Indicates whether this is a modal dialog
*/
public CameraPosDialog(AbstractTerrainViewerFrame parent, boolean modal)
{
super(parent, modal);
initComponents();
final DefaultListSelectionModel dlsm =
(DefaultListSelectionModel) cameraTable.getSelectionModel();
dlsm.setSelectionMode(ListSelectionModel.SINGLE_SELECTION);
dlsm.addListSelectionListener(new MySelectionListener());
cameraTable.addMouseListener(new MyMouseListener());
((DefaultCellEditor) cameraTable.getDefaultEditor(String.class)).setClickCountToStart(
1);
}
/**
* Get the list of camera positions from this object
*
* @return The list of current camera positions that the user has created
* or edited
*/
public List<CameraPosition> getPositions()
{
final List<CameraPosition> poslist = new ArrayList<CameraPosition>();
final CameraTableModel model = new CameraTableModel();
for (CameraPositionAdapter cpa : this.positions)
{
final CameraPosition pos = cpa.getPos();
// TODO: can't edit name because this is wrong. Fix it.
//pos.setName((String) model.getValueAt(poslist.size(), 1));
poslist.add(pos);
}
return poslist;
}
/**
* Set the list of camera positions
*
* @param poslist A list of camera positions that is to be set as the
* current list in this object
*/
void setPositions(List<CameraPosition> poslist)
{
final CameraTableModel model = new CameraTableModel();
for (CameraPosition pos : poslist)
{
final CameraPositionAdapter cpa = new CameraPositionAdapter(pos);
this.positions.add(cpa);
model.addRow(cpa.toObjectArray());
}
cameraTable.setModel(model);
}
/**
* Set the display object being used by this form
*
* @param display The mesh display object we are associated with
*/
void setDisplay(TriangleMeshViewerDisplay display)
{
this.display = display;
}
/** This method is called from within the constructor to
* initialize the form.
* WARNING: Do NOT modify this code. The content of this method is
* always regenerated by the Form Editor.
*/
// <editor-fold defaultstate="collapsed" desc=" Generated Code ">//GEN-BEGIN:initComponents
private void initComponents()
{
jScrollPane1 = new javax.swing.JScrollPane();
cameraTable = new javax.swing.JTable();
addButton = new javax.swing.JButton();
deleteButton = new javax.swing.JButton();
upButton = new javax.swing.JButton();
downButton = new javax.swing.JButton();
jButton5 = new javax.swing.JButton();
gotoButton = new javax.swing.JButton();
setTitle("Camera Positions");
cameraTable.setModel(new CameraTableModel());
cameraTable.setToolTipText("Table of camera position data");
cameraTable.setAutoResizeMode(
javax.swing.JTable.AUTO_RESIZE_ALL_COLUMNS);
jScrollPane1.setViewportView(cameraTable);
addButton.setText("Add");
addButton.setToolTipText("Add the current camera position to the list");
addButton.addActionListener(
new java.awt.event.ActionListener()
{
public void actionPerformed(java.awt.event.ActionEvent evt)
{
addButtonActionPerformed(evt);
}
});
deleteButton.setText("Delete");
deleteButton.setToolTipText("Delete the selected camera position");
deleteButton.setEnabled(false);
deleteButton.addActionListener(
new java.awt.event.ActionListener()
{
public void actionPerformed(java.awt.event.ActionEvent evt)
{
deleteButtonActionPerformed(evt);
}
});
upButton.setText("Up");
upButton.setToolTipText("Move the selected position up in the list");
upButton.setEnabled(false);
upButton.addActionListener(
new java.awt.event.ActionListener()
{
public void actionPerformed(java.awt.event.ActionEvent evt)
{
upButtonActionPerformed(evt);
}
});
downButton.setText("Down");
downButton.setToolTipText(
"Move the selected position down in the list");
downButton.setEnabled(false);
downButton.addActionListener(
new java.awt.event.ActionListener()
{
public void actionPerformed(java.awt.event.ActionEvent evt)
{
downButtonActionPerformed(evt);
}
});
jButton5.setText("More >>");
jButton5.setToolTipText("Display or hide the detailed information");
jButton5.addActionListener(
new java.awt.event.ActionListener()
{
public void actionPerformed(java.awt.event.ActionEvent evt)
{
jButton5ActionPerformed(evt);
}
});
gotoButton.setText("Goto");
gotoButton.setToolTipText("Move the camera to the selected position");
gotoButton.setEnabled(false);
gotoButton.addActionListener(
new java.awt.event.ActionListener()
{
public void actionPerformed(java.awt.event.ActionEvent evt)
{
gotoButtonActionPerformed(evt);
}
});
org.jdesktop.layout.GroupLayout layout =
new org.jdesktop.layout.GroupLayout(getContentPane());
getContentPane()
.setLayout(layout);
layout.setHorizontalGroup(
layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING).add(
layout.createSequentialGroup().addContainerGap().add(
layout.createParallelGroup(
org.jdesktop.layout.GroupLayout.LEADING).add(
jScrollPane1,
org.jdesktop.layout.GroupLayout.DEFAULT_SIZE, 366,
Short.MAX_VALUE).add(
layout.createSequentialGroup().add(addButton).addPreferredGap(
org.jdesktop.layout.LayoutStyle.RELATED).add(
deleteButton).addPreferredGap(
org.jdesktop.layout.LayoutStyle.RELATED, 128,
Short.MAX_VALUE).add(upButton).addPreferredGap(
org.jdesktop.layout.LayoutStyle.RELATED).add(
downButton)).add(
org.jdesktop.layout.GroupLayout.TRAILING,
layout.createSequentialGroup().add(gotoButton).addPreferredGap(
org.jdesktop.layout.LayoutStyle.RELATED, 232,
Short.MAX_VALUE).add(jButton5))).addContainerGap()));
layout.setVerticalGroup(
layout.createParallelGroup(org.jdesktop.layout.GroupLayout.LEADING).add(
org.jdesktop.layout.GroupLayout.TRAILING,
layout.createSequentialGroup().addContainerGap().add(
jScrollPane1, org.jdesktop.layout.GroupLayout.DEFAULT_SIZE,
363, Short.MAX_VALUE).addPreferredGap(
org.jdesktop.layout.LayoutStyle.RELATED).add(
layout.createParallelGroup(
org.jdesktop.layout.GroupLayout.BASELINE).add(
addButton).add(deleteButton).add(downButton).add(
upButton)).addPreferredGap(
org.jdesktop.layout.LayoutStyle.RELATED).add(
layout.createParallelGroup(
org.jdesktop.layout.GroupLayout.BASELINE).add(jButton5).add(
gotoButton)).addContainerGap()));
java.awt.Dimension screenSize =
java.awt.Toolkit.getDefaultToolkit()
.getScreenSize();
setBounds(
(screenSize.width - 394) / 2, (screenSize.height - 470) / 2, 394,
470);
} // </editor-fold>//GEN-END:initComponents
private void gotoButtonActionPerformed(java.awt.event.ActionEvent evt)//GEN-FIRST:event_gotoButtonActionPerformed
{//GEN-HEADEREND:event_gotoButtonActionPerformed
final int row = cameraTable.getSelectionModel()
.getMinSelectionIndex();
final CameraPositionAdapter adapt = positions.get(row);
display.setCameraPosition(adapt.getPos());
}//GEN-LAST:event_gotoButtonActionPerformed
private void downButtonActionPerformed(java.awt.event.ActionEvent evt)//GEN-FIRST:event_downButtonActionPerformed
{//GEN-HEADEREND:event_downButtonActionPerformed
final CameraTableModel model =
(CameraTableModel) cameraTable.getModel();
final int row = cameraTable.getSelectionModel()
.getMinSelectionIndex();
final CameraPositionAdapter adapt = positions.remove(row);
positions.add(row + 1, adapt);
model.moveRow(row, row + 1);
}//GEN-LAST:event_downButtonActionPerformed
private void upButtonActionPerformed(java.awt.event.ActionEvent evt)//GEN-FIRST:event_upButtonActionPerformed
{//GEN-HEADEREND:event_upButtonActionPerformed
final CameraTableModel model =
(CameraTableModel) cameraTable.getModel();
final int row = cameraTable.getSelectionModel()
.getMinSelectionIndex();
final CameraPositionAdapter adapt = positions.remove(row);
positions.add(row - 1, adapt);
model.moveRow(row, row - 1);
}//GEN-LAST:event_upButtonActionPerformed
private void deleteButtonActionPerformed(java.awt.event.ActionEvent evt)//GEN-FIRST:event_deleteButtonActionPerformed
{//GEN-HEADEREND:event_deleteButtonActionPerformed
final CameraTableModel model =
(CameraTableModel) cameraTable.getModel();
final int row = cameraTable.getSelectionModel()
.getMinSelectionIndex();
model.removeRow(row);
positions.remove(row);
}//GEN-LAST:event_deleteButtonActionPerformed
private void jButton5ActionPerformed(java.awt.event.ActionEvent evt)//GEN-FIRST:event_jButton5ActionPerformed
{//GEN-HEADEREND:event_jButton5ActionPerformed
final CameraTableModel model =
(CameraTableModel) cameraTable.getModel();
final JButton button = (JButton) evt.getSource();
model.changeVisibility(!model.getVisibility());
if (model.getVisibility())
{
button.setText("<< Less");
}
else
{
button.setText("More >>");
}
}//GEN-LAST:event_jButton5ActionPerformed
private void addButtonActionPerformed(java.awt.event.ActionEvent evt)//GEN-FIRST:event_addButtonActionPerformed
{//GEN-HEADEREND:event_addButtonActionPerformed
final Object[] newRow = new Object[5];
final CameraTableModel model =
(CameraTableModel) cameraTable.getModel();
final CameraPosition pos =
new CameraPosition(display.getCameraPosition());
final CameraPositionAdapter adapt = new CameraPositionAdapter(pos);
positions.add(adapt);
model.addRow(adapt.toObjectArray());
}//GEN-LAST:event_addButtonActionPerformed
// Variables declaration - do not modify//GEN-BEGIN:variables
private javax.swing.JButton addButton;
private javax.swing.JTable cameraTable;
private javax.swing.JButton deleteButton;
private javax.swing.JButton downButton;
private javax.swing.JButton gotoButton;
private javax.swing.JButton jButton5;
private javax.swing.JScrollPane jScrollPane1;
private javax.swing.JButton upButton;
// End of variables declaration//GEN-END:variables
}
|
madebyjeffrey/TerraJ
|
src/main/java/com/alvermont/terraj/fracplanet/ui/CameraPosDialog.java
|
Java
|
gpl-2.0
| 19,778
|
//
// BNWorkbenchViewController.h
// BNApp
//
// Created by wujianqiang on 15/1/17.
// Copyright (c) 2015 wujianqiang. All rights reserved.
//
#import <UIKit/UIKit.h>
@interface BNWorkbenchViewController : UITabBarController
@end
|
xwxz/BNApp
|
BNApp/BNApp/Controllers/BNWorkbenchViewController.h
|
C
|
gpl-2.0
| 240
|
<div>
<h2><span translate="tpjpaApp.electronique.detail.title">Electronique</span> {{electronique.id}}</h2>
<div class="table-responsive">
<table class="table table-striped">
<thead>
<tr>
<th translate="entity.detail.field">Field</th>
<th translate="entity.detail.value">Value</th>
</tr>
</thead>
<tbody>
</tbody>
</table>
</div>
<button type="submit"
ui-sref="electronique"
class="btn btn-info">
<span class="glyphicon glyphicon-arrow-left"></span> <span translate="entity.action.back"> Back</span>
</button>
</div>
|
CherifAbdoul/TpSirM1MIAGE_Ierlomenko-Kinfack-Haidara
|
tpJpa/src/main/webapp/scripts/app/entities/electronique/electronique-detail.html
|
HTML
|
gpl-2.0
| 712
|
/*
* Copyright (C) 2010, 2012-2014 ARM Limited. All rights reserved.
*
* This program is free software and is provided to you under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation, and any use by you of this program is subject to the terms of such GNU licence.
*
* A copy of the licence is included with the program, and can also be obtained from Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#ifndef __UMP_KERNEL_LINUX_H__
#define __UMP_KERNEL_LINUX_H__
int ump_kernel_device_initialize(void);
void ump_kernel_device_terminate(void);
#endif /* __UMP_KERNEL_LINUX_H__ */
|
stargo/android_kernel_amazon_ford
|
drivers/misc/mediatek/gpu/mt8127/mali/ump/linux/ump_kernel_linux.h
|
C
|
gpl-2.0
| 685
|
//------------------------------------------------------------------------------
// <auto-generated>
// This code was generated by a tool.
// Runtime Version:4.0.30319.34011
//
// Changes to this file may cause incorrect behavior and will be lost if
// the code is regenerated.
// </auto-generated>
//------------------------------------------------------------------------------
namespace Steganography.Properties {
[global::System.Runtime.CompilerServices.CompilerGeneratedAttribute()]
[global::System.CodeDom.Compiler.GeneratedCodeAttribute("Microsoft.VisualStudio.Editors.SettingsDesigner.SettingsSingleFileGenerator", "11.0.0.0")]
internal sealed partial class Settings : global::System.Configuration.ApplicationSettingsBase {
private static Settings defaultInstance = ((Settings)(global::System.Configuration.ApplicationSettingsBase.Synchronized(new Settings())));
public static Settings Default {
get {
return defaultInstance;
}
}
}
}
|
jimbojetset/Steganography
|
Steganography/Properties/Settings.Designer.cs
|
C#
|
gpl-2.0
| 1,070
|
<?php
/**
* @file
* Drupal site-specific configuration file.
*
* IMPORTANT NOTE:
* This file may have been set to read-only by the Drupal installation program.
* If you make changes to this file, be sure to protect it again after making
* your modifications. Failure to remove write permissions to this file is a
* security risk.
*
* The configuration file to be loaded is based upon the rules below. However
* if the multisite aliasing file named sites/sites.php is present, it will be
* loaded, and the aliases in the array $sites will override the default
* directory rules below. See sites/example.sites.php for more information about
* aliases.
*
* The configuration directory will be discovered by stripping the website's
* hostname from left to right and pathname from right to left. The first
* configuration file found will be used and any others will be ignored. If no
* other configuration file is found then the default configuration file at
* 'sites/default' will be used.
*
* For example, for a fictitious site installed at
* http://www.drupal.org:8080/mysite/test/, the 'settings.php' file is searched
* for in the following directories:
*
* - sites/8080.www.drupal.org.mysite.test
* - sites/www.drupal.org.mysite.test
* - sites/drupal.org.mysite.test
* - sites/org.mysite.test
*
* - sites/8080.www.drupal.org.mysite
* - sites/www.drupal.org.mysite
* - sites/drupal.org.mysite
* - sites/org.mysite
*
* - sites/8080.www.drupal.org
* - sites/www.drupal.org
* - sites/drupal.org
* - sites/org
*
* - sites/default
*
* Note that if you are installing on a non-standard port number, prefix the
* hostname with that number. For example,
* http://www.drupal.org:8080/mysite/test/ could be loaded from
* sites/8080.www.drupal.org.mysite.test/.
*
* @see example.sites.php
* @see conf_path()
*/
/**
* Database settings:
*
* The $databases array specifies the database connection or
* connections that Drupal may use. Drupal is able to connect
* to multiple databases, including multiple types of databases,
* during the same request.
*
* Each database connection is specified as an array of settings,
* similar to the following:
* @code
* array(
* 'driver' => 'mysql',
* 'database' => 'databasename',
* 'username' => 'username',
* 'password' => 'password',
* 'host' => 'localhost',
* 'port' => 3306,
* 'prefix' => 'myprefix_',
* 'collation' => 'utf8_general_ci',
* );
* @endcode
*
* The "driver" property indicates what Drupal database driver the
* connection should use. This is usually the same as the name of the
* database type, such as mysql or sqlite, but not always. The other
* properties will vary depending on the driver. For SQLite, you must
* specify a database file name in a directory that is writable by the
* webserver. For most other drivers, you must specify a
* username, password, host, and database name.
*
* Some database engines support transactions. In order to enable
* transaction support for a given database, set the 'transactions' key
* to TRUE. To disable it, set it to FALSE. Note that the default value
* varies by driver. For MySQL, the default is FALSE since MyISAM tables
* do not support transactions.
*
* For each database, you may optionally specify multiple "target" databases.
* A target database allows Drupal to try to send certain queries to a
* different database if it can but fall back to the default connection if not.
 * That is useful for master/slave replication, as Drupal may try to connect
 * to a slave server when appropriate; if one is not available, it will simply
 * fall back to the single master server.
*
* The general format for the $databases array is as follows:
* @code
* $databases['default']['default'] = $info_array;
* $databases['default']['slave'][] = $info_array;
* $databases['default']['slave'][] = $info_array;
* $databases['extra']['default'] = $info_array;
* @endcode
*
* In the above example, $info_array is an array of settings described above.
* The first line sets a "default" database that has one master database
* (the second level default). The second and third lines create an array
* of potential slave databases. Drupal will select one at random for a given
* request as needed. The fourth line creates a new database with a name of
* "extra".
*
* For a single database configuration, the following is sufficient:
* @code
* $databases['default']['default'] = array(
* 'driver' => 'mysql',
* 'database' => 'databasename',
* 'username' => 'username',
* 'password' => 'password',
* 'host' => 'localhost',
* 'prefix' => 'main_',
* 'collation' => 'utf8_general_ci',
* );
* @endcode
*
* You can optionally set prefixes for some or all database table names
* by using the 'prefix' setting. If a prefix is specified, the table
* name will be prepended with its value. Be sure to use valid database
* characters only, usually alphanumeric and underscore. If no prefixes
* are desired, leave it as an empty string ''.
*
* To have all database names prefixed, set 'prefix' as a string:
* @code
* 'prefix' => 'main_',
* @endcode
* To provide prefixes for specific tables, set 'prefix' as an array.
* The array's keys are the table names and the values are the prefixes.
* The 'default' element is mandatory and holds the prefix for any tables
* not specified elsewhere in the array. Example:
* @code
* 'prefix' => array(
* 'default' => 'main_',
* 'users' => 'shared_',
* 'sessions' => 'shared_',
* 'role' => 'shared_',
* 'authmap' => 'shared_',
* ),
* @endcode
* You can also use a reference to a schema/database as a prefix. This may be
* useful if your Drupal installation exists in a schema that is not the default
* or you want to access several databases from the same code base at the same
* time.
* Example:
* @code
* 'prefix' => array(
* 'default' => 'main.',
* 'users' => 'shared.',
* 'sessions' => 'shared.',
* 'role' => 'shared.',
* 'authmap' => 'shared.',
* );
* @endcode
* NOTE: MySQL and SQLite's definition of a schema is a database.
*
* Advanced users can add or override initial commands to execute when
* connecting to the database server, as well as PDO connection settings. For
* example, to enable MySQL SELECT queries to exceed the max_join_size system
* variable, and to reduce the database connection timeout to 5 seconds:
*
* @code
* $databases['default']['default'] = array(
* 'init_commands' => array(
* 'big_selects' => 'SET SQL_BIG_SELECTS=1',
* ),
* 'pdo' => array(
* PDO::ATTR_TIMEOUT => 5,
* ),
* );
* @endcode
*
* WARNING: These defaults are designed for database portability. Changing them
* may cause unexpected behavior, including potential data loss.
*
* @see DatabaseConnection_mysql::__construct
* @see DatabaseConnection_pgsql::__construct
* @see DatabaseConnection_sqlite::__construct
*
* Database configuration format:
* @code
* $databases['default']['default'] = array(
* 'driver' => 'mysql',
* 'database' => 'databasename',
* 'username' => 'username',
* 'password' => 'password',
* 'host' => 'localhost',
* 'prefix' => '',
* );
* $databases['default']['default'] = array(
* 'driver' => 'pgsql',
* 'database' => 'databasename',
* 'username' => 'username',
* 'password' => 'password',
* 'host' => 'localhost',
* 'prefix' => '',
* );
* $databases['default']['default'] = array(
* 'driver' => 'sqlite',
* 'database' => '/path/to/databasefilename',
* );
* @endcode
*/
$databases = array (
'default' =>
array (
'default' =>
array (
'database' => 'shenzhi',
'username' => 'root',
'password' => '',
'host' => 'localhost',
'port' => '',
'driver' => 'mysql',
'prefix' => '',
),
),
);
/**
* Access control for update.php script.
*
* If you are updating your Drupal installation using the update.php script but
* are not logged in using either an account with the "Administer software
* updates" permission or the site maintenance account (the account that was
* created during installation), you will need to modify the access check
* statement below. Change the FALSE to a TRUE to disable the access check.
* After finishing the upgrade, be sure to open this file again and change the
* TRUE back to a FALSE!
*/
$update_free_access = FALSE;
/**
* Salt for one-time login links and cancel links, form tokens, etc.
*
* This variable will be set to a random value by the installer. All one-time
* login links will be invalidated if the value is changed. Note that if your
* site is deployed on a cluster of web servers, you must ensure that this
* variable has the same value on each server. If this variable is empty, a hash
* of the serialized database credentials will be used as a fallback salt.
*
* For enhanced security, you may set this variable to a value using the
* contents of a file outside your docroot that is never saved together
* with any backups of your Drupal files and database.
*
* Example:
* $drupal_hash_salt = file_get_contents('/home/example/salt.txt');
*
*/
$drupal_hash_salt = 'WKQH1DYn97HB_Fvx2QSYTMYu-LJHY6VsZmqQbW9S0_I';
/**
* Base URL (optional).
*
* If Drupal is generating incorrect URLs on your site, which could
* be in HTML headers (links to CSS and JS files) or visible links on pages
* (such as in menus), uncomment the Base URL statement below (remove the
* leading hash sign) and fill in the absolute URL to your Drupal installation.
*
* You might also want to force users to use a given domain.
* See the .htaccess file for more information.
*
* Examples:
* $base_url = 'http://www.example.com';
* $base_url = 'http://www.example.com:8888';
* $base_url = 'http://www.example.com/drupal';
* $base_url = 'https://www.example.com:8888/drupal';
*
* It is not allowed to have a trailing slash; Drupal will add it
* for you.
*/
# $base_url = 'http://www.example.com'; // NO trailing slash!
/**
* PHP settings:
*
* To see what PHP settings are possible, including whether they can be set at
* runtime (by using ini_set()), read the PHP documentation:
* http://www.php.net/manual/en/ini.list.php
* See drupal_environment_initialize() in includes/bootstrap.inc for required
* runtime settings and the .htaccess file for non-runtime settings. Settings
* defined there should not be duplicated here so as to avoid conflict issues.
*/
/**
* Some distributions of Linux (most notably Debian) ship their PHP
* installations with garbage collection (gc) disabled. Since Drupal depends on
* PHP's garbage collection for clearing sessions, ensure that garbage
* collection occurs by using the most common settings.
*/
ini_set('session.gc_probability', 1);
ini_set('session.gc_divisor', 100);
/**
 * Set session lifetime (in seconds), i.e. the time after the user's last visit
 * before the active session may be deleted by the session garbage collector. When
 * a session is deleted, authenticated users are logged out, and the contents
 * of the user's $_SESSION variable are discarded.
*/
ini_set('session.gc_maxlifetime', 200000);
/**
 * Set session cookie lifetime (in seconds), i.e. the time from when the session
 * is created until the cookie expires, at which point the browser is expected to
 * discard the cookie. The value 0 means "until the browser is closed".
*/
ini_set('session.cookie_lifetime', 2000000);
/**
* If you encounter a situation where users post a large amount of text, and
* the result is stripped out upon viewing but can still be edited, Drupal's
* output filter may not have sufficient memory to process it. If you
* experience this issue, you may wish to uncomment the following two lines
* and increase the limits of these variables. For more information, see
* http://php.net/manual/en/pcre.configuration.php.
*/
# ini_set('pcre.backtrack_limit', 200000);
# ini_set('pcre.recursion_limit', 200000);
/**
* Drupal automatically generates a unique session cookie name for each site
* based on its full domain name. If you have multiple domains pointing at the
* same Drupal site, you can either redirect them all to a single domain (see
* comment in .htaccess), or uncomment the line below and specify their shared
* base domain. Doing so assures that users remain logged in as they cross
* between your various domains. Make sure to always start the $cookie_domain
* with a leading dot, as per RFC 2109.
*/
# $cookie_domain = '.example.com';
/**
* Variable overrides:
*
* To override specific entries in the 'variable' table for this site,
* set them here. You usually don't need to use this feature. This is
* useful in a configuration file for a vhost or directory, rather than
* the default settings.php. Any configuration setting from the 'variable'
* table can be given a new value. Note that any values you provide in
* these variable overrides will not be modifiable from the Drupal
* administration interface.
*
* The following overrides are examples:
* - site_name: Defines the site's name.
* - theme_default: Defines the default theme for this site.
* - anonymous: Defines the human-readable name of anonymous users.
* Remove the leading hash signs to enable.
*/
# $conf['site_name'] = 'My Drupal site';
# $conf['theme_default'] = 'garland';
# $conf['anonymous'] = 'Visitor';
/**
* A custom theme can be set for the offline page. This applies when the site
* is explicitly set to maintenance mode through the administration page or when
* the database is inactive due to an error. It can be set through the
* 'maintenance_theme' key. The template file should also be copied into the
* theme. It is located inside 'modules/system/maintenance-page.tpl.php'.
* Note: This setting does not apply to installation and update pages.
*/
# $conf['maintenance_theme'] = 'bartik';
/**
* Reverse Proxy Configuration:
*
* Reverse proxy servers are often used to enhance the performance
* of heavily visited sites and may also provide other site caching,
* security, or encryption benefits. In an environment where Drupal
* is behind a reverse proxy, the real IP address of the client should
* be determined such that the correct client IP address is available
* to Drupal's logging, statistics, and access management systems. In
* the most simple scenario, the proxy server will add an
* X-Forwarded-For header to the request that contains the client IP
* address. However, HTTP headers are vulnerable to spoofing, where a
* malicious client could bypass restrictions by setting the
* X-Forwarded-For header directly. Therefore, Drupal's proxy
* configuration requires the IP addresses of all remote proxies to be
* specified in $conf['reverse_proxy_addresses'] to work correctly.
*
* Enable this setting to get Drupal to determine the client IP from
* the X-Forwarded-For header (or $conf['reverse_proxy_header'] if set).
* If you are unsure about this setting, do not have a reverse proxy,
* or Drupal operates in a shared hosting environment, this setting
* should remain commented out.
*
* In order for this setting to be used you must specify every possible
* reverse proxy IP address in $conf['reverse_proxy_addresses'].
* If a complete list of reverse proxies is not available in your
* environment (for example, if you use a CDN) you may set the
* $_SERVER['REMOTE_ADDR'] variable directly in settings.php.
* Be aware, however, that it is likely that this would allow IP
* address spoofing unless more advanced precautions are taken.
*/
# $conf['reverse_proxy'] = TRUE;
/**
* Specify every reverse proxy IP address in your environment.
* This setting is required if $conf['reverse_proxy'] is TRUE.
*/
# $conf['reverse_proxy_addresses'] = array('a.b.c.d', ...);
/**
* Set this value if your proxy server sends the client IP in a header
* other than X-Forwarded-For.
*/
# $conf['reverse_proxy_header'] = 'HTTP_X_CLUSTER_CLIENT_IP';
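/**
 * Illustrative example (added, not part of the stock settings.php): a minimal
 * combined reverse proxy configuration, assuming a single trusted proxy at
 * the documentation address 203.0.113.10 that forwards the client IP in the
 * standard X-Forwarded-For header. Remove the leading hash signs to enable.
 */
# $conf['reverse_proxy'] = TRUE;
# $conf['reverse_proxy_addresses'] = array('203.0.113.10');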
/**
* Page caching:
*
* By default, Drupal sends a "Vary: Cookie" HTTP header for anonymous page
* views. This tells an HTTP proxy that it may return a page from its local
* cache without contacting the web server, if the user sends the same Cookie
* header as the user who originally requested the cached page. Without "Vary:
* Cookie", authenticated users would also be served the anonymous page from
* the cache. If the site has mostly anonymous users except a few known
* editors/administrators, the Vary header can be omitted. This allows for
* better caching in HTTP proxies (including reverse proxies), i.e. even if
* clients send different cookies, they still get content served from the cache.
* However, authenticated users should access the site directly (i.e. not use an
* HTTP proxy, and bypass the reverse proxy if one is used) in order to avoid
* getting cached pages from the proxy.
*/
# $conf['omit_vary_cookie'] = TRUE;
/**
* CSS/JS aggregated file gzip compression:
*
* By default, when CSS or JS aggregation and clean URLs are enabled Drupal will
* store a gzip compressed (.gz) copy of the aggregated files. If this file is
* available then rewrite rules in the default .htaccess file will serve these
* files to browsers that accept gzip encoded content. This allows pages to load
* faster for these users and has minimal impact on server load. If you are
* using a webserver other than Apache httpd, or a caching reverse proxy that is
* configured to cache and compress these files itself you may want to uncomment
* one or both of the below lines, which will prevent gzip files being stored.
*/
# $conf['css_gzip_compression'] = FALSE;
# $conf['js_gzip_compression'] = FALSE;
/**
* String overrides:
*
* To override specific strings on your site with or without enabling the Locale
* module, add an entry to this list. This functionality allows you to change
* a small number of your site's default English language interface strings.
*
* Remove the leading hash signs to enable.
*/
# $conf['locale_custom_strings_en'][''] = array(
# 'forum' => 'Discussion board',
# '@count min' => '@count minutes',
# );
/**
*
* IP blocking:
*
* To bypass database queries for denied IP addresses, use this setting.
* Drupal queries the {blocked_ips} table by default on every page request
* for both authenticated and anonymous users. This allows the system to
* block IP addresses from within the administrative interface and before any
* modules are loaded. However on high traffic websites you may want to avoid
* this query, allowing you to bypass database access altogether for anonymous
* users under certain caching configurations.
*
* If using this setting, you will need to add back any IP addresses which
* you may have blocked via the administrative interface. Each element of this
* array represents a blocked IP address. Uncommenting the array and leaving it
* empty will have the effect of disabling IP blocking on your site.
*
* Remove the leading hash signs to enable.
*/
# $conf['blocked_ips'] = array(
# 'a.b.c.d',
# );
/**
* Fast 404 pages:
*
* Drupal can generate fully themed 404 pages. However, some of these responses
* are for images or other resource files that are not displayed to the user.
* This can waste bandwidth, and also generate server load.
*
* The options below return a simple, fast 404 page for URLs matching a
* specific pattern:
* - 404_fast_paths_exclude: A regular expression to match paths to exclude,
* such as images generated by image styles, or dynamically-resized images.
* If you need to add more paths, you can add '|path' to the expression.
* - 404_fast_paths: A regular expression to match paths that should return a
* simple 404 page, rather than the fully themed 404 page. If you don't have
* any aliases ending in htm or html you can add '|s?html?' to the expression.
* - 404_fast_html: The html to return for simple 404 pages.
*
* Add leading hash signs if you would like to disable this functionality.
*/
$conf['404_fast_paths_exclude'] = '/\/(?:styles)\//';
$conf['404_fast_paths'] = '/\.(?:txt|png|gif|jpe?g|css|js|ico|swf|flv|cgi|bat|pl|dll|exe|asp)$/i';
$conf['404_fast_html'] = '<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML+RDFa 1.0//EN" "http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd"><html xmlns="http://www.w3.org/1999/xhtml"><head><title>404 Not Found</title></head><body><h1>Not Found</h1><p>The requested URL "@path" was not found on this server.</p></body></html>';
/**
* By default the page request process will return a fast 404 page for missing
* files if they match the regular expression set in '404_fast_paths' and not
* '404_fast_paths_exclude' above. 404 errors will simultaneously be logged in
* the Drupal system log.
*
* You can choose to return a fast 404 page earlier for missing pages (as soon
* as settings.php is loaded) by uncommenting the line below. This speeds up
* server response time when loading 404 error pages and prevents the 404 error
* from being logged in the Drupal system log. In order to prevent valid pages
* such as image styles and other generated content that may match the
* '404_fast_paths' regular expression from returning 404 errors, it is necessary
* to add them to the '404_fast_paths_exclude' regular expression above. Make
* sure that you understand the effects of this feature before uncommenting the
* line below.
*/
# drupal_fast_404();
/**
* External access proxy settings:
*
* If your site must access the Internet via a web proxy then you can enter
* the proxy settings here. Currently only basic authentication is supported
* by using the username and password variables. The proxy_user_agent variable
* can be set to NULL for proxies that require no User-Agent header or to a
* non-empty string for proxies that limit requests to a specific agent. The
* proxy_exceptions variable is an array of host names to be accessed directly,
* not via proxy.
*/
# $conf['proxy_server'] = '';
# $conf['proxy_port'] = 8080;
# $conf['proxy_username'] = '';
# $conf['proxy_password'] = '';
# $conf['proxy_user_agent'] = '';
# $conf['proxy_exceptions'] = array('127.0.0.1', 'localhost');
/**
* Authorized file system operations:
*
* The Update manager module included with Drupal provides a mechanism for
* site administrators to securely install missing updates for the site
* directly through the web user interface. On securely-configured servers,
* the Update manager will require the administrator to provide SSH or FTP
* credentials before allowing the installation to proceed; this allows the
* site to update the new files as the user who owns all the Drupal files,
* instead of as the user the webserver is running as. On servers where the
* webserver user is itself the owner of the Drupal files, the administrator
* will not be prompted for SSH or FTP credentials (note that these server
* setups are common on shared hosting, but are inherently insecure).
*
* Some sites might wish to disable the above functionality, and only update
* the code directly via SSH or FTP themselves. This setting completely
* disables all functionality related to these authorized file operations.
*
* @see http://drupal.org/node/244924
*
* Remove the leading hash signs to disable.
*/
# $conf['allow_authorize_operations'] = FALSE;
|
tonysh518/shenzhi
|
sites/default/settings.php
|
PHP
|
gpl-2.0
| 23,496
|
/*
* linux/include/asm-arm/mmu_context.h
*
* Copyright (C) 1996 Russell King.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* Changelog:
* 27-06-1996 RMK Created
*/
#ifndef __ASM_ARM_MMU_CONTEXT_H
#define __ASM_ARM_MMU_CONTEXT_H
#include <linux/compiler.h>
#include <asm/cacheflush.h>
#include <asm/proc-fns.h>
#include <asm-generic/mm_hooks.h>
void __check_kvm_seq(struct mm_struct *mm);
#ifdef CONFIG_CPU_HAS_ASID
/*
* On ARMv6, we have the following structure in the Context ID:
*
* 31 7 0
* +-------------------------+-----------+
* | process ID | ASID |
* +-------------------------+-----------+
* | context ID |
* +-------------------------------------+
*
* The ASID is used to tag entries in the CPU caches and TLBs.
* The context ID is used by debuggers and trace logic, and
* should be unique within all running processes.
*/
#define ASID_BITS 8
#define ASID_MASK ((~0) << ASID_BITS)
#define ASID_FIRST_VERSION (1 << ASID_BITS)
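/*
 * Illustrative note (added, not part of the original header): with the masks
 * above, the low ASID_BITS of a context ID hold the hardware ASID and the
 * remaining bits hold the allocation generation, e.g.
 *
 *   asid       = mm->context.id & ~ASID_MASK;
 *   generation = mm->context.id &  ASID_MASK;
 *
 * check_context() below compares the generation against cpu_last_asid and
 * calls __new_context() when the mm still carries an ASID from an older
 * generation.
 */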
extern unsigned int cpu_last_asid;
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm);
void __new_context(struct mm_struct *mm);
static inline void check_context(struct mm_struct *mm)
{
if (unlikely((mm->context.id ^ cpu_last_asid) >> ASID_BITS))
__new_context(mm);
if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
__check_kvm_seq(mm);
}
#define init_new_context(tsk,mm) (__init_new_context(tsk,mm),0)
#else
#ifdef CONFIG_MMU
static inline void check_context(struct mm_struct *mm)
{
if (unlikely(mm->context.kvm_seq != init_mm.context.kvm_seq))
__check_kvm_seq(mm);
}
#endif /* CONFIG_MMU */
#define init_new_context(tsk,mm) 0
#endif
#define destroy_context(mm) do { } while(0)
/*
* This is called when "tsk" is about to enter lazy TLB mode.
*
* mm: describes the currently active mm context
* tsk: task which is entering lazy tlb
* cpu: cpu number which is entering lazy tlb
*
* tsk->mm will be NULL
*/
static inline void
enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
/*
* This is the actual mm switch as far as the scheduler
* is concerned. No registers are touched. We avoid
* calling the CPU specific function when the mm hasn't
* actually changed.
*/
static inline void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk)
{
#ifdef CONFIG_MMU
unsigned int cpu = smp_processor_id();
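/* Added note (not in the original source): the CPU-specific switch below is
 * skipped only when this CPU is already set in next->cpu_vm_mask and
 * prev == next, i.e. the mm has not actually changed on this CPU. */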
if (!cpu_test_and_set(cpu, next->cpu_vm_mask) || prev != next) {
check_context(next);
cpu_switch_mm(next->pgd, next);
if (cache_is_vivt())
cpu_clear(cpu, prev->cpu_vm_mask);
}
#endif
}
#define deactivate_mm(tsk,mm) do { } while (0)
#define activate_mm(prev,next) switch_mm(prev, next, NULL)
#endif
|
kidmaple/CoolWall
|
linux-2.6.x/include/asm-arm/mmu_context.h
|
C
|
gpl-2.0
| 2,914
|
#ifndef RGRAPHICSSCENEQV_H
#define RGRAPHICSSCENEQV_H
#include <QGraphicsScene>
#include <QStack>
#include "RGraphicsScene.h"
#include "RLine.h"
#include "RPoint.h"
#include "RCircle.h"
#include "RArc.h"
class RDocumentInterface;
class RGraphicsItem;
class RGraphicsViewQV;
/**
* Graphics scene. This class implements the exporter interface to
* export entities into a scene.
* \scriptable
*/
class RGraphicsSceneQV : public QGraphicsScene, public RGraphicsScene {
Q_OBJECT
public:
RGraphicsSceneQV(RDocumentInterface& di, QObject* parent=0);
~RGraphicsSceneQV();
virtual void exportLine(const RVector& p1, const RVector& p2);
virtual void exportPoint(const RPoint& point) {
}
virtual void exportCircle(const RCircle& circle) {
}
virtual void exportArc(const RArc& arc, double /*offset*/= RNANDOUBLE) {
}
virtual void exportArcSegment(const RArc& arc) {
}
void highlightItem(QList<QGraphicsItem*> items);
void highlightItem(QGraphicsItem* item);
virtual void exportLine(const RLine& line, double offset = RNANDOUBLE);
virtual void exportLineSegment(const RLine& line);
virtual void exportTriangle(const RTriangle& triangle);
virtual void highlightEntity(REntity& entity);
virtual void highlightReferencePoint(const RVector& position);
signals:
void mouseMoved(const QPointF& pos, const QPointF& relPos);
protected:
virtual void mouseMoveEvent(QGraphicsSceneMouseEvent* event);
virtual void wheelEvent(QGraphicsSceneWheelEvent* event);
virtual void mousePressEvent(QGraphicsSceneMouseEvent* event);
virtual void mouseReleaseEvent(QGraphicsSceneMouseEvent* event);
RGraphicsViewQV* getGraphicsView(QGraphicsSceneEvent* event);
RGraphicsItem* highlightedItem;
};
Q_DECLARE_METATYPE(RGraphicsSceneQV*)
#endif
|
biluna/biluna
|
gv/src/gui/qt/qgraphicsview/RGraphicsSceneQV.h
|
C
|
gpl-2.0
| 1,833
|
/*****************************************************************************
1 Header file includes
*****************************************************************************/
#include "vos.h"
#include "pslog.h"
#include "PsCommonDef.h"
#include "rabminclude.h"
#include "NasRabmMain.h"
#include "NasRabmMsgProc.h"
#include "AtRabmInterface.h"
#include "NasUtranCtrlCommFunc.h"
#include "NasNvInterface.h"
#include "TafNvInterface.h"
#ifdef __cplusplus
#if __cplusplus
extern "C" {
#endif
#endif
#define THIS_FILE_ID PS_FILE_ID_NAS_RABM_MGR_C
/*****************************************************************************
2 Global variable definitions
*****************************************************************************/
/* Current system mode */
GMM_RABM_NET_RAT_ENUM_UINT32 g_enNasRabmSysMode;
/* Processing function pointer table indexed by system mode */
NAS_RABM_PROC_FUNC_TBL_STRU *g_pstNasRabmProcFuncTbl[NAS_MML_NET_RAT_TYPE_BUTT];
NAS_RABM_FASTDORM_CTX_STRU g_stNasRabmFastDormCtx;
/*****************************************************************************
3 Function implementations
*****************************************************************************/
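/*
 * Added note (not in the original source): message handling in this file is
 * dispatched through g_pstNasRabmProcFuncTbl, a per-RAT table of handler
 * function pointers. Each receive routine checks the current system mode,
 * looks up the table entry registered for that mode, and calls the matching
 * handler only if both the table and the handler pointer are non-NULL.
 */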
VOS_VOID NAS_RABM_RegProcFuncTbl(
GMM_RABM_NET_RAT_ENUM_UINT32 enSysMode,
NAS_RABM_PROC_FUNC_TBL_STRU *pstProcFucTbl
)
{
/* Check that the system mode is valid */
if (NAS_MML_NET_RAT_TYPE_BUTT == enSysMode)
{
NAS_ERROR_LOG(WUEPS_PID_RABM,
"NAS_RABM_RegProcFuncTbl: Wrong system mode.");
return;
}
NAS_RABM_SetProFuncTblAddr(enSysMode, pstProcFucTbl);
}
VOS_VOID NAS_RABM_CtxInit(VOS_VOID)
{
VOS_UINT32 i;
#if (FEATURE_ON == FEATURE_LTE)
/* Initialize the system mode: default is LTE */
NAS_RABM_SetSysMode(NAS_MML_NET_RAT_TYPE_LTE);
#else
/* Initialize the system mode: default is WCDMA */
NAS_RABM_SetSysMode(NAS_MML_NET_RAT_TYPE_WCDMA);
#endif
/* Initialize all processing function table entries to NULL */
for ( i = 0; i < NAS_MML_NET_RAT_TYPE_BUTT; i++)
{
NAS_RABM_SetProFuncTblAddr(i, VOS_NULL_PTR);
}
/* Initialize the RABM entities */
Rabm_Init();
NAS_Rabm2GInit();
/* Initialize the data suspend flag */
NAS_RABM_ClearDataSuspendFlg();
/* Initialize the processing function table */
NAS_RABM_InitProcFuncTbl();
}
VOS_VOID NAS_RABM_MsgProc(
struct MsgCB *pMsg
)
{
MSG_HEADER_STRU *pstMsg;
struct MsgCB *pstDestMsg;
pstDestMsg = VOS_NULL_PTR;
/* Check that the incoming message is not NULL */
if (VOS_NULL_PTR == pMsg)
{
NAS_WARNING_LOG(WUEPS_PID_RABM,
"NAS_RABM_MsgProc: Message is NULL.");
return;
}
/* When the TD-SCDMA feature is supported, the message must be adapted: map the TD-mode sender PID to the W-mode PID */
if ( VOS_TRUE == NAS_UTRANCTRL_MsgProc(pMsg, &pstDestMsg) )
{
/* The message was fully handled by the UTRANCTRL module; return without further processing */
return;
}
/* Replace the message pointer */
pstMsg = (MSG_HEADER_STRU*)pstDestMsg;
/* Process the message */
switch (pstMsg->ulSenderPid)
{
case WUEPS_PID_SM:
NAS_RABM_RcvSmMsg(pstDestMsg);
break;
case WUEPS_PID_GMM:
NAS_RABM_RcvGmmMsg(pstDestMsg);
break;
case WUEPS_PID_AT:
NAS_RABM_RcvAtMsg(pstDestMsg);
break;
case UEPS_PID_CDS:
NAS_RABM_RcvCdsMsg(pstDestMsg);
break;
default:
NAS_RABM_RcvCommMsg(pstDestMsg);
break;
}
}
VOS_VOID NAS_RABM_RcvSmMsg(
struct MsgCB *pMsg
)
{
MSG_HEADER_STRU *pstMsg;
pstMsg = (MSG_HEADER_STRU*)pMsg;
switch (pstMsg->ulMsgName)
{
case RABMSM_ACTIVATE_IND:
NAS_RABM_RcvSmActivateInd((RABMSM_ACTIVATE_IND_STRU*)pMsg);
break;
case RABMSM_DEACTIVATE_IND:
NAS_RABM_RcvSmDeactivateInd((RABMSM_DEACTIVATE_IND_STRU*)pMsg);
break;
case RABMSM_MODIFY_IND:
NAS_RABM_RcvSmModifyInd((RABMSM_MODIFY_IND_STRU*)pMsg);
break;
#if (FEATURE_ON == FEATURE_LTE)
case ID_SM_RABM_BEARER_ACTIVATE_IND:
NAS_RABM_RcvSmBearerActivateInd((SM_RABM_BEARER_ACTIVATE_IND_STRU*)pMsg);
break;
case ID_SM_RABM_BEARER_DEACTIVATE_IND:
NAS_RABM_RcvSmBearerDeactivateInd((SM_RABM_BEARER_DEACTIVATE_IND_STRU*)pMsg);
break;
case ID_SM_RABM_BEARER_MODIFY_IND:
NAS_RABM_RcvSmBearerModifyInd((SM_RABM_BEARER_MODIFY_IND_STRU*)pMsg);
break;
#endif
default:
NAS_WARNING_LOG(WUEPS_PID_RABM,
"NAS_RABM_RcvSmMsg: Unknown message from SM.");
break;
}
}
VOS_VOID NAS_RABM_RcvGmmMsg(
struct MsgCB *pMsg
)
{
MSG_HEADER_STRU *pstMsg;
pstMsg = (MSG_HEADER_STRU*)pMsg;
switch (pstMsg->ulMsgName)
{
case ID_GMM_RABM_REESTABLISH_CNF:
NAS_RABM_RcvGmmReestablishCnf((GMMRABM_REESTABLISH_CNF_STRU*)pMsg);
break;
case ID_GMM_RABM_SYS_SRV_CHG_IND:
NAS_RABM_RcvGmmSysSrvChgInd((GMM_RABM_SYS_SRV_CHG_IND_STRU*)pMsg);
break;
case ID_GMM_RABM_MML_PROC_STATUS_QRY_CNF:
NAS_RABM_RcvGmmMmlProcStatusQryCnf(pMsg);
break;
default:
NAS_RABM_RcvCommMsg(pMsg);
break;
}
}
VOS_VOID NAS_RABM_RcvSmActivateInd(
RABMSM_ACTIVATE_IND_STRU *pstSmActivateInd
)
{
NAS_RABM_PROC_FUNC_TBL_STRU *pstProcFuncTbl;
GMM_RABM_NET_RAT_ENUM_UINT32 enCurrentSysMode;
/* Check whether the current system mode is valid */
enCurrentSysMode = NAS_RABM_GetCurrentSysMode();
if (NAS_MML_NET_RAT_TYPE_BUTT == enCurrentSysMode)
{
NAS_ERROR_LOG(WUEPS_PID_RABM,
"NAS_RABM_RcvSmActivateInd: Invalid System Mode.");
return;
}
/* Get the processing function table for the current system mode */
pstProcFuncTbl = NAS_RABM_GetProFuncTblAddr(enCurrentSysMode);
/* Confirm that the processing function table has been registered */
if (VOS_NULL_PTR != pstProcFuncTbl)
{
if (VOS_NULL_PTR != pstProcFuncTbl->pSmActivateIndProcFunc)
{
/* Call the handler for the corresponding mode */
pstProcFuncTbl->pSmActivateIndProcFunc(pstSmActivateInd);
}
else
{
NAS_WARNING_LOG(WUEPS_PID_RABM,
"NAS_RABM_RcvSmActivateInd: pSmActivateIndProcFunc is not registered.");
}
}
else
{
NAS_ERROR_LOG(WUEPS_PID_RABM,
"NAS_RABM_RcvSmActivateInd: Msg is received in wrong system mode.");
}
}
VOS_VOID NAS_RABM_RcvSmDeactivateInd(
RABMSM_DEACTIVATE_IND_STRU *pstSmDeactivateInd
)
{
NAS_RABM_PROC_FUNC_TBL_STRU *pstProcFuncTbl;
GMM_RABM_NET_RAT_ENUM_UINT32 enCurrentSysMode;
/* Check whether the current system mode is valid */
enCurrentSysMode = NAS_RABM_GetCurrentSysMode();
if (NAS_MML_NET_RAT_TYPE_BUTT == enCurrentSysMode)
{
NAS_ERROR_LOG(WUEPS_PID_RABM,
"NAS_RABM_RcvSmDeactivateInd: Invalid System Mode.");
return;
}
/* Get the processing function table for the current system mode */
pstProcFuncTbl = NAS_RABM_GetProFuncTblAddr(enCurrentSysMode);
/* Confirm that the processing function table has been registered */
if (VOS_NULL_PTR != pstProcFuncTbl)
{
if (VOS_NULL_PTR != pstProcFuncTbl->pSmDeactivateIndProcFunc)
{
/* Call the handler for the corresponding mode */
pstProcFuncTbl->pSmDeactivateIndProcFunc(pstSmDeactivateInd);
}
else
{
NAS_WARNING_LOG(WUEPS_PID_RABM,
"NAS_RABM_RcvSmDeactivateInd: pSmDeactivateIndProcFunc is not registered.");
}
}
else
{
NAS_ERROR_LOG(WUEPS_PID_RABM,
"NAS_RABM_RcvSmDeactivateInd: Msg is received in wrong system mode.");
}
}
VOS_VOID NAS_RABM_RcvSmModifyInd(
RABMSM_MODIFY_IND_STRU *pstSmModifyInd
)
{
NAS_RABM_PROC_FUNC_TBL_STRU *pstProcFuncTbl;
GMM_RABM_NET_RAT_ENUM_UINT32 enCurrentSysMode;
/* Check whether the current system mode is valid */
enCurrentSysMode = NAS_RABM_GetCurrentSysMode();
if (NAS_MML_NET_RAT_TYPE_BUTT == enCurrentSysMode)
{
NAS_ERROR_LOG(WUEPS_PID_RABM,
"NAS_RABM_RcvSmModifyInd: Invalid System Mode.");
return;
}
/* Get the processing function table for the current system mode */
pstProcFuncTbl = NAS_RABM_GetProFuncTblAddr(enCurrentSysMode);
/* Confirm that the processing function table has been registered */
if (VOS_NULL_PTR != pstProcFuncTbl)
{
if (VOS_NULL_PTR != pstProcFuncTbl->pSmModifyIndProcFunc)
{
/* Call the handler for the corresponding mode */
pstProcFuncTbl->pSmModifyIndProcFunc(pstSmModifyInd);
}
else
{
NAS_WARNING_LOG(WUEPS_PID_RABM,
"NAS_RABM_RcvSmModifyInd: pSmModifyIndProcFunc is not registered.");
}
}
else
{
NAS_ERROR_LOG(WUEPS_PID_RABM,
"NAS_RABM_RcvSmModifyInd: Msg is received in wrong system mode.");
}
}
#if (FEATURE_ON == FEATURE_LTE)
VOS_VOID NAS_RABM_RcvSmBearerActivateInd(
SM_RABM_BEARER_ACTIVATE_IND_STRU *pstSmBearerActivateInd
)
{
RABM_ENTITY_PS_STRU *pstPsEnt;
VOS_UINT8 ucEntId;
/* Get the PS domain RABM entity pointer */
ucEntId = (VOS_UINT8)(pstSmBearerActivateInd->ulNsapi - RABM_NSAPI_OFFSET);
pstPsEnt = NAS_RABM_GetWPsEntAddr(ucEntId);
/* Update the QoS */
pstPsEnt->QoS.ulQosLength = pstSmBearerActivateInd->stQos.ulLength;
PS_MEM_CPY(pstPsEnt->QoS.aucQosValue,
pstSmBearerActivateInd->stQos.aucQosValue,
NAS_RABM_MAX_QOS_LEN);
/* Update the PPP flag */
pstPsEnt->ucPppFlg = RABM_SM_IP_PROT;
/* Create the RAB_MAP entity */
NAS_RABM_CreateRabMapEntity((VOS_UINT8)pstSmBearerActivateInd->ulNsapi,
(VOS_UINT8)pstSmBearerActivateInd->ulLinkdNsapi,
(VOS_UINT8)pstSmBearerActivateInd->ulNsapi);
/* Update the RABM state to suspended */
RABM_SetWState(ucEntId, RABM_DATA_TRANSFER_STOP);
return;
}
VOS_VOID NAS_RABM_RcvSmBearerModifyInd(
SM_RABM_BEARER_MODIFY_IND_STRU *pstSmBearerModifyInd
)
{
RABM_ENTITY_PS_STRU *pstPsEnt;
VOS_UINT8 ucEntId;
/* Get the PS domain RABM entity index */
ucEntId = (VOS_UINT8)(pstSmBearerModifyInd->ulNsapi - RABM_NSAPI_OFFSET);
/* Check that the PS domain RABM entity state is correct */
if (RABM_NULL == NAS_RABM_GetWPsEntState(ucEntId))
{
NAS_ERROR_LOG1(WUEPS_PID_RABM,
"NAS_RABM_RcvSmBearerModifyInd: Wrong state of RABM entity. NSAPI:",
pstSmBearerModifyInd->ulNsapi);
return;
}
/* Get the PS domain RABM entity pointer */
pstPsEnt = NAS_RABM_GetWPsEntAddr(ucEntId);
/* Update the QoS */
pstPsEnt->QoS.ulQosLength = pstSmBearerModifyInd->stQos.ulLength;
PS_MEM_CPY(pstPsEnt->QoS.aucQosValue,
pstSmBearerModifyInd->stQos.aucQosValue,
NAS_RABM_MAX_QOS_LEN);
return;
}
VOS_VOID NAS_RABM_RcvSmBearerDeactivateInd(
SM_RABM_BEARER_DEACTIVATE_IND_STRU *pstSmBearerDeactivateInd
)
{
VOS_UINT8 ucEntId;
/* Get the PS domain RABM entity index */
ucEntId = (VOS_UINT8)(pstSmBearerDeactivateInd->ulNsapi - RABM_NSAPI_OFFSET);
if (NAS_MML_NET_RAT_TYPE_GSM == NAS_RABM_GetCurrentSysMode())
{
/* Set the G entity state to RABM_2G_NULL */
RABM_SetGState((RABM_NSAPI_ENUM)ucEntId, RABM_2G_NULL);
/* Clear the G entity information */
NAS_RABM_ClearRabmGPsEnt(ucEntId);
}
else
{
/* Set the W entity state to RABM_NULL */
RABM_SetWState(ucEntId, RABM_NULL);
/* Clear the W entity information */
NAS_RABM_ClearRabmWPsEnt(ucEntId);
}
/* Delete the RAB_MAP entity */
RABM_DelRabMap((VOS_UINT8)pstSmBearerDeactivateInd->ulNsapi);
}
#endif
VOS_VOID NAS_RABM_RcvGmmReestablishCnf(
GMMRABM_REESTABLISH_CNF_STRU *pstGmmReestablishCnf
)
{
NAS_RABM_PROC_FUNC_TBL_STRU *pstProcFuncTbl;
GMM_RABM_NET_RAT_ENUM_UINT32 enCurrentSysMode;
/* Check whether the current system mode is valid */
enCurrentSysMode = NAS_RABM_GetCurrentSysMode();
if (NAS_MML_NET_RAT_TYPE_BUTT == enCurrentSysMode)
{
NAS_ERROR_LOG(WUEPS_PID_RABM,
"NAS_RABM_RcvGmmReestablishCnf: Invalid System Mode.");
return;
}
/* Get the processing function table for the current system mode */
pstProcFuncTbl = NAS_RABM_GetProFuncTblAddr(enCurrentSysMode);
/* Confirm that the processing function table has been registered */
if (VOS_NULL_PTR != pstProcFuncTbl)
{
if (VOS_NULL_PTR != pstProcFuncTbl->pGmmReestablishCnfProcFunc)
{
/* Call the handler for the corresponding mode */
pstProcFuncTbl->pGmmReestablishCnfProcFunc(pstGmmReestablishCnf);
}
else
{
NAS_WARNING_LOG(WUEPS_PID_RABM,
"NAS_RABM_RcvGmmReestablishCnf: pGmmReestablishCnfProcFunc is not registered.");
}
}
else
{
NAS_ERROR_LOG(WUEPS_PID_RABM,
"NAS_RABM_RcvGmmReestablishCnf: Message is received in wrong system mode.");
}
}
VOS_VOID NAS_RABM_RcvGmmSysSrvChgInd(
GMM_RABM_SYS_SRV_CHG_IND_STRU *pstGmmSysSrvChgInd
)
{
GMM_RABM_NET_RAT_ENUM_UINT32 enOldSysMode;
/* Get the current (old) system mode */
enOldSysMode = NAS_RABM_GetCurrentSysMode();
/* Perform suspend or resume processing, depending on whether the suspend indication flag is set */
if (VOS_TRUE == pstGmmSysSrvChgInd->bDataSuspendFlg)
{
NAS_RABM_RcvDataSuspendInd(pstGmmSysSrvChgInd->bRatChangeFlg);
NAS_RABM_RcvSysModeChgInd(pstGmmSysSrvChgInd->enSysMode);
}
else
{
NAS_RABM_RcvSysModeChgInd(pstGmmSysSrvChgInd->enSysMode);
/* When GMM notifies the RABM module to resume, it also indicates whether the RAB needs to be
   re-established; when resuming, the RABM module decides whether to wait for the access stratum to restore the RAB */
NAS_RABM_RcvDataResumeInd(pstGmmSysSrvChgInd->ucRebuildRabFlag);
}
NAS_RABM_SysModeChgProcFastDorm(enOldSysMode,pstGmmSysSrvChgInd->enSysMode);
}
VOS_VOID NAS_RABM_RcvDataSuspendInd(
VOS_BOOL bRatChangeFlg
)
{
NAS_RABM_PROC_FUNC_TBL_STRU *pstProcFuncTbl;
GMM_RABM_NET_RAT_ENUM_UINT32 enCurrentSysMode;
/* Check whether the current system mode is valid */
enCurrentSysMode = NAS_RABM_GetCurrentSysMode();
if (NAS_MML_NET_RAT_TYPE_BUTT == enCurrentSysMode)
{
NAS_ERROR_LOG(WUEPS_PID_RABM,
"NAS_RABM_RcvDataSuspendInd: Invalid System Mode.");
return;
}
/* Get the processing function table for the current system mode */
pstProcFuncTbl = NAS_RABM_GetProFuncTblAddr(enCurrentSysMode);
/* Confirm that the processing function table has been registered */
if (VOS_NULL_PTR != pstProcFuncTbl)
{
if (VOS_NULL_PTR != pstProcFuncTbl->pDataSuspendIndProcFunc)
{
/* Call the handler for the corresponding mode */
pstProcFuncTbl->pDataSuspendIndProcFunc(bRatChangeFlg);
}
else
{
NAS_WARNING_LOG(WUEPS_PID_RABM,
"NAS_RABM_RcvDataSuspendInd: pDataSuspendIndProcFunc is not registered.");
}
}
else
{
NAS_ERROR_LOG(WUEPS_PID_RABM,
"NAS_RABM_RcvDataSuspendInd: Message is received in wrong system mode.");
}
}
VOS_VOID NAS_RABM_RcvDataResumeInd(
VOS_UINT8 ucRebuildRabFlag
)
{
NAS_RABM_PROC_FUNC_TBL_STRU *pstProcFuncTbl;
GMM_RABM_NET_RAT_ENUM_UINT32 enCurrentSysMode;
/* Check whether the current system mode is valid */
enCurrentSysMode = NAS_RABM_GetCurrentSysMode();
if (NAS_MML_NET_RAT_TYPE_BUTT == enCurrentSysMode)
{
NAS_ERROR_LOG(WUEPS_PID_RABM,
"NAS_RABM_RcvDataResumeInd: Invalid System Mode.");
return;
}
/* Get the processing function table for the current system mode */
pstProcFuncTbl = NAS_RABM_GetProFuncTblAddr(enCurrentSysMode);
/* Confirm that the processing function table has been registered */
if (VOS_NULL_PTR != pstProcFuncTbl)
{
if (VOS_NULL_PTR != pstProcFuncTbl->pDataResumeIndProcFunc)
{
/* Call the handler for the corresponding mode */
pstProcFuncTbl->pDataResumeIndProcFunc(ucRebuildRabFlag);
}
else
{
NAS_WARNING_LOG(WUEPS_PID_RABM,
"NAS_RABM_RcvDataResumeInd: pDataResumeIndProcFunc is not registered.");
}
}
else
{
NAS_ERROR_LOG(WUEPS_PID_RABM,
"NAS_RABM_RcvDataResumeInd: Message is received in wrong system mode.");
}
}
VOS_VOID NAS_RABM_RcvSysModeChgInd(
GMM_RABM_NET_RAT_ENUM_UINT32 enNewSysMode
)
{
NAS_RABM_PROC_FUNC_TBL_STRU *pstProcFuncTbl;
GMM_RABM_NET_RAT_ENUM_UINT32 enOldSysMode;
/* Get the current (old) system mode */
enOldSysMode = NAS_RABM_GetCurrentSysMode();
/* Check the validity of the new system mode; if invalid, do not update the system mode */
if (NAS_MML_NET_RAT_TYPE_BUTT == enNewSysMode)
{
NAS_ERROR_LOG(WUEPS_PID_RABM,
"NAS_RABM_RcvSysModeChgInd: Invalid System Mode.");
return;
}
/* Update the system mode */
NAS_RABM_SetSysMode(enNewSysMode);
/* Get the processing function table for the old system mode */
pstProcFuncTbl = NAS_RABM_GetProFuncTblAddr(enOldSysMode);
/* Confirm that the processing function table has been registered */
if (VOS_NULL_PTR != pstProcFuncTbl)
{
if (VOS_NULL_PTR != pstProcFuncTbl->pSysModeChgIndProcFunc)
{
/* Call the handler for the corresponding mode */
pstProcFuncTbl->pSysModeChgIndProcFunc(enOldSysMode, enNewSysMode);
}
else
{
NAS_WARNING_LOG(WUEPS_PID_RABM,
"NAS_RABM_RcvSysModeChgInd: pDataResumeIndProcFunc is not registered.");
}
}
else
{
NAS_WARNING_LOG(WUEPS_PID_RABM,
"NAS_RABM_RcvSysModeChgInd: Message is received in wrong system mode.");
}
}
VOS_VOID NAS_RABM_RcvCommMsg(
struct MsgCB *pMsg
)
{
NAS_RABM_PROC_FUNC_TBL_STRU *pstProcFuncTbl;
GMM_RABM_NET_RAT_ENUM_UINT32 enCurrentSysMode;
/* Check whether the current system mode is valid */
enCurrentSysMode = NAS_RABM_GetCurrentSysMode();
if (NAS_MML_NET_RAT_TYPE_BUTT == enCurrentSysMode)
{
NAS_ERROR_LOG(WUEPS_PID_RABM,
"NAS_RABM_ProcSysSrvChgInd: Invalid System Mode.");
return;
}
/* Get the processing function table for the current system mode */
pstProcFuncTbl = NAS_RABM_GetProFuncTblAddr(enCurrentSysMode);
if (VOS_NULL_PTR != pstProcFuncTbl)
{
if (VOS_NULL_PTR != pstProcFuncTbl->pTaskEntry)
{
/* Call the handler for the corresponding mode */
pstProcFuncTbl->pTaskEntry(pMsg);
}
else
{
NAS_WARNING_LOG(WUEPS_PID_RABM,
"NAS_RABM_ProcessCommMsg: TaskEntry is not registered.");
}
}
else
{
NAS_ERROR_LOG(WUEPS_PID_RABM,
"NAS_RABM_ProcessCommMsg: Msg is received in wrong system mode.");
}
}
VOS_UINT32 NAS_RABM_IsPdpActiveInWCDMA(VOS_VOID)
{
VOS_UINT32 ulEntId;
VOS_UINT8 ucState;
for (ulEntId = 0; ulEntId < RABM_3G_PS_MAX_ENT_NUM; ulEntId++)
{
ucState = NAS_RABM_GetWPsEntState(ulEntId);
if ( (RABM_NSAPI_ACTIVE_NO_RAB == ucState)
|| (RABM_NSAPI_ACTIVE_WITH_RAB == ucState)
|| (RABM_DATA_TRANSFER_STOP == ucState) )
{
return VOS_TRUE;
}
}
return VOS_FALSE;
}
VOS_VOID NAS_RABM_ConvertAtFasTDormTypeToWrrFasTDormType(
AT_RABM_FASTDORM_OPERATION_ENUM_UINT32 enAtFastDormType,
RRC_FASTDORM_START_TYPE_ENUM_UINT32 *penWrrFastDormType
)
{
switch(enAtFastDormType)
{
case AT_RABM_FASTDORM_START_FD_ONLY:
*penWrrFastDormType = RRC_FASTDORM_START_TYPE_FD;
break;
case AT_RABM_FASTDORM_START_ASCR_ONLY:
*penWrrFastDormType = RRC_FASTDORM_START_TYPE_ASCR;
break;
case AT_RABM_FASTDORM_START_FD_ASCR:
*penWrrFastDormType = RRC_FASTDORM_START_TYPE_FD_ASCR;
break;
default:
NAS_WARNING_LOG(WUEPS_PID_RABM,
"NAS_RABM_ConvertAtFasTDormTypeToWrrFasTDormType: Wrong Type.");
break;
}
}
VOS_VOID NAS_RABM_RcvSetFastDormParaReq(
AT_RABM_SET_FASTDORM_PARA_REQ_STRU *pstFastDormPara
)
{
NAS_RABM_NVIM_FASTDORM_ENABLE_FLG_STRU stEnableFlg;
VOS_UINT32 ulLength;
ulLength = 0;
stEnableFlg.ucEnableFlag = VOS_FALSE;
/* If reading NV fails, treat fast dormancy as not allowed */
NV_GetLength(en_NV_Item_Fastdorm_Enable_Flag, &ulLength);
if (NV_OK != NV_Read(en_NV_Item_Fastdorm_Enable_Flag, &stEnableFlg,
ulLength))
{
NAS_WARNING_LOG(WUEPS_PID_RABM, "NAS_RABM_RcvSetFastDormParaReq:WARNING:NV_Read faild!");
}
/* Fast dormancy is not allowed */
if (VOS_FALSE == stEnableFlg.ucEnableFlag)
{
/* Reply ID_RABM_AT_FASTDORM_START_CNF to AT */
NAS_RABM_SndAtSetFastDormParaCnf(pstFastDormPara->usClientId,
pstFastDormPara->ucOpId,
AT_RABM_PARA_SET_RSLT_FAIL);
return;
}
if (AT_RABM_FASTDORM_STOP_FD_ASCR == pstFastDormPara->stFastDormPara.enFastDormOperationType)
{
/* Call the handler to stop FAST DORMANCY */
NAS_RABM_RcvAtFastDormStopReq(pstFastDormPara);
}
else
{
/* Call the handler to start or configure FAST DORMANCY */
NAS_RABM_RcvAtFastDormStartReq(pstFastDormPara);
}
}
VOS_VOID NAS_RABM_RcvGetFastDormParaReq(
AT_RABM_QRY_FASTDORM_PARA_REQ_STRU *pstFastDormPara
)
{
NAS_RABM_NVIM_FASTDORM_ENABLE_FLG_STRU stEnableFlag;
VOS_UINT32 ulLength;
ulLength = 0;
stEnableFlag.ucEnableFlag = VOS_FALSE;
/* If reading NV fails, treat fast dormancy as not allowed */
NV_GetLength(en_NV_Item_Fastdorm_Enable_Flag, &ulLength);
if (NV_OK != NV_Read(en_NV_Item_Fastdorm_Enable_Flag, &stEnableFlag,
ulLength))
{
NAS_WARNING_LOG(WUEPS_PID_RABM, "NAS_RABM_RcvGetFastDormParaReq:WARNING:NV_Read faild!");
}
/* Reply with the query result */
NAS_RABM_SndAtQryFastDormParaCnf(pstFastDormPara->usClientId,
pstFastDormPara->ucOpId,
stEnableFlag.ucEnableFlag);
}
VOS_VOID NAS_RABM_SuspendFastDorm(VOS_VOID)
{
GMM_RABM_NET_RAT_ENUM_UINT32 enCurrentSysMode;
/* If currently in NAS_RABM_FASTDORM_RUNNING state and on a W network, send a stop
   request to WRR to stop the FAST DORMANCY feature */
/* Get the current system mode */
enCurrentSysMode = NAS_RABM_GetCurrentSysMode();
if ((NAS_RABM_FASTDORM_RUNNING == NAS_RABM_GetCurrFastDormStatus())
&& (NAS_MML_NET_RAT_TYPE_WCDMA == enCurrentSysMode))
{
/* Send RRRABM_FASTDORM_STOP_REQ to WAS */
NAS_RABM_SndWasFastDormStopReq();
}
/* If not currently in NAS_RABM_FASTDORM_STOP state, suspend the current operation and
   enter the SUSPEND state */
if (NAS_RABM_FASTDORM_STOP != NAS_RABM_GetCurrFastDormStatus())
{
/* Reset the no-traffic duration counter to 0 */
NAS_RABM_SetFastDormCurrNoFluxCntValue(0);
/* Stop timer RABM_TIMER_FASTDORM_FLUX_DETECT */
/* Stop timer RABM_TIMER_FASTDORM_RETRY */
/* Stop timer RABM_TIMER_FASTDORM_WAIT_GMM_QRY_RESULT */
NAS_RabmStopTimer(RABM_TIMER_NAME_FD_FLUX_DETECT,RABM_TIMER_FASTDORM_FLUX_DETECT);
NAS_RabmStopTimer(RABM_TIMER_NAME_FD_RETRY,RABM_TIMER_FASTDORM_RETRY);
NAS_RabmStopTimer(RABM_TIMER_NAME_COMMON,RABM_TIMER_FASTDORM_WAIT_GMM_QRY_RESULT);
/* Set the state to NAS_RABM_FASTDORM_SUSPEND */
NAS_RABM_SetCurrFastDormStatus(NAS_RABM_FASTDORM_SUSPEND);
NAS_RABM_SndOmFastdormStatus();
}
/* In STOP state FD has not been started; abort the release-RRC procedure */
/* When the FD feature is enabled, the FD processing flow takes precedence */
if ((NAS_RABM_FASTDORM_STOP == NAS_RABM_GetCurrFastDormStatus())
&& (VOS_TRUE == NAS_RABM_GET_FD_REL_RRC_EXEC_FLG()))
{
NAS_RABM_CLR_FD_REL_RRC_EXEC_FLG();
NAS_RABM_SndWasFastDormStopReq();
}
}
VOS_VOID NAS_RABM_ResumeFastDorm(VOS_VOID)
{
/* After an inter-RAT change, if a PDP context is active, start traffic detection and
   prepare to enter FAST DORMANCY; otherwise remain in the SUSPEND state */
if (VOS_TRUE == NAS_RABM_IsPdpActiveInWCDMA())
{
/* If currently in NAS_RABM_FASTDORM_SUSPEND state, FAST DORMANCY was started earlier */
if (NAS_RABM_FASTDORM_SUSPEND == NAS_RABM_GetCurrFastDormStatus())
{
/* Set the state to NAS_RABM_FASTDORM_INIT_DETECT */
NAS_RABM_SetCurrFastDormStatus(NAS_RABM_FASTDORM_INIT_DETECT);
NAS_RABM_SndOmFastdormStatus();
/* Clear the FD data counters */
NAS_RABM_ClrFastDormUlDataCnt();
NAS_RABM_ClrFastDormDlDataCnt();
/* Start the periodic traffic detection timer */
NAS_RabmStartTimer( RABM_TIMER_NAME_FD_FLUX_DETECT, RABM_TIMER_FASTDORM_FLUX_DETECT );
}
}
}
VOS_VOID NAS_RABM_SysModeChgProcFastDorm(
GMM_RABM_NET_RAT_ENUM_UINT32 enOldSysMode,
GMM_RABM_NET_RAT_ENUM_UINT32 enNewSysMode
)
{
/* On a system change from W mode to another mode, suspend FAST DORMANCY */
if ( (NAS_MML_NET_RAT_TYPE_WCDMA == enOldSysMode)
&& (NAS_MML_NET_RAT_TYPE_WCDMA != enNewSysMode)
&& (NAS_MML_NET_RAT_TYPE_BUTT != enNewSysMode))
{
NAS_RABM_SuspendFastDorm();
}
/* On a system change from another mode to W mode, resume FAST DORMANCY */
if ( (NAS_MML_NET_RAT_TYPE_WCDMA != enOldSysMode)
&& (NAS_MML_NET_RAT_TYPE_WCDMA == enNewSysMode) )
{
NAS_RABM_ResumeFastDorm();
}
}
VOS_VOID NAS_RABM_RcvAtFastDormStartReq(
AT_RABM_SET_FASTDORM_PARA_REQ_STRU *pstFastDormPara
)
{
GMM_RABM_NET_RAT_ENUM_UINT32 enCurrentSysMode;
/* Get the current network */
enCurrentSysMode = NAS_RABM_GetCurrentSysMode();
/* Cache the parameters */
NAS_RABM_SetFastDormOperationType(pstFastDormPara->stFastDormPara.enFastDormOperationType);
NAS_RABM_SetFastDormUserDefNoFluxCntValue(pstFastDormPara->stFastDormPara.ulTimeLen);
/* FAST DORMANCY is a W-only feature; if not on a W network, set the state to SUSPEND */
if (NAS_MML_NET_RAT_TYPE_WCDMA != enCurrentSysMode)
{
/* Set the state to NAS_RABM_FASTDORM_SUSPEND */
NAS_RABM_SetCurrFastDormStatus(NAS_RABM_FASTDORM_SUSPEND);
}
else
{
/* If no PDP context is active, always set the NAS_RABM_FASTDORM_SUSPEND state; if a PDP
   context is already active, handle according to the current state */
if (VOS_TRUE == NAS_RABM_IsPdpActiveInWCDMA())
{
/* Currently in NAS_RABM_FASTDORM_INIT_DETECT, so FAST DORMANCY was started earlier */
if (NAS_RABM_FASTDORM_INIT_DETECT == NAS_RABM_GetCurrFastDormStatus())
{
/* If the no-traffic duration already exceeds the newly configured threshold,
   FAST DORMANCY can be initiated */
if (NAS_RABM_GetFastDormUserDefNoFluxCntValue()
< NAS_RABM_GetFastDormCurrNoFluxCntValue())
{
/* Send a query message to GMM */
NAS_RABM_SndGmmMmlProcStatusQryReq(RABM_FASTDORM_ENUM);
/* Reset the no-traffic duration counter to 0 */
NAS_RABM_SetFastDormCurrNoFluxCntValue(0);
/* Set the state to NAS_RABM_FASTDORM_WAIT_GMM_QRY_RESULT */
NAS_RABM_SetCurrFastDormStatus(NAS_RABM_FASTDORM_WAIT_GMM_QRY_RESULT);
/* Start the guard timer for the query result */
NAS_RabmStartTimer( RABM_TIMER_NAME_COMMON, RABM_TIMER_FASTDORM_WAIT_GMM_QRY_RESULT );
}
}
/* if (currently in NAS_RABM_FASTDORM_STOP state) */
if (NAS_RABM_FASTDORM_STOP == NAS_RABM_GetCurrFastDormStatus())
{
/* Set the state to NAS_RABM_FASTDORM_INIT_DETECT */
NAS_RABM_SetCurrFastDormStatus(NAS_RABM_FASTDORM_INIT_DETECT);
/* Clear the FD data counters */
NAS_RABM_ClrFastDormUlDataCnt();
NAS_RABM_ClrFastDormDlDataCnt();
/* Start the periodic traffic detection timer */
NAS_RabmStartTimer( RABM_TIMER_NAME_FD_FLUX_DETECT, RABM_TIMER_FASTDORM_FLUX_DETECT );
}
/* if (currently in NAS_RABM_FASTDORM_RUNNING state) */
if (NAS_RABM_FASTDORM_RUNNING == NAS_RABM_GetCurrFastDormStatus())
{
/* Send the message directly */
NAS_RABM_SndWasFastDormStartReq(RABM_FASTDORM_ENUM);
}
}
else
{
/* Set the state to NAS_RABM_FASTDORM_SUSPEND */
NAS_RABM_SetCurrFastDormStatus(NAS_RABM_FASTDORM_SUSPEND);
}
}
/* Reply ID_RABM_AT_FASTDORM_START_CNF to AT */
NAS_RABM_SndAtSetFastDormParaCnf(pstFastDormPara->usClientId,
pstFastDormPara->ucOpId,
AT_RABM_PARA_SET_RSLT_SUCC);
NAS_RABM_SndOmFastdormStatus();
}
VOS_VOID NAS_RABM_RcvAtFastDormStopReq(
AT_RABM_SET_FASTDORM_PARA_REQ_STRU *pstFastDormPara
)
{
/* If currently in NAS_RABM_FASTDORM_RUNNING state, send a stop request to WRR to stop
   the FAST DORMANCY feature */
if (NAS_RABM_FASTDORM_RUNNING == NAS_RABM_GetCurrFastDormStatus())
{
/* Send RRRABM_FASTDORM_STOP_REQ to WAS */
NAS_RABM_SndWasFastDormStopReq();
}
/* Reset the no-traffic duration counter to 0 */
NAS_RABM_SetFastDormCurrNoFluxCntValue(0);
/* Stop timer RABM_TIMER_FASTDORM_FLUX_DETECT */
/* Stop timer RABM_TIMER_FASTDORM_RETRY */
/* Stop timer RABM_TIMER_FASTDORM_WAIT_GMM_QRY_RESULT */
NAS_RabmStopTimer(RABM_TIMER_NAME_FD_FLUX_DETECT,RABM_TIMER_FASTDORM_FLUX_DETECT);
NAS_RabmStopTimer(RABM_TIMER_NAME_FD_RETRY,RABM_TIMER_FASTDORM_RETRY);
NAS_RabmStopTimer(RABM_TIMER_NAME_COMMON,RABM_TIMER_FASTDORM_WAIT_GMM_QRY_RESULT);
NAS_RABM_SetFastDormOperationType(pstFastDormPara->stFastDormPara.enFastDormOperationType);
/* Set the state to NAS_RABM_FASTDORM_STOP */
NAS_RABM_SetCurrFastDormStatus(NAS_RABM_FASTDORM_STOP);
NAS_RABM_SndOmFastdormStatus();
/* Reply ID_RABM_AT_SET_FASTDORM_PARA_CNF to AT */
NAS_RABM_SndAtSetFastDormParaCnf(pstFastDormPara->usClientId,
pstFastDormPara->ucOpId,
AT_RABM_PARA_SET_RSLT_SUCC);
}
VOS_VOID NAS_RABM_RcvAtMsg(
struct MsgCB *pstMsg
)
{
MSG_HEADER_STRU *pstMsgHeader;
pstMsgHeader = (MSG_HEADER_STRU*)pstMsg;
switch (pstMsgHeader->ulMsgName)
{
case ID_AT_RABM_SET_FASTDORM_PARA_REQ:
NAS_RABM_RcvSetFastDormParaReq((AT_RABM_SET_FASTDORM_PARA_REQ_STRU*)pstMsg);
break;
case ID_AT_RABM_QRY_FASTDORM_PARA_REQ:
NAS_RABM_RcvGetFastDormParaReq((AT_RABM_QRY_FASTDORM_PARA_REQ_STRU*)pstMsg);
break;
case ID_AT_RABM_SET_RELEASE_RRC_REQ:
NAS_RABM_RcvReleaseRrcReq((AT_RABM_RELEASE_RRC_REQ_STRU *)pstMsg);
break;
default:
break;
}
}
VOS_VOID NAS_RABM_RcvWasFastDormInfoInd(
struct MsgCB *pMsg
)
{
VOS_UINT8 ucEntId;
RRRABM_FASTDORM_INFO_IND_STRU *pstInfoInd;
pstInfoInd = (RRRABM_FASTDORM_INFO_IND_STRU*)pMsg;
/* If in NAS_RABM_FASTDORM_RUNNING state, enter the RETRY state */
if ((NAS_RABM_FASTDORM_RUNNING == NAS_RABM_GetCurrFastDormStatus())
&&(RRRABM_FASTDORM_INFO_NEED_RETRY == pstInfoInd->enInfoType))
{
/* Only when a RAB exists does the retry timer need to be started; otherwise simply
   return */
for ( ucEntId = 0; ucEntId < RABM_PS_MAX_ENT_NUM; ucEntId++ )
{
if (g_aRabmPsEnt[ucEntId].ucState == RABM_NSAPI_ACTIVE_WITH_RAB)
{
break;
}
}
if (ucEntId >= RABM_PS_MAX_ENT_NUM)
{
return;
}
/* Currently in NAS_RABM_FASTDORM_RETRY state */;
NAS_RABM_SetCurrFastDormStatus(NAS_RABM_FASTDORM_RETRY);
NAS_RABM_SndOmFastdormStatus();
/* Start the RABM_TIMER_FASTDORM_RETRY timer */
NAS_RabmStartTimer( RABM_TIMER_NAME_FD_RETRY, RABM_TIMER_FASTDORM_RETRY );
}
}
VOS_VOID NAS_RABM_SetFastDormUserDefNoFluxCntValue(
VOS_UINT32 ulNoFluxCnt
)
{
g_stNasRabmFastDormCtx.ulUserDefNoFluxCnt = ulNoFluxCnt;
}
VOS_UINT32 NAS_RABM_GetFastDormUserDefNoFluxCntValue(VOS_VOID)
{
return g_stNasRabmFastDormCtx.ulUserDefNoFluxCnt;
}
VOS_VOID NAS_RABM_SetFastDormCurrNoFluxCntValue(
VOS_UINT32 ulNoFluxCnt
)
{
g_stNasRabmFastDormCtx.ulCurrNoFluxCnt = ulNoFluxCnt;
}
VOS_UINT32 NAS_RABM_GetFastDormCurrNoFluxCntValue(VOS_VOID)
{
return g_stNasRabmFastDormCtx.ulCurrNoFluxCnt;
}
VOS_VOID NAS_RABM_SetFastDormOperationType(
AT_RABM_FASTDORM_OPERATION_ENUM_UINT32 enFastDormOperationType
)
{
g_stNasRabmFastDormCtx.enFastDormOperationType = enFastDormOperationType;
}
AT_RABM_FASTDORM_OPERATION_ENUM_UINT32 NAS_RABM_GetFastDormOperationType(VOS_VOID)
{
return g_stNasRabmFastDormCtx.enFastDormOperationType;
}
NAS_RABM_FASTDORM_STATUS_ENUM_UINT32 NAS_RABM_GetCurrFastDormStatus(VOS_VOID)
{
return g_stNasRabmFastDormCtx.enCurrFastDormStatus;
}
VOS_VOID NAS_RABM_SetCurrFastDormStatus(
NAS_RABM_FASTDORM_STATUS_ENUM_UINT32 enCurrFastDormStatus
)
{
g_stNasRabmFastDormCtx.enCurrFastDormStatus = enCurrFastDormStatus;
}
VOS_VOID NAS_RABM_InitFastDormCtx(VOS_VOID)
{
g_stNasRabmFastDormCtx.enCurrFastDormStatus = NAS_RABM_FASTDORM_STOP;
g_stNasRabmFastDormCtx.ulCurrNoFluxCnt = 0;
g_stNasRabmFastDormCtx.enFastDormOperationType = AT_RABM_FASTDORM_STOP_FD_ASCR;
g_stNasRabmFastDormCtx.ulUserDefNoFluxCnt = 0;
g_stNasRabmFastDormCtx.ulDlDataCnt = 0;
g_stNasRabmFastDormCtx.ulUlDataCnt = 0;
g_stNasRabmFastDormCtx.ulRelRrcExecFlg = VOS_FALSE;
}
VOS_VOID NAS_RABM_IncFastDormUlDataCnt(VOS_VOID)
{
g_stNasRabmFastDormCtx.ulUlDataCnt++;
}
VOS_VOID NAS_RABM_IncFastDormDlDataCnt(VOS_VOID)
{
g_stNasRabmFastDormCtx.ulDlDataCnt++;
}
VOS_UINT32 NAS_RABM_GetFastDormUlDataCnt(VOS_VOID)
{
return g_stNasRabmFastDormCtx.ulUlDataCnt;
}
VOS_UINT32 NAS_RABM_GetFastDormDlDataCnt(VOS_VOID)
{
return g_stNasRabmFastDormCtx.ulDlDataCnt;
}
VOS_VOID NAS_RABM_ClrFastDormUlDataCnt(VOS_VOID)
{
g_stNasRabmFastDormCtx.ulUlDataCnt = 0;
}
VOS_VOID NAS_RABM_ClrFastDormDlDataCnt(VOS_VOID)
{
g_stNasRabmFastDormCtx.ulDlDataCnt = 0;
}
VOS_VOID NAS_RABM_RcvCdsMsg(
struct MsgCB *pstMsg
)
{
MSG_HEADER_STRU *pstMsgHeader;
pstMsgHeader = (MSG_HEADER_STRU*)pstMsg;
switch (pstMsgHeader->ulMsgName)
{
case ID_CDS_RABM_SERVICE_IND:
NAS_RABM_RcvCdsServiceInd((CDS_RABM_SERVICE_IND_STRU *)pstMsg);
break;
default:
break;
}
}
VOS_UINT32 NAS_RABM_IsDataServiceRequestPending(VOS_VOID)
{
if (VOS_TRUE == NAS_RABM_GetRabRsestTimerFlg())
{
return VOS_TRUE;
}
return VOS_FALSE;
}
VOS_UINT32 NAS_RABM_IsRabReestablishPending(VOS_VOID)
{
VOS_UINT8 ucEntId;
if (VOS_TRUE == NAS_RABM_GetRabRsestTimerFlg())
{
return VOS_FALSE;
}
for (ucEntId = 0; ucEntId < RABM_3G_PS_MAX_ENT_NUM; ucEntId++)
{
if (VOS_TRUE == NAS_RABM_GetWPsEntRabReestFlg(ucEntId))
{
return VOS_TRUE;
}
}
return VOS_FALSE;
}
VOS_VOID NAS_RABM_RcvCdsServiceInd(
CDS_RABM_SERVICE_IND_STRU *pstCdsServiceInd
)
{
/* Check that the RABID is valid */
if (!RAB_MAP_RAB_IS_VALID(pstCdsServiceInd->ucRabId))
{
NAS_ERROR_LOG1(WUEPS_PID_RABM,
"NAS_RABM_RcvCdsServiceInd: Invalid RABID. <RABID>", pstCdsServiceInd->ucRabId);
return;
}
switch (NAS_RABM_GetWPsEntState(pstCdsServiceInd->ucRabId - RABM_NSAPI_OFFSET))
{
case RABM_NSAPI_ACTIVE_WITH_RAB:
NAS_NORMAL_LOG1(WUEPS_PID_RABM,
"NAS_RABM_RcvCdsServiceInd: RABID is already active. <RABID>", pstCdsServiceInd->ucRabId);
NAS_RABM_SndCdsSendBuffDataInd(pstCdsServiceInd->ucRabId, CDS_RABM_SEND_BUFF_DATA_ALLOWED_TYPE_SERVICE_SUCC);
break;
case RABM_NSAPI_ACTIVE_NO_RAB:
if (VOS_TRUE == NAS_RABM_GetWPsEntRabReestFlg(pstCdsServiceInd->ucRabId - RABM_NSAPI_OFFSET))
{
NAS_NORMAL_LOG1(WUEPS_PID_RABM,
"NAS_RABM_RcvCdsServiceInd: Reestablish is in process. <RABID>", pstCdsServiceInd->ucRabId);
}
else if (VOS_TRUE == NAS_RABM_IsDataServiceRequestPending())
{
NAS_NORMAL_LOG1(WUEPS_PID_RABM,
"NAS_RABM_RcvCdsServiceInd: Data service request is sent, set reestablish flag. <RABID>", pstCdsServiceInd->ucRabId);
NAS_RABM_SetWPsEntRabReestFlg(pstCdsServiceInd->ucRabId - RABM_NSAPI_OFFSET);
}
else if (VOS_TRUE == NAS_RABM_IsRabReestablishPending())
{
NAS_NORMAL_LOG1(WUEPS_PID_RABM,
"NAS_RABM_RcvCdsServiceInd: Start RB setup protect timer. <RABID>", pstCdsServiceInd->ucRabId);
NAS_RABM_SetWPsEntRabReestFlg(pstCdsServiceInd->ucRabId - RABM_NSAPI_OFFSET);
NAS_RABM_StartReestRabPendingTmr(pstCdsServiceInd->ucRabId);
}
else
{
NAS_NORMAL_LOG1(WUEPS_PID_RABM,
"NAS_RABM_RcvCdsServiceInd: Send data service request. <RABID>", pstCdsServiceInd->ucRabId);
NAS_RABM_SetWPsEntRabReestFlg(pstCdsServiceInd->ucRabId - RABM_NSAPI_OFFSET);
NAS_RABM_SetRabRsestTimerFlg();
RABM_TimerStart(RABM_REESTABLISH_REQ_SENT, RABM_RABM_REEST_PROT_TIMER_LEN);
RABM_SndRabReestReq();
}
break;
default:
NAS_WARNING_LOG1(WUEPS_PID_RABM,
"NAS_RABM_RcvCdsServiceInd: Can't reestablish RAB. <RABID>", pstCdsServiceInd->ucRabId);
NAS_RABM_SndCdsFreeBuffDataInd(pstCdsServiceInd->ucRabId);
break;
}
return;
}
VOS_VOID NAS_RABM_RcvReleaseRrcReq(
AT_RABM_RELEASE_RRC_REQ_STRU *pstMsg
)
{
GMM_RABM_NET_RAT_ENUM_UINT32 enCurrentSysMode;
/* Get the current network */
enCurrentSysMode = NAS_RABM_GetCurrentSysMode();
/* If not on a W network, the RRC connection cannot be released */
if (NAS_MML_NET_RAT_TYPE_WCDMA != enCurrentSysMode)
{
NAS_RABM_SndAtReleaseRrcCnf(pstMsg->usClientId, pstMsg->ucOpId, VOS_ERR);
}
else
{
/* Reply immediately that the RRC release succeeded */
NAS_RABM_SndAtReleaseRrcCnf(pstMsg->usClientId, pstMsg->ucOpId, VOS_OK);
/* Send a query message to GMM */
NAS_RABM_SndGmmMmlProcStatusQryReq(RABM_RELEASE_RRC_ENUM);
}
return;
}
VOS_VOID NAS_RABM_AbortRelRrcProcedure(VOS_VOID)
{
/* Not in WCDMA mode; return directly */
if (NAS_MML_NET_RAT_TYPE_WCDMA != NAS_RABM_GetCurrentSysMode())
{
return;
}
/* When the FD feature is enabled, the FD processing flow takes precedence */
if (NAS_RABM_FASTDORM_STOP != NAS_RABM_GetCurrFastDormStatus())
{
return;
}
if (VOS_TRUE == NAS_RABM_GET_FD_REL_RRC_EXEC_FLG())
{
NAS_RABM_CLR_FD_REL_RRC_EXEC_FLG();
NAS_RABM_SndWasFastDormStopReq();
}
return;
}
#ifdef __cplusplus
#if __cplusplus
}
#endif
#endif
|
gabry3795/android_kernel_huawei_mt7_l09
|
drivers/vendor/hisi/modem/ps/nas/gu/src/Rabm/src/NasRabmMain.c
|
C
|
gpl-2.0
| 40,673
|
/*
* Crossfire -- cooperative multi-player graphical RPG and adventure game
*
* Copyright (c) 1999-2014 Mark Wedel and the Crossfire Development Team
* Copyright (c) 1992 Frank Tore Johansen
*
* Crossfire is free software and comes with ABSOLUTELY NO WARRANTY. You are
* welcome to redistribute it under certain conditions. For details, please
* see COPYING and LICENSE.
*
* The authors can be reached via e-mail at <crossfire@metalforge.org>.
*/
/**
* @file
* This handles triggers, buttons, altars and associated objects.
*/
#include <string.h>
#include "global.h"
#include "sproto.h"
static objectlink *get_button_links(const object *button);
/**
* Trigger every object in an objectlink. This was originally
* part of push_button but has been extracted to make it
* possible to trigger the connected object on a map
* from a plugin without requiring a source object.
* This method will take care of calling EVENT_TRIGGER of all
* eligible objects in the list (see the state parameter).
* @param ol the objectlink to trigger. This can be acquired from the map.
* @param cause the object that caused this path to trigger, may be NULL
* @param state which objects to apply to.
* 0=all objects with FLAG_ACTIVATE_ON_PUSH
* other=all objects with FLAG_ACTIVATE_ON_RELEASE
*/
void trigger_connected(objectlink *ol, object *cause, const int state) {
object *tmp;
for (; ol; ol = ol->next) {
object *part;
if (!ol->ob || ol->ob->count != ol->id) {
LOG( llevError, "Internal error in trigger_connect. No object associated with link id (%u) (cause='%s'.\n",
ol->id, (cause ? cause->name.c_str() : ""));
continue;
}
/* a button link object can become freed when the map is saving. As
* a map is saved, objects are removed and freed, and if an object is
* on top of a button, this function is eventually called. If a map
* is getting moved out of memory, the status of buttons and levers
* probably isn't important - it will get sorted out when the map is
* re-loaded. As such, just exit this function if that is the case.
*/
if (QUERY_FLAG(ol->ob, FLAG_FREED))
return;
tmp = ol->ob;
/* if the criteria aren't appropriate, don't do anything */
if (state && !QUERY_FLAG(tmp, FLAG_ACTIVATE_ON_PUSH))
continue;
if (!state && !QUERY_FLAG(tmp, FLAG_ACTIVATE_ON_RELEASE))
continue;
/*
* (tchize) call the triggers of the activated object.
* tmp = activated object
* op is activator (aka button)
*/
if (execute_event(tmp, EVENT_TRIGGER, cause, NULL, NULL, SCRIPT_FIX_ALL) != 0)
continue;
switch (tmp->type) {
case GATE:
case HOLE:
tmp->value = tmp->stats.maxsp ? !state : state;
tmp->speed = 0.5;
object_update_speed(tmp);
break;
case CF_HANDLE:
SET_ANIMATION(tmp, (tmp->value = tmp->stats.maxsp ? !state : state));
object_update(tmp, UP_OBJ_FACE);
break;
case SIGN:
if (!tmp->stats.food || tmp->last_eat < tmp->stats.food) {
ext_info_map(NDI_UNIQUE|NDI_NAVY, tmp->map,
MSG_TYPE_SIGN, MSG_SUBTYPE_NONE,
tmp->msg);
if (tmp->stats.food)
tmp->last_eat++;
}
break;
case ALTAR:
tmp->value = 1;
SET_ANIMATION(tmp, tmp->value);
object_update(tmp, UP_OBJ_FACE);
break;
case BUTTON:
case PEDESTAL:
tmp->value = state;
SET_ANIMATION(tmp, tmp->value);
object_update(tmp, UP_OBJ_FACE);
break;
case TIMED_GATE:
for (part = tmp; tmp != NULL; tmp = tmp->more) {
part->speed = tmp->arch->clone.speed;
part->value = tmp->arch->clone.value;
part->stats.sp = 1;
part->stats.hp = tmp->stats.maxhp;
object_update_speed(part);
}
break;
case DIRECTOR:
case FIREWALL:
if (!QUERY_FLAG(tmp, FLAG_ANIMATE) && tmp->type == FIREWALL)
move_firewall(tmp);
else {
if ((tmp->stats.sp += tmp->stats.maxsp) > 8) /* next direction */
tmp->stats.sp = ((tmp->stats.sp-1)%8)+1;
animate_turning(tmp);
}
break;
default:
ob_trigger(tmp, cause, state);
}
}
}
/**
* Push the specified object. This can affect other buttons/gates/handles
* altars/pedestals/holes in the whole map.
* Changed the routine to loop through _all_ linked objects.
* Better hurry with that linked list...
* @param op
* object to push.
*/
void push_button(object *op) {
/* LOG(llevDebug, "push_button: %s (%d)\n", op->name, op->count); */
trigger_connected(get_button_links(op), op, op->value);
}
/**
* Updates everything connected with the button op.
* After changing the state of a button, this function must be called
* to make sure that all gates and other buttons connected to the
* button reacts to the (eventual) change of state.
* @param op
* object to update.
*/
void update_button(object *op) {
object *tmp, *head;
int tot, any_down = 0, old_value = op->value;
objectlink *ol;
/* LOG(llevDebug, "update_button: %s (%d)\n", op->name, op->count); */
for (ol = get_button_links(op); ol; ol = ol->next) {
if (!ol->ob || ol->ob->count != ol->id) {
LOG(llevDebug, "Internal error in update_button (%s).\n", op->name.c_str());
continue;
}
tmp = ol->ob;
if (tmp->type == BUTTON) {
tot = 0;
FOR_ABOVE_PREPARE(tmp, ab)
/* Bug? The pedestal code below looks for the head of
* the object, this bit doesn't. I'd think we should check
* for head here also. Maybe it also makes sense to
* make the for ab=tmp->above loop common, and alter
* behaviour based on object within that loop?
*/
/* Basically, if the move_type matches that on what the
* button wants, we count it. The second check is so that
* objects that don't move (swords, etc.) will count. Note that
* this means that more work is needed to make buttons
* that are only triggered by flying objects.
*/
if ((ab->move_type&tmp->move_on) || ab->move_type == 0)
tot += ab->weight*(ab->nrof ? ab->nrof : 1)+ab->carrying;
FOR_ABOVE_FINISH();
tmp->value = (tot >= tmp->weight) ? 1 : 0;
if (tmp->value)
any_down = 1;
}
else if (tmp->type == PEDESTAL) {
tmp->value = 0;
FOR_ABOVE_PREPARE(tmp, ab) {
head = ab->head ? ab->head : ab;
/* Same note regarding move_type for buttons above apply here. */
if ( ((head->move_type&tmp->move_on) || ab->move_type == 0)
&& (!tmp->slaying.compare(head->race)
|| ((head->type == SPECIAL_KEY) && (head->slaying == tmp->slaying))
|| (!tmp->slaying.compare("player") && head->type == PLAYER)) )
{
tmp->value = 1;
}
}
FOR_ABOVE_FINISH();
if (tmp->value) {
any_down = 1;
}
}
}
if (any_down) /* If any other buttons were down, force this to remain down */
op->value = 1;
/* If this button hasn't changed, don't do anything */
if (op->value != old_value) {
SET_ANIMATION(op, op->value);
object_update(op, UP_OBJ_FACE);
push_button(op); /* Make all other buttons the same */
}
}
/**
* Updates every button on the map (by calling update_button() for them).
*/
void update_buttons(mapstruct *m) {
objectlink *ol;
oblinkpt *obp;
for (obp = m->buttons; obp; obp = obp->next)
for (ol = obp->link; ol; ol = ol->next) {
if (!ol->ob || ol->ob->count != ol->id) {
LOG(llevError, "Internal error in update_button (%s (%dx%d):%u, connected %ld).\n",
ol->ob ? ol->ob->name.c_str() : "null",
ol->ob ? ol->ob->x : -1,
ol->ob ? ol->ob->y : -1,
ol->id,
obp->value );
continue;
}
if (ol->ob->type == BUTTON || ol->ob->type == PEDESTAL) {
update_button(ol->ob);
break;
}
}
}
/**
* Toggles the state of specified button.
* @param op
* object to toggle.
*/
void use_trigger(object *op) {
/* Toggle value */
op->value = !op->value;
push_button(op);
}
/**
* Animates one step of object.
* @param op
* object to animate.
* @note
* animate_object() should be used instead of this,
* but it can't handle animations in the 8 directions
* @todo
* check if object is really animated?
*/
void animate_turning(object *op) {
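/* Added note (not in the original source): the animation is assumed to be laid
 * out as 8 equal blocks of NUM_ANIMATIONS(op)/8 frames, one block per facing
 * direction; op->stats.sp-1 selects the block and op->state steps through the
 * frames within it. */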
if (++op->state >= NUM_ANIMATIONS(op)/8)
op->state = 0;
SET_ANIMATION(op, (op->stats.sp-1)*NUM_ANIMATIONS(op)/8+op->state);
object_update(op, UP_OBJ_FACE);
}
#define NROF_SACRIFICE(xyz) ((uint32_t)(xyz)->stats.food)
/**
* Helper function to check if the item matches altar's requested sacrifice.
* The number of objects is not taken into account.
*
* @param altar
* altar we're checking for. Can't be NULL.
* @param sacrifice
* what object to check for. Can't be NULL.
* @return
* 1 if object is suitable for the altar (number not taken into account), 0 else.
*/
static int matches_sacrifice(const object *altar, const object *sacrifice) {
if ( ( QUERY_FLAG(sacrifice, FLAG_ALIVE)
&& object_get_value(altar, "accept_alive").empty() )
|| QUERY_FLAG(sacrifice, FLAG_IS_LINKED)
|| sacrifice->type == PLAYER)
{
return 0;
}
const std::string name = query_base_name(sacrifice, false);
if ( !altar->slaying.compare(sacrifice->arch->name)
|| !altar->slaying.compare(sacrifice->name)
|| !altar->slaying.compare(sacrifice->slaying)
|| !altar->slaying.compare(name)
|| (!altar->slaying.compare("money") && sacrifice->type == MONEY) )
{
return 1;
}
return 0;
}
/**
* Checks whether the altar has enough to sacrifice.
*
* Function put in (0.92.1) so that identify altars won't grab money
* unnecessarily - we can see if there is sufficient money, see if something
* needs to be identified, and then remove money if needed.
*
* 0.93.4: Linked objects (ie, objects that are connected) can not be
* sacrificed. This fixes a bug of trying to put multiple altars/related
* objects on the same space that take the same sacrifice.
*
* The function will now check for all items sitting on the altar, so that the player
* can put various matching but non merging items on the altar.
*
* This function can potentially remove other items, if remove_others is set.
*
* @param altar
* item to which there is a sacrifice
* @param sacrifice
* object that may be sacrificed
* @param remove_others
* if 1, will remove enough items apart sacrifice to compensate for not having enough in sacrifice itself.
* @param[out] toremove
* will contain the nrof of sacrifice to really remove to finish operating. Will be set if not NULL only
* if the function returns 1.
* @return
* 1 if the sacrifice meets the needs of the altar, 0 else
*/
int check_altar_sacrifice(const object *altar, const object *sacrifice, int remove_others, int *toremove) {
uint32_t wanted, rest;
if (!matches_sacrifice(altar, sacrifice))
/* New dropped object doesn't match the altar, other objects already on top are not enough to
* activate altar, else they would have disappeared. */
return 0;
/* Check item is paid for. */
if (QUERY_FLAG(sacrifice, FLAG_UNPAID)) {
return 0;
}
bool money = (altar->slaying.compare("money") == 0);
/* Easy checks: newly dropped object is enough for sacrifice. */
if (money && sacrifice->nrof*sacrifice->value >= NROF_SACRIFICE(altar)) {
if (toremove) {
*toremove = NROF_SACRIFICE(altar)/sacrifice->value;
/* Round up any sacrifices. Altars don't make change either */
if (NROF_SACRIFICE(altar)%sacrifice->value)
(*toremove)++;
}
return 1;
}
if (!money && NROF_SACRIFICE(altar) <= (sacrifice->nrof ? sacrifice->nrof : 1)) {
if (toremove)
*toremove = NROF_SACRIFICE(altar);
return 1;
}
if (money) {
wanted = NROF_SACRIFICE(altar)-sacrifice->nrof*sacrifice->value;
} else {
wanted = NROF_SACRIFICE(altar)-(sacrifice->nrof ? sacrifice->nrof : 1);
}
rest = wanted;
/* Ok, now we check if we got enough with other items.
* We only check items above altar, and not checking again sacrifice.
*/
FOR_ABOVE_PREPARE(altar, tmp) {
if (wanted <= 0)
break;
if (tmp == sacrifice || !matches_sacrifice(altar, tmp))
continue;
if (money)
wanted -= tmp->nrof*tmp->value;
else
wanted -= (tmp->nrof ? tmp->nrof : 1);
} FOR_ABOVE_FINISH();
if (wanted > 0)
/* Not enough value, let's bail out. */
return 0;
/* From there on, we do have enough objects for the altar. */
/* Last dropped object will be totally eaten in any case. */
if (toremove)
*toremove = sacrifice->nrof ? sacrifice->nrof : 1;
if (!remove_others)
return 1;
/* We loop again, this time to remove what we need. */
FOR_ABOVE_PREPARE(altar, tmp) {
if (rest <= 0)
break;
if (tmp == sacrifice || !matches_sacrifice(altar, tmp))
continue;
if (money) {
wanted = tmp->nrof*tmp->value;
if (rest > wanted) {
object_remove(tmp);
rest -= wanted;
} else {
wanted = rest/tmp->value;
if (rest%tmp->value)
wanted++;
object_decrease_nrof(tmp, wanted);
return 1;
}
} else
if (rest > (tmp->nrof ? tmp->nrof : 1)) {
rest -= (tmp->nrof ? tmp->nrof : 1);
object_remove(tmp);
} else {
object_decrease_nrof(tmp, rest);
return 1;
}
} FOR_ABOVE_FINISH();
/* Something went wrong, we'll be nice and accept the sacrifice anyway. */
LOG( llevError, "check_altar_sacrifice on %s: found objects to sacrifice, but couldn't remove them??\n",
altar->map->path.c_str() );
return 1;
}
/**
* Checks if sacrifice was accepted and removes sacrificed
* objects. Might be better to
* call check_altar_sacrifice (above) than depend on the return value,
* since operate_altar will remove the sacrifice also.
*
* If this function returns 1, '*sacrifice' is modified to point to the
* remaining sacrifice, or is set to NULL if the sacrifice was used up.
*
* @param altar
* item to which there is a sacrifice
* @param sacrifice
* object that may be sacrificed
* @return
* 1 if sacrifice was accepted, else 0
*/
int operate_altar(object *altar, object **sacrifice) {
int number;
if (!altar->map) {
LOG(llevError, "BUG: operate_altar(): altar has no map\n");
return 0;
}
if (altar->slaying.empty() || altar->value) {
return 0;
}
if (!check_altar_sacrifice(altar, *sacrifice, 1, &number)) {
return 0;
}
/* check_altar_sacrifice fills in number for us. */
*sacrifice = object_decrease_nrof(*sacrifice, number);
if (!altar->msg.empty()) {
ext_info_map(NDI_BLACK, altar->map, MSG_TYPE_DIALOG, MSG_TYPE_DIALOG_ALTAR, altar->msg);
}
return 1;
}
/**
 * Sets the movement state of a trigger: state 1 (down) activates the connected
 * objects and gives the trigger a speed derived from stats.exp so it will reset
 * later; state 0 (up) activates them again and stops the object.
 * @todo document properly?
 */
static void trigger_move(object *op, int state) { /* 1 down and 0 up */
op->stats.wc = state;
if (state) {
use_trigger(op);
if (op->stats.exp > 0) /* check sanity */
op->speed = 1.0/op->stats.exp;
else
op->speed = 1.0;
object_update_speed(op);
op->speed_left = -1;
} else {
use_trigger(op);
op->speed = 0;
object_update_speed(op);
}
}
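/* Editor's note, illustrative: a trigger with stats.exp == 4 gets speed 0.25
 * (1.0/exp), so it resets after roughly four ticks; a non-positive exp falls
 * back to speed 1.0.
 */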
/**
* @todo document properly
* cause != NULL: something has moved on top of op
*
* cause == NULL: nothing has moved, we have been called from
* animate_trigger().
*
* TRIGGER_ALTAR: Returns 1 if 'cause' was destroyed, 0 if not.
*
* TRIGGER: Returns 1 if handle could be moved, 0 if not.
*
* TRIGGER_BUTTON, TRIGGER_PEDESTAL: Returns 0.
*/
int check_trigger(object *op, object *cause) {
int push = 0, tot = 0;
int in_movement = op->stats.wc || op->speed;
switch (op->type) {
case TRIGGER_BUTTON:
if (op->weight > 0) {
if (cause) {
FOR_ABOVE_PREPARE(op, tmp)
/* Comment reproduced from update_buttons():
 * Basically, if the move_type matches what the
 * button wants, we count it. The second check is so that
* objects that don't move (swords, etc) will count. Note that
* this means that more work is needed to make buttons
* that are only triggered by flying objects.
*/
if ((tmp->move_type&op->move_on) || tmp->move_type == 0) {
tot += tmp->weight*(tmp->nrof ? tmp->nrof : 1)+tmp->carrying;
}
FOR_ABOVE_FINISH();
if (tot >= op->weight)
push = 1;
if (op->stats.ac == push)
return 0;
op->stats.ac = push;
if (NUM_ANIMATIONS(op) > 1) {
SET_ANIMATION(op, push);
object_update(op, UP_OBJ_FACE);
}
if (in_movement || !push)
return 0;
}
trigger_move(op, push);
}
return 0;
case TRIGGER_PEDESTAL:
if (cause) {
FOR_ABOVE_PREPARE(op, tmp) {
object *head = tmp->head ? tmp->head : tmp;
/* See comment in TRIGGER_BUTTON about move_types */
if ( ((head->move_type & op->move_on) || head->move_type == 0)
&& (head->race == op->slaying || (!op->slaying.compare("player") && head->type == PLAYER)))
{
push = 1;
break;
}
}
FOR_ABOVE_FINISH();
if (op->stats.ac == push) {
return 0;
}
op->stats.ac = push;
if (NUM_ANIMATIONS(op) > 1) {
SET_ANIMATION(op, push);
object_update(op, UP_OBJ_FACE);
}
if (in_movement || !push)
return 0;
}
trigger_move(op, push);
return 0;
case TRIGGER_ALTAR:
if (cause) {
if (in_movement)
return 0;
if (operate_altar(op, &cause)) {
if (NUM_ANIMATIONS(op) > 1) {
SET_ANIMATION(op, 1);
object_update(op, UP_OBJ_FACE);
}
if (op->last_sp >= 0) {
trigger_move(op, 1);
if (op->last_sp > 0)
op->last_sp = -op->last_sp;
} else {
/* for trigger altar with last_sp, the ON/OFF
* status (-> +/- value) is "simulated":
*/
op->value = !op->value;
trigger_move(op, 1);
op->last_sp = -op->last_sp;
op->value = !op->value;
}
return cause == NULL;
} else {
return 0;
}
} else {
if (NUM_ANIMATIONS(op) > 1) {
SET_ANIMATION(op, 0);
object_update(op, UP_OBJ_FACE);
}
/* If trigger_altar has "last_sp > 0" set on the map,
* it will push the connected value only once per sacrifice.
* Otherwise (default), the connected value will be
* pushed twice: First by sacrifice, second by reset! -AV
*/
if (!op->last_sp)
trigger_move(op, 0);
else {
op->stats.wc = 0;
op->value = !op->value;
op->speed = 0;
object_update_speed(op);
}
}
return 0;
case TRIGGER:
if (cause) {
if (in_movement)
return 0;
push = 1;
}
if (NUM_ANIMATIONS(op) > 1) {
SET_ANIMATION(op, push);
object_update(op, UP_OBJ_FACE);
}
trigger_move(op, push);
return 1;
default:
LOG(llevDebug, "Unknown trigger type: %s (%d)\n", op->name.c_str(), op->type);
return 0;
}
}
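/* Editor's note, illustrative (hypothetical numbers): a TRIGGER_BUTTON with
 * weight 5000 sums the weight of everything matching on top of it
 * (tmp->weight * nrof + tmp->carrying) and considers itself pushed once the
 * total reaches 5000.
 */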
/**
* Links specified object in the map.
* @param button
* object to link. Must not be NULL.
* @param map
* map we are on. Should not be NULL.
* @param connected
* connection value for the item.
*/
void add_button_link(object *button, mapstruct *map, int connected) {
oblinkpt *obp;
objectlink *ol = get_objectlink();
if (!map) {
LOG(llevError, "Tried to add button-link without map.\n");
free_objectlink(ol);
return;
}
SET_FLAG(button, FLAG_IS_LINKED);
ol->ob = button;
ol->id = button->count;
for (obp = map->buttons; obp && obp->value != connected; obp = obp->next)
;
if (obp) {
ol->next = obp->link;
obp->link = ol;
} else {
obp = get_objectlinkpt();
obp->value = connected;
obp->next = map->buttons;
map->buttons = obp;
obp->link = ol;
}
}
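/* Editor's note: map->buttons is a list of oblinkpt nodes, one per distinct
 * connection value, each holding an objectlink chain of the objects wired to
 * that value; add_button_link() prepends to the matching node or creates a
 * new node at the head of the list.
 */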
/**
* Remove the object from the linked lists of buttons in the map.
* This is only needed by editors.
* @param op
* object to remove. Must be on a map, and linked.
*/
void remove_button_link(object *op) {
oblinkpt *obp;
objectlink **olp, *ol;
if (op->map == NULL) {
LOG(llevError, "remove_button_link() in object without map.\n");
return;
}
if (!QUERY_FLAG(op, FLAG_IS_LINKED)) {
LOG(llevError, "remove_button_linked() in unlinked object.\n");
return;
}
for (obp = op->map->buttons; obp; obp = obp->next)
for (olp = &obp->link; (ol = *olp); olp = &ol->next)
if (ol->ob == op) {
/* LOG(llevDebug, "Removed link %d in button %s and map %s.\n",
obp->value, op->name, op->map->path);
*/
*olp = ol->next;
free(ol);
return;
}
LOG(llevError, "remove_button_linked(): couldn't find object.\n");
CLEAR_FLAG(op, FLAG_IS_LINKED);
}
/**
* Return the first objectlink in the objects linked to this one
* @param button
* object to check. Must not be NULL.
* @return
* ::objectlink for this object, or NULL.
*/
static objectlink *get_button_links(const object *button) {
oblinkpt *obp;
objectlink *ol;
if (!button->map)
return NULL;
for (obp = button->map->buttons; obp; obp = obp->next)
for (ol = obp->link; ol; ol = ol->next)
if (ol->ob == button && ol->id == button->count)
return obp->link;
return NULL;
}
/**
* Returns the first value linked to this button.
* Made as a separate function to increase efficiency
* @param button
* object to check. Must not be NULL.
* @return
* connection value, or 0 if not connected.
*/
int get_button_value(const object *button) {
oblinkpt *obp;
objectlink *ol;
if (!button->map)
return 0;
for (obp = button->map->buttons; obp; obp = obp->next)
for (ol = obp->link; ol; ol = ol->next)
if (ol->ob == button && ol->id == button->count)
return obp->value;
return 0;
}
/**
 * Checks an object and its inventory for a specific item.
 *
 * It will descend through containers to find the object.
 * slaying = match object slaying field
 * race = match object archetype name
 * hp = match object type (except type '0' == PLAYER)
 * title = match object title
* Searching by title only is not recommended, as it can be a rather slow
* operation; use it in combination with archetype or type.
* @param op
* object of which to search inventory
* @param trig
* what to search
* @return
* object that matches, or NULL if none matched.
*/
object *check_inv_recursive(object *op, const object *trig) {
object *ret = NULL;
/* First check the object itself. */
if ( (!trig->stats.hp || (op->type == trig->stats.hp))
&& (!op->slaying.compare(trig->slaying))
&& (trig->race.empty() || !trig->race.compare(op->arch->name))
&& (trig->title.empty() || !op->title.compare(trig->title)) )
{
return op;
}
FOR_INV_PREPARE(op, tmp) {
if (tmp->inv) {
ret = check_inv_recursive(tmp, trig);
if (ret) {
return ret;
}
}
else if ( (!trig->stats.hp || (tmp->type == trig->stats.hp))
&& (!tmp->slaying.compare(trig->slaying))
&& (trig->race.empty() || !trig->race.compare(tmp->arch->name))
&& (trig->title.empty() || !tmp->title.compare(trig->title)) )
{
return tmp;
}
}
FOR_INV_FINISH();
return NULL;
}
/**
 * Searches a player's inventory and, based on a set of conditions,
 * activates the items connected to the trigger square.
 *
 * Monsters can't trigger this square (for now).
 * Values are: last_sp = 1/0 trigger if matching object present/absent
 * last_heal = 1/0 remove/don't remove the matched object when triggered
 * -b.t. (thomas@nomad.astro.psu.edu)
*
* @param op
* object to check. Must be a player.
* @param trig
* trigger object that may be activated.
*/
void check_inv(object *op, object *trig) {
object *match;
if (op->type != PLAYER)
return;
match = check_inv_recursive(op, trig);
if (match && trig->last_sp) {
if (trig->last_heal)
object_decrease_nrof_by_one(match);
use_trigger(trig);
} else if (!match && !trig->last_sp)
use_trigger(trig);
}
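/* Editor's note, illustrative (hypothetical field values): a check_inv trigger
 * with slaying "passkey" and last_sp 1 fires when the player stepping on it
 * carries an object whose slaying is also "passkey"; if last_heal is 1, one
 * item is removed from that object's stack via object_decrease_nrof_by_one().
 */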
/**
 * This does a minimal check of the button link consistency for the given
 * map. All it really does is make sure the object id stored in each link
 * matches the object's actual count.
*
* Will log to error level.
*
* @param map
* map to check.
*/
void verify_button_links(const mapstruct *map) {
oblinkpt *obp;
objectlink *ol;
if (!map) {
return;
}
for (obp = map->buttons; obp; obp = obp->next) {
for (ol = obp->link; ol; ol = ol->next) {
if (ol->id != ol->ob->count) {
LOG(llevError, "verify_button_links: object %s on list is corrupt (%u!=%u)\n",
ol->ob->name.c_str(), ol->id, ol->ob->count);
}
}
}
}
|
tdalman/crossfire
|
common/button.cc
|
C++
|
gpl-2.0
| 27,720
|
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">
<!-- NewPage -->
<html lang="en">
<head>
<!-- Generated by javadoc (version 1.7.0_79) on Wed Jul 15 14:25:17 NZST 2015 -->
<title>All Classes</title>
<meta name="date" content="2015-07-15">
<link rel="stylesheet" type="text/css" href="stylesheet.css" title="Style">
</head>
<body>
<h1 class="bar">All Classes</h1>
<div class="indexContainer">
<ul>
<li><a href="pairtree/Morty.html" title="class in pairtree">Morty</a></li>
<li><a href="pairtree/PairPath.html" title="class in pairtree">PairPath</a></li>
<li><a href="pairtree/PairPathIterator.html" title="class in pairtree">PairPathIterator</a></li>
<li><a href="pairtree/PairTree.html" title="class in pairtree">PairTree</a></li>
<li><a href="pairtree/PairTreeIterator.html" title="class in pairtree">PairTreeIterator</a></li>
<li><a href="pairtree/Shorty.html" title="class in pairtree">Shorty</a></li>
</ul>
</div>
</body>
</html>
|
andrewjw1995/pairtree-java
|
doc/allclasses-noframe.html
|
HTML
|
gpl-2.0
| 988
|
# [Screenshot Captor](https://donationcoder.com/Software/Mouser/screenshotcaptor)
- Version: 4.16.1
- Date: 2015-12-12
|
crowds0urce/source
|
tech/software/capture/screenshot_captor.md
|
Markdown
|
gpl-3.0
| 118
|
// Copyleft 2013 Chris Korda
// This program is free software; you can redistribute it and/or modify it
// under the terms of the GNU General Public License as published by the Free
// Software Foundation; either version 2 of the License, or any later version.
/*
chris korda
revision history:
rev date comments
00 14oct13 initial version
01 23apr14 add tooltip support
02 05aug14 add OnCommandHelp
options property page
*/
#if !defined(AFX_OPTIONSPAGE_H__84776AB1_689B_46EE_84E6_931C1542871D__INCLUDED_)
#define AFX_OPTIONSPAGE_H__84776AB1_689B_46EE_84E6_931C1542871D__INCLUDED_
#if _MSC_VER > 1000
#pragma once
#endif // _MSC_VER > 1000
// OptionsPage.h : header file
//
/////////////////////////////////////////////////////////////////////////////
// COptionsPage dialog
#include "OptionsInfo.h"
class COptionsPage : public CPropertyPage
{
// Construction
public:
COptionsPage(COptionsInfo& Info, UINT nIDTemplate, UINT nIDCaption = 0);
// Overrides
// ClassWizard generated virtual function overrides
//{{AFX_VIRTUAL(COptionsPage)
//}}AFX_VIRTUAL
// Implementation
protected:
// Generated message map functions
//{{AFX_MSG(COptionsPage)
virtual BOOL OnInitDialog();
afx_msg LRESULT OnCommandHelp(WPARAM wParam, LPARAM lParam);
//}}AFX_MSG
afx_msg LRESULT OnKickIdle(WPARAM, LPARAM);
afx_msg BOOL OnToolTipNeedText(UINT id, NMHDR* pNMHDR, LRESULT* pResult);
DECLARE_MESSAGE_MAP()
// Dialog data
//{{AFX_DATA(COptionsPage)
//}}AFX_DATA
// Member data
COptionsInfo& m_oi; // reference to parent's options info
};
//{{AFX_INSERT_LOCATION}}
// Microsoft Visual C++ will insert additional declarations immediately before the previous line.
#endif // !defined(AFX_OPTIONSPAGE_H__84776AB1_689B_46EE_84E6_931C1542871D__INCLUDED_)
|
victimofleisure/ChordEase
|
trunk/OptionsPage.h
|
C
|
gpl-3.0
| 1,807
|
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have purchased from
# Numenta, Inc. a separate commercial license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file exports all NetworkInspectors.
"""
import os
import glob
import nupic
# Import NetworkInspector and NetworkInspectorHandler
from nupic.analysis.inspectors.network.NetworkInspector import *
# Create networkInspectors as a list of all network inspector subclasses
files = [os.path.splitext(os.path.split(x)[1])[0] for x in
glob.glob(os.path.join(os.path.split(__file__)[0], '*.py'))]
files.remove('__init__')
files.remove('NetworkInspector')
#files = [(f, f[:-1]) for f in files if f.endswith('2')]
files = [(f, f) for f in files]
for f in files:
exec('from nupic.analysis.inspectors.network.%s import %s' % (f[0], f[1]))
networkInspectors = map(eval, [f[1] for f in files])
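# Editor's note (hypothetical module name): a sibling file MyInspector.py defining
# a class MyInspector would be picked up by the glob above, imported, and included
# in networkInspectors automatically.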
|
tkaitchuck/nupic
|
py/nupic/analysis/inspectors/network/__init__.py
|
Python
|
gpl-3.0
| 1,690
|
#include <stdio.h>
#include <wiringPi.h>
#include <sys/time.h>
int main()
{
wiringPiSetup();
pinMode(8,OUTPUT);
pinMode(9,INPUT);
int val;
while(1)
{
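/* Editor's note: the lines below send a ~10 microsecond HIGH pulse on pin 8 to
 * trigger the HC-SR04 ranging cycle. */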
digitalWrite(8,LOW);
digitalWrite(8,HIGH);
delayMicroseconds(10);
digitalWrite(8,LOW);
struct timeval start,end;
while(1)
{
val=digitalRead(9);
if(val == HIGH)
{
printf("start\n");
break;
}
else
continue;
}
gettimeofday(&start,NULL);
while(1)
{
val = digitalRead(9);
if(val== LOW)
{
printf("end\n");
break;
}
else
continue;
}
gettimeofday(&end,NULL);
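/* Editor's note: (t2 - t1) is the echo pulse width in microseconds; distance
 * = time * 34000 cm/s / 2, i.e. the speed of sound halved for the round trip. */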
long t1 = start.tv_sec * 1000000 + start.tv_usec;
long t2 = end.tv_sec * 1000000 + end.tv_usec;
float dis = (float)(t2-t1)/1000000*34000/2;
printf("%fcm\n",dis);
delay(2000);
}
return 0;
}
|
GuidengLi/hello
|
hc_sr04.c
|
C
|
gpl-3.0
| 799
|
/*******************************************************************************
* Copyright 2009, 2017 Martin Davis
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.proj4j;
/**
* Signals that a parameter in a CRS specification
* is not currently supported, or unknown.
*
* @author mbdavis
*
*/
public class UnsupportedParameterException extends Proj4jException
{
public UnsupportedParameterException(String message) {
super(message);
}
}
|
iCarto/siga
|
proj4j/src/main/java/org/locationtech/proj4j/UnsupportedParameterException.java
|
Java
|
gpl-3.0
| 993
|
using UnityEngine;
using System.Collections;
namespace Triangle.ItemSystem
{
public class ISEquipmentSlot : IISEquipmentSlot
{
[SerializeField] string _name;
[SerializeField] Sprite _icon;
public string Name
{
get { return _name; }
set { _name = value; }
}
public Sprite Icon
{
get { return _icon; }
set { _icon = value; }
}
}
}
|
zachstratton/Player
|
PlayerGame/Assets/ItemSystem(IS)/Scripts/ISEquipmentSlot.cs
|
C#
|
gpl-3.0
| 460
|
---
layout: politician2
title: kishor kumar
profile:
party: IPP
constituency: Kushi Nagar
state: Uttar Pradesh
education:
level: Graduate
details: professional b.com,llb,allahabad university
photo:
sex:
caste:
religion:
current-office-title:
crime-accusation-instances: 1
date-of-birth: 1970
profession:
networth:
assets: 33,87,500
liabilities:
pan:
twitter:
website:
youtube-interview:
wikipedia:
candidature:
- election: Lok Sabha 2009
myneta-link: http://myneta.info/ls2009/candidate.php?candidate_id=1379
affidavit-link:
expenses-link:
constituency: Kushi Nagar
party: IPP
criminal-cases: 1
assets: 33,87,500
liabilities:
result:
crime-record:
- crime: accused
ipc: 147, 148, 149, 353, 332, 224, 225, 504, 506, 186, 427 IPC
details: "3 P.C.D. ACT"
date: 2014-01-28
version: 0.0.5
tags:
---
##Summary
##Education
{% include "education.html" %}
##Political Career
{% include "political-career.html" %}
##Criminal Record
{% include "criminal-record.html" %}
##Personal Wealth
{% include "personal-wealth.html" %}
##Public Office Track Record
{% include "track-record.html" %}
##References
{% include "references.html" %}
|
vaibhavb/wisevoter
|
site/politicians/_posts/2013-12-18-kishor-kumar.md
|
Markdown
|
gpl-3.0
| 1,264
|
package legacy;
import fbrec.control.Config;
import fbrec.error.TaggingException;
import fbrec.tagging.FbConnector.FbMessage;
import fbrec.tagging.FbConnector.FbStatus;
import fbrec.model.Tag;
import fbrec.tagging.module.Module;
import legacy.TfidfMatrix;
import legacy.TermMatrix.Doc;
import java.util.ArrayList;
import java.util.List;
import org.apache.log4j.Logger;
/**
*
* @author Daniel
*/
public class TfIdfTextModule extends Module {
//data from facebook
private List<FbStatus> statuses;
private List<FbMessage> outbox;
public TfIdfTextModule(double weight, int numResults) {
super(weight, numResults);
}
@Override
protected boolean retrieveData() {
statuses = profile.statuses();
outbox = profile.outbox();
if(statuses == null && outbox == null){
Logger.getLogger(Config.EVENT_LOGGER).warn("no permission to retrieve posts from facebook.");
return false;
}
return true;
}
@Override
protected void generateTags() throws TaggingException{
try {
//vars
List<String> topTerms;
List<Doc> postDocuments;
TfidfMatrix tfidfMatrix;
Tag tag;
double tfidf;
postDocuments = documents(); //get list of documents
tfidfMatrix = new TfidfMatrix(postDocuments); //get tfidf matrix
for(Doc doc : postDocuments){ //iterate through docs to determine top-terms
topTerms = tfidfMatrix.topTerms(doc.ID, 2); //get top-terms
for(String tagText : topTerms){ //add top terms to tag-tree
tfidf = tfidfMatrix.tfidf(doc.ID, tagText);
tag = new Tag(tagText, tfidf, getClass(), "posts");
tags.add(tag);
}
}
} catch (Exception ex) {
throw new TaggingException(ex, this.getClass());
}
}
/**
* Generates a List of documents from the posts-list retrieved from facebook.
* For Use in tf-idf matrix
* @return
*/
private List<Doc> documents(){
List<Doc> result = new ArrayList<Doc>();
int count = 0;
if(statuses != null){
for(FbStatus status: statuses){
result.add(new Doc(count, status.message));
count++;
}
}else{
Logger.getLogger(Config.EVENT_LOGGER).warn("status messages could not be retrieved.");
}
if(outbox != null){
for(FbMessage message: outbox){
result.add(new Doc(count, message.message));
count++;
}
}else{
Logger.getLogger(Config.EVENT_LOGGER).warn("outbox messages could not be retrieved.");
}
return result;
}
}
|
dburgmann/fbRecommender
|
src/java/legacy/TfIdfTextModule.java
|
Java
|
gpl-3.0
| 3,104
|
package org.chaseme.gps;
import android.app.AlertDialog;
import android.app.Service;
import android.content.Context;
import android.content.DialogInterface;
import android.content.Intent;
import android.location.Location;
import android.location.LocationListener;
import android.location.LocationManager;
import android.os.Bundle;
import android.os.IBinder;
import android.provider.Settings;
import android.util.Log;
public class GPS extends Service implements LocationListener {
private final Context mContext;
// flag for GPS status
boolean isGPSEnabled = false;
// flag for network status
boolean isNetworkEnabled = false;
// flag for whether a location could be obtained
boolean canGetLocation = false;
Location location; // location
double latitude; // latitude
double longitude; // longitude
// The minimum distance to change Updates in meters
private static final long MIN_DISTANCE_CHANGE_FOR_UPDATES = 10; // 10 meters
// The minimum time between updates in milliseconds
private static final long MIN_TIME_BW_UPDATES = 1000 * 60 * 1; // 1 minute
// Declaring a Location Manager
protected LocationManager locationManager;
public GPS(Context context) {
this.mContext = context;
getLocation();
}
public Location getLocation() {
try {
locationManager = (LocationManager) mContext
.getSystemService(LOCATION_SERVICE);
// getting GPS status
isGPSEnabled = locationManager
.isProviderEnabled(LocationManager.GPS_PROVIDER);
// getting network status
isNetworkEnabled = locationManager
.isProviderEnabled(LocationManager.NETWORK_PROVIDER);
if (!isGPSEnabled && !isNetworkEnabled) {
// no network provider is enabled
} else {
this.canGetLocation = true;
// First get location from Network Provider
if (isNetworkEnabled) {
locationManager.requestLocationUpdates(
LocationManager.NETWORK_PROVIDER,
MIN_TIME_BW_UPDATES,
MIN_DISTANCE_CHANGE_FOR_UPDATES, this);
Log.d("Network", "Network");
if (locationManager != null) {
location = locationManager
.getLastKnownLocation(LocationManager.NETWORK_PROVIDER);
if (location != null) {
latitude = location.getLatitude();
longitude = location.getLongitude();
}
}
}
// if GPS Enabled get lat/long using GPS Services
if (isGPSEnabled) {
if (location == null) {
locationManager.requestLocationUpdates(
LocationManager.GPS_PROVIDER,
MIN_TIME_BW_UPDATES,
MIN_DISTANCE_CHANGE_FOR_UPDATES, this);
Log.d("GPS Enabled", "GPS Enabled");
if (locationManager != null) {
location = locationManager
.getLastKnownLocation(LocationManager.GPS_PROVIDER);
if (location != null) {
latitude = location.getLatitude();
longitude = location.getLongitude();
}
}
}
}
}
} catch (Exception e) {
e.printStackTrace();
}
return location;
}
/**
* Stop using GPS listener
* Calling this function will stop using GPS in your app
* */
public void stopUsingGPS(){
if(locationManager != null){
locationManager.removeUpdates(GPS.this);
}
}
/**
* Function to get latitude
* */
public double getLatitude(){
if(location != null){
latitude = location.getLatitude();
}
// return latitude
return latitude;
}
/**
* Function to get longitude
* */
public double getLongitude(){
if(location != null){
longitude = location.getLongitude();
}
// return longitude
return longitude;
}
/**
* Function to check GPS/wifi enabled
* @return boolean
* */
public boolean canGetLocation() {
return this.canGetLocation;
}
/**
 * Function to show a settings alert dialog.
 * Pressing the Settings button will launch the location Settings screen.
* */
public void showSettingsAlert(){
AlertDialog.Builder alertDialog = new AlertDialog.Builder(mContext);
// Setting Dialog Title
alertDialog.setTitle("GPS is settings");
// Setting Dialog Message
alertDialog.setMessage("GPS is not enabled. Do you want to go to settings menu?");
// On pressing Settings button
alertDialog.setPositiveButton("Settings", new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog,int which) {
Intent intent = new Intent(Settings.ACTION_LOCATION_SOURCE_SETTINGS);
mContext.startActivity(intent);
}
});
// on pressing cancel button
alertDialog.setNegativeButton("Cancel", new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog, int which) {
dialog.cancel();
}
});
// Showing Alert Message
alertDialog.show();
}
@Override
public void onLocationChanged(Location location) {
}
@Override
public void onProviderDisabled(String provider) {
}
@Override
public void onProviderEnabled(String provider) {
}
@Override
public void onStatusChanged(String provider, int status, Bundle extras) {
}
@Override
public IBinder onBind(Intent arg0) {
return null;
}
}
|
JayHuang/ChaseMe
|
ChaseMe/src/org/chaseme/gps/GPS.java
|
Java
|
gpl-3.0
| 6,302
|
<?php
namespace WinWin\Http\Middleware;
use Illuminate\Cookie\Middleware\EncryptCookies as BaseEncrypter;
class EncryptCookies extends BaseEncrypter
{
/**
* The names of the cookies that should not be encrypted.
*
* @var array
*/
protected $except = [
//
];
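// Editor's note (hypothetical cookie name): cookies listed here by name are left
// unencrypted, e.g. protected $except = ['analytics_visitor_id'];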
}
|
Yafuncl/winwin
|
app/Http/Middleware/EncryptCookies.php
|
PHP
|
gpl-3.0
| 303
|
#include <iostream>
#include <math.h>
using namespace std;
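// Editor's note: for each input pair (b, n), prints the integer a whose n-th power
// is closest to b (ties go to the larger candidate); input ends with "0 0".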
int main()
{
int temp,b,n,a;
float aa;
cin >> b >>n;
while(b!=0&&n!=0){
aa=pow(b,1.0/n);
a=floor(aa);
if((pow(a+1,n)-b)>(b-pow(a,n))){
cout << a <<endl;
}
else{
cout << a+1<<endl;
}
cin >> b >>n;
}
return 0;
}
|
PuzzlesLab/WPC
|
Past Competitions/WPC - 009/B/15453763_jiuntian_B.cpp
|
C++
|
gpl-3.0
| 372
|
namespace System.Net.NetworkInformation
{
public class PhysicalAddress
{
public static readonly PhysicalAddress None;
public PhysicalAddress(byte[] address)
{
throw new NotImplementedException();
}
public override int GetHashCode()
{
throw new NotImplementedException();
}
public override bool Equals(object comparand)
{
throw new NotImplementedException();
}
public override string ToString()
{
throw new NotImplementedException();
}
public byte[] GetAddressBytes()
{
throw new NotImplementedException();
}
public static PhysicalAddress Parse(string address)
{
throw new NotImplementedException();
}
}
}
|
zebraxxl/CIL2Java
|
StdLibs/System/System/Net/NetworkInformation/PhysicalAddress.cs
|
C#
|
gpl-3.0
| 961
|
/*
This script captures a snapshot of disk file I/O.
The sample duration is set by the @Delay parameter.
Aggregate and average values are calculated for each file.
The @DBName parameter can be used to select a single database; otherwise all user dbs and tempdb are included.
The @Drive parameter can be used to choose a single volume.
$Workfile: File_Get_IO_Stats_SnapShot.sql $
$Archive: /SQL/QueryWork/File_Get_IO_Stats_SnapShot.sql $
$Revision: 8 $ $Date: 15-11-25 16:22 $
*/
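-- Editor's example (hypothetical values): to sample only AdventureWorks files on
-- drive E for one minute, set the parameters below as
-- Set @Delay = N'00:01:00'; Set @DbName = N'AdventureWorks'; Set @Drive = N'E';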
If OBJECT_ID('tempdb..#DataStart', 'U') is not null
Drop Table #DataStart;
Go
Set NoCount On;
Declare
@Delay Nchar(9)
, @DbName Nvarchar(50)
, @Drive NChar(1)
, @FileName Nvarchar(128)
;
Set @Delay = N'00:00:30';
Set @DbName = Null; -- Null gets all user dbs + TempDB
Set @Drive = Null; -- Data for all Disks
create Table #DataStart (
database_id int
,file_id int
,sampleMs int
,num_of_reads BigInt
,num_of_bytes_read BigInt
,io_stall_read_ms BigInt
,num_of_writes BigInt
,num_of_bytes_written BigInt
,io_stall_write_ms BigInt
,size_on_disk_bytes BigInt
);
Insert Into #DataStart(
database_id, file_id, sampleMs
,num_of_reads, num_of_bytes_read, io_stall_read_ms
,num_of_writes, num_of_bytes_written, io_stall_write_ms
,size_on_disk_bytes
)
Select
ivfs.database_id, ivfs.file_id, ivfs.sample_ms
,ivfs.num_of_reads, ivfs.num_of_bytes_read, ivfs.io_stall_read_ms
,ivfs.num_of_writes, ivfs.num_of_bytes_written, ivfs.io_stall_write_ms
,ivfs.size_on_disk_bytes
From
sys.dm_io_virtual_file_stats(null, null) as ivfs
;
WaitFor Delay @Delay;
;With
DataEnd As(
Select
ivfs.database_id, ivfs.file_id, ivfs.sample_ms
,ivfs.num_of_reads, ivfs.num_of_bytes_read, ivfs.io_stall_read_ms
,ivfs.num_of_writes, ivfs.num_of_bytes_written, ivfs.io_stall_write_ms
,ivfs.size_on_disk_bytes
From
sys.dm_io_virtual_file_stats(null, null) as ivfs
)
Select
[Drive] = subString(mf.physical_name, 1, 1)
,[Database_Name] = db.name
,[Logical_FName] = mf.name
,[Avg Stall(MS)/Read] = Case when (de.num_of_reads - ds.num_of_reads) = 0 then 0
else (de.io_stall_read_ms - ds.io_stall_read_ms) / (de.num_of_reads - ds.num_of_reads)
end
,[Read Bytes/Sec] = ((de.num_of_bytes_read - ds.num_of_bytes_read) / (de.sample_ms - ds.sampleMs)) * 1000.0
,[Reads/Sec] = CAST((CAST((de.num_of_reads - ds.num_of_reads) as FLOAT) / Cast((de.sample_ms - ds.sampleMs) as FLOAT)) * 1000.0 as DECIMAL(18,2))
,[Avg Bytes/Read] = Case when (de.num_of_reads - ds.num_of_reads) = 0 then 0
Else (de.num_of_bytes_read - ds.num_of_bytes_read) / (de.num_of_reads - ds.num_of_reads)
End
,[Avg Stall(MS)/Write] = Case When (de.num_of_writes - ds.num_of_writes) = 0 Then 0
Else (de.io_stall_write_ms - ds.io_stall_write_ms) / (de.num_of_writes - ds.num_of_writes)
end
,[Write Bytes/Sec] = ((de.num_of_bytes_written - ds.num_of_bytes_written) / (de.sample_ms - ds.sampleMs)) * 1000.0
,[Writes/Sec] = CAST((CAST((de.num_of_writes - ds.num_of_writes) as Float) / Cast((de.sample_ms - ds.sampleMs) as Float)) * 1000.0 as DECIMAL(18,2))
,[Avg Bytes/Write] = Case when (de.num_of_writes - ds.num_of_writes) = 0 then 0
Else (de.num_of_bytes_written - ds.num_of_bytes_written) / (de.num_of_writes - ds.num_of_writes)
End
,[File_Id] = mf.file_id
,[DE_SampleTime] = GETDATE()
,[DurationMs] = de.sample_ms - ds.sampleMs
--,[Num Reads] = de.num_of_reads - ds.num_of_reads
--,[Num Bytes Read] = de.num_of_bytes_read - ds.num_of_bytes_read
--,[Read Stall(ms)] = de.io_stall_read_ms - ds.io_stall_read_ms
--,[Num Writes] = de.num_of_writes - ds.num_of_writes
--,[Num Bytes Written] = de.num_of_bytes_written - ds.num_of_bytes_written
--,[Write Stall(ms)] = de.io_stall_write_ms - ds.io_stall_write_ms
--,[physical_name] = mf.physical_name
--,[DE_num_of_reads] = de.num_of_reads
--,[DS_num_of_reads] = ds.num_of_reads
--,[DE_num_of_bytes_read] = de.num_of_bytes_read
--,[DS_num_of_bytes_read] = ds.num_of_bytes_read
--,[DE_io_stall_read_ms] = de.io_stall_read_ms
--,[DS_io_stall_read_ms] = ds.io_stall_read_ms
--,[DE_num_of_writes] = de.num_of_writes
--,[DS_num_of_writes] = ds.num_of_writes
--,[DE_num_of_bytes_written] = de.num_of_bytes_written
--,[DS_num_of_bytes_written] = ds.num_of_bytes_written
--,[DE_io_stall_write_ms] = de.io_stall_write_ms
--,[DS_io_stall_write_ms] = ds.io_stall_write_ms
--,[Curr_size_bytes] = de.size_on_disk_bytes
--,[delta_size_bytes] = de.size_on_disk_bytes - ds.size_on_disk_bytes
From
#DataStart as ds
inner join DataEnd as de
on ds.database_id = de.database_id
and ds.file_id = de.file_id
Inner join sys.master_files as mf
on mf.database_id = ds.database_id
and mf.file_id = ds.file_id
inner join sys.databases as db
on db.database_id = ds.database_id
Where 1 = 1
and 1 = case when ((DB_ID(@DbName) is not null) and (ds.database_id) = DB_ID(@DbName))
then 1 -- check for single database specified.
when ((DB_ID(@DbName) is null) and (ds.database_id = 2)) then 1 -- TempDB
When ((DB_ID(@DbName) is null) and (ds.database_id > 4)) then 1 -- All User DBs
else 0 -- other wise skip it.
End
and 1 = Case When @Drive is null Then 1
When @Drive = subString(mf.physical_name, 1, 1) Then 1
Else 0
End
Order By
[Drive]
,Case when db_name(ds.database_id) = 'tempdb' then 0 else 1 end
--,[Drive]
,[Database_Name]
,[Logical_FName]
;
Return;
|
VaSquirrel/QueryWork
|
File_Get_IO_Stats_SnapShot.sql
|
SQL
|
gpl-3.0
| 5,522
|
/**
* Copyright 2010 Dejan Jovanovic.
*
* This file is part of cutsat.
*
* Cutsat is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Cutsat is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with cutsat. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <vector>
#include "parser/parser.h"
namespace cutsat {
class MpsParser : public Parser {
enum BoundType {
LowerBound,
UpperBound
};
void addConstraint(BoundType type, std::vector<Variable>& vars, std::vector<Integer> coeffNumerators,
std::vector<Integer> coeffDenominators, Integer& cNumerator, Integer& cDenominator);
void addIntegerBound(BoundType type, Variable var, Integer value);
public:
MpsParser(Solver& solver)
: Parser(solver) {}
void parse() throw(ParserException);
};
}
|
dddejan/cutsat
|
src/parser/mps_parser.h
|
C
|
gpl-3.0
| 1,282
|
package org.fhissen.settings;
import java.io.File;
import java.util.HashMap;
import java.util.Iterator;
import org.apache.commons.codec.binary.Base64;
import org.fhissen.utils.FileUtils;
public class KeyvalueFile {
private File f;
private HashMap<String, String> hm = new HashMap<>();
public KeyvalueFile(File file){
f = file;
}
public boolean exists(){
return f != null && f.exists() && f.isFile() && f.length() > 1;
}
public String getPath(){
if(f == null) return null;
try {
return f.getCanonicalPath();
} catch (Exception e) {
return f.getAbsolutePath();
}
}
public String getDirectoryPath(){
if(f == null) return null;
try {
return f.getParentFile().getCanonicalPath();
} catch (Exception e) {
return f.getParentFile().getAbsolutePath();
}
}
public boolean empty(){
return hm.size() == 0;
}
public void load(){
if(!f.exists() || !f.isFile()) return;
String raw = FileUtils.readFile(f.getAbsolutePath()).trim().replace("\r", "");
String[] props = raw.split("\n");
for(String s: props){
if(s == null) continue;
s = s.trim();
if(s.length() <= 1) continue;
int idx = s.indexOf("=");
if(idx <= 0) continue;
String key = s.substring(0, idx);
String val = s.substring(idx + 1, s.length());
hm.put(key.trim(), val.trim());
}
}
public String get(String key){
return hm.get(key);
}
public void set(String key, String val){
hm.put(key, val);
}
public String get(IKeys key){
return hm.get(key.name());
}
public int getInt(IKeys key){
try {
return Integer.parseInt(hm.get(key.name()));
} catch (Exception e) {
return 0;
}
}
public long getLong(IKeys key){
try {
return Long.parseLong(hm.get(key.name()));
} catch (Exception e) {
return 0;
}
}
public void set(IKeys key, String val){
hm.put(key.name(), val);
}
public void set(IKeys key, Object val){
hm.put(key.name(), val.toString());
}
public void setB64(IKeys key, byte[] b){
set(key, Base64.encodeBase64URLSafeString(b));
}
public byte[] getB64(IKeys key){
return Base64.decodeBase64(hm.get(key.name()));
}
public void save(){
if(f == null) return;
StringBuilder sb = new StringBuilder();
Iterator<String> keys = hm.keySet().iterator();
while(keys.hasNext()){
String key = keys.next();
sb.append(key);
sb.append('=');
sb.append(hm.get(key));
sb.append('\r');
sb.append('\n');
}
try {
File tmp = new File(f.getParentFile(), ".tmp");
FileUtils.writeFileUTF8(sb.toString(), tmp.getAbsolutePath());
f.delete();
tmp.renameTo(f);
} catch (Exception e) {
e.printStackTrace();
}
}
}
|
fhissen/CrococryptFile
|
CrococryptFile/common/org/fhissen/settings/KeyvalueFile.java
|
Java
|
gpl-3.0
| 2,800
|
/* Heroes Persist
Product which helps in organizing, broadcasting, celebrating events
Copyright (C) 2014 Sai Pranav
Email: rsaipranav92@gmail.com
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.heroespersist.sports.exception;
/**
* Custom Exception from Service Layer depending on DAO alterations.
*
* @author Sai Pranav
*
*/
public class SportsException extends RuntimeException{
public SportsException(String message){
super(message);
}
}
|
saipranav/Heroes-Persist
|
Sports/src/main/java/com/heroespersist/sports/exception/SportsException.java
|
Java
|
gpl-3.0
| 1,088
|
Ext.namespace("GEOR.Addons");
GEOR.Addons.Annotation = function(map, options) {
this.map = map;
this.options = options;
this.control = null;
this.item = null;
this.window = null;
};
// If required, may extend or compose with Ext.util.Observable
//Ext.extend(GEOR.Addons.Annotation, Ext.util.Observable, {
GEOR.Addons.Annotation.prototype = {
/**
* Method: init
*
* Parameters:
* record - {Ext.data.record} a record with the addon parameters
*/
init: function(record) {
var annotation = new GEOR.Annotation({
map: this.map,
popupOptions: {unpinnable: false, draggable: true}
});
this.window = new Ext.Window({
title: OpenLayers.i18n('annotation.drawing_tools'),
width: 440,
closable: false,
resizable: false,
border: false,
cls: 'annotation',
items: [{
xtype: 'toolbar',
border: false,
items: annotation.actions
}]
});
var lang = OpenLayers.Lang.getCode(),
item = new Ext.menu.CheckItem({
text: record.get("title")[lang] || record.get("title")["en"],
qtip: record.get("description")[lang] || record.get("description")["en"],
//iconCls: "addon-magnifier",
checked: false,
listeners: {
"checkchange": this.onCheckchange,
scope: this
}
});
this.item = item;
return item;
},
/**
* Method: onCheckchange
* Callback on checkbox state changed
*/
onCheckchange: function(item, checked) {
if (checked) {
this.window.show();
this.window.alignTo(
Ext.get(this.map.div),
"t-t",
[0, 5],
true
);
} else {
this.window.hide();
}
},
/**
* Method: destroy
* Called by GEOR_tools when deselecting this addon
*/
destroy: function() {
this.window.hide();
this.control = null;
this.map = null;
}
};
|
pgiraud/georchestra
|
mapfishapp/src/main/webapp/app/addons/annotation/js/main.js
|
JavaScript
|
gpl-3.0
| 2,237
|
base:
scripts/00_getbase_download.sh
scripts/01_getbase_busybox.sh
scripts/02_getbase_kernel_headers.sh
scripts/03_getbase_uclibc.sh
scripts/04_getbase_python.sh
scripts/05_getbase_samba.sh
scripts/06_getbase_dropbear.sh
download:
scripts/00_getbase_download.sh
busybox:
scripts/01_getbase_busybox.sh
kernel_headers:
scripts/02_getbase_kernel_headers.sh
uclibc:
scripts/03_getbase_uclibc.sh
python:
scripts/04_getbase_python.sh
samba:
scripts/05_getbase_samba.sh
dropbear:
scripts/06_getbase_dropbear.sh
kernel:
scripts/30_build_kernel.sh
initrd:
scripts/40_build_initrd.sh
rootfs:
scripts/50_build_rootfs.sh
boot:
scripts/60_get_boot.sh
help:
@echo ' all - Run all. Disabled.'
@echo ' base - Build base (busybox, uclibc, *python, samba)'
@echo ' download - (00 base) Download all src resource files (you need git, hg and cvs)'
@echo ' busybox - (01 base) Build busybox'
@echo ' kernel_headers - (02 base) Download kernel src and install headers on target_linux_headers'
@echo ' uclibc - (03 base) Build uclibc libc libraries'
@echo ' python - (04 base) Build python, install on buildroot instead target'
@echo ' samba - (05 base) Build samba'
@echo ' dropbear - (06 base) Build dropbear'
@echo ' kernel - (30) Download linux src and build kernel/modules'
@echo ' initrd - (40) Create an initrd.gz'
@echo ' rootfs - (50) compress target with squashfs'
@echo ' boot - (60) Download boot files'
@echo ' help - This'
@echo ' '
@echo '*Python is built because samba requires it. No python binary or libs are added to the target'
|
NachE/PiNAS
|
Makefile
|
Makefile
|
gpl-3.0
| 1,713
|